VITESS_THROTTLER
@@ -3407,6 +3407,14 @@ alter_statement:
Type: ForceCutOverAllMigrationType,
}
}
+| ALTER comment_opt VITESS_MIGRATION STRING CUTOVER_THRESHOLD STRING
+ {
+ $$ = &AlterMigration{
+ Type: SetCutOverThresholdMigrationType,
+ UUID: string($4),
+ Threshold: $6,
+ }
+ }
partitions_options_opt:
{
@@ -8301,6 +8309,7 @@ non_reserved_keyword:
| COUNT %prec FUNCTION_CALL_NON_KEYWORD
| CSV
| CURRENT
+| CUTOVER_THRESHOLD
| DATA
| DATE %prec STRING_TYPE_PREFIX_NON_KEYWORD
| DATE_ADD %prec FUNCTION_CALL_NON_KEYWORD
diff --git a/go/vt/vitessdriver/convert.go b/go/vt/vitessdriver/convert.go
index 7ba95db4147..aa8bcedc7ee 100644
--- a/go/vt/vitessdriver/convert.go
+++ b/go/vt/vitessdriver/convert.go
@@ -43,10 +43,8 @@ func (cv *converter) ToNative(v sqltypes.Value) (any, error) {
return v.ToUint64()
case v.IsFloat():
return v.ToFloat64()
- case v.Type() == sqltypes.Datetime, v.Type() == sqltypes.Timestamp:
- return datetimeToNative(v, cv.location)
- case v.Type() == sqltypes.Date:
- return dateToNative(v, cv.location)
+ case v.Type() == sqltypes.Datetime, v.Type() == sqltypes.Timestamp, v.Type() == sqltypes.Date:
+ return v.ToTimeInLocation(cv.location)
case v.IsQuoted() || v.Type() == sqltypes.Bit || v.Type() == sqltypes.Decimal:
out, err = v.ToBytes()
case v.Type() == sqltypes.Expression:
diff --git a/go/vt/vitessdriver/time.go b/go/vt/vitessdriver/time.go
index 70ec2d679ae..c6526197d9d 100644
--- a/go/vt/vitessdriver/time.go
+++ b/go/vt/vitessdriver/time.go
@@ -17,83 +17,12 @@ limitations under the License.
package vitessdriver
import (
- "errors"
"time"
"vitess.io/vitess/go/sqltypes"
)
-// ErrInvalidTime is returned when we fail to parse a datetime
-// string from MySQL. This should never happen unless things are
-// seriously messed up.
-var ErrInvalidTime = errors.New("invalid MySQL time string")
-
var isoTimeFormat = "2006-01-02 15:04:05.999999"
-var isoNullTime = "0000-00-00 00:00:00.000000"
-var isoTimeLength = len(isoTimeFormat)
-
-// parseISOTime pases a time string in MySQL's textual datetime format.
-// This is very similar to ISO8601, with some differences:
-//
-// - There is no T separator between the date and time sections;
-// a space is used instead.
-// - There is never a timezone section in the string, as these datetimes
-// are not timezone-aware. There isn't a Z value for UTC times for
-// the same reason.
-//
-// Note that this function can handle both DATE (which should _always_ have
-// a length of 10) and DATETIME strings (which have a variable length, 18+
-// depending on the number of decimal sub-second places).
-//
-// Also note that this function handles the case where MySQL returns a NULL
-// time (with a string where all sections are zeroes) by returning a zeroed
-// out time.Time object. NULL time strings are not considered a parsing error.
-//
-// See: isoTimeFormat
-func parseISOTime(tstr string, loc *time.Location, minLen, maxLen int) (t time.Time, err error) {
- tlen := len(tstr)
- if tlen < minLen || tlen > maxLen {
- err = ErrInvalidTime
- return
- }
-
- if tstr == isoNullTime[:tlen] {
- // This is what MySQL would send when the date is NULL,
- // so return an empty time.Time instead.
- // This is not a parsing error
- return
- }
-
- if loc == nil {
- loc = time.UTC
- }
-
- // Since the time format returned from MySQL never has a Timezone
- // section, ParseInLocation will initialize the time.Time struct
- // with the default `loc` we're passing here.
- return time.ParseInLocation(isoTimeFormat[:tlen], tstr, loc)
-}
-
-// datetimeToNative converts a Datetime Value into a time.Time
-func datetimeToNative(v sqltypes.Value, loc *time.Location) (time.Time, error) {
- // Valid format string offsets for a DATETIME
- // |DATETIME |19+
- // |------------------|------|
- // "2006-01-02 15:04:05.999999"
- return parseISOTime(v.ToString(), loc, 19, isoTimeLength)
-}
-
-// dateToNative converts a Date Value into a time.Time.
-// Note that there's no specific type in the Go stdlib to represent
-// dates without time components, so the returned Time will have
-// their hours/mins/seconds zeroed out.
-func dateToNative(v sqltypes.Value, loc *time.Location) (time.Time, error) {
- // Valid format string offsets for a DATE
- // |DATE |10
- // |---------|
- // "2006-01-02 00:00:00.000000"
- return parseISOTime(v.ToString(), loc, 10, 10)
-}
// NewDatetime builds a Datetime Value
func NewDatetime(t time.Time, defaultLoc *time.Location) sqltypes.Value {
diff --git a/go/vt/vitessdriver/time_test.go b/go/vt/vitessdriver/time_test.go
deleted file mode 100644
index 949d8f43354..00000000000
--- a/go/vt/vitessdriver/time_test.go
+++ /dev/null
@@ -1,175 +0,0 @@
-/*
-Copyright 2019 The Vitess Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package vitessdriver
-
-import (
- "reflect"
- "testing"
- "time"
-
- "vitess.io/vitess/go/sqltypes"
-)
-
-var randomLocation = time.FixedZone("Nowhere", 3*60*60)
-
-func DatetimeValue(str string) sqltypes.Value {
- return sqltypes.TestValue(sqltypes.Datetime, str)
-}
-
-func DateValue(str string) sqltypes.Value {
- return sqltypes.TestValue(sqltypes.Date, str)
-}
-
-func TestDatetimeToNative(t *testing.T) {
-
- tcases := []struct {
- val sqltypes.Value
- loc *time.Location
- out time.Time
- err bool
- }{{
- val: DatetimeValue("1899-08-24 17:20:00"),
- out: time.Date(1899, 8, 24, 17, 20, 0, 0, time.UTC),
- }, {
- val: DatetimeValue("1952-03-11 01:02:03"),
- loc: time.Local,
- out: time.Date(1952, 3, 11, 1, 2, 3, 0, time.Local),
- }, {
- val: DatetimeValue("1952-03-11 01:02:03"),
- loc: randomLocation,
- out: time.Date(1952, 3, 11, 1, 2, 3, 0, randomLocation),
- }, {
- val: DatetimeValue("1952-03-11 01:02:03"),
- loc: time.UTC,
- out: time.Date(1952, 3, 11, 1, 2, 3, 0, time.UTC),
- }, {
- val: DatetimeValue("1899-08-24 17:20:00.000000"),
- out: time.Date(1899, 8, 24, 17, 20, 0, 0, time.UTC),
- }, {
- val: DatetimeValue("1899-08-24 17:20:00.000001"),
- out: time.Date(1899, 8, 24, 17, 20, 0, int(1*time.Microsecond), time.UTC),
- }, {
- val: DatetimeValue("1899-08-24 17:20:00.123456"),
- out: time.Date(1899, 8, 24, 17, 20, 0, int(123456*time.Microsecond), time.UTC),
- }, {
- val: DatetimeValue("1899-08-24 17:20:00.222"),
- out: time.Date(1899, 8, 24, 17, 20, 0, int(222*time.Millisecond), time.UTC),
- }, {
- val: DatetimeValue("1899-08-24 17:20:00.1234567"),
- err: true,
- }, {
- val: DatetimeValue("1899-08-24 17:20:00.1"),
- out: time.Date(1899, 8, 24, 17, 20, 0, int(100*time.Millisecond), time.UTC),
- }, {
- val: DatetimeValue("0000-00-00 00:00:00"),
- out: time.Time{},
- }, {
- val: DatetimeValue("0000-00-00 00:00:00.0"),
- out: time.Time{},
- }, {
- val: DatetimeValue("0000-00-00 00:00:00.000"),
- out: time.Time{},
- }, {
- val: DatetimeValue("0000-00-00 00:00:00.000000"),
- out: time.Time{},
- }, {
- val: DatetimeValue("0000-00-00 00:00:00.0000000"),
- err: true,
- }, {
- val: DatetimeValue("1899-08-24T17:20:00.000000"),
- err: true,
- }, {
- val: DatetimeValue("1899-02-31 17:20:00.000000"),
- err: true,
- }, {
- val: DatetimeValue("1899-08-24 17:20:00."),
- out: time.Date(1899, 8, 24, 17, 20, 0, 0, time.UTC),
- }, {
- val: DatetimeValue("0000-00-00 00:00:00.000001"),
- err: true,
- }, {
- val: DatetimeValue("1899-08-24 17:20:00 +02:00"),
- err: true,
- }, {
- val: DatetimeValue("1899-08-24"),
- err: true,
- }, {
- val: DatetimeValue("This is not a valid timestamp"),
- err: true,
- }}
-
- for _, tcase := range tcases {
- got, err := datetimeToNative(tcase.val, tcase.loc)
- if tcase.err && err == nil {
- t.Errorf("datetimeToNative(%v, %#v) succeeded; expected error", tcase.val, tcase.loc)
- }
- if !tcase.err && err != nil {
- t.Errorf("datetimeToNative(%v, %#v) failed: %v", tcase.val, tcase.loc, err)
- }
- if !reflect.DeepEqual(got, tcase.out) {
- t.Errorf("datetimeToNative(%v, %#v): %v, want %v", tcase.val, tcase.loc, got, tcase.out)
- }
- }
-}
-
-func TestDateToNative(t *testing.T) {
- tcases := []struct {
- val sqltypes.Value
- loc *time.Location
- out time.Time
- err bool
- }{{
- val: DateValue("1899-08-24"),
- out: time.Date(1899, 8, 24, 0, 0, 0, 0, time.UTC),
- }, {
- val: DateValue("1952-03-11"),
- loc: time.Local,
- out: time.Date(1952, 3, 11, 0, 0, 0, 0, time.Local),
- }, {
- val: DateValue("1952-03-11"),
- loc: randomLocation,
- out: time.Date(1952, 3, 11, 0, 0, 0, 0, randomLocation),
- }, {
- val: DateValue("0000-00-00"),
- out: time.Time{},
- }, {
- val: DateValue("1899-02-31"),
- err: true,
- }, {
- val: DateValue("1899-08-24 17:20:00"),
- err: true,
- }, {
- val: DateValue("0000-00-00 00:00:00"),
- err: true,
- }, {
- val: DateValue("This is not a valid timestamp"),
- err: true,
- }}
-
- for _, tcase := range tcases {
- got, err := dateToNative(tcase.val, tcase.loc)
- if tcase.err && err == nil {
- t.Errorf("dateToNative(%v, %#v) succeeded; expected error", tcase.val, tcase.loc)
- }
- if !tcase.err && err != nil {
- t.Errorf("dateToNative(%v, %#v) failed: %v", tcase.val, tcase.loc, err)
- }
- if !reflect.DeepEqual(got, tcase.out) {
- t.Errorf("dateToNative(%v, %#v): %v, want %v", tcase.val, tcase.loc, got, tcase.out)
- }
- }
-}
diff --git a/go/vt/vtadmin/api.go b/go/vt/vtadmin/api.go
index cef8816504a..4f91459d9ed 100644
--- a/go/vt/vtadmin/api.go
+++ b/go/vt/vtadmin/api.go
@@ -59,6 +59,7 @@ import (
"vitess.io/vitess/go/vt/vtadmin/rbac"
"vitess.io/vitess/go/vt/vtadmin/sort"
"vitess.io/vitess/go/vt/vtadmin/vtadminproto"
+ "vitess.io/vitess/go/vt/vtctl/grpcvtctldserver"
"vitess.io/vitess/go/vt/vtctl/workflow"
"vitess.io/vitess/go/vt/vtenv"
"vitess.io/vitess/go/vt/vterrors"
@@ -488,6 +489,31 @@ func (api *API) ApplySchema(ctx context.Context, req *vtadminpb.ApplySchemaReque
return nil, err
}
+ // Parser with default options. New() itself initializes with default MySQL version.
+ parser, err := sqlparser.New(sqlparser.Options{
+ TruncateUILen: 512,
+ TruncateErrLen: 0,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ // Split the sql statement received from request.
+ sqlParts, err := parser.SplitStatementToPieces(req.Sql)
+ if err != nil {
+ return nil, err
+ }
+
+ req.Request.Sql = sqlParts
+
+ // Set the callerID if not empty.
+ if req.CallerId != "" {
+ req.Request.CallerId = &vtrpcpb.CallerID{Principal: req.CallerId}
+ }
+
+ // Set the default wait replicas timeout.
+ req.Request.WaitReplicasTimeout = protoutil.DurationToProto(grpcvtctldserver.DefaultWaitReplicasTimeout)
+
return c.ApplySchema(ctx, req.Request)
}
diff --git a/go/vt/vtadmin/http/schema_migrations.go b/go/vt/vtadmin/http/schema_migrations.go
index e0207989648..3da6026fe9f 100644
--- a/go/vt/vtadmin/http/schema_migrations.go
+++ b/go/vt/vtadmin/http/schema_migrations.go
@@ -34,19 +34,26 @@ func ApplySchema(ctx context.Context, r Request, api *API) *JSONResponse {
decoder := json.NewDecoder(r.Body)
defer r.Body.Close()
- var req vtctldatapb.ApplySchemaRequest
- if err := decoder.Decode(&req); err != nil {
+ var body struct {
+ Sql string `json:"sql"`
+ CallerId string `json:"caller_id"`
+ Request vtctldatapb.ApplySchemaRequest `json:"request"`
+ }
+
+ if err := decoder.Decode(&body); err != nil {
return NewJSONResponse(nil, &errors.BadRequest{
Err: err,
})
}
vars := mux.Vars(r.Request)
- req.Keyspace = vars["keyspace"]
+ body.Request.Keyspace = vars["keyspace"]
resp, err := api.server.ApplySchema(ctx, &vtadminpb.ApplySchemaRequest{
ClusterId: vars["cluster_id"],
- Request: &req,
+ Sql: body.Sql,
+ CallerId: body.CallerId,
+ Request: &body.Request,
})
return NewJSONResponse(resp, err)
diff --git a/go/vt/vtctl/grpcvtctldserver/server.go b/go/vt/vtctl/grpcvtctldserver/server.go
index e280a410e02..c3dc22d21b4 100644
--- a/go/vt/vtctl/grpcvtctldserver/server.go
+++ b/go/vt/vtctl/grpcvtctldserver/server.go
@@ -303,7 +303,9 @@ func (s *VtctldServer) ApplySchema(ctx context.Context, req *vtctldatapb.ApplySc
}
for _, shard := range execResult.SuccessShards {
- resp.RowsAffectedByShard[shard.Shard] = shard.Result.RowsAffected
+ for _, result := range shard.Results {
+ resp.RowsAffectedByShard[shard.Shard] += result.RowsAffected
+ }
}
return resp, err
@@ -2096,14 +2098,12 @@ func (s *VtctldServer) UpdateThrottlerConfig(ctx context.Context, req *vtctldata
throttlerConfig.CheckAsCheckSelf = false
}
if req.ThrottledApp != nil && req.ThrottledApp.Name != "" {
- // TODO(shlomi) in v22: replace the following line with the commented out block
- throttlerConfig.ThrottledApps[req.ThrottledApp.Name] = req.ThrottledApp
- // timeNow := time.Now()
- // if protoutil.TimeFromProto(req.ThrottledApp.ExpiresAt).After(timeNow) {
- // throttlerConfig.ThrottledApps[req.ThrottledApp.Name] = req.ThrottledApp
- // } else {
- // delete(throttlerConfig.ThrottledApps, req.ThrottledApp.Name)
- // }
+ timeNow := time.Now()
+ if protoutil.TimeFromProto(req.ThrottledApp.ExpiresAt).After(timeNow) {
+ throttlerConfig.ThrottledApps[req.ThrottledApp.Name] = req.ThrottledApp
+ } else {
+ delete(throttlerConfig.ThrottledApps, req.ThrottledApp.Name)
+ }
}
return throttlerConfig
}
diff --git a/go/vt/vtctl/grpcvtctldserver/testutil/test_tmclient.go b/go/vt/vtctl/grpcvtctldserver/testutil/test_tmclient.go
index b9c83c3658d..768fae5bff4 100644
--- a/go/vt/vtctl/grpcvtctldserver/testutil/test_tmclient.go
+++ b/go/vt/vtctl/grpcvtctldserver/testutil/test_tmclient.go
@@ -188,6 +188,8 @@ type TabletManagerClient struct {
EventJitter time.Duration
ErrorAfter time.Duration
}
+	// TabletsBackupState records whether a backup is currently running, keyed by tablet alias.
+ TabletsBackupState map[string]bool
// keyed by tablet alias.
ChangeTagsResult map[string]struct {
Response *tabletmanagerdatapb.ChangeTagsResponse
@@ -1080,6 +1082,9 @@ func (fake *TabletManagerClient) ReplicationStatus(ctx context.Context, tablet *
}
if result, ok := fake.ReplicationStatusResults[key]; ok {
+ if _, ok = fake.TabletsBackupState[key]; ok {
+ result.Position.BackupRunning = fake.TabletsBackupState[key]
+ }
return result.Position, result.Error
}
diff --git a/go/vt/vtctl/reparentutil/emergency_reparenter.go b/go/vt/vtctl/reparentutil/emergency_reparenter.go
index ef30f48e8ac..70faf8958c7 100644
--- a/go/vt/vtctl/reparentutil/emergency_reparenter.go
+++ b/go/vt/vtctl/reparentutil/emergency_reparenter.go
@@ -258,7 +258,7 @@ func (erp *EmergencyReparenter) reparentShardLocked(ctx context.Context, ev *eve
// 2. Remove the tablets with the Must_not promote rule
// 3. Remove cross-cell tablets if PreventCrossCellPromotion is specified
// Our final primary candidate MUST belong to this list of valid candidates
- validCandidateTablets, err = erp.filterValidCandidates(validCandidateTablets, stoppedReplicationSnapshot.reachableTablets, prevPrimary, opts)
+ validCandidateTablets, err = erp.filterValidCandidates(validCandidateTablets, stoppedReplicationSnapshot.reachableTablets, stoppedReplicationSnapshot.tabletsBackupState, prevPrimary, opts)
if err != nil {
return err
}
@@ -737,9 +737,12 @@ func (erp *EmergencyReparenter) identifyPrimaryCandidate(
return nil, vterrors.Errorf(vtrpc.Code_INTERNAL, "unreachable - did not find a valid primary candidate even though the valid candidate list was non-empty")
}
-// filterValidCandidates filters valid tablets, keeping only the ones which can successfully be promoted without any constraint failures and can make forward progress on being promoted
-func (erp *EmergencyReparenter) filterValidCandidates(validTablets []*topodatapb.Tablet, tabletsReachable []*topodatapb.Tablet, prevPrimary *topodatapb.Tablet, opts EmergencyReparentOptions) ([]*topodatapb.Tablet, error) {
+// filterValidCandidates filters valid tablets, keeping only the ones which can successfully be promoted without any
+// constraint failures and can make forward progress on being promoted. Candidates currently taking a backup are
+// excluded unless no other valid candidate remains.
+func (erp *EmergencyReparenter) filterValidCandidates(validTablets []*topodatapb.Tablet, tabletsReachable []*topodatapb.Tablet, tabletsBackupState map[string]bool, prevPrimary *topodatapb.Tablet, opts EmergencyReparentOptions) ([]*topodatapb.Tablet, error) {
var restrictedValidTablets []*topodatapb.Tablet
+ var notPreferredValidTablets []*topodatapb.Tablet
for _, tablet := range validTablets {
tabletAliasStr := topoproto.TabletAliasString(tablet.Alias)
// Remove tablets which have MustNot promote rule since they must never be promoted
@@ -766,9 +769,20 @@ func (erp *EmergencyReparenter) filterValidCandidates(validTablets []*topodatapb
}
continue
}
- restrictedValidTablets = append(restrictedValidTablets, tablet)
+ // Put candidates that are running a backup in a separate list
+ backingUp, ok := tabletsBackupState[tabletAliasStr]
+ if ok && backingUp {
+ erp.logger.Infof("Setting %s in list of valid candidates taking a backup", tabletAliasStr)
+ notPreferredValidTablets = append(notPreferredValidTablets, tablet)
+ } else {
+ restrictedValidTablets = append(restrictedValidTablets, tablet)
+ }
+ }
+ if len(restrictedValidTablets) > 0 {
+ return restrictedValidTablets, nil
}
- return restrictedValidTablets, nil
+
+ return notPreferredValidTablets, nil
}
// findErrantGTIDs tries to find errant GTIDs for the valid candidates and returns the updated list of valid candidates.
diff --git a/go/vt/vtctl/reparentutil/emergency_reparenter_test.go b/go/vt/vtctl/reparentutil/emergency_reparenter_test.go
index ea6e768d036..3669c34dc11 100644
--- a/go/vt/vtctl/reparentutil/emergency_reparenter_test.go
+++ b/go/vt/vtctl/reparentutil/emergency_reparenter_test.go
@@ -4463,27 +4463,55 @@ func TestEmergencyReparenter_filterValidCandidates(t *testing.T) {
}
)
allTablets := []*topodatapb.Tablet{primaryTablet, replicaTablet, rdonlyTablet, replicaCrossCellTablet, rdonlyCrossCellTablet}
+ noTabletsTakingBackup := map[string]bool{
+ topoproto.TabletAliasString(primaryTablet.Alias): false, topoproto.TabletAliasString(replicaTablet.Alias): false,
+ topoproto.TabletAliasString(rdonlyTablet.Alias): false, topoproto.TabletAliasString(replicaCrossCellTablet.Alias): false,
+ topoproto.TabletAliasString(rdonlyCrossCellTablet.Alias): false,
+ }
+ replicaTakingBackup := map[string]bool{
+ topoproto.TabletAliasString(primaryTablet.Alias): false, topoproto.TabletAliasString(replicaTablet.Alias): true,
+ topoproto.TabletAliasString(rdonlyTablet.Alias): false, topoproto.TabletAliasString(replicaCrossCellTablet.Alias): false,
+ topoproto.TabletAliasString(rdonlyCrossCellTablet.Alias): false,
+ }
tests := []struct {
- name string
- durability string
- validTablets []*topodatapb.Tablet
- tabletsReachable []*topodatapb.Tablet
- prevPrimary *topodatapb.Tablet
- opts EmergencyReparentOptions
- filteredTablets []*topodatapb.Tablet
- errShouldContain string
+ name string
+ durability string
+ validTablets []*topodatapb.Tablet
+ tabletsReachable []*topodatapb.Tablet
+ tabletsTakingBackup map[string]bool
+ prevPrimary *topodatapb.Tablet
+ opts EmergencyReparentOptions
+ filteredTablets []*topodatapb.Tablet
+ errShouldContain string
}{
{
- name: "filter must not",
- durability: "none",
- validTablets: allTablets,
- tabletsReachable: allTablets,
- filteredTablets: []*topodatapb.Tablet{primaryTablet, replicaTablet, replicaCrossCellTablet},
+ name: "filter must not",
+ durability: "none",
+ validTablets: allTablets,
+ tabletsReachable: allTablets,
+ tabletsTakingBackup: noTabletsTakingBackup,
+ filteredTablets: []*topodatapb.Tablet{primaryTablet, replicaTablet, replicaCrossCellTablet},
}, {
- name: "filter cross cell",
- durability: "none",
- validTablets: allTablets,
- tabletsReachable: allTablets,
+ name: "host taking backup must not be on the list when there are other candidates",
+ durability: "none",
+ validTablets: allTablets,
+ tabletsReachable: []*topodatapb.Tablet{replicaTablet, replicaCrossCellTablet, rdonlyTablet, rdonlyCrossCellTablet},
+ tabletsTakingBackup: replicaTakingBackup,
+ filteredTablets: []*topodatapb.Tablet{replicaCrossCellTablet},
+ }, {
+ name: "host taking backup must be the only one on the list when there are no other candidates",
+ durability: "none",
+ validTablets: allTablets,
+ tabletsReachable: []*topodatapb.Tablet{replicaTablet, rdonlyTablet, rdonlyCrossCellTablet},
+ tabletsTakingBackup: replicaTakingBackup,
+ filteredTablets: []*topodatapb.Tablet{replicaTablet},
+ }, {
+ name: "filter cross cell",
+ durability: "none",
+ validTablets: allTablets,
+ tabletsReachable: allTablets,
+ tabletsTakingBackup: noTabletsTakingBackup,
+
prevPrimary: &topodatapb.Tablet{
Alias: &topodatapb.TabletAlias{
Cell: "zone-1",
@@ -4494,11 +4522,12 @@ func TestEmergencyReparenter_filterValidCandidates(t *testing.T) {
},
filteredTablets: []*topodatapb.Tablet{primaryTablet, replicaTablet},
}, {
- name: "filter establish",
- durability: "cross_cell",
- validTablets: []*topodatapb.Tablet{primaryTablet, replicaTablet},
- tabletsReachable: []*topodatapb.Tablet{primaryTablet, replicaTablet, rdonlyTablet, rdonlyCrossCellTablet},
- filteredTablets: nil,
+ name: "filter establish",
+ durability: "cross_cell",
+ validTablets: []*topodatapb.Tablet{primaryTablet, replicaTablet},
+ tabletsReachable: []*topodatapb.Tablet{primaryTablet, replicaTablet, rdonlyTablet, rdonlyCrossCellTablet},
+ tabletsTakingBackup: noTabletsTakingBackup,
+ filteredTablets: nil,
}, {
name: "filter mixed",
durability: "cross_cell",
@@ -4510,34 +4539,38 @@ func TestEmergencyReparenter_filterValidCandidates(t *testing.T) {
opts: EmergencyReparentOptions{
PreventCrossCellPromotion: true,
},
- validTablets: allTablets,
- tabletsReachable: allTablets,
- filteredTablets: []*topodatapb.Tablet{replicaCrossCellTablet},
+ validTablets: allTablets,
+ tabletsReachable: allTablets,
+ tabletsTakingBackup: noTabletsTakingBackup,
+ filteredTablets: []*topodatapb.Tablet{replicaCrossCellTablet},
}, {
- name: "error - requested primary must not",
- durability: "none",
- validTablets: allTablets,
- tabletsReachable: allTablets,
+ name: "error - requested primary must not",
+ durability: "none",
+ validTablets: allTablets,
+ tabletsReachable: allTablets,
+ tabletsTakingBackup: noTabletsTakingBackup,
opts: EmergencyReparentOptions{
NewPrimaryAlias: rdonlyTablet.Alias,
},
errShouldContain: "proposed primary zone-1-0000000003 has a must not promotion rule",
}, {
- name: "error - requested primary not in same cell",
- durability: "none",
- validTablets: allTablets,
- tabletsReachable: allTablets,
- prevPrimary: primaryTablet,
+ name: "error - requested primary not in same cell",
+ durability: "none",
+ validTablets: allTablets,
+ tabletsReachable: allTablets,
+ tabletsTakingBackup: noTabletsTakingBackup,
+ prevPrimary: primaryTablet,
opts: EmergencyReparentOptions{
PreventCrossCellPromotion: true,
NewPrimaryAlias: replicaCrossCellTablet.Alias,
},
errShouldContain: "proposed primary zone-2-0000000002 is is a different cell as the previous primary",
}, {
- name: "error - requested primary cannot establish",
- durability: "cross_cell",
- validTablets: allTablets,
- tabletsReachable: []*topodatapb.Tablet{primaryTablet, replicaTablet, rdonlyTablet, rdonlyCrossCellTablet},
+ name: "error - requested primary cannot establish",
+ durability: "cross_cell",
+ validTablets: allTablets,
+ tabletsTakingBackup: noTabletsTakingBackup,
+ tabletsReachable: []*topodatapb.Tablet{primaryTablet, replicaTablet, rdonlyTablet, rdonlyCrossCellTablet},
opts: EmergencyReparentOptions{
NewPrimaryAlias: primaryTablet.Alias,
},
@@ -4551,7 +4584,7 @@ func TestEmergencyReparenter_filterValidCandidates(t *testing.T) {
tt.opts.durability = durability
logger := logutil.NewMemoryLogger()
erp := NewEmergencyReparenter(nil, nil, logger)
- tabletList, err := erp.filterValidCandidates(tt.validTablets, tt.tabletsReachable, tt.prevPrimary, tt.opts)
+ tabletList, err := erp.filterValidCandidates(tt.validTablets, tt.tabletsReachable, tt.tabletsTakingBackup, tt.prevPrimary, tt.opts)
if tt.errShouldContain != "" {
require.Error(t, err)
require.Contains(t, err.Error(), tt.errShouldContain)
diff --git a/go/vt/vtctl/reparentutil/reparent_sorter_test.go b/go/vt/vtctl/reparentutil/reparent_sorter_test.go
index ae5d56e884e..87e7b253d54 100644
--- a/go/vt/vtctl/reparentutil/reparent_sorter_test.go
+++ b/go/vt/vtctl/reparentutil/reparent_sorter_test.go
@@ -22,7 +22,6 @@ import (
"github.com/stretchr/testify/require"
"vitess.io/vitess/go/mysql/replication"
-
topodatapb "vitess.io/vitess/go/vt/proto/topodata"
)
diff --git a/go/vt/vtctl/reparentutil/replication.go b/go/vt/vtctl/reparentutil/replication.go
index 8642de84fc7..17dbaeae015 100644
--- a/go/vt/vtctl/reparentutil/replication.go
+++ b/go/vt/vtctl/reparentutil/replication.go
@@ -165,9 +165,10 @@ func SetReplicationSource(ctx context.Context, ts *topo.Server, tmc tmclient.Tab
// replicationSnapshot stores the status maps and the tablets that were reachable
// when trying to stopReplicationAndBuildStatusMaps.
type replicationSnapshot struct {
- statusMap map[string]*replicationdatapb.StopReplicationStatus
- primaryStatusMap map[string]*replicationdatapb.PrimaryStatus
- reachableTablets []*topodatapb.Tablet
+ statusMap map[string]*replicationdatapb.StopReplicationStatus
+ primaryStatusMap map[string]*replicationdatapb.PrimaryStatus
+ reachableTablets []*topodatapb.Tablet
+ tabletsBackupState map[string]bool
}
// stopReplicationAndBuildStatusMaps stops replication on all replicas, then
@@ -193,9 +194,10 @@ func stopReplicationAndBuildStatusMaps(
errChan = make(chan concurrency.Error)
allTablets []*topodatapb.Tablet
res = &replicationSnapshot{
- statusMap: map[string]*replicationdatapb.StopReplicationStatus{},
- primaryStatusMap: map[string]*replicationdatapb.PrimaryStatus{},
- reachableTablets: []*topodatapb.Tablet{},
+ statusMap: map[string]*replicationdatapb.StopReplicationStatus{},
+ primaryStatusMap: map[string]*replicationdatapb.PrimaryStatus{},
+ reachableTablets: []*topodatapb.Tablet{},
+ tabletsBackupState: map[string]bool{},
}
)
@@ -237,6 +239,20 @@ func stopReplicationAndBuildStatusMaps(
err = vterrors.Wrapf(err, "error when getting replication status for alias %v: %v", alias, err)
}
} else {
+ isTakingBackup := false
+
+ // Prefer the most up-to-date information regarding whether the tablet is taking a backup from the After
+ // replication status, but fall back to the Before status if After is nil.
+ if stopReplicationStatus.After != nil {
+ isTakingBackup = stopReplicationStatus.After.BackupRunning
+ } else if stopReplicationStatus.Before != nil {
+ isTakingBackup = stopReplicationStatus.Before.BackupRunning
+ }
+
+ m.Lock()
+ res.tabletsBackupState[alias] = isTakingBackup
+ m.Unlock()
+
var sqlThreadRunning bool
// Check if the sql thread was running for the tablet
sqlThreadRunning, err = SQLThreadWasRunning(stopReplicationStatus)
diff --git a/go/vt/vtctl/reparentutil/replication_test.go b/go/vt/vtctl/reparentutil/replication_test.go
index 4a449b1189c..1b36186efb8 100644
--- a/go/vt/vtctl/reparentutil/replication_test.go
+++ b/go/vt/vtctl/reparentutil/replication_test.go
@@ -283,6 +283,7 @@ func Test_stopReplicationAndBuildStatusMaps(t *testing.T) {
waitForAllTablets bool
expectedStatusMap map[string]*replicationdatapb.StopReplicationStatus
expectedPrimaryStatusMap map[string]*replicationdatapb.PrimaryStatus
+ expectedTakingBackup map[string]bool
expectedTabletsReachable []*topodatapb.Tablet
shouldErr bool
}{
@@ -339,6 +340,7 @@ func Test_stopReplicationAndBuildStatusMaps(t *testing.T) {
After: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-9"},
},
},
+ expectedTakingBackup: map[string]bool{"zone1-0000000100": false, "zone1-0000000101": false},
expectedPrimaryStatusMap: map[string]*replicationdatapb.PrimaryStatus{},
expectedTabletsReachable: []*topodatapb.Tablet{{
Type: topodatapb.TabletType_REPLICA,
@@ -407,6 +409,7 @@ func Test_stopReplicationAndBuildStatusMaps(t *testing.T) {
After: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-9"},
},
},
+ expectedTakingBackup: map[string]bool{"zone1-0000000100": false, "zone1-0000000101": false},
expectedPrimaryStatusMap: map[string]*replicationdatapb.PrimaryStatus{},
expectedTabletsReachable: []*topodatapb.Tablet{{
Type: topodatapb.TabletType_REPLICA,
@@ -491,6 +494,7 @@ func Test_stopReplicationAndBuildStatusMaps(t *testing.T) {
After: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-9"},
},
},
+ expectedTakingBackup: map[string]bool{"zone1-0000000100": false, "zone1-0000000101": false},
expectedPrimaryStatusMap: map[string]*replicationdatapb.PrimaryStatus{},
expectedTabletsReachable: []*topodatapb.Tablet{{
Type: topodatapb.TabletType_REPLICA,
@@ -585,6 +589,7 @@ func Test_stopReplicationAndBuildStatusMaps(t *testing.T) {
After: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-9"},
},
},
+ expectedTakingBackup: map[string]bool{"zone1-0000000100": false, "zone1-0000000101": false},
expectedPrimaryStatusMap: map[string]*replicationdatapb.PrimaryStatus{},
expectedTabletsReachable: []*topodatapb.Tablet{{
Type: topodatapb.TabletType_REPLICA,
@@ -678,6 +683,7 @@ func Test_stopReplicationAndBuildStatusMaps(t *testing.T) {
After: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-9"},
},
},
+ expectedTakingBackup: map[string]bool{"zone1-0000000100": false, "zone1-0000000101": false},
expectedPrimaryStatusMap: map[string]*replicationdatapb.PrimaryStatus{},
expectedTabletsReachable: []*topodatapb.Tablet{{
Type: topodatapb.TabletType_REPLICA,
@@ -750,6 +756,7 @@ func Test_stopReplicationAndBuildStatusMaps(t *testing.T) {
Uid: 101,
},
}},
+ expectedTakingBackup: map[string]bool{"zone1-0000000101": false},
expectedPrimaryStatusMap: map[string]*replicationdatapb.PrimaryStatus{},
shouldErr: false,
},
@@ -811,6 +818,7 @@ func Test_stopReplicationAndBuildStatusMaps(t *testing.T) {
After: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-9"},
},
},
+ expectedTakingBackup: map[string]bool{"zone1-0000000101": false},
expectedPrimaryStatusMap: map[string]*replicationdatapb.PrimaryStatus{
"zone1-0000000100": {
Position: "primary-position-100",
@@ -885,6 +893,7 @@ func Test_stopReplicationAndBuildStatusMaps(t *testing.T) {
After: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-9"},
},
},
+ expectedTakingBackup: map[string]bool{"zone1-0000000101": false},
expectedPrimaryStatusMap: map[string]*replicationdatapb.PrimaryStatus{}, // zone1-0000000100 fails to demote, so does not appear
expectedTabletsReachable: []*topodatapb.Tablet{{
Type: topodatapb.TabletType_REPLICA,
@@ -1008,6 +1017,7 @@ func Test_stopReplicationAndBuildStatusMaps(t *testing.T) {
Uid: 101,
},
}},
+ expectedTakingBackup: map[string]bool{"zone1-0000000101": false},
expectedPrimaryStatusMap: map[string]*replicationdatapb.PrimaryStatus{},
shouldErr: false,
},
@@ -1057,6 +1067,7 @@ func Test_stopReplicationAndBuildStatusMaps(t *testing.T) {
After: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-9"},
},
},
+ expectedTakingBackup: map[string]bool{"zone1-0000000101": false},
expectedPrimaryStatusMap: map[string]*replicationdatapb.PrimaryStatus{},
expectedTabletsReachable: []*topodatapb.Tablet{{
Type: topodatapb.TabletType_REPLICA,
@@ -1252,8 +1263,78 @@ func Test_stopReplicationAndBuildStatusMaps(t *testing.T) {
},
}},
stopReplicasTimeout: time.Minute,
+ expectedTakingBackup: map[string]bool{"zone1-0000000100": false, "zone1-0000000101": false, "zone1-0000000102": false},
expectedPrimaryStatusMap: map[string]*replicationdatapb.PrimaryStatus{},
shouldErr: false,
+ }, {
+ name: "Handle nil replication status After. No segfaulting when determining backup status, and fall back to Before status",
+ durability: "none",
+ tmc: &stopReplicationAndBuildStatusMapsTestTMClient{
+ stopReplicationAndGetStatusResults: map[string]*struct {
+ StopStatus *replicationdatapb.StopReplicationStatus
+ Err error
+ }{
+ "zone1-0000000100": {
+ StopStatus: &replicationdatapb.StopReplicationStatus{
+ Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429100:1-5", IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning), BackupRunning: true},
+ After: nil,
+ },
+ },
+ "zone1-0000000101": {
+ StopStatus: &replicationdatapb.StopReplicationStatus{
+ Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-5", IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning), BackupRunning: true},
+ After: nil,
+ },
+ },
+ },
+ },
+ tabletMap: map[string]*topo.TabletInfo{
+ "zone1-0000000100": {
+ Tablet: &topodatapb.Tablet{
+ Type: topodatapb.TabletType_REPLICA,
+ Alias: &topodatapb.TabletAlias{
+ Cell: "zone1",
+ Uid: 100,
+ },
+ },
+ },
+ "zone1-0000000101": {
+ Tablet: &topodatapb.Tablet{
+ Type: topodatapb.TabletType_REPLICA,
+ Alias: &topodatapb.TabletAlias{
+ Cell: "zone1",
+ Uid: 101,
+ },
+ },
+ },
+ },
+ ignoredTablets: sets.New[string](),
+ expectedStatusMap: map[string]*replicationdatapb.StopReplicationStatus{
+ "zone1-0000000100": {
+ Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429100:1-5", IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning), BackupRunning: true},
+ After: nil,
+ },
+ "zone1-0000000101": {
+ Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-5", IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning), BackupRunning: true},
+ After: nil,
+ },
+ },
+ expectedTakingBackup: map[string]bool{"zone1-0000000100": true, "zone1-0000000101": true},
+ expectedPrimaryStatusMap: map[string]*replicationdatapb.PrimaryStatus{},
+ expectedTabletsReachable: []*topodatapb.Tablet{{
+ Type: topodatapb.TabletType_REPLICA,
+ Alias: &topodatapb.TabletAlias{
+ Cell: "zone1",
+ Uid: 100,
+ },
+ }, {
+ Type: topodatapb.TabletType_REPLICA,
+ Alias: &topodatapb.TabletAlias{
+ Cell: "zone1",
+ Uid: 101,
+ },
+ }},
+ shouldErr: false,
},
}
@@ -1279,6 +1360,7 @@ func Test_stopReplicationAndBuildStatusMaps(t *testing.T) {
for idx, tablet := range res.reachableTablets {
assert.True(t, topoproto.IsTabletInList(tablet, tt.expectedTabletsReachable), "TabletsReached[%d] not found - %s", idx, topoproto.TabletAliasString(tablet.Alias))
}
+ assert.Equal(t, tt.expectedTakingBackup, res.tabletsBackupState)
})
}
}
diff --git a/go/vt/vtctl/reparentutil/util.go b/go/vt/vtctl/reparentutil/util.go
index fd701f8c69b..c4c23e65c7e 100644
--- a/go/vt/vtctl/reparentutil/util.go
+++ b/go/vt/vtctl/reparentutil/util.go
@@ -58,7 +58,8 @@ const (
// cell as the current primary, and to be different from avoidPrimaryAlias. The
// tablet with the most advanced replication position is chosen to minimize the
// amount of time spent catching up with the current primary. Further ties are
-// broken by the durability rules.
+// broken by the durability rules. Tablets taking backups are excluded from
+// consideration.
// Note that the search for the most advanced replication position will race
// with transactions being executed on the current primary, so when all tablets
// are at roughly the same position, then the choice of new primary-elect will
@@ -126,13 +127,17 @@ func ElectNewPrimary(
tb := tablet
errorGroup.Go(func() error {
// find and store the positions for the tablet
- pos, replLag, err := findPositionAndLagForTablet(groupCtx, tb, logger, tmc, opts.WaitReplicasTimeout)
+ pos, replLag, takingBackup, err := findTabletPositionLagBackupStatus(groupCtx, tb, logger, tmc, opts.WaitReplicasTimeout)
mu.Lock()
defer mu.Unlock()
if err == nil && (opts.TolerableReplLag == 0 || opts.TolerableReplLag >= replLag) {
- validTablets = append(validTablets, tb)
- tabletPositions = append(tabletPositions, pos)
- innodbBufferPool = append(innodbBufferPool, innodbBufferPoolData[topoproto.TabletAliasString(tb.Alias)])
+ if takingBackup {
+ reasonsToInvalidate.WriteString(fmt.Sprintf("\n%v is taking a backup", topoproto.TabletAliasString(tablet.Alias)))
+ } else {
+ validTablets = append(validTablets, tb)
+ tabletPositions = append(tabletPositions, pos)
+ innodbBufferPool = append(innodbBufferPool, innodbBufferPoolData[topoproto.TabletAliasString(tb.Alias)])
+ }
} else {
reasonsToInvalidate.WriteString(fmt.Sprintf("\n%v has %v replication lag which is more than the tolerable amount", topoproto.TabletAliasString(tablet.Alias), replLag))
}
@@ -150,7 +155,7 @@ func ElectNewPrimary(
return nil, vterrors.Errorf(vtrpc.Code_INTERNAL, "cannot find a tablet to reparent to%v", reasonsToInvalidate.String())
}
- // sort the tablets for finding the best primary
+ // sort preferred tablets for finding the best primary
err = sortTabletsForReparent(validTablets, tabletPositions, innodbBufferPool, opts.durability)
if err != nil {
return nil, err
@@ -159,9 +164,9 @@ func ElectNewPrimary(
return validTablets[0].Alias, nil
}
-// findPositionAndLagForTablet processes the replication position and lag for a single tablet and
+// findTabletPositionLagBackupStatus processes the replication position and lag for a single tablet and
// returns it. It is safe to call from multiple goroutines.
-func findPositionAndLagForTablet(ctx context.Context, tablet *topodatapb.Tablet, logger logutil.Logger, tmc tmclient.TabletManagerClient, waitTimeout time.Duration) (replication.Position, time.Duration, error) {
+func findTabletPositionLagBackupStatus(ctx context.Context, tablet *topodatapb.Tablet, logger logutil.Logger, tmc tmclient.TabletManagerClient, waitTimeout time.Duration) (replication.Position, time.Duration, bool, error) {
logger.Infof("getting replication position from %v", topoproto.TabletAliasString(tablet.Alias))
ctx, cancel := context.WithTimeout(ctx, waitTimeout)
@@ -172,10 +177,10 @@ func findPositionAndLagForTablet(ctx context.Context, tablet *topodatapb.Tablet,
sqlErr, isSQLErr := sqlerror.NewSQLErrorFromError(err).(*sqlerror.SQLError)
if isSQLErr && sqlErr != nil && sqlErr.Number() == sqlerror.ERNotReplica {
logger.Warningf("no replication statue from %v, using empty gtid set", topoproto.TabletAliasString(tablet.Alias))
- return replication.Position{}, 0, nil
+ return replication.Position{}, 0, false, nil
}
logger.Warningf("failed to get replication status from %v, ignoring tablet: %v", topoproto.TabletAliasString(tablet.Alias), err)
- return replication.Position{}, 0, err
+ return replication.Position{}, 0, false, err
}
// Use the relay log position if available, otherwise use the executed GTID set (binary log position).
@@ -186,10 +191,10 @@ func findPositionAndLagForTablet(ctx context.Context, tablet *topodatapb.Tablet,
pos, err := replication.DecodePosition(positionString)
if err != nil {
logger.Warningf("cannot decode replica position %v for tablet %v, ignoring tablet: %v", positionString, topoproto.TabletAliasString(tablet.Alias), err)
- return replication.Position{}, 0, err
+ return replication.Position{}, 0, status.BackupRunning, err
}
- return pos, time.Second * time.Duration(status.ReplicationLagSeconds), nil
+ return pos, time.Second * time.Duration(status.ReplicationLagSeconds), status.BackupRunning, nil
}
// FindCurrentPrimary returns the current primary tablet of a shard, if any. The
diff --git a/go/vt/vtctl/reparentutil/util_test.go b/go/vt/vtctl/reparentutil/util_test.go
index f4e9092fc3f..ac44da8175a 100644
--- a/go/vt/vtctl/reparentutil/util_test.go
+++ b/go/vt/vtctl/reparentutil/util_test.go
@@ -139,6 +139,112 @@ func TestElectNewPrimary(t *testing.T) {
},
errContains: nil,
},
+ {
+ name: "Two good replicas, but one of them is taking a backup so we pick the other one",
+ tmc: &chooseNewPrimaryTestTMClient{
+			// both zone1-101 and zone1-102 are equivalent from a replication PoV, but zone1-102 is taking a backup
+ replicationStatuses: map[string]*replicationdatapb.Status{
+ "zone1-0000000101": {
+ Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-5",
+ BackupRunning: true,
+ },
+ "zone1-0000000102": {
+ Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-5",
+ BackupRunning: false,
+ },
+ },
+ },
+ tolerableReplLag: 50 * time.Second,
+ shardInfo: topo.NewShardInfo("testkeyspace", "-", &topodatapb.Shard{
+ PrimaryAlias: &topodatapb.TabletAlias{
+ Cell: "zone1",
+ Uid: 100,
+ },
+ }, nil),
+ tabletMap: map[string]*topo.TabletInfo{
+ "primary": {
+ Tablet: &topodatapb.Tablet{
+ Alias: &topodatapb.TabletAlias{
+ Cell: "zone1",
+ Uid: 100,
+ },
+ Type: topodatapb.TabletType_PRIMARY,
+ },
+ },
+ "replica1": {
+ Tablet: &topodatapb.Tablet{
+ Alias: &topodatapb.TabletAlias{
+ Cell: "zone1",
+ Uid: 101,
+ },
+ Type: topodatapb.TabletType_REPLICA,
+ },
+ },
+ "replica2": {
+ Tablet: &topodatapb.Tablet{
+ Alias: &topodatapb.TabletAlias{
+ Cell: "zone1",
+ Uid: 102,
+ },
+ Type: topodatapb.TabletType_REPLICA,
+ },
+ },
+ },
+ avoidPrimaryAlias: &topodatapb.TabletAlias{
+ Cell: "zone1",
+ Uid: 0,
+ },
+ expected: &topodatapb.TabletAlias{
+ Cell: "zone1",
+ Uid: 102,
+ },
+ errContains: nil,
+ },
+ {
+ name: "Only one replica, but it's taking a backup. We don't elect it.",
+ tmc: &chooseNewPrimaryTestTMClient{
+			// only zone1-101 is available, and it is taking a backup
+ replicationStatuses: map[string]*replicationdatapb.Status{
+ "zone1-0000000101": {
+ Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-5",
+ BackupRunning: true,
+ },
+ },
+ },
+ tolerableReplLag: 50 * time.Second,
+ shardInfo: topo.NewShardInfo("testkeyspace", "-", &topodatapb.Shard{
+ PrimaryAlias: &topodatapb.TabletAlias{
+ Cell: "zone1",
+ Uid: 100,
+ },
+ }, nil),
+ tabletMap: map[string]*topo.TabletInfo{
+ "primary": {
+ Tablet: &topodatapb.Tablet{
+ Alias: &topodatapb.TabletAlias{
+ Cell: "zone1",
+ Uid: 100,
+ },
+ Type: topodatapb.TabletType_PRIMARY,
+ },
+ },
+ "replica1": {
+ Tablet: &topodatapb.Tablet{
+ Alias: &topodatapb.TabletAlias{
+ Cell: "zone1",
+ Uid: 101,
+ },
+ Type: topodatapb.TabletType_REPLICA,
+ },
+ },
+ },
+ avoidPrimaryAlias: &topodatapb.TabletAlias{
+ Cell: "zone1",
+ Uid: 0,
+ },
+ expected: nil,
+ errContains: []string{"zone1-0000000101 is taking a backup"},
+ },
{
name: "new primary alias provided - no tolerable replication lag",
tolerableReplLag: 0,
@@ -414,6 +520,67 @@ func TestElectNewPrimary(t *testing.T) {
},
errContains: nil,
},
+ {
+ name: "Two replicas, first one with too much lag, another one taking a backup - none is a good candidate",
+ tmc: &chooseNewPrimaryTestTMClient{
+ // zone1-101 is behind zone1-102
+ replicationStatuses: map[string]*replicationdatapb.Status{
+ "zone1-0000000101": {
+ Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1",
+ ReplicationLagSeconds: 55,
+ },
+ "zone1-0000000102": {
+ Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-5",
+ BackupRunning: true,
+ },
+ },
+ },
+ tolerableReplLag: 50 * time.Second,
+ shardInfo: topo.NewShardInfo("testkeyspace", "-", &topodatapb.Shard{
+ PrimaryAlias: &topodatapb.TabletAlias{
+ Cell: "zone1",
+ Uid: 100,
+ },
+ }, nil),
+ tabletMap: map[string]*topo.TabletInfo{
+ "primary": {
+ Tablet: &topodatapb.Tablet{
+ Alias: &topodatapb.TabletAlias{
+ Cell: "zone1",
+ Uid: 100,
+ },
+ Type: topodatapb.TabletType_PRIMARY,
+ },
+ },
+ "replica1": {
+ Tablet: &topodatapb.Tablet{
+ Alias: &topodatapb.TabletAlias{
+ Cell: "zone1",
+ Uid: 101,
+ },
+ Type: topodatapb.TabletType_REPLICA,
+ },
+ },
+ "replica2": {
+ Tablet: &topodatapb.Tablet{
+ Alias: &topodatapb.TabletAlias{
+ Cell: "zone1",
+ Uid: 102,
+ },
+ Type: topodatapb.TabletType_REPLICA,
+ },
+ },
+ },
+ avoidPrimaryAlias: &topodatapb.TabletAlias{
+ Cell: "zone1",
+ Uid: 0,
+ },
+ expected: nil,
+ errContains: []string{
+ "zone1-0000000101 has 55s replication lag which is more than the tolerable amount",
+ "zone1-0000000102 is taking a backup",
+ },
+ },
{
name: "found a replica - more advanced relay log position",
tmc: &chooseNewPrimaryTestTMClient{
@@ -881,12 +1048,13 @@ func TestFindPositionForTablet(t *testing.T) {
ctx := context.Background()
logger := logutil.NewMemoryLogger()
tests := []struct {
- name string
- tmc *testutil.TabletManagerClient
- tablet *topodatapb.Tablet
- expectedPosition string
- expectedLag time.Duration
- expectedErr string
+ name string
+ tmc *testutil.TabletManagerClient
+ tablet *topodatapb.Tablet
+ expectedPosition string
+ expectedLag time.Duration
+ expectedErr string
+ expectedTakingBackup bool
}{
{
name: "executed gtid set",
@@ -911,6 +1079,31 @@ func TestFindPositionForTablet(t *testing.T) {
},
expectedLag: 201 * time.Second,
expectedPosition: "MySQL56/3e11fa47-71ca-11e1-9e33-c80aa9429562:1-5",
+ }, {
+ name: "Host is taking a backup",
+ tmc: &testutil.TabletManagerClient{
+ ReplicationStatusResults: map[string]struct {
+ Position *replicationdatapb.Status
+ Error error
+ }{
+ "zone1-0000000100": {
+ Position: &replicationdatapb.Status{
+ Position: "MySQL56/3e11fa47-71ca-11e1-9e33-c80aa9429562:1-5",
+ ReplicationLagSeconds: 201,
+ },
+ },
+ },
+ TabletsBackupState: map[string]bool{"zone1-0000000100": true},
+ },
+ tablet: &topodatapb.Tablet{
+ Alias: &topodatapb.TabletAlias{
+ Cell: "zone1",
+ Uid: 100,
+ },
+ },
+ expectedLag: 201 * time.Second,
+ expectedTakingBackup: true,
+ expectedPosition: "MySQL56/3e11fa47-71ca-11e1-9e33-c80aa9429562:1-5",
}, {
name: "no replication status",
tmc: &testutil.TabletManagerClient{
@@ -981,7 +1174,7 @@ func TestFindPositionForTablet(t *testing.T) {
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
- pos, lag, err := findPositionAndLagForTablet(ctx, test.tablet, logger, test.tmc, 10*time.Second)
+ pos, lag, takingBackup, err := findTabletPositionLagBackupStatus(ctx, test.tablet, logger, test.tmc, 10*time.Second)
if test.expectedErr != "" {
require.EqualError(t, err, test.expectedErr)
return
@@ -990,6 +1183,7 @@ func TestFindPositionForTablet(t *testing.T) {
posString := replication.EncodePosition(pos)
require.Equal(t, test.expectedPosition, posString)
require.Equal(t, test.expectedLag, lag)
+ require.Equal(t, test.expectedTakingBackup, takingBackup)
})
}
}
diff --git a/go/vt/vtctl/workflow/framework_test.go b/go/vt/vtctl/workflow/framework_test.go
index 249ff07cf41..a2c1b2ef8e3 100644
--- a/go/vt/vtctl/workflow/framework_test.go
+++ b/go/vt/vtctl/workflow/framework_test.go
@@ -271,6 +271,7 @@ type testTMClient struct {
vrQueries map[int][]*queryResult
createVReplicationWorkflowRequests map[uint32]*createVReplicationWorkflowRequestResponse
readVReplicationWorkflowRequests map[uint32]*readVReplicationWorkflowRequestResponse
+ updateVReplicationWorklowsRequests map[uint32]*tabletmanagerdatapb.UpdateVReplicationWorkflowsRequest
applySchemaRequests map[uint32]*applySchemaRequestResponse
primaryPositions map[uint32]string
vdiffRequests map[uint32]*vdiffRequestResponse
@@ -294,6 +295,7 @@ func newTestTMClient(env *testEnv) *testTMClient {
vrQueries: make(map[int][]*queryResult),
createVReplicationWorkflowRequests: make(map[uint32]*createVReplicationWorkflowRequestResponse),
readVReplicationWorkflowRequests: make(map[uint32]*readVReplicationWorkflowRequestResponse),
+ updateVReplicationWorklowsRequests: make(map[uint32]*tabletmanagerdatapb.UpdateVReplicationWorkflowsRequest),
applySchemaRequests: make(map[uint32]*applySchemaRequestResponse),
readVReplicationWorkflowsResponses: make(map[string][]*tabletmanagerdatapb.ReadVReplicationWorkflowsResponse),
primaryPositions: make(map[uint32]string),
@@ -496,6 +498,11 @@ func (tmc *testTMClient) ExecuteFetchAsAllPrivs(ctx context.Context, tablet *top
return nil, nil
}
+func (tmc *testTMClient) ExecuteFetchAsApp(ctx context.Context, tablet *topodatapb.Tablet, usePool bool, req *tabletmanagerdatapb.ExecuteFetchAsAppRequest) (*querypb.QueryResult, error) {
+ // Reuse VReplicationExec.
+ return tmc.VReplicationExec(ctx, tablet, string(req.Query))
+}
+
func (tmc *testTMClient) expectApplySchemaRequest(tabletID uint32, req *applySchemaRequestResponse) {
tmc.mu.Lock()
defer tmc.mu.Unlock()
@@ -617,6 +624,10 @@ func (tmc *testTMClient) HasVReplicationWorkflows(ctx context.Context, tablet *t
}, nil
}
+func (tmc *testTMClient) ResetSequences(ctx context.Context, tablet *topodatapb.Tablet, tables []string) error {
+ return nil
+}
+
func (tmc *testTMClient) ReadVReplicationWorkflows(ctx context.Context, tablet *topodatapb.Tablet, req *tabletmanagerdatapb.ReadVReplicationWorkflowsRequest) (*tabletmanagerdatapb.ReadVReplicationWorkflowsResponse, error) {
tmc.mu.Lock()
defer tmc.mu.Unlock()
@@ -677,6 +688,19 @@ func (tmc *testTMClient) UpdateVReplicationWorkflow(ctx context.Context, tablet
}, nil
}
+func (tmc *testTMClient) UpdateVReplicationWorkflows(ctx context.Context, tablet *topodatapb.Tablet, req *tabletmanagerdatapb.UpdateVReplicationWorkflowsRequest) (*tabletmanagerdatapb.UpdateVReplicationWorkflowsResponse, error) {
+ tmc.mu.Lock()
+ defer tmc.mu.Unlock()
+ if expect := tmc.updateVReplicationWorklowsRequests[tablet.Alias.Uid]; expect != nil {
+ if !proto.Equal(expect, req) {
+ return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "unexpected ReadVReplicationWorkflow request on tablet %s: got %+v, want %+v",
+ topoproto.TabletAliasString(tablet.Alias), req, expect)
+ }
+ }
+ delete(tmc.updateVReplicationWorklowsRequests, tablet.Alias.Uid)
+ return nil, nil
+}
+
func (tmc *testTMClient) ValidateVReplicationPermissions(ctx context.Context, tablet *topodatapb.Tablet, req *tabletmanagerdatapb.ValidateVReplicationPermissionsRequest) (*tabletmanagerdatapb.ValidateVReplicationPermissionsResponse, error) {
return &tabletmanagerdatapb.ValidateVReplicationPermissionsResponse{
User: "vt_filtered",
@@ -736,6 +760,12 @@ func (tmc *testTMClient) AddVReplicationWorkflowsResponse(key string, resp *tabl
tmc.readVReplicationWorkflowsResponses[key] = append(tmc.readVReplicationWorkflowsResponses[key], resp)
}
+func (tmc *testTMClient) AddUpdateVReplicationRequests(tabletUID uint32, req *tabletmanagerdatapb.UpdateVReplicationWorkflowsRequest) {
+ tmc.mu.Lock()
+ defer tmc.mu.Unlock()
+ tmc.updateVReplicationWorklowsRequests[tabletUID] = req
+}
+
func (tmc *testTMClient) getVReplicationWorkflowsResponse(key string) *tabletmanagerdatapb.ReadVReplicationWorkflowsResponse {
if len(tmc.readVReplicationWorkflowsResponses) == 0 {
return nil
diff --git a/go/vt/vtctl/workflow/materializer_test.go b/go/vt/vtctl/workflow/materializer_test.go
index 746c5fe2bae..a583a101186 100644
--- a/go/vt/vtctl/workflow/materializer_test.go
+++ b/go/vt/vtctl/workflow/materializer_test.go
@@ -2588,7 +2588,7 @@ func TestCreateLookupVindexFailures(t *testing.T) {
err: "unique vindex 'from' should have only one column",
},
{
- description: "non-unique lookup should have more than one column",
+ description: "non-unique lookup can have only one column",
input: &vschemapb.Keyspace{
Vindexes: map[string]*vschemapb.Vindex{
"v": {
@@ -2601,7 +2601,7 @@ func TestCreateLookupVindexFailures(t *testing.T) {
},
},
},
- err: "non-unique vindex 'from' should have more than one column",
+ err: "",
},
{
description: "vindex not found",
diff --git a/go/vt/vtctl/workflow/resharder_test.go b/go/vt/vtctl/workflow/resharder_test.go
index 6353f36db9f..6fe1afb0c70 100644
--- a/go/vt/vtctl/workflow/resharder_test.go
+++ b/go/vt/vtctl/workflow/resharder_test.go
@@ -22,14 +22,20 @@ import (
"testing"
"time"
+ "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
+ "golang.org/x/exp/maps"
+ "vitess.io/vitess/go/ptr"
"vitess.io/vitess/go/sqltypes"
"vitess.io/vitess/go/vt/topo"
"vitess.io/vitess/go/vt/topo/topoproto"
+ "vitess.io/vitess/go/vt/vtgate/vindexes"
+ binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata"
tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata"
topodatapb "vitess.io/vitess/go/vt/proto/topodata"
+ vschemapb "vitess.io/vitess/go/vt/proto/vschema"
vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata"
)
@@ -65,6 +71,8 @@ func TestReshardCreate(t *testing.T) {
sourceKeyspace, targetKeyspace *testKeyspace
preFunc func(env *testEnv)
want *vtctldatapb.WorkflowStatusResponse
+ updateVReplicationRequest *tabletmanagerdatapb.UpdateVReplicationWorkflowsRequest
+ autoStart bool
wantErr string
}{
{
@@ -77,6 +85,11 @@ func TestReshardCreate(t *testing.T) {
KeyspaceName: targetKeyspaceName,
ShardNames: []string{"-80", "80-"},
},
+ autoStart: true,
+ updateVReplicationRequest: &tabletmanagerdatapb.UpdateVReplicationWorkflowsRequest{
+ AllWorkflows: true,
+ State: ptr.Of(binlogdatapb.VReplicationWorkflowState_Running),
+ },
want: &vtctldatapb.WorkflowStatusResponse{
ShardStreams: map[string]*vtctldatapb.WorkflowStatusResponse_ShardStreams{
"targetks/-80": {
@@ -137,6 +150,7 @@ func TestReshardCreate(t *testing.T) {
SourceShards: tc.sourceKeyspace.ShardNames,
TargetShards: tc.targetKeyspace.ShardNames,
Cells: []string{env.cell},
+ AutoStart: tc.autoStart,
}
for i := range tc.sourceKeyspace.ShardNames {
@@ -172,6 +186,9 @@ func TestReshardCreate(t *testing.T) {
"select vrepl_id, table_name, lastpk from _vt.copy_state where vrepl_id in (1) and id in (select max(id) from _vt.copy_state where vrepl_id in (1) group by vrepl_id, table_name)",
&sqltypes.Result{},
)
+ if tc.updateVReplicationRequest != nil {
+ env.tmc.AddUpdateVReplicationRequests(uint32(tabletUID), tc.updateVReplicationRequest)
+ }
}
if tc.preFunc != nil {
@@ -187,6 +204,300 @@ func TestReshardCreate(t *testing.T) {
if tc.want != nil {
require.Equal(t, tc.want, res)
}
+
+			// Expect updateVReplicationWorklowsRequests to be empty if
+			// AutoStart is enabled. This is because the testTMC deletes the
+			// matching key from the map once UpdateVReplicationWorkflows()
+			// is called with the expected request.
+ if tc.autoStart {
+ assert.Len(t, env.tmc.updateVReplicationWorklowsRequests, 0)
+ }
+ })
+ }
+}
+
+func TestReadRefStreams(t *testing.T) {
+ ctx := context.Background()
+
+ sourceKeyspace := &testKeyspace{
+ KeyspaceName: "sourceKeyspace",
+ ShardNames: []string{"-"},
+ }
+ targetKeyspace := &testKeyspace{
+ KeyspaceName: "targetKeyspace",
+ ShardNames: []string{"-"},
+ }
+
+ env := newTestEnv(t, ctx, defaultCellName, sourceKeyspace, targetKeyspace)
+ defer env.close()
+
+ s1, err := env.ts.UpdateShardFields(ctx, targetKeyspace.KeyspaceName, "-", func(si *topo.ShardInfo) error {
+ return nil
+ })
+ require.NoError(t, err)
+
+ sourceTablet, ok := env.tablets[sourceKeyspace.KeyspaceName][100]
+ require.True(t, ok)
+
+ env.tmc.schema = map[string]*tabletmanagerdatapb.SchemaDefinition{
+ "t1": {},
+ }
+
+ rules := make([]*binlogdatapb.Rule, len(env.tmc.schema))
+ for i, table := range maps.Keys(env.tmc.schema) {
+ rules[i] = &binlogdatapb.Rule{
+ Match: table,
+ Filter: fmt.Sprintf("select * from %s", table),
+ }
+ }
+
+ refKey := fmt.Sprintf("wf:%s:-", sourceKeyspace.KeyspaceName)
+
+ testCases := []struct {
+ name string
+ addVReplicationWorkflowsResponse *tabletmanagerdatapb.ReadVReplicationWorkflowsResponse
+ preRefStreams map[string]*refStream
+ wantRefStreamKeys []string
+ wantErr bool
+ errContains string
+ }{
+ {
+ name: "error for unnamed workflow",
+ addVReplicationWorkflowsResponse: &tabletmanagerdatapb.ReadVReplicationWorkflowsResponse{
+ Workflows: []*tabletmanagerdatapb.ReadVReplicationWorkflowResponse{
+ {
+ Workflow: "",
+ WorkflowType: binlogdatapb.VReplicationWorkflowType_Reshard,
+ },
+ },
+ },
+ wantErr: true,
+ },
+ {
+ name: "populate ref streams",
+ addVReplicationWorkflowsResponse: &tabletmanagerdatapb.ReadVReplicationWorkflowsResponse{
+ Workflows: []*tabletmanagerdatapb.ReadVReplicationWorkflowResponse{
+ {
+ Workflow: "wf",
+ WorkflowType: binlogdatapb.VReplicationWorkflowType_Reshard,
+ Streams: []*tabletmanagerdatapb.ReadVReplicationWorkflowResponse_Stream{
+ {
+
+ Bls: &binlogdatapb.BinlogSource{
+ Keyspace: sourceKeyspace.KeyspaceName,
+ Shard: "-",
+ Tables: maps.Keys(env.tmc.schema),
+ Filter: &binlogdatapb.Filter{
+ Rules: rules,
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ wantRefStreamKeys: []string{refKey},
+ },
+ {
+ name: "mismatched streams with empty map",
+ preRefStreams: map[string]*refStream{},
+ addVReplicationWorkflowsResponse: &tabletmanagerdatapb.ReadVReplicationWorkflowsResponse{
+ Workflows: []*tabletmanagerdatapb.ReadVReplicationWorkflowResponse{
+ {
+ Workflow: "wf",
+ WorkflowType: binlogdatapb.VReplicationWorkflowType_Reshard,
+ Streams: []*tabletmanagerdatapb.ReadVReplicationWorkflowResponse_Stream{
+ {
+
+ Bls: &binlogdatapb.BinlogSource{
+ Keyspace: sourceKeyspace.KeyspaceName,
+ Shard: "-",
+ Tables: maps.Keys(env.tmc.schema),
+ Filter: &binlogdatapb.Filter{
+ Rules: rules,
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ wantErr: true,
+ errContains: "mismatch",
+ },
+ {
+ name: "mismatched streams",
+ preRefStreams: map[string]*refStream{
+ refKey: nil,
+ "nonexisting": nil,
+ },
+ addVReplicationWorkflowsResponse: &tabletmanagerdatapb.ReadVReplicationWorkflowsResponse{
+ Workflows: []*tabletmanagerdatapb.ReadVReplicationWorkflowResponse{
+ {
+ Workflow: "wf",
+ WorkflowType: binlogdatapb.VReplicationWorkflowType_Reshard,
+ Streams: []*tabletmanagerdatapb.ReadVReplicationWorkflowResponse_Stream{
+ {
+
+ Bls: &binlogdatapb.BinlogSource{
+ Keyspace: sourceKeyspace.KeyspaceName,
+ Shard: "-",
+ Tables: maps.Keys(env.tmc.schema),
+ Filter: &binlogdatapb.Filter{
+ Rules: rules,
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ wantErr: true,
+ errContains: "mismatch",
+ },
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ rs := &resharder{
+ s: env.ws,
+ keyspace: targetKeyspace.KeyspaceName,
+ sourceShards: []*topo.ShardInfo{s1},
+ sourcePrimaries: map[string]*topo.TabletInfo{
+ "-": {
+ Tablet: sourceTablet,
+ },
+ },
+ workflow: "wf",
+ vschema: &vschemapb.Keyspace{
+ Tables: map[string]*vschemapb.Table{
+ "t1": {
+ Type: vindexes.TypeReference,
+ },
+ },
+ },
+ refStreams: tc.preRefStreams,
+ }
+
+ workflowKey := env.tmc.GetWorkflowKey(sourceKeyspace.KeyspaceName, "-")
+
+ env.tmc.AddVReplicationWorkflowsResponse(workflowKey, tc.addVReplicationWorkflowsResponse)
+
+ err := rs.readRefStreams(ctx)
+ if !tc.wantErr {
+ assert.NoError(t, err)
+ for _, rk := range tc.wantRefStreamKeys {
+ assert.Contains(t, rs.refStreams, rk)
+ }
+ return
+ }
+
+ assert.Error(t, err)
+ assert.ErrorContains(t, err, tc.errContains)
+ })
+ }
+}
+
+func TestBlsIsReference(t *testing.T) {
+ testCases := []struct {
+ name string
+ bls *binlogdatapb.BinlogSource
+ tables map[string]*vschemapb.Table
+ expected bool
+ wantErr bool
+ errContains string
+ }{
+ {
+ name: "all references",
+ bls: &binlogdatapb.BinlogSource{
+ Filter: &binlogdatapb.Filter{
+ Rules: []*binlogdatapb.Rule{
+ {Match: "ref_table1"},
+ {Match: "ref_table2"},
+ },
+ },
+ },
+ tables: map[string]*vschemapb.Table{
+ "ref_table1": {Type: vindexes.TypeReference},
+ "ref_table2": {Type: vindexes.TypeReference},
+ },
+ expected: true,
+ },
+ {
+ name: "all sharded",
+ bls: &binlogdatapb.BinlogSource{
+ Filter: &binlogdatapb.Filter{
+ Rules: []*binlogdatapb.Rule{
+ {Match: "sharded_table1"},
+ {Match: "sharded_table2"},
+ },
+ },
+ },
+ tables: map[string]*vschemapb.Table{
+ "sharded_table1": {Type: vindexes.TypeTable},
+ "sharded_table2": {Type: vindexes.TypeTable},
+ },
+ expected: false,
+ },
+ {
+ name: "mixed reference and sharded tables",
+ bls: &binlogdatapb.BinlogSource{
+ Filter: &binlogdatapb.Filter{
+ Rules: []*binlogdatapb.Rule{
+ {Match: "ref_table"},
+ {Match: "sharded_table"},
+ },
+ },
+ },
+ tables: map[string]*vschemapb.Table{
+ "ref_table": {Type: vindexes.TypeReference},
+ "sharded_table": {Type: vindexes.TypeTable},
+ },
+ wantErr: true,
+ },
+ {
+ name: "rule table not found in vschema",
+ bls: &binlogdatapb.BinlogSource{
+ Filter: &binlogdatapb.Filter{
+ Rules: []*binlogdatapb.Rule{
+ {Match: "unknown_table"},
+ },
+ },
+ },
+ tables: map[string]*vschemapb.Table{},
+ wantErr: true,
+ errContains: "unknown_table",
+ },
+ {
+ name: "internal operation table ignored",
+ bls: &binlogdatapb.BinlogSource{
+ Filter: &binlogdatapb.Filter{
+ Rules: []*binlogdatapb.Rule{
+ {Match: "_vt_hld_6ace8bcef73211ea87e9f875a4d24e90_20200915120410_"},
+ },
+ },
+ },
+ tables: map[string]*vschemapb.Table{},
+ expected: false,
+ },
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ rs := &resharder{
+ vschema: &vschemapb.Keyspace{
+ Tables: tc.tables,
+ },
+ }
+
+ result, err := rs.blsIsReference(tc.bls)
+
+ if tc.wantErr {
+ assert.ErrorContains(t, err, tc.errContains)
+ } else {
+ assert.NoError(t, err)
+ assert.Equal(t, tc.expected, result)
+ }
})
}
}
diff --git a/go/vt/vtctl/workflow/server.go b/go/vt/vtctl/workflow/server.go
index 7c49de58c9b..baea602b7a4 100644
--- a/go/vt/vtctl/workflow/server.go
+++ b/go/vt/vtctl/workflow/server.go
@@ -18,7 +18,6 @@ package workflow
import (
"context"
- "encoding/json"
"errors"
"fmt"
"math"
@@ -41,11 +40,9 @@ import (
"vitess.io/vitess/go/constants/sidecar"
"vitess.io/vitess/go/protoutil"
"vitess.io/vitess/go/ptr"
- "vitess.io/vitess/go/sets"
"vitess.io/vitess/go/sqlescape"
"vitess.io/vitess/go/sqltypes"
"vitess.io/vitess/go/trace"
- "vitess.io/vitess/go/vt/binlog/binlogplayer"
"vitess.io/vitess/go/vt/concurrency"
"vitess.io/vitess/go/vt/discovery"
"vitess.io/vitess/go/vt/key"
@@ -58,7 +55,6 @@ import (
"vitess.io/vitess/go/vt/topo/topoproto"
"vitess.io/vitess/go/vt/topotools"
"vitess.io/vitess/go/vt/vtctl/schematools"
- "vitess.io/vitess/go/vt/vtctl/workflow/common"
"vitess.io/vitess/go/vt/vtctl/workflow/vexec"
"vitess.io/vitess/go/vt/vtenv"
"vitess.io/vitess/go/vt/vterrors"
@@ -406,546 +402,28 @@ func (s *Server) GetWorkflows(ctx context.Context, req *vtctldatapb.GetWorkflows
span.Annotate("include_logs", req.IncludeLogs)
span.Annotate("shards", req.Shards)
- readReq := &tabletmanagerdatapb.ReadVReplicationWorkflowsRequest{}
- if req.Workflow != "" {
- readReq.IncludeWorkflows = []string{req.Workflow}
+ w := &workflowFetcher{
+ ts: s.ts,
+ tmc: s.tmc,
+ parser: s.SQLParser(),
+ logger: s.Logger(),
}
- if req.ActiveOnly {
- readReq.ExcludeStates = []binlogdatapb.VReplicationWorkflowState{binlogdatapb.VReplicationWorkflowState_Stopped}
- }
-
- // Guards access to the maps used throughout.
- m := sync.Mutex{}
- shards, err := common.GetShards(ctx, s.ts, req.Keyspace, req.Shards)
+ workflowsByShard, err := w.fetchWorkflowsByShard(ctx, req)
if err != nil {
return nil, err
}
- results := make(map[*topo.TabletInfo]*tabletmanagerdatapb.ReadVReplicationWorkflowsResponse, len(shards))
- readWorkflowsEg, readWorkflowsCtx := errgroup.WithContext(ctx)
- for _, shard := range shards {
- readWorkflowsEg.Go(func() error {
- si, err := s.ts.GetShard(readWorkflowsCtx, req.Keyspace, shard)
- if err != nil {
- return err
- }
- if si.PrimaryAlias == nil {
- return fmt.Errorf("%w %s/%s", vexec.ErrNoShardPrimary, req.Keyspace, shard)
- }
- primary, err := s.ts.GetTablet(readWorkflowsCtx, si.PrimaryAlias)
- if err != nil {
- return err
- }
- if primary == nil {
- return fmt.Errorf("%w %s/%s: tablet %v not found", vexec.ErrNoShardPrimary, req.Keyspace, shard, topoproto.TabletAliasString(si.PrimaryAlias))
- }
- // Clone the request so that we can set the correct DB name for tablet.
- req := readReq.CloneVT()
- wres, err := s.tmc.ReadVReplicationWorkflows(readWorkflowsCtx, primary.Tablet, req)
- if err != nil {
- return err
- }
- m.Lock()
- defer m.Unlock()
- results[primary] = wres
- return nil
- })
- }
- if readWorkflowsEg.Wait() != nil {
- return nil, err
- }
-
- copyStatesByShardStreamId := make(map[string][]*vtctldatapb.Workflow_Stream_CopyState, len(results))
-
- fetchCopyStates := func(ctx context.Context, tablet *topo.TabletInfo, streamIds []int32) error {
- span, ctx := trace.NewSpan(ctx, "workflow.Server.fetchCopyStates")
- defer span.Finish()
-
- span.Annotate("keyspace", req.Keyspace)
- span.Annotate("shard", tablet.Shard)
- span.Annotate("tablet_alias", tablet.AliasString())
-
- copyStates, err := s.getWorkflowCopyStates(ctx, tablet, streamIds)
- if err != nil {
- return err
- }
- m.Lock()
- defer m.Unlock()
-
- for _, copyState := range copyStates {
- shardStreamId := fmt.Sprintf("%s/%d", tablet.Shard, copyState.StreamId)
- copyStatesByShardStreamId[shardStreamId] = append(
- copyStatesByShardStreamId[shardStreamId],
- copyState,
- )
- }
-
- return nil
- }
-
- fetchCopyStatesEg, fetchCopyStatesCtx := errgroup.WithContext(ctx)
- for tablet, result := range results {
- tablet := tablet // loop closure
-
- streamIds := make([]int32, 0, len(result.Workflows))
- for _, wf := range result.Workflows {
- for _, stream := range wf.Streams {
- streamIds = append(streamIds, stream.Id)
- }
- }
-
- if len(streamIds) == 0 {
- continue
- }
-
- fetchCopyStatesEg.Go(func() error {
- return fetchCopyStates(fetchCopyStatesCtx, tablet, streamIds)
- })
- }
-
- if err := fetchCopyStatesEg.Wait(); err != nil {
+ copyStatesByShardStreamId, err := w.fetchCopyStatesByShardStream(ctx, workflowsByShard)
+ if err != nil {
return nil, err
}
- workflowsMap := make(map[string]*vtctldatapb.Workflow, len(results))
- sourceKeyspaceByWorkflow := make(map[string]string, len(results))
- sourceShardsByWorkflow := make(map[string]sets.Set[string], len(results))
- targetKeyspaceByWorkflow := make(map[string]string, len(results))
- targetShardsByWorkflow := make(map[string]sets.Set[string], len(results))
- maxVReplicationLagByWorkflow := make(map[string]float64, len(results))
- maxVReplicationTransactionLagByWorkflow := make(map[string]float64, len(results))
-
- // We guarantee the following invariants when this function is called for a
- // given workflow:
- // - workflow.Name != "" (more precisely, ".Name is set 'properly'")
- // - workflowsMap[workflow.Name] == workflow
- // - sourceShardsByWorkflow[workflow.Name] != nil
- // - targetShardsByWorkflow[workflow.Name] != nil
- // - workflow.ShardStatuses != nil
- scanWorkflow := func(ctx context.Context, workflow *vtctldatapb.Workflow, res *tabletmanagerdatapb.ReadVReplicationWorkflowResponse, tablet *topo.TabletInfo) error {
- // This is not called concurrently, but we still protect the maps to ensure
- // that we're concurrency-safe in the face of future changes (e.g. where other
- // things are running concurrently with this which also access these maps).
- m.Lock()
- defer m.Unlock()
- for _, rstream := range res.Streams {
- // The value in the pos column can be compressed and thus not
- // have a valid GTID consisting of valid UTF-8 characters so we
- // have to decode it so that it's properly decompressed first
- // when needed.
- pos := rstream.Pos
- if pos != "" {
- mpos, err := binlogplayer.DecodePosition(pos)
- if err != nil {
- return err
- }
- pos = mpos.String()
- }
-
- cells := strings.Split(res.Cells, ",")
- for i := range cells {
- cells[i] = strings.TrimSpace(cells[i])
- }
- options := res.Options
- if options != "" {
- if err := json.Unmarshal([]byte(options), &workflow.Options); err != nil {
- return err
- }
- }
- stream := &vtctldatapb.Workflow_Stream{
- Id: int64(rstream.Id),
- Shard: tablet.Shard,
- Tablet: tablet.Alias,
- BinlogSource: rstream.Bls,
- Position: pos,
- StopPosition: rstream.StopPos,
- State: rstream.State.String(),
- DbName: tablet.DbName(),
- TabletTypes: res.TabletTypes,
- TabletSelectionPreference: res.TabletSelectionPreference,
- Cells: cells,
- TransactionTimestamp: rstream.TransactionTimestamp,
- TimeUpdated: rstream.TimeUpdated,
- Message: rstream.Message,
- Tags: strings.Split(res.Tags, ","),
- RowsCopied: rstream.RowsCopied,
- ThrottlerStatus: &vtctldatapb.Workflow_Stream_ThrottlerStatus{
- ComponentThrottled: rstream.ComponentThrottled,
- TimeThrottled: rstream.TimeThrottled,
- },
- }
-
- // Merge in copy states, which we've already fetched.
- shardStreamId := fmt.Sprintf("%s/%d", tablet.Shard, stream.Id)
- if copyState, ok := copyStatesByShardStreamId[shardStreamId]; ok {
- stream.CopyStates = copyState
- }
-
- if rstream.TimeUpdated == nil {
- rstream.TimeUpdated = &vttimepb.Time{}
- }
-
- switch {
- case strings.Contains(strings.ToLower(stream.Message), "error"):
- stream.State = binlogdatapb.VReplicationWorkflowState_Error.String()
- case stream.State == binlogdatapb.VReplicationWorkflowState_Running.String() && len(stream.CopyStates) > 0:
- stream.State = binlogdatapb.VReplicationWorkflowState_Copying.String()
- case stream.State == binlogdatapb.VReplicationWorkflowState_Running.String() && int64(time.Now().Second())-rstream.TimeUpdated.Seconds > 10:
- stream.State = binlogdatapb.VReplicationWorkflowState_Lagging.String()
- }
-
- shardStreamKey := fmt.Sprintf("%s/%s", tablet.Shard, tablet.AliasString())
- shardStream, ok := workflow.ShardStreams[shardStreamKey]
- if !ok {
- ctx, cancel := context.WithTimeout(ctx, topo.RemoteOperationTimeout)
- defer cancel()
-
- si, err := s.ts.GetShard(ctx, req.Keyspace, tablet.Shard)
- if err != nil {
- return err
- }
-
- shardStream = &vtctldatapb.Workflow_ShardStream{
- Streams: nil,
- TabletControls: si.TabletControls,
- IsPrimaryServing: si.IsPrimaryServing,
- }
-
- workflow.ShardStreams[shardStreamKey] = shardStream
- }
-
- shardStream.Streams = append(shardStream.Streams, stream)
- sourceShardsByWorkflow[workflow.Name].Insert(stream.BinlogSource.Shard)
- targetShardsByWorkflow[workflow.Name].Insert(tablet.Shard)
-
- if ks, ok := sourceKeyspaceByWorkflow[workflow.Name]; ok && ks != stream.BinlogSource.Keyspace {
- return vterrors.Wrapf(ErrMultipleSourceKeyspaces, "workflow = %v, ks1 = %v, ks2 = %v", workflow.Name, ks, stream.BinlogSource.Keyspace)
- }
-
- sourceKeyspaceByWorkflow[workflow.Name] = stream.BinlogSource.Keyspace
-
- if ks, ok := targetKeyspaceByWorkflow[workflow.Name]; ok && ks != tablet.Keyspace {
- return vterrors.Wrapf(ErrMultipleTargetKeyspaces, "workflow = %v, ks1 = %v, ks2 = %v", workflow.Name, ks, tablet.Keyspace)
- }
-
- targetKeyspaceByWorkflow[workflow.Name] = tablet.Keyspace
-
- if stream.TimeUpdated == nil {
- stream.TimeUpdated = &vttimepb.Time{}
- }
- timeUpdated := time.Unix(stream.TimeUpdated.Seconds, 0)
- vreplicationLag := time.Since(timeUpdated)
-
- // MaxVReplicationLag represents the time since we last processed any event
- // in the workflow.
- if currentMaxLag, ok := maxVReplicationLagByWorkflow[workflow.Name]; ok {
- if vreplicationLag.Seconds() > currentMaxLag {
- maxVReplicationLagByWorkflow[workflow.Name] = vreplicationLag.Seconds()
- }
- } else {
- maxVReplicationLagByWorkflow[workflow.Name] = vreplicationLag.Seconds()
- }
-
- workflow.WorkflowType = res.WorkflowType.String()
- workflow.WorkflowSubType = res.WorkflowSubType.String()
- workflow.DeferSecondaryKeys = res.DeferSecondaryKeys
-
- // MaxVReplicationTransactionLag estimates the actual statement processing lag
- // between the source and the target. If we are still processing source events it
- // is the difference b/w current time and the timestamp of the last event. If
- // heartbeats are more recent than the last event, then the lag is the time since
- // the last heartbeat as there can be an actual event immediately after the
- // heartbeat, but which has not yet been processed on the target.
- // We don't allow switching during the copy phase, so in that case we just return
- // a large lag. All timestamps are in seconds since epoch.
- if _, ok := maxVReplicationTransactionLagByWorkflow[workflow.Name]; !ok {
- maxVReplicationTransactionLagByWorkflow[workflow.Name] = 0
- }
- if rstream.TransactionTimestamp == nil {
- rstream.TransactionTimestamp = &vttimepb.Time{}
- }
- lastTransactionTime := rstream.TransactionTimestamp.Seconds
- if rstream.TimeHeartbeat == nil {
- rstream.TimeHeartbeat = &vttimepb.Time{}
- }
- lastHeartbeatTime := rstream.TimeHeartbeat.Seconds
- if stream.State == binlogdatapb.VReplicationWorkflowState_Copying.String() {
- maxVReplicationTransactionLagByWorkflow[workflow.Name] = math.MaxInt64
- } else {
- if lastTransactionTime == 0 /* no new events after copy */ ||
- lastHeartbeatTime > lastTransactionTime /* no recent transactions, so all caught up */ {
-
- lastTransactionTime = lastHeartbeatTime
- }
- now := time.Now().Unix() /* seconds since epoch */
- transactionReplicationLag := float64(now - lastTransactionTime)
- if transactionReplicationLag > maxVReplicationTransactionLagByWorkflow[workflow.Name] {
- maxVReplicationTransactionLagByWorkflow[workflow.Name] = transactionReplicationLag
- }
- }
- }
-
- return nil
- }
-
- for tablet, result := range results {
- // In the old implementation, we knew we had at most one (0 <= N <= 1)
- // workflow for each shard primary we queried. There might be multiple
- // rows (streams) comprising that workflow, so we would aggregate the
- // rows for a given primary into a single value ("the workflow",
- // ReplicationStatusResult in the old types).
- //
- // In this version, we have many (N >= 0) workflows for each shard
- // primary we queried, so we need to determine if each row corresponds
- // to a workflow we're already aggregating, or if it's a workflow we
- // haven't seen yet for that shard primary. We use the workflow name to
- // dedupe for this.
- for _, wfres := range result.Workflows {
- workflowName := wfres.Workflow
- workflow, ok := workflowsMap[workflowName]
- if !ok {
- workflow = &vtctldatapb.Workflow{
- Name: workflowName,
- ShardStreams: map[string]*vtctldatapb.Workflow_ShardStream{},
- }
-
- workflowsMap[workflowName] = workflow
- sourceShardsByWorkflow[workflowName] = sets.New[string]()
- targetShardsByWorkflow[workflowName] = sets.New[string]()
- }
-
- if err := scanWorkflow(ctx, workflow, wfres, tablet); err != nil {
- return nil, err
- }
- }
- }
-
- var (
- fetchLogsWG sync.WaitGroup
- vrepLogQuery = strings.TrimSpace(`
-SELECT
- id,
- vrepl_id,
- type,
- state,
- message,
- created_at,
- updated_at,
- count
-FROM
- _vt.vreplication_log
-WHERE vrepl_id IN %a
-ORDER BY
- vrepl_id ASC,
- id ASC
-`)
- )
-
- fetchStreamLogs := func(ctx context.Context, workflow *vtctldatapb.Workflow) {
- span, ctx := trace.NewSpan(ctx, "workflow.Server.fetchStreamLogs")
- defer span.Finish()
-
- span.Annotate("keyspace", req.Keyspace)
- span.Annotate("workflow", workflow.Name)
-
- vreplIDs := make([]int64, 0, len(workflow.ShardStreams))
- for _, shardStream := range maps.Values(workflow.ShardStreams) {
- for _, stream := range shardStream.Streams {
- vreplIDs = append(vreplIDs, stream.Id)
- }
- }
- idsBV, err := sqltypes.BuildBindVariable(vreplIDs)
- if err != nil {
- return
- }
-
- query, err := sqlparser.ParseAndBind(vrepLogQuery, idsBV)
- if err != nil {
- return
- }
-
- vx := vexec.NewVExec(req.Keyspace, workflow.Name, s.ts, s.tmc, s.SQLParser())
- results, err := vx.QueryContext(ctx, query)
- if err != nil {
- // Note that we do not return here. If there are any query results
- // in the map (i.e. some tablets returned successfully), we will
- // still try to read log rows from them on a best-effort basis. But,
- // we will also pre-emptively record the top-level fetch error on
- // every stream in every shard in the workflow. Further processing
- // below may override the error message for certain streams.
- for _, streams := range workflow.ShardStreams {
- for _, stream := range streams.Streams {
- stream.LogFetchError = err.Error()
- }
- }
- }
-
- for target, p3qr := range results {
- qr := sqltypes.Proto3ToResult(p3qr)
- shardStreamKey := fmt.Sprintf("%s/%s", target.Shard, target.AliasString())
-
- ss, ok := workflow.ShardStreams[shardStreamKey]
- if !ok || ss == nil {
- continue
- }
-
- streams := ss.Streams
- streamIdx := 0
- markErrors := func(err error) {
- if streamIdx >= len(streams) {
- return
- }
-
- streams[streamIdx].LogFetchError = err.Error()
- }
-
- for _, row := range qr.Rows {
- id, err := row[0].ToCastInt64()
- if err != nil {
- markErrors(err)
- continue
- }
-
- streamID, err := row[1].ToCastInt64()
- if err != nil {
- markErrors(err)
- continue
- }
-
- typ := row[2].ToString()
- state := row[3].ToString()
- message := row[4].ToString()
-
- createdAt, err := time.Parse("2006-01-02 15:04:05", row[5].ToString())
- if err != nil {
- markErrors(err)
- continue
- }
-
- updatedAt, err := time.Parse("2006-01-02 15:04:05", row[6].ToString())
- if err != nil {
- markErrors(err)
- continue
- }
-
- count, err := row[7].ToCastInt64()
- if err != nil {
- markErrors(err)
- continue
- }
-
- streamLog := &vtctldatapb.Workflow_Stream_Log{
- Id: id,
- StreamId: streamID,
- Type: typ,
- State: state,
- CreatedAt: &vttimepb.Time{
- Seconds: createdAt.Unix(),
- },
- UpdatedAt: &vttimepb.Time{
- Seconds: updatedAt.Unix(),
- },
- Message: message,
- Count: count,
- }
-
- // Earlier, in the main loop where we called scanWorkflow for
- // each _vt.vreplication row, we also sorted each ShardStreams
- // slice by ascending id, and our _vt.vreplication_log query
- // ordered by (stream_id ASC, id ASC), so we can walk the
- // streams in index order in O(n) amortized over all the rows
- // for this tablet.
- for streamIdx < len(streams) {
- stream := streams[streamIdx]
- if stream.Id < streamLog.StreamId {
- streamIdx++
- continue
- }
-
- if stream.Id > streamLog.StreamId {
- s.Logger().Warningf("Found stream log for nonexistent stream: %+v", streamLog)
- // This can happen on manual/failed workflow cleanup so move to the next log.
- break
- }
-
- // stream.Id == streamLog.StreamId
- stream.Logs = append(stream.Logs, streamLog)
- break
- }
- }
- }
- }
-
- workflows := make([]*vtctldatapb.Workflow, 0, len(workflowsMap))
-
- for name, workflow := range workflowsMap {
- sourceShards, ok := sourceShardsByWorkflow[name]
- if !ok {
- return nil, vterrors.Wrapf(ErrInvalidWorkflow, "%s has no source shards", name)
- }
-
- sourceKeyspace, ok := sourceKeyspaceByWorkflow[name]
- if !ok {
- return nil, vterrors.Wrapf(ErrInvalidWorkflow, "%s has no source keyspace", name)
- }
-
- targetShards, ok := targetShardsByWorkflow[name]
- if !ok {
- return nil, vterrors.Wrapf(ErrInvalidWorkflow, "%s has no target shards", name)
- }
-
- targetKeyspace, ok := targetKeyspaceByWorkflow[name]
- if !ok {
- return nil, vterrors.Wrapf(ErrInvalidWorkflow, "%s has no target keyspace", name)
- }
-
- maxVReplicationLag, ok := maxVReplicationLagByWorkflow[name]
- if !ok {
- return nil, vterrors.Wrapf(ErrInvalidWorkflow, "%s has no tracked vreplication lag", name)
- }
-
- maxVReplicationTransactionLag, ok := maxVReplicationTransactionLagByWorkflow[name]
- if !ok {
- return nil, vterrors.Wrapf(ErrInvalidWorkflow, "%s has no tracked vreplication transaction lag", name)
- }
-
- workflow.Source = &vtctldatapb.Workflow_ReplicationLocation{
- Keyspace: sourceKeyspace,
- Shards: sets.List(sourceShards),
- }
-
- workflow.Target = &vtctldatapb.Workflow_ReplicationLocation{
- Keyspace: targetKeyspace,
- Shards: sets.List(targetShards),
- }
-
- workflow.MaxVReplicationLag = int64(maxVReplicationLag)
- workflow.MaxVReplicationTransactionLag = int64(maxVReplicationTransactionLag)
-
- // Sort shard streams by stream_id ASC, to support an optimization
- // in fetchStreamLogs below.
- for _, shardStreams := range workflow.ShardStreams {
- sort.Slice(shardStreams.Streams, func(i, j int) bool {
- return shardStreams.Streams[i].Id < shardStreams.Streams[j].Id
- })
- }
-
- workflows = append(workflows, workflow)
-
- if req.IncludeLogs {
- // Fetch logs for all streams associated with this workflow in the background.
- fetchLogsWG.Add(1)
- go func(ctx context.Context, workflow *vtctldatapb.Workflow) {
- defer fetchLogsWG.Done()
- fetchStreamLogs(ctx, workflow)
- }(ctx, workflow)
- }
+ workflows, err := w.buildWorkflows(ctx, workflowsByShard, copyStatesByShardStreamId, req)
+ if err != nil {
+ return nil, err
}
- // Wait for all the log fetchers to finish.
- fetchLogsWG.Wait()
-
return &vtctldatapb.GetWorkflowsResponse{
Workflows: workflows,
}, nil
@@ -1080,51 +558,6 @@ func (s *Server) getWorkflowState(ctx context.Context, targetKeyspace, workflowN
return ts, state, nil
}
-func (s *Server) getWorkflowCopyStates(ctx context.Context, tablet *topo.TabletInfo, streamIds []int32) ([]*vtctldatapb.Workflow_Stream_CopyState, error) {
- span, ctx := trace.NewSpan(ctx, "workflow.Server.getWorkflowCopyStates")
- defer span.Finish()
-
- span.Annotate("keyspace", tablet.Keyspace)
- span.Annotate("shard", tablet.Shard)
- span.Annotate("tablet_alias", tablet.AliasString())
- span.Annotate("stream_ids", fmt.Sprintf("%#v", streamIds))
-
- idsBV, err := sqltypes.BuildBindVariable(streamIds)
- if err != nil {
- return nil, err
- }
- query, err := sqlparser.ParseAndBind("select vrepl_id, table_name, lastpk from _vt.copy_state where vrepl_id in %a and id in (select max(id) from _vt.copy_state where vrepl_id in %a group by vrepl_id, table_name)",
- idsBV, idsBV)
- if err != nil {
- return nil, err
- }
- qr, err := s.tmc.VReplicationExec(ctx, tablet.Tablet, query)
- if err != nil {
- return nil, err
- }
-
- result := sqltypes.Proto3ToResult(qr)
- if result == nil {
- return nil, nil
- }
-
- copyStates := make([]*vtctldatapb.Workflow_Stream_CopyState, len(result.Rows))
- for i, row := range result.Rows {
- streamId, err := row[0].ToInt64()
- if err != nil {
- return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "failed to cast vrepl_id to int64: %v", err)
- }
- // These string fields are technically varbinary, but this is close enough.
- copyStates[i] = &vtctldatapb.Workflow_Stream_CopyState{
- StreamId: streamId,
- Table: row[1].ToString(),
- LastPk: row[2].ToString(),
- }
- }
-
- return copyStates, nil
-}
-
// LookupVindexCreate creates the lookup vindex in the specified
// keyspace and creates a VReplication workflow to backfill that
// vindex from the keyspace to the target/lookup table specified.
@@ -1545,7 +978,7 @@ func (s *Server) moveTablesCreate(ctx context.Context, req *vtctldatapb.MoveTabl
}
}
if isStandardMoveTables() { // Non-standard ones do not use shard scoped mechanisms
- if err := s.setupInitialDeniedTables(ctx, ts); err != nil {
+ if err := setupInitialDeniedTables(ctx, ts); err != nil {
return nil, vterrors.Wrapf(err, "failed to put initial denied tables entries in place on the target shards")
}
}
@@ -1600,7 +1033,7 @@ func (s *Server) moveTablesCreate(ctx context.Context, req *vtctldatapb.MoveTabl
})
}
-func (s *Server) validateRoutingRuleFlags(req *vtctldatapb.MoveTablesCreateRequest, mz *materializer) error {
+func validateRoutingRuleFlags(req *vtctldatapb.MoveTablesCreateRequest, mz *materializer) error {
if mz.IsMultiTenantMigration() {
switch {
case req.NoRoutingRules:
@@ -1612,7 +1045,7 @@ func (s *Server) validateRoutingRuleFlags(req *vtctldatapb.MoveTablesCreateReque
return nil
}
-func (s *Server) setupInitialDeniedTables(ctx context.Context, ts *trafficSwitcher) error {
+func setupInitialDeniedTables(ctx context.Context, ts *trafficSwitcher) error {
if ts.MigrationType() != binlogdatapb.MigrationType_TABLES {
return nil
}
@@ -1630,7 +1063,7 @@ func (s *Server) setupInitialDeniedTables(ctx context.Context, ts *trafficSwitch
}
func (s *Server) setupInitialRoutingRules(ctx context.Context, req *vtctldatapb.MoveTablesCreateRequest, mz *materializer, tables []string) error {
- if err := s.validateRoutingRuleFlags(req, mz); err != nil {
+ if err := validateRoutingRuleFlags(req, mz); err != nil {
return err
}
@@ -4026,10 +3459,6 @@ func (s *Server) prepareCreateLookup(ctx context.Context, workflow, keyspace str
if len(vindexFromCols) != 1 {
return nil, nil, nil, nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "unique vindex 'from' should have only one column")
}
- } else {
- if len(vindexFromCols) < 2 {
- return nil, nil, nil, nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "non-unique vindex 'from' should have more than one column")
- }
}
vindexToCol = vindex.Params["to"]
// Make the vindex write_only. If one exists already in the vschema,
diff --git a/go/vt/vtctl/workflow/server_test.go b/go/vt/vtctl/workflow/server_test.go
index dbe06ab1a47..26d722f1de0 100644
--- a/go/vt/vtctl/workflow/server_test.go
+++ b/go/vt/vtctl/workflow/server_test.go
@@ -2470,7 +2470,7 @@ func TestGetWorkflowsStreamLogs(t *testing.T) {
}, sourceShards, targetShards)
logResult := sqltypes.MakeTestResult(
- sqltypes.MakeTestFields("id|vrepl_id|type|state|message|created_at|updated_at|`count`", "int64|int64|varchar|varchar|varchar|varchar|varchar|int64"),
+ sqltypes.MakeTestFields("id|vrepl_id|type|state|message|created_at|updated_at|count", "int64|int64|varchar|varchar|varchar|varchar|varchar|int64"),
"1|0|State Change|Running|test message for non-existent 1|2006-01-02 15:04:05|2006-01-02 15:04:05|1",
"2|0|State Change|Stopped|test message for non-existent 2|2006-01-02 15:04:06|2006-01-02 15:04:06|1",
"3|1|State Change|Running|log message|2006-01-02 15:04:07|2006-01-02 15:04:07|1",
@@ -2499,3 +2499,63 @@ func TestGetWorkflowsStreamLogs(t *testing.T) {
assert.Equal(t, gotLogs[0].State, "Running")
assert.Equal(t, gotLogs[0].Id, int64(3))
}
+
+func TestWorkflowStatus(t *testing.T) {
+ ctx := context.Background()
+
+ sourceKeyspace := "source_keyspace"
+ targetKeyspace := "target_keyspace"
+ workflow := "test_workflow"
+
+ sourceShards := []string{"-"}
+ targetShards := []string{"-"}
+
+ te := newTestMaterializerEnv(t, ctx, &vtctldatapb.MaterializeSettings{
+ SourceKeyspace: sourceKeyspace,
+ TargetKeyspace: targetKeyspace,
+ Workflow: workflow,
+ TableSettings: []*vtctldatapb.TableMaterializeSettings{
+ {
+ TargetTable: "table1",
+ SourceExpression: fmt.Sprintf("select * from %s", "table1"),
+ },
+ {
+ TargetTable: "table2",
+ SourceExpression: fmt.Sprintf("select * from %s", "table2"),
+ },
+ },
+ }, sourceShards, targetShards)
+
+ tablesResult := sqltypes.MakeTestResult(sqltypes.MakeTestFields("table_name", "varchar"), "table1", "table2")
+ te.tmc.expectVRQuery(200, "select distinct table_name from _vt.copy_state cs, _vt.vreplication vr where vr.id = cs.vrepl_id and vr.id = 1", tablesResult)
+
+ tablesTargetCopyResult := sqltypes.MakeTestResult(sqltypes.MakeTestFields("table_name|table_rows|data_length", "varchar|int64|int64"), "table1|50|500", "table2|100|250")
+ te.tmc.expectVRQuery(200, "select table_name, table_rows, data_length from information_schema.tables where table_schema = 'vt_target_keyspace' and table_name in ('table1','table2')", tablesTargetCopyResult)
+
+ tablesSourceCopyResult := sqltypes.MakeTestResult(sqltypes.MakeTestFields("table_name|table_rows|data_length", "varchar|int64|int64"), "table1|100|1000", "table2|200|500")
+ te.tmc.expectVRQuery(100, "select table_name, table_rows, data_length from information_schema.tables where table_schema = 'vt_source_keyspace' and table_name in ('table1','table2')", tablesSourceCopyResult)
+
+ te.tmc.expectVRQuery(200, "select vrepl_id, table_name, lastpk from _vt.copy_state where vrepl_id in (1) and id in (select max(id) from _vt.copy_state where vrepl_id in (1) group by vrepl_id, table_name)", &sqltypes.Result{})
+
+ res, err := te.ws.WorkflowStatus(ctx, &vtctldatapb.WorkflowStatusRequest{
+ Keyspace: targetKeyspace,
+ Workflow: workflow,
+ Shards: targetShards,
+ })
+
+ assert.NoError(t, err)
+
+ require.NotNil(t, res.TableCopyState)
+
+ stateTable1 := res.TableCopyState["table1"]
+ stateTable2 := res.TableCopyState["table2"]
+ require.NotNil(t, stateTable1)
+ require.NotNil(t, stateTable2)
+
+ assert.Equal(t, int64(100), stateTable1.RowsTotal)
+ assert.Equal(t, int64(200), stateTable2.RowsTotal)
+ assert.Equal(t, int64(50), stateTable1.RowsCopied)
+ assert.Equal(t, int64(100), stateTable2.RowsCopied)
+ assert.Equal(t, float32(50), stateTable1.RowsPercentage)
+ assert.Equal(t, float32(50), stateTable2.RowsPercentage)
+}
diff --git a/go/vt/vtctl/workflow/traffic_switcher.go b/go/vt/vtctl/workflow/traffic_switcher.go
index 937dffe70b3..4fc34992b0f 100644
--- a/go/vt/vtctl/workflow/traffic_switcher.go
+++ b/go/vt/vtctl/workflow/traffic_switcher.go
@@ -1135,30 +1135,45 @@ func (ts *trafficSwitcher) switchDeniedTables(ctx context.Context) error {
return nil
}
+// cancelMigration attempts to revert all changes made during the migration so that we can get back to the
+// state when traffic switching (or reversing) was initiated.
func (ts *trafficSwitcher) cancelMigration(ctx context.Context, sm *StreamMigrator) {
var err error
+
+ if ctx.Err() != nil {
+ // Even though we create a new context later on, we still record any context
+ // error for forensics in case of failures.
+ ts.Logger().Infof("In Cancel migration: original context invalid: %s", ctx.Err())
+ }
+
+ // We create a new context while canceling the migration, so that we are independent of the original
+ // context being canceled prior to or during the cancel operation.
+ cmTimeout := 60 * time.Second
+ cmCtx, cmCancel := context.WithTimeout(context.Background(), cmTimeout)
+ defer cmCancel()
+
if ts.MigrationType() == binlogdatapb.MigrationType_TABLES {
- err = ts.switchDeniedTables(ctx)
+ err = ts.switchDeniedTables(cmCtx)
} else {
- err = ts.changeShardsAccess(ctx, ts.SourceKeyspaceName(), ts.SourceShards(), allowWrites)
+ err = ts.changeShardsAccess(cmCtx, ts.SourceKeyspaceName(), ts.SourceShards(), allowWrites)
}
if err != nil {
- ts.Logger().Errorf("Cancel migration failed: %v", err)
+ ts.Logger().Errorf("Cancel migration failed: could not revert denied tables / shard access: %v", err)
}
- sm.CancelStreamMigrations(ctx)
+ sm.CancelStreamMigrations(cmCtx)
err = ts.ForAllTargets(func(target *MigrationTarget) error {
query := fmt.Sprintf("update _vt.vreplication set state='Running', message='' where db_name=%s and workflow=%s",
encodeString(target.GetPrimary().DbName()), encodeString(ts.WorkflowName()))
- _, err := ts.TabletManagerClient().VReplicationExec(ctx, target.GetPrimary().Tablet, query)
+ _, err := ts.TabletManagerClient().VReplicationExec(cmCtx, target.GetPrimary().Tablet, query)
return err
})
if err != nil {
ts.Logger().Errorf("Cancel migration failed: could not restart vreplication: %v", err)
}
- err = ts.deleteReverseVReplication(ctx)
+ err = ts.deleteReverseVReplication(cmCtx)
if err != nil {
ts.Logger().Errorf("Cancel migration failed: could not delete reverse vreplication streams: %v", err)
}
diff --git a/go/vt/vtctl/workflow/traffic_switcher_test.go b/go/vt/vtctl/workflow/traffic_switcher_test.go
index 325b405b6f0..b06c95b6c16 100644
--- a/go/vt/vtctl/workflow/traffic_switcher_test.go
+++ b/go/vt/vtctl/workflow/traffic_switcher_test.go
@@ -28,6 +28,7 @@ import (
"github.com/stretchr/testify/require"
"vitess.io/vitess/go/sqlescape"
+ "vitess.io/vitess/go/sqltypes"
"vitess.io/vitess/go/vt/mysqlctl/tmutils"
"vitess.io/vitess/go/vt/proto/vschema"
"vitess.io/vitess/go/vt/sqlparser"
@@ -36,6 +37,7 @@ import (
"vitess.io/vitess/go/vt/vttablet/tabletmanager/vreplication"
tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata"
+ topodatapb "vitess.io/vitess/go/vt/proto/topodata"
vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata"
)
@@ -649,3 +651,255 @@ func TestTrafficSwitchPositionHandling(t *testing.T) {
})
require.NoError(t, err)
}
+
+func TestInitializeTargetSequences(t *testing.T) {
+ ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
+ defer cancel()
+
+ workflowName := "wf1"
+ tableName := "t1"
+ sourceKeyspaceName := "sourceks"
+ targetKeyspaceName := "targetks"
+
+ schema := map[string]*tabletmanagerdatapb.SchemaDefinition{
+ tableName: {
+ TableDefinitions: []*tabletmanagerdatapb.TableDefinition{
+ {
+ Name: tableName,
+ Schema: fmt.Sprintf("CREATE TABLE %s (id BIGINT, name VARCHAR(64), PRIMARY KEY (id))", tableName),
+ },
+ },
+ },
+ }
+
+ sourceKeyspace := &testKeyspace{
+ KeyspaceName: sourceKeyspaceName,
+ ShardNames: []string{"0"},
+ }
+ targetKeyspace := &testKeyspace{
+ KeyspaceName: targetKeyspaceName,
+ ShardNames: []string{"0"},
+ }
+
+ env := newTestEnv(t, ctx, defaultCellName, sourceKeyspace, targetKeyspace)
+ defer env.close()
+ env.tmc.schema = schema
+
+ ts, _, err := env.ws.getWorkflowState(ctx, targetKeyspaceName, workflowName)
+ require.NoError(t, err)
+ sw := &switcher{ts: ts, s: env.ws}
+
+ sequencesByBackingTable := map[string]*sequenceMetadata{
+ "my-seq1": {
+ backingTableName: "my-seq1",
+ backingTableKeyspace: sourceKeyspaceName,
+ backingTableDBName: fmt.Sprintf("vt_%s", sourceKeyspaceName),
+ usingTableName: tableName,
+ usingTableDBName: "vt_targetks",
+ usingTableDefinition: &vschema.Table{
+ AutoIncrement: &vschema.AutoIncrement{
+ Column: "my-col",
+ Sequence: fmt.Sprintf("%s.my-seq1", sourceKeyspace.KeyspaceName),
+ },
+ },
+ },
+ }
+
+ env.tmc.expectVRQuery(200, "/select max.*", sqltypes.MakeTestResult(sqltypes.MakeTestFields("maxval", "int64"), "34"))
+ // Expect the insert query to be executed with 35 as a parameter, since we provided a maxID of 34 in the previous query
+ env.tmc.expectVRQuery(100, "/insert into.*35.*", &sqltypes.Result{RowsAffected: 1})
+
+ err = sw.initializeTargetSequences(ctx, sequencesByBackingTable)
+ assert.NoError(t, err)
+
+ // Expect the queries to be cleared
+ assert.Empty(t, env.tmc.vrQueries[100])
+ assert.Empty(t, env.tmc.vrQueries[200])
+}
+
+func TestAddTenantFilter(t *testing.T) {
+ ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
+ defer cancel()
+
+ workflowName := "wf1"
+ tableName := "t1"
+ sourceKeyspaceName := "sourceks"
+ targetKeyspaceName := "targetks"
+
+ sourceKeyspace := &testKeyspace{
+ KeyspaceName: sourceKeyspaceName,
+ ShardNames: []string{"0"},
+ }
+ targetKeyspace := &testKeyspace{
+ KeyspaceName: targetKeyspaceName,
+ ShardNames: []string{"0"},
+ }
+
+ schema := map[string]*tabletmanagerdatapb.SchemaDefinition{
+ tableName: {
+ TableDefinitions: []*tabletmanagerdatapb.TableDefinition{
+ {
+ Name: tableName,
+ Schema: fmt.Sprintf("CREATE TABLE %s (id BIGINT, name VARCHAR(64), PRIMARY KEY (id))", tableName),
+ },
+ },
+ },
+ }
+
+ env := newTestEnv(t, ctx, defaultCellName, sourceKeyspace, targetKeyspace)
+ defer env.close()
+ env.tmc.schema = schema
+
+ err := env.ts.SaveVSchema(ctx, targetKeyspaceName, &vschema.Keyspace{
+ MultiTenantSpec: &vschema.MultiTenantSpec{
+ TenantIdColumnName: "tenant_id",
+ TenantIdColumnType: sqltypes.Int64,
+ },
+ })
+ require.NoError(t, err)
+
+ ts, _, err := env.ws.getWorkflowState(ctx, targetKeyspaceName, workflowName)
+ require.NoError(t, err)
+
+ ts.options.TenantId = "123"
+
+ filter, err := ts.addTenantFilter(ctx, fmt.Sprintf("select * from %s where id < 5", tableName))
+ assert.NoError(t, err)
+ assert.Equal(t, "select * from t1 where tenant_id = 123 and id < 5", filter)
+}
+
+func TestChangeShardRouting(t *testing.T) {
+ ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
+ defer cancel()
+
+ workflowName := "wf1"
+ tableName := "t1"
+ sourceKeyspaceName := "sourceks"
+ targetKeyspaceName := "targetks"
+
+ sourceKeyspace := &testKeyspace{
+ KeyspaceName: sourceKeyspaceName,
+ ShardNames: []string{"0"},
+ }
+ targetKeyspace := &testKeyspace{
+ KeyspaceName: targetKeyspaceName,
+ ShardNames: []string{"0"},
+ }
+
+ schema := map[string]*tabletmanagerdatapb.SchemaDefinition{
+ tableName: {
+ TableDefinitions: []*tabletmanagerdatapb.TableDefinition{
+ {
+ Name: tableName,
+ Schema: fmt.Sprintf("CREATE TABLE %s (id BIGINT, name VARCHAR(64), PRIMARY KEY (id))", tableName),
+ },
+ },
+ },
+ }
+
+ env := newTestEnv(t, ctx, defaultCellName, sourceKeyspace, targetKeyspace)
+ defer env.close()
+ env.tmc.schema = schema
+
+ ts, _, err := env.ws.getWorkflowState(ctx, targetKeyspaceName, workflowName)
+ require.NoError(t, err)
+
+ err = env.ws.ts.UpdateSrvKeyspace(ctx, "cell", targetKeyspaceName, &topodatapb.SrvKeyspace{
+ Partitions: []*topodatapb.SrvKeyspace_KeyspacePartition{
+ {
+ ShardReferences: []*topodatapb.ShardReference{
+ {
+ Name: "0",
+ },
+ },
+ },
+ },
+ })
+ require.NoError(t, err)
+
+ err = env.ws.ts.UpdateSrvKeyspace(ctx, "cell", sourceKeyspaceName, &topodatapb.SrvKeyspace{
+ Partitions: []*topodatapb.SrvKeyspace_KeyspacePartition{
+ {
+ ShardReferences: []*topodatapb.ShardReference{
+ {
+ Name: "0",
+ },
+ },
+ },
+ },
+ })
+ require.NoError(t, err)
+
+ ctx, _, err = env.ws.ts.LockShard(ctx, targetKeyspaceName, "0", "targetks0")
+ require.NoError(t, err)
+
+ ctx, _, err = env.ws.ts.LockKeyspace(ctx, targetKeyspaceName, "targetks0")
+ require.NoError(t, err)
+
+ err = ts.changeShardRouting(ctx)
+ assert.NoError(t, err)
+
+ sourceShardInfo, err := env.ws.ts.GetShard(ctx, sourceKeyspaceName, "0")
+ assert.NoError(t, err)
+ assert.False(t, sourceShardInfo.IsPrimaryServing, "source shard shouldn't have it's primary serving after changeShardRouting() is called.")
+
+ targetShardInfo, err := env.ws.ts.GetShard(ctx, targetKeyspaceName, "0")
+ assert.NoError(t, err)
+ assert.True(t, targetShardInfo.IsPrimaryServing, "target shard should have it's primary serving after changeShardRouting() is called.")
+}
+
+func TestAddParticipatingTablesToKeyspace(t *testing.T) {
+ ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
+ defer cancel()
+
+ workflowName := "wf1"
+ tableName := "t1"
+ sourceKeyspaceName := "sourceks"
+ targetKeyspaceName := "targetks"
+
+ sourceKeyspace := &testKeyspace{
+ KeyspaceName: sourceKeyspaceName,
+ ShardNames: []string{"0"},
+ }
+ targetKeyspace := &testKeyspace{
+ KeyspaceName: targetKeyspaceName,
+ ShardNames: []string{"0"},
+ }
+
+ schema := map[string]*tabletmanagerdatapb.SchemaDefinition{
+ tableName: {
+ TableDefinitions: []*tabletmanagerdatapb.TableDefinition{
+ {
+ Name: tableName,
+ Schema: fmt.Sprintf("CREATE TABLE %s (id BIGINT, name VARCHAR(64), PRIMARY KEY (id))", tableName),
+ },
+ },
+ },
+ }
+
+ env := newTestEnv(t, ctx, defaultCellName, sourceKeyspace, targetKeyspace)
+ defer env.close()
+ env.tmc.schema = schema
+
+ ts, _, err := env.ws.getWorkflowState(ctx, targetKeyspaceName, workflowName)
+ require.NoError(t, err)
+
+ err = ts.addParticipatingTablesToKeyspace(ctx, sourceKeyspaceName, "")
+ assert.NoError(t, err)
+
+ vs, err := env.ts.GetVSchema(ctx, sourceKeyspaceName)
+ assert.NoError(t, err)
+ assert.NotNil(t, vs.Tables["t1"])
+ assert.Empty(t, vs.Tables["t1"])
+
+ specs := `{"t1":{"column_vindexes":[{"column":"col1","name":"v1"}, {"column":"col2","name":"v2"}]},"t2":{"column_vindexes":[{"column":"col2","name":"v2"}]}}`
+ err = ts.addParticipatingTablesToKeyspace(ctx, sourceKeyspaceName, specs)
+ assert.NoError(t, err)
+
+ vs, err = env.ts.GetVSchema(ctx, sourceKeyspaceName)
+ assert.NoError(t, err)
+ require.NotNil(t, vs.Tables["t1"])
+ require.NotNil(t, vs.Tables["t2"])
+ assert.Len(t, vs.Tables["t1"].ColumnVindexes, 2)
+ assert.Len(t, vs.Tables["t2"].ColumnVindexes, 1)
+}
diff --git a/go/vt/vtctl/workflow/workflows.go b/go/vt/vtctl/workflow/workflows.go
new file mode 100644
index 00000000000..da0ee5dfec7
--- /dev/null
+++ b/go/vt/vtctl/workflow/workflows.go
@@ -0,0 +1,672 @@
+/*
+Copyright 2024 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+/*
+This file provides functions for fetching and retrieving information about VReplication workflows
+
+At the moment it is used by the `GetWorkflows` function in `server.go` and includes functionality to
+get the following:
+- Fetch workflows by shard
+- Fetch copy states by shard stream
+- Build workflows with metadata
+- Fetch stream logs
+*/
+
+package workflow
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "math"
+ "sort"
+ "strings"
+ "sync"
+ "time"
+
+ "golang.org/x/exp/maps"
+ "golang.org/x/sync/errgroup"
+
+ "vitess.io/vitess/go/sets"
+ "vitess.io/vitess/go/sqltypes"
+ "vitess.io/vitess/go/trace"
+ "vitess.io/vitess/go/vt/binlog/binlogplayer"
+ "vitess.io/vitess/go/vt/logutil"
+ "vitess.io/vitess/go/vt/sqlparser"
+ "vitess.io/vitess/go/vt/topo"
+ "vitess.io/vitess/go/vt/topo/topoproto"
+ "vitess.io/vitess/go/vt/vtctl/workflow/common"
+ "vitess.io/vitess/go/vt/vtctl/workflow/vexec"
+ "vitess.io/vitess/go/vt/vterrors"
+ "vitess.io/vitess/go/vt/vttablet/tmclient"
+
+ binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata"
+ tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata"
+ vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata"
+ vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc"
+ vttimepb "vitess.io/vitess/go/vt/proto/vttime"
+)
+
+// workflowFetcher is responsible for fetching and retrieving information
+// about VReplication workflows.
+type workflowFetcher struct {
+ ts *topo.Server
+ tmc tmclient.TabletManagerClient
+
+ logger logutil.Logger
+ parser *sqlparser.Parser
+}
+
+type workflowMetadata struct {
+ sourceKeyspace string
+ sourceShards sets.Set[string]
+ targetKeyspace string
+ targetShards sets.Set[string]
+ maxVReplicationLag float64
+ maxVReplicationTransactionLag float64
+}
+
+var vrepLogQuery = strings.TrimSpace(`
+SELECT
+ id,
+ vrepl_id,
+ type,
+ state,
+ message,
+ created_at,
+ updated_at,
+ count
+FROM
+ _vt.vreplication_log
+WHERE vrepl_id IN %a
+ORDER BY
+ vrepl_id ASC,
+ id ASC
+`)
+
+func (wf *workflowFetcher) fetchWorkflowsByShard(
+ ctx context.Context,
+ req *vtctldatapb.GetWorkflowsRequest,
+) (map[*topo.TabletInfo]*tabletmanagerdatapb.ReadVReplicationWorkflowsResponse, error) {
+ readReq := &tabletmanagerdatapb.ReadVReplicationWorkflowsRequest{}
+ if req.Workflow != "" {
+ readReq.IncludeWorkflows = []string{req.Workflow}
+ }
+ if req.ActiveOnly {
+ readReq.ExcludeStates = []binlogdatapb.VReplicationWorkflowState{binlogdatapb.VReplicationWorkflowState_Stopped}
+ }
+
+ m := sync.Mutex{}
+
+ shards, err := common.GetShards(ctx, wf.ts, req.Keyspace, req.Shards)
+ if err != nil {
+ return nil, err
+ }
+
+ results := make(map[*topo.TabletInfo]*tabletmanagerdatapb.ReadVReplicationWorkflowsResponse, len(shards))
+
+ err = wf.forAllShards(ctx, req.Keyspace, shards, func(ctx context.Context, si *topo.ShardInfo) error {
+ primary, err := wf.ts.GetTablet(ctx, si.PrimaryAlias)
+ if err != nil {
+ return err
+ }
+ if primary == nil {
+ return fmt.Errorf("%w %s/%s: tablet %v not found", vexec.ErrNoShardPrimary, req.Keyspace, si.ShardName(), topoproto.TabletAliasString(si.PrimaryAlias))
+ }
+ // Clone the request so that we can set the correct DB name for tablet.
+ req := readReq.CloneVT()
+ wres, err := wf.tmc.ReadVReplicationWorkflows(ctx, primary.Tablet, req)
+ if err != nil {
+ return err
+ }
+ m.Lock()
+ defer m.Unlock()
+ results[primary] = wres
+ return nil
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ return results, nil
+}
+
+func (wf *workflowFetcher) fetchCopyStatesByShardStream(
+ ctx context.Context,
+ workflowsByShard map[*topo.TabletInfo]*tabletmanagerdatapb.ReadVReplicationWorkflowsResponse,
+) (map[string][]*vtctldatapb.Workflow_Stream_CopyState, error) {
+ m := sync.Mutex{}
+
+ copyStatesByShardStreamId := make(map[string][]*vtctldatapb.Workflow_Stream_CopyState, len(workflowsByShard))
+
+ fetchCopyStates := func(ctx context.Context, tablet *topo.TabletInfo, streamIds []int32) error {
+ span, ctx := trace.NewSpan(ctx, "workflowFetcher.workflow.fetchCopyStates")
+ defer span.Finish()
+
+ span.Annotate("shard", tablet.Shard)
+ span.Annotate("tablet_alias", tablet.AliasString())
+
+ copyStates, err := wf.getWorkflowCopyStates(ctx, tablet, streamIds)
+ if err != nil {
+ return err
+ }
+
+ m.Lock()
+ defer m.Unlock()
+
+ for _, copyState := range copyStates {
+ shardStreamId := fmt.Sprintf("%s/%d", tablet.Shard, copyState.StreamId)
+ copyStatesByShardStreamId[shardStreamId] = append(
+ copyStatesByShardStreamId[shardStreamId],
+ copyState,
+ )
+ }
+
+ return nil
+ }
+
+ fetchCopyStatesEg, fetchCopyStatesCtx := errgroup.WithContext(ctx)
+ for tablet, result := range workflowsByShard {
+ streamIds := make([]int32, 0, len(result.Workflows))
+ for _, wf := range result.Workflows {
+ for _, stream := range wf.Streams {
+ streamIds = append(streamIds, stream.Id)
+ }
+ }
+
+ if len(streamIds) == 0 {
+ continue
+ }
+
+ fetchCopyStatesEg.Go(func() error {
+ return fetchCopyStates(fetchCopyStatesCtx, tablet, streamIds)
+ })
+ }
+ if err := fetchCopyStatesEg.Wait(); err != nil {
+ return nil, err
+ }
+
+ return copyStatesByShardStreamId, nil
+}
+
+func (wf *workflowFetcher) getWorkflowCopyStates(ctx context.Context, tablet *topo.TabletInfo, streamIds []int32) ([]*vtctldatapb.Workflow_Stream_CopyState, error) {
+ span, ctx := trace.NewSpan(ctx, "workflowFetcher.workflow.getWorkflowCopyStates")
+ defer span.Finish()
+
+ span.Annotate("keyspace", tablet.Keyspace)
+ span.Annotate("shard", tablet.Shard)
+ span.Annotate("tablet_alias", tablet.AliasString())
+ span.Annotate("stream_ids", fmt.Sprintf("%#v", streamIds))
+
+ idsBV, err := sqltypes.BuildBindVariable(streamIds)
+ if err != nil {
+ return nil, err
+ }
+ query, err := sqlparser.ParseAndBind("select vrepl_id, table_name, lastpk from _vt.copy_state where vrepl_id in %a and id in (select max(id) from _vt.copy_state where vrepl_id in %a group by vrepl_id, table_name)",
+ idsBV, idsBV)
+ if err != nil {
+ return nil, err
+ }
+ qr, err := wf.tmc.VReplicationExec(ctx, tablet.Tablet, query)
+ if err != nil {
+ return nil, err
+ }
+
+ result := sqltypes.Proto3ToResult(qr)
+ if result == nil {
+ return nil, nil
+ }
+
+ copyStates := make([]*vtctldatapb.Workflow_Stream_CopyState, len(result.Rows))
+ for i, row := range result.Named().Rows {
+ streamId, err := row["vrepl_id"].ToInt64()
+ if err != nil {
+ return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "failed to cast vrepl_id to int64: %v", err)
+ }
+ // These string fields are technically varbinary, but this is close enough.
+ copyStates[i] = &vtctldatapb.Workflow_Stream_CopyState{
+ StreamId: streamId,
+ Table: row["table_name"].ToString(),
+ LastPk: row["lastpk"].ToString(),
+ }
+ }
+
+ return copyStates, nil
+}
+
+func (wf *workflowFetcher) buildWorkflows(
+ ctx context.Context,
+ results map[*topo.TabletInfo]*tabletmanagerdatapb.ReadVReplicationWorkflowsResponse,
+ copyStatesByShardStreamId map[string][]*vtctldatapb.Workflow_Stream_CopyState,
+ req *vtctldatapb.GetWorkflowsRequest,
+) ([]*vtctldatapb.Workflow, error) {
+ workflowsMap := make(map[string]*vtctldatapb.Workflow, len(results))
+ workflowMetadataMap := make(map[string]*workflowMetadata, len(results))
+
+ for tablet, result := range results {
+ // In the old implementation, we knew we had at most one (0 <= N <= 1)
+ // workflow for each shard primary we queried. There might be multiple
+ // rows (streams) comprising that workflow, so we would aggregate the
+ // rows for a given primary into a single value ("the workflow",
+ // ReplicationStatusResult in the old types).
+ //
+ // In this version, we have many (N >= 0) workflows for each shard
+ // primary we queried, so we need to determine if each row corresponds
+ // to a workflow we're already aggregating, or if it's a workflow we
+ // haven't seen yet for that shard primary. We use the workflow name to
+ // dedupe for this.
+ for _, wfres := range result.Workflows {
+ workflowName := wfres.Workflow
+ workflow, ok := workflowsMap[workflowName]
+ if !ok {
+ workflow = &vtctldatapb.Workflow{
+ Name: workflowName,
+ ShardStreams: map[string]*vtctldatapb.Workflow_ShardStream{},
+ }
+
+ workflowsMap[workflowName] = workflow
+ workflowMetadataMap[workflowName] = &workflowMetadata{
+ sourceShards: sets.New[string](),
+ targetShards: sets.New[string](),
+ }
+ }
+
+ metadata := workflowMetadataMap[workflowName]
+ err := wf.scanWorkflow(ctx, workflow, wfres, tablet, metadata, copyStatesByShardStreamId, req.Keyspace)
+ if err != nil {
+ return nil, err
+ }
+ }
+ }
+
+ for name, workflow := range workflowsMap {
+ meta := workflowMetadataMap[name]
+ updateWorkflowWithMetadata(workflow, meta)
+
+ // Sort shard streams by stream_id ASC, to support an optimization
+ // in fetchStreamLogs below.
+ for _, shardStreams := range workflow.ShardStreams {
+ sort.Slice(shardStreams.Streams, func(i, j int) bool {
+ return shardStreams.Streams[i].Id < shardStreams.Streams[j].Id
+ })
+ }
+ }
+
+ if req.IncludeLogs {
+ var fetchLogsWG sync.WaitGroup
+
+ for _, workflow := range workflowsMap {
+ // Fetch logs for all streams associated with this workflow in the background.
+ fetchLogsWG.Add(1)
+ go func(ctx context.Context, workflow *vtctldatapb.Workflow) {
+ defer fetchLogsWG.Done()
+ wf.fetchStreamLogs(ctx, req.Keyspace, workflow)
+ }(ctx, workflow)
+ }
+
+ // Wait for all the log fetchers to finish.
+ fetchLogsWG.Wait()
+ }
+
+ return maps.Values(workflowsMap), nil
+}
+
+func (wf *workflowFetcher) scanWorkflow(
+ ctx context.Context,
+ workflow *vtctldatapb.Workflow,
+ res *tabletmanagerdatapb.ReadVReplicationWorkflowResponse,
+ tablet *topo.TabletInfo,
+ meta *workflowMetadata,
+ copyStatesByShardStreamId map[string][]*vtctldatapb.Workflow_Stream_CopyState,
+ keyspace string,
+) error {
+ shardStreamKey := fmt.Sprintf("%s/%s", tablet.Shard, tablet.AliasString())
+ shardStream, ok := workflow.ShardStreams[shardStreamKey]
+ if !ok {
+ ctx, cancel := context.WithTimeout(ctx, topo.RemoteOperationTimeout)
+ defer cancel()
+
+ si, err := wf.ts.GetShard(ctx, keyspace, tablet.Shard)
+ if err != nil {
+ return err
+ }
+
+ shardStream = &vtctldatapb.Workflow_ShardStream{
+ Streams: nil,
+ TabletControls: si.TabletControls,
+ IsPrimaryServing: si.IsPrimaryServing,
+ }
+
+ workflow.ShardStreams[shardStreamKey] = shardStream
+ }
+
+ for _, rstream := range res.Streams {
+ // The value in the pos column can be compressed and thus not
+ // have a valid GTID consisting of valid UTF-8 characters so we
+ // have to decode it so that it's properly decompressed first
+ // when needed.
+ pos := rstream.Pos
+ if pos != "" {
+ mpos, err := binlogplayer.DecodePosition(pos)
+ if err != nil {
+ return err
+ }
+ pos = mpos.String()
+ }
+
+ cells := strings.Split(res.Cells, ",")
+ for i := range cells {
+ cells[i] = strings.TrimSpace(cells[i])
+ }
+ options := res.Options
+ if options != "" {
+ if err := json.Unmarshal([]byte(options), &workflow.Options); err != nil {
+ return err
+ }
+ }
+
+ stream := &vtctldatapb.Workflow_Stream{
+ Id: int64(rstream.Id),
+ Shard: tablet.Shard,
+ Tablet: tablet.Alias,
+ BinlogSource: rstream.Bls,
+ Position: pos,
+ StopPosition: rstream.StopPos,
+ State: rstream.State.String(),
+ DbName: tablet.DbName(),
+ TabletTypes: res.TabletTypes,
+ TabletSelectionPreference: res.TabletSelectionPreference,
+ Cells: cells,
+ TransactionTimestamp: rstream.TransactionTimestamp,
+ TimeUpdated: rstream.TimeUpdated,
+ Message: rstream.Message,
+ Tags: strings.Split(res.Tags, ","),
+ RowsCopied: rstream.RowsCopied,
+ ThrottlerStatus: &vtctldatapb.Workflow_Stream_ThrottlerStatus{
+ ComponentThrottled: rstream.ComponentThrottled,
+ TimeThrottled: rstream.TimeThrottled,
+ },
+ }
+
+ // Merge in copy states, which we've already fetched.
+ shardStreamId := fmt.Sprintf("%s/%d", tablet.Shard, stream.Id)
+ if copyStates, ok := copyStatesByShardStreamId[shardStreamId]; ok {
+ stream.CopyStates = copyStates
+ }
+
+ if rstream.TimeUpdated == nil {
+ rstream.TimeUpdated = &vttimepb.Time{}
+ }
+
+ stream.State = getStreamState(stream, rstream)
+
+ shardStream.Streams = append(shardStream.Streams, stream)
+
+ meta.sourceShards.Insert(stream.BinlogSource.Shard)
+ meta.targetShards.Insert(tablet.Shard)
+
+ if meta.sourceKeyspace != "" && meta.sourceKeyspace != stream.BinlogSource.Keyspace {
+ return vterrors.Wrapf(ErrMultipleSourceKeyspaces, "workflow = %v, ks1 = %v, ks2 = %v", workflow.Name, meta.sourceKeyspace, stream.BinlogSource.Keyspace)
+ }
+
+ meta.sourceKeyspace = stream.BinlogSource.Keyspace
+
+ if meta.targetKeyspace != "" && meta.targetKeyspace != tablet.Keyspace {
+ return vterrors.Wrapf(ErrMultipleTargetKeyspaces, "workflow = %v, ks1 = %v, ks2 = %v", workflow.Name, meta.targetKeyspace, tablet.Keyspace)
+ }
+
+ meta.targetKeyspace = tablet.Keyspace
+
+ if stream.TimeUpdated == nil {
+ stream.TimeUpdated = &vttimepb.Time{}
+ }
+ timeUpdated := time.Unix(stream.TimeUpdated.Seconds, 0)
+ vreplicationLag := time.Since(timeUpdated)
+
+ // MaxVReplicationLag represents the time since we last processed any event
+ // in the workflow.
+ if vreplicationLag.Seconds() > meta.maxVReplicationLag {
+ meta.maxVReplicationLag = vreplicationLag.Seconds()
+ }
+
+ workflow.WorkflowType = res.WorkflowType.String()
+ workflow.WorkflowSubType = res.WorkflowSubType.String()
+ workflow.DeferSecondaryKeys = res.DeferSecondaryKeys
+
+ // MaxVReplicationTransactionLag estimates the actual statement processing lag
+ // between the source and the target. If we are still processing source events it
+ // is the difference b/w current time and the timestamp of the last event. If
+ // heartbeats are more recent than the last event, then the lag is the time since
+ // the last heartbeat as there can be an actual event immediately after the
+ // heartbeat, but which has not yet been processed on the target.
+ // We don't allow switching during the copy phase, so in that case we just return
+ // a large lag. All timestamps are in seconds since epoch.
+ if rstream.TransactionTimestamp == nil {
+ rstream.TransactionTimestamp = &vttimepb.Time{}
+ }
+ lastTransactionTime := rstream.TransactionTimestamp.Seconds
+ if rstream.TimeHeartbeat == nil {
+ rstream.TimeHeartbeat = &vttimepb.Time{}
+ }
+ lastHeartbeatTime := rstream.TimeHeartbeat.Seconds
+ if stream.State == binlogdatapb.VReplicationWorkflowState_Copying.String() {
+ meta.maxVReplicationTransactionLag = math.MaxInt64
+ } else {
+ if lastTransactionTime == 0 /* no new events after copy */ ||
+ lastHeartbeatTime > lastTransactionTime /* no recent transactions, so all caught up */ {
+
+ lastTransactionTime = lastHeartbeatTime
+ }
+ now := time.Now().Unix() /* seconds since epoch */
+ transactionReplicationLag := float64(now - lastTransactionTime)
+ if transactionReplicationLag > meta.maxVReplicationTransactionLag {
+ meta.maxVReplicationTransactionLag = transactionReplicationLag
+ }
+ }
+ }
+
+ return nil
+}
+
+func updateWorkflowWithMetadata(workflow *vtctldatapb.Workflow, meta *workflowMetadata) {
+ workflow.Source = &vtctldatapb.Workflow_ReplicationLocation{
+ Keyspace: meta.sourceKeyspace,
+ Shards: sets.List(meta.sourceShards),
+ }
+
+ workflow.Target = &vtctldatapb.Workflow_ReplicationLocation{
+ Keyspace: meta.targetKeyspace,
+ Shards: sets.List(meta.targetShards),
+ }
+
+ workflow.MaxVReplicationLag = int64(meta.maxVReplicationLag)
+ workflow.MaxVReplicationTransactionLag = int64(meta.maxVReplicationTransactionLag)
+}
+
+func (wf *workflowFetcher) fetchStreamLogs(ctx context.Context, keyspace string, workflow *vtctldatapb.Workflow) {
+ span, ctx := trace.NewSpan(ctx, "workflowFetcher.workflow.fetchStreamLogs")
+ defer span.Finish()
+
+ span.Annotate("keyspace", keyspace)
+ span.Annotate("workflow", workflow.Name)
+
+ vreplIDs := make([]int64, 0, len(workflow.ShardStreams))
+ for _, shardStream := range maps.Values(workflow.ShardStreams) {
+ for _, stream := range shardStream.Streams {
+ vreplIDs = append(vreplIDs, stream.Id)
+ }
+ }
+ idsBV, err := sqltypes.BuildBindVariable(vreplIDs)
+ if err != nil {
+ return
+ }
+
+ query, err := sqlparser.ParseAndBind(vrepLogQuery, idsBV)
+ if err != nil {
+ return
+ }
+
+ vx := vexec.NewVExec(keyspace, workflow.Name, wf.ts, wf.tmc, wf.parser)
+ results, err := vx.QueryContext(ctx, query)
+ if err != nil {
+ // Note that we do not return here. If there are any query results
+ // in the map (i.e. some tablets returned successfully), we will
+ // still try to read log rows from them on a best-effort basis. But,
+ // we will also pre-emptively record the top-level fetch error on
+ // every stream in every shard in the workflow. Further processing
+ // below may override the error message for certain streams.
+ for _, streams := range workflow.ShardStreams {
+ for _, stream := range streams.Streams {
+ stream.LogFetchError = err.Error()
+ }
+ }
+ }
+
+ for target, p3qr := range results {
+ qr := sqltypes.Proto3ToResult(p3qr)
+ shardStreamKey := fmt.Sprintf("%s/%s", target.Shard, target.AliasString())
+
+ ss, ok := workflow.ShardStreams[shardStreamKey]
+ if !ok || ss == nil {
+ continue
+ }
+
+ streams := ss.Streams
+ streamIdx := 0
+ markErrors := func(err error) {
+ if streamIdx >= len(streams) {
+ return
+ }
+
+ streams[streamIdx].LogFetchError = err.Error()
+ }
+
+ for _, row := range qr.Named().Rows {
+ id, err := row["id"].ToCastInt64()
+ if err != nil {
+ markErrors(err)
+ continue
+ }
+
+ streamID, err := row["vrepl_id"].ToCastInt64()
+ if err != nil {
+ markErrors(err)
+ continue
+ }
+
+ typ := row["type"].ToString()
+ state := row["state"].ToString()
+ message := row["message"].ToString()
+
+ createdAt, err := time.Parse("2006-01-02 15:04:05", row["created_at"].ToString())
+ if err != nil {
+ markErrors(err)
+ continue
+ }
+
+ updatedAt, err := time.Parse("2006-01-02 15:04:05", row["updated_at"].ToString())
+ if err != nil {
+ markErrors(err)
+ continue
+ }
+
+ count, err := row["count"].ToCastInt64()
+ if err != nil {
+ markErrors(err)
+ continue
+ }
+
+ streamLog := &vtctldatapb.Workflow_Stream_Log{
+ Id: id,
+ StreamId: streamID,
+ Type: typ,
+ State: state,
+ CreatedAt: &vttimepb.Time{
+ Seconds: createdAt.Unix(),
+ },
+ UpdatedAt: &vttimepb.Time{
+ Seconds: updatedAt.Unix(),
+ },
+ Message: message,
+ Count: count,
+ }
+
+ // Earlier, in buildWorkflows, we sorted each ShardStreams
+ // slice by ascending id, and our _vt.vreplication_log query
+ // ordered by (stream_id ASC, id ASC), so we can walk the
+ // streams in index order in O(n) amortized over all the rows
+ // for this tablet.
+ for streamIdx < len(streams) {
+ stream := streams[streamIdx]
+ if stream.Id < streamLog.StreamId {
+ streamIdx++
+ continue
+ }
+
+ if stream.Id > streamLog.StreamId {
+ wf.logger.Warningf("Found stream log for nonexistent stream: %+v", streamLog)
+ // This can happen on manual/failed workflow cleanup so move to the next log.
+ break
+ }
+
+ // stream.Id == streamLog.StreamId
+ stream.Logs = append(stream.Logs, streamLog)
+ break
+ }
+ }
+ }
+}
+
+func (wf *workflowFetcher) forAllShards(
+ ctx context.Context,
+ keyspace string,
+ shards []string,
+ f func(ctx context.Context, shard *topo.ShardInfo) error,
+) error {
+ eg, egCtx := errgroup.WithContext(ctx)
+ for _, shard := range shards {
+ eg.Go(func() error {
+ si, err := wf.ts.GetShard(ctx, keyspace, shard)
+ if err != nil {
+ return err
+ }
+ if si.PrimaryAlias == nil {
+ return fmt.Errorf("%w %s/%s", vexec.ErrNoShardPrimary, keyspace, shard)
+ }
+
+ if err := f(egCtx, si); err != nil {
+ return err
+ }
+ return nil
+ })
+ }
+ if err := eg.Wait(); err != nil {
+ return err
+ }
+ return nil
+}
+
+func getStreamState(stream *vtctldatapb.Workflow_Stream, rstream *tabletmanagerdatapb.ReadVReplicationWorkflowResponse_Stream) string {
+	switch {
+	case strings.Contains(strings.ToLower(stream.Message), "error"):
+		return binlogdatapb.VReplicationWorkflowState_Error.String()
+	case stream.State == binlogdatapb.VReplicationWorkflowState_Running.String() && len(stream.CopyStates) > 0:
+		return binlogdatapb.VReplicationWorkflowState_Copying.String()
+	case stream.State == binlogdatapb.VReplicationWorkflowState_Running.String() && time.Now().Unix()-rstream.TimeUpdated.Seconds > 10: // TimeUpdated is epoch seconds; compare against epoch now, not Second() (0-59)
+		return binlogdatapb.VReplicationWorkflowState_Lagging.String()
+	}
+	return rstream.State.String()
+}
diff --git a/go/vt/vtctl/workflow/workflows_test.go b/go/vt/vtctl/workflow/workflows_test.go
new file mode 100644
index 00000000000..2015c8d1b7c
--- /dev/null
+++ b/go/vt/vtctl/workflow/workflows_test.go
@@ -0,0 +1,260 @@
+/*
+Copyright 2024 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package workflow
+
+import (
+ "context"
+ "fmt"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
+ "vitess.io/vitess/go/sqltypes"
+ "vitess.io/vitess/go/vt/proto/binlogdata"
+ "vitess.io/vitess/go/vt/proto/vttime"
+ "vitess.io/vitess/go/vt/topo"
+
+ tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata"
+ topodatapb "vitess.io/vitess/go/vt/proto/topodata"
+ vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata"
+)
+
+func TestGetStreamState(t *testing.T) {
+	testCases := []struct {
+		name    string
+		stream  *vtctldatapb.Workflow_Stream
+		rstream *tabletmanagerdatapb.ReadVReplicationWorkflowResponse_Stream
+		want    string
+	}{
+		{
+			name: "error state",
+			stream: &vtctldatapb.Workflow_Stream{
+				Message: "test error",
+			},
+			want: "Error",
+		},
+		{
+			name: "copying state",
+			stream: &vtctldatapb.Workflow_Stream{
+				State: "Running",
+				CopyStates: []*vtctldatapb.Workflow_Stream_CopyState{
+					{
+						Table: "table1",
+					},
+				},
+			},
+			want: "Copying",
+		},
+		{
+			name: "lagging state",
+			stream: &vtctldatapb.Workflow_Stream{
+				State: "Running",
+			},
+			rstream: &tabletmanagerdatapb.ReadVReplicationWorkflowResponse_Stream{
+				TimeUpdated: &vttime.Time{
+					Seconds: time.Now().Unix() - 11, // 11s stale in epoch seconds, just over the 10s lag threshold
+				},
+			},
+			want: "Lagging",
+		},
+		{
+			name: "non-running and error free",
+			stream: &vtctldatapb.Workflow_Stream{
+				State: "Stopped",
+			},
+			rstream: &tabletmanagerdatapb.ReadVReplicationWorkflowResponse_Stream{
+				State: binlogdata.VReplicationWorkflowState_Stopped,
+			},
+			want: "Stopped",
+		},
+	}
+
+	for _, tt := range testCases {
+		t.Run(tt.name, func(t *testing.T) {
+			state := getStreamState(tt.stream, tt.rstream)
+			assert.Equal(t, tt.want, state)
+		})
+	}
+}
+
+func TestGetWorkflowCopyStates(t *testing.T) {
+	ctx := context.Background()
+
+	sourceShards := []string{"-"}
+	targetShards := []string{"-"}
+
+	te := newTestMaterializerEnv(t, ctx, &vtctldatapb.MaterializeSettings{
+		SourceKeyspace: "source_keyspace",
+		TargetKeyspace: "target_keyspace",
+		Workflow:       "test_workflow",
+		TableSettings: []*vtctldatapb.TableMaterializeSettings{
+			{
+				TargetTable:      "table1",
+				SourceExpression: fmt.Sprintf("select * from %s", "table1"),
+			},
+			{
+				TargetTable:      "table2",
+				SourceExpression: fmt.Sprintf("select * from %s", "table2"),
+			},
+		},
+	}, sourceShards, targetShards)
+
+	wf := workflowFetcher{
+		ts:  te.ws.ts,
+		tmc: te.tmc,
+	}
+
+	tablet := &topodatapb.Tablet{
+		Alias: &topodatapb.TabletAlias{
+			Cell: "zone1",
+			Uid:  100,
+		},
+	}
+
+	query := "select vrepl_id, table_name, lastpk from _vt.copy_state where vrepl_id in (1) and id in (select max(id) from _vt.copy_state where vrepl_id in (1) group by vrepl_id, table_name)" // expect one latest copy_state row per (stream, table) for stream id 1
+	te.tmc.expectVRQuery(100, query, sqltypes.MakeTestResult(
+		sqltypes.MakeTestFields("vrepl_id|table_name|lastpk", "int64|varchar|varchar"),
+		"1|table1|2", "1|table2|1",
+	))
+
+	copyStates, err := wf.getWorkflowCopyStates(ctx, &topo.TabletInfo{
+		Tablet: tablet,
+	}, []int32{1})
+	assert.NoError(t, err)
+	assert.Len(t, copyStates, 2) // one CopyState entry per table still being copied
+
+	state1 := &vtctldatapb.Workflow_Stream_CopyState{
+		Table:    "table1",
+		LastPk:   "2",
+		StreamId: 1,
+	}
+	state2 := &vtctldatapb.Workflow_Stream_CopyState{
+		Table:    "table2",
+		LastPk:   "1",
+		StreamId: 1,
+	}
+	assert.Contains(t, copyStates, state1)
+	assert.Contains(t, copyStates, state2)
+}
+
+func TestFetchCopyStatesByShardStream(t *testing.T) {
+	ctx := context.Background()
+
+	sourceShards := []string{"-"}
+	targetShards := []string{"-"}
+
+	te := newTestMaterializerEnv(t, ctx, &vtctldatapb.MaterializeSettings{
+		SourceKeyspace: "source_keyspace",
+		TargetKeyspace: "target_keyspace",
+		Workflow:       "test_workflow",
+		TableSettings: []*vtctldatapb.TableMaterializeSettings{
+			{
+				TargetTable:      "table1",
+				SourceExpression: fmt.Sprintf("select * from %s", "table1"),
+			},
+			{
+				TargetTable:      "table2",
+				SourceExpression: fmt.Sprintf("select * from %s", "table2"),
+			},
+		},
+	}, sourceShards, targetShards)
+
+	wf := workflowFetcher{
+		ts:  te.ws.ts,
+		tmc: te.tmc,
+	}
+
+	tablet := &topodatapb.Tablet{
+		Shard: "-80",
+		Alias: &topodatapb.TabletAlias{
+			Cell: "zone1",
+			Uid:  100,
+		},
+	}
+	tablet2 := &topodatapb.Tablet{
+		Shard: "80-",
+		Alias: &topodatapb.TabletAlias{
+			Cell: "zone1",
+			Uid:  101,
+		},
+	}
+
+	query := "select vrepl_id, table_name, lastpk from _vt.copy_state where vrepl_id in (1, 2) and id in (select max(id) from _vt.copy_state where vrepl_id in (1, 2) group by vrepl_id, table_name)" // both tablets report streams 1 and 2, so the same query goes to each
+	te.tmc.expectVRQuery(100, query, sqltypes.MakeTestResult(
+		sqltypes.MakeTestFields("vrepl_id|table_name|lastpk", "int64|varchar|varchar"),
+		"1|table1|2", "2|table2|1", "2|table1|1",
+	))
+
+	te.tmc.expectVRQuery(101, query, sqltypes.MakeTestResult(
+		sqltypes.MakeTestFields("vrepl_id|table_name|lastpk", "int64|varchar|varchar"),
+		"1|table1|2", "1|table2|1",
+	))
+
+	ti := &topo.TabletInfo{
+		Tablet: tablet,
+	}
+	ti2 := &topo.TabletInfo{
+		Tablet: tablet2,
+	}
+
+	readVReplicationResponse := map[*topo.TabletInfo]*tabletmanagerdatapb.ReadVReplicationWorkflowsResponse{
+		ti: {
+			Workflows: []*tabletmanagerdatapb.ReadVReplicationWorkflowResponse{
+				{
+					Streams: []*tabletmanagerdatapb.ReadVReplicationWorkflowResponse_Stream{
+						{
+							Id: 1,
+						}, {
+							Id: 2,
+						},
+					},
+				},
+			},
+		},
+		ti2: {
+			Workflows: []*tabletmanagerdatapb.ReadVReplicationWorkflowResponse{
+				{
+					Streams: []*tabletmanagerdatapb.ReadVReplicationWorkflowResponse_Stream{
+						{
+							Id: 1,
+						}, {
+							Id: 2,
+						},
+					},
+				},
+			},
+		},
+	}
+	copyStatesByStreamId, err := wf.fetchCopyStatesByShardStream(ctx, readVReplicationResponse)
+	assert.NoError(t, err)
+
+	copyStates1 := copyStatesByStreamId["-80/1"] // results are keyed by "shard/streamId"
+	copyStates2 := copyStatesByStreamId["-80/2"]
+	copyStates3 := copyStatesByStreamId["80-/1"]
+
+	require.NotNil(t, copyStates1)
+	require.NotNil(t, copyStates2)
+	require.NotNil(t, copyStates3)
+
+	assert.Len(t, copyStates1, 1)
+	assert.Len(t, copyStates2, 2)
+	assert.Len(t, copyStates3, 2)
+
+	assert.Nil(t, copyStatesByStreamId["80-/2"]) // tablet 101 returned no copy_state rows for stream 2
+}
diff --git a/go/vt/vtexplain/vtexplain_vtgate.go b/go/vt/vtexplain/vtexplain_vtgate.go
index d511e2d2ea0..f9ae8be3820 100644
--- a/go/vt/vtexplain/vtexplain_vtgate.go
+++ b/go/vt/vtexplain/vtexplain_vtgate.go
@@ -38,6 +38,7 @@ import (
"vitess.io/vitess/go/vt/vterrors"
"vitess.io/vitess/go/vt/vtgate"
"vitess.io/vitess/go/vt/vtgate/engine"
+ econtext "vitess.io/vitess/go/vt/vtgate/executorcontext"
"vitess.io/vitess/go/vt/vtgate/logstats"
"vitess.io/vitess/go/vt/vtgate/vindexes"
"vitess.io/vitess/go/vt/vttablet/queryservice"
@@ -235,7 +236,7 @@ func (vte *VTExplain) vtgateExecute(sql string) ([]*engine.Plan, map[string]*Tab
// This will ensure that the commit/rollback order is predictable.
vte.sortShardSession()
- _, err := vte.vtgateExecutor.Execute(context.Background(), nil, "VtexplainExecute", vtgate.NewSafeSession(vte.vtgateSession), sql, nil)
+ _, err := vte.vtgateExecutor.Execute(context.Background(), nil, "VtexplainExecute", econtext.NewSafeSession(vte.vtgateSession), sql, nil)
if err != nil {
for _, tc := range vte.explainTopo.TabletConns {
tc.tabletQueries = nil
diff --git a/go/vt/vtexplain/vtexplain_vttablet.go b/go/vt/vtexplain/vtexplain_vttablet.go
index 38a3ca7bbb3..65cd1a96181 100644
--- a/go/vt/vtexplain/vtexplain_vttablet.go
+++ b/go/vt/vtexplain/vtexplain_vttablet.go
@@ -22,6 +22,7 @@ import (
"reflect"
"strings"
"sync"
+ "time"
"vitess.io/vitess/go/stats"
"vitess.io/vitess/go/vt/sidecardb"
@@ -113,8 +114,7 @@ func (vte *VTExplain) newTablet(ctx context.Context, env *vtenv.Environment, opt
config := tabletenv.NewCurrentConfig()
config.TrackSchemaVersions = false
if opts.ExecutionMode == ModeTwoPC {
- config.TwoPCAbandonAge = 1.0
- config.TwoPCEnable = true
+ config.TwoPCAbandonAge = 1 * time.Second
}
config.EnableOnlineDDL = false
config.EnableTableGC = false
diff --git a/go/vt/vtgate/autocommit_test.go b/go/vt/vtgate/autocommit_test.go
index 1ba99c01ef2..2e65cefbabe 100644
--- a/go/vt/vtgate/autocommit_test.go
+++ b/go/vt/vtgate/autocommit_test.go
@@ -23,10 +23,10 @@ import (
"github.com/stretchr/testify/require"
"vitess.io/vitess/go/sqltypes"
-
querypb "vitess.io/vitess/go/vt/proto/query"
vtgatepb "vitess.io/vitess/go/vt/proto/vtgate"
vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc"
+ econtext "vitess.io/vitess/go/vt/vtgate/executorcontext"
)
// This file contains tests for all the autocommit code paths
@@ -382,7 +382,7 @@ func TestAutocommitTransactionStarted(t *testing.T) {
// single shard query - no savepoint needed
sql := "update `user` set a = 2 where id = 1"
- _, err := executor.Execute(context.Background(), nil, "TestExecute", NewSafeSession(session), sql, map[string]*querypb.BindVariable{})
+ _, err := executor.Execute(context.Background(), nil, "TestExecute", econtext.NewSafeSession(session), sql, map[string]*querypb.BindVariable{})
require.NoError(t, err)
require.Len(t, sbc1.Queries, 1)
require.Equal(t, sql, sbc1.Queries[0].Sql)
@@ -394,7 +394,7 @@ func TestAutocommitTransactionStarted(t *testing.T) {
// multi shard query - savepoint needed
sql = "update `user` set a = 2 where id in (1, 4)"
expectedSql := "update `user` set a = 2 where id in ::__vals"
- _, err = executor.Execute(context.Background(), nil, "TestExecute", NewSafeSession(session), sql, map[string]*querypb.BindVariable{})
+ _, err = executor.Execute(context.Background(), nil, "TestExecute", econtext.NewSafeSession(session), sql, map[string]*querypb.BindVariable{})
require.NoError(t, err)
require.Len(t, sbc1.Queries, 2)
require.Contains(t, sbc1.Queries[0].Sql, "savepoint")
@@ -413,7 +413,7 @@ func TestAutocommitDirectTarget(t *testing.T) {
}
sql := "insert into `simple`(val) values ('val')"
- _, err := executor.Execute(context.Background(), nil, "TestExecute", NewSafeSession(session), sql, map[string]*querypb.BindVariable{})
+ _, err := executor.Execute(context.Background(), nil, "TestExecute", econtext.NewSafeSession(session), sql, map[string]*querypb.BindVariable{})
require.NoError(t, err)
assertQueries(t, sbclookup, []*querypb.BoundQuery{{
@@ -434,7 +434,7 @@ func TestAutocommitDirectRangeTarget(t *testing.T) {
}
sql := "delete from sharded_user_msgs limit 1000"
- _, err := executor.Execute(context.Background(), nil, "TestExecute", NewSafeSession(session), sql, map[string]*querypb.BindVariable{})
+ _, err := executor.Execute(context.Background(), nil, "TestExecute", econtext.NewSafeSession(session), sql, map[string]*querypb.BindVariable{})
require.NoError(t, err)
assertQueries(t, sbc1, []*querypb.BoundQuery{{
@@ -451,5 +451,5 @@ func autocommitExec(executor *Executor, sql string) (*sqltypes.Result, error) {
TransactionMode: vtgatepb.TransactionMode_MULTI,
}
- return executor.Execute(context.Background(), nil, "TestExecute", NewSafeSession(session), sql, map[string]*querypb.BindVariable{})
+ return executor.Execute(context.Background(), nil, "TestExecute", econtext.NewSafeSession(session), sql, map[string]*querypb.BindVariable{})
}
diff --git a/go/vt/vtgate/buffer/buffer.go b/go/vt/vtgate/buffer/buffer.go
index dec83e2c78c..eb937a6361c 100644
--- a/go/vt/vtgate/buffer/buffer.go
+++ b/go/vt/vtgate/buffer/buffer.go
@@ -208,6 +208,7 @@ func (b *Buffer) WaitForFailoverEnd(ctx context.Context, keyspace, shard string,
}
func (b *Buffer) HandleKeyspaceEvent(ksevent *discovery.KeyspaceEvent) {
+ log.Infof("Keyspace Event received for keyspace %v", ksevent.Keyspace)
for _, shard := range ksevent.Shards {
sb := b.getOrCreateBuffer(shard.Target.Keyspace, shard.Target.Shard)
if sb != nil {
diff --git a/go/vt/vtgate/buffer/shard_buffer.go b/go/vt/vtgate/buffer/shard_buffer.go
index e1f02bb7f0e..66c6ee702e6 100644
--- a/go/vt/vtgate/buffer/shard_buffer.go
+++ b/go/vt/vtgate/buffer/shard_buffer.go
@@ -286,7 +286,7 @@ func (sb *shardBuffer) startBufferingLocked(ctx context.Context, kev *discovery.
msg = "Dry-run: Would have started buffering"
}
starts.Add(sb.statsKey, 1)
- log.Infof("%v for shard: %s (window: %v, size: %v, max failover duration: %v) (A failover was detected by this seen error: %v.)",
+ log.V(2).Infof("%v for shard: %s (window: %v, size: %v, max failover duration: %v) (A failover was detected by this seen error: %v.)",
msg,
topoproto.KeyspaceShardString(sb.keyspace, sb.shard),
sb.buf.config.Window,
@@ -488,7 +488,7 @@ func (sb *shardBuffer) recordKeyspaceEvent(alias *topodatapb.TabletAlias, stillS
sb.mu.Lock()
defer sb.mu.Unlock()
- log.Infof("disruption in shard %s/%s resolved (serving: %v), movetable state %#v",
+ log.V(2).Infof("disruption in shard %s/%s resolved (serving: %v), movetable state %#v",
sb.keyspace, sb.shard, stillServing, keyspaceEvent.MoveTablesState)
if !topoproto.TabletAliasEqual(alias, sb.currentPrimary) {
@@ -562,7 +562,7 @@ func (sb *shardBuffer) stopBufferingLocked(reason stopReason, details string) {
if sb.mode == bufferModeDryRun {
msg = "Dry-run: Would have stopped buffering"
}
- log.Infof("%v for shard: %s after: %.1f seconds due to: %v. Draining %d buffered requests now.",
+ log.V(2).Infof("%v for shard: %s after: %.1f seconds due to: %v. Draining %d buffered requests now.",
msg, topoproto.KeyspaceShardString(sb.keyspace, sb.shard), d.Seconds(), details, len(q))
var clientEntryError error
@@ -622,7 +622,7 @@ func (sb *shardBuffer) drain(q []*entry, err error) {
wg.Wait()
d := sb.timeNow().Sub(start)
- log.Infof("Draining finished for shard: %s Took: %v for: %d requests.", topoproto.KeyspaceShardString(sb.keyspace, sb.shard), d, len(q))
+ log.V(2).Infof("Draining finished for shard: %s Took: %v for: %d requests.", topoproto.KeyspaceShardString(sb.keyspace, sb.shard), d, len(q))
requestsDrained.Add(sb.statsKey, int64(len(q)))
// Draining is done. Change state from "draining" to "idle".
diff --git a/go/vt/vtgate/debugenv.go b/go/vt/vtgate/debugenv.go
index 4fa989c69a3..7213353432d 100644
--- a/go/vt/vtgate/debugenv.go
+++ b/go/vt/vtgate/debugenv.go
@@ -22,9 +22,10 @@ import (
"html"
"net/http"
"strconv"
- "text/template"
"time"
+ "github.com/google/safehtml/template"
+
"vitess.io/vitess/go/acl"
"vitess.io/vitess/go/vt/discovery"
"vitess.io/vitess/go/vt/log"
diff --git a/go/vt/vtgate/dynamicconfig/config.go b/go/vt/vtgate/dynamicconfig/config.go
new file mode 100644
index 00000000000..5bb1d991eae
--- /dev/null
+++ b/go/vt/vtgate/dynamicconfig/config.go
@@ -0,0 +1,6 @@
+package dynamicconfig
+
+// DDL exposes the dynamically reloadable DDL-mode switches consulted at execution time.
+type DDL interface {
+	OnlineEnabled() bool // whether online (managed) schema changes may be executed
+	DirectEnabled() bool // whether direct (unmanaged) DDL may be sent to the shards
+}
diff --git a/go/vt/vtgate/engine/cached_size.go b/go/vt/vtgate/engine/cached_size.go
index 4c0d1009bd1..c764a6aab08 100644
--- a/go/vt/vtgate/engine/cached_size.go
+++ b/go/vt/vtgate/engine/cached_size.go
@@ -131,7 +131,7 @@ func (cached *DDL) CachedSize(alloc bool) int64 {
}
size := int64(0)
if alloc {
- size += int64(64)
+ size += int64(80)
}
// field Keyspace *vitess.io/vitess/go/vt/vtgate/vindexes.Keyspace
size += cached.Keyspace.CachedSize(true)
@@ -145,6 +145,10 @@ func (cached *DDL) CachedSize(alloc bool) int64 {
size += cached.NormalDDL.CachedSize(true)
// field OnlineDDL *vitess.io/vitess/go/vt/vtgate/engine.OnlineDDL
size += cached.OnlineDDL.CachedSize(true)
+ // field Config vitess.io/vitess/go/vt/vtgate/dynamicconfig.DDL
+ if cc, ok := cached.Config.(cachedObject); ok {
+ size += cc.CachedSize(true)
+ }
return size
}
func (cached *DML) CachedSize(alloc bool) int64 {
diff --git a/go/vt/vtgate/engine/ddl.go b/go/vt/vtgate/engine/ddl.go
index cfdaa5866dc..d7e17eb4f4f 100644
--- a/go/vt/vtgate/engine/ddl.go
+++ b/go/vt/vtgate/engine/ddl.go
@@ -25,6 +25,7 @@ import (
"vitess.io/vitess/go/vt/schema"
"vitess.io/vitess/go/vt/sqlparser"
"vitess.io/vitess/go/vt/vterrors"
+ "vitess.io/vitess/go/vt/vtgate/dynamicconfig"
"vitess.io/vitess/go/vt/vtgate/vindexes"
)
@@ -42,8 +43,7 @@ type DDL struct {
NormalDDL *Send
OnlineDDL *OnlineDDL
- DirectDDLEnabled bool
- OnlineDDLEnabled bool
+ Config dynamicconfig.DDL
CreateTempTable bool
}
@@ -107,12 +107,12 @@ func (ddl *DDL) TryExecute(ctx context.Context, vcursor VCursor, bindVars map[st
switch {
case ddl.isOnlineSchemaDDL():
- if !ddl.OnlineDDLEnabled {
+ if !ddl.Config.OnlineEnabled() {
return nil, schema.ErrOnlineDDLDisabled
}
return vcursor.ExecutePrimitive(ctx, ddl.OnlineDDL, bindVars, wantfields)
default: // non online-ddl
- if !ddl.DirectDDLEnabled {
+ if !ddl.Config.DirectEnabled() {
return nil, schema.ErrDirectDDLDisabled
}
return vcursor.ExecutePrimitive(ctx, ddl.NormalDDL, bindVars, wantfields)
diff --git a/go/vt/vtgate/engine/ddl_test.go b/go/vt/vtgate/engine/ddl_test.go
index 3f7ccb75f70..1d52089bf39 100644
--- a/go/vt/vtgate/engine/ddl_test.go
+++ b/go/vt/vtgate/engine/ddl_test.go
@@ -27,13 +27,23 @@ import (
"vitess.io/vitess/go/vt/vtgate/vindexes"
)
+type ddlConfig struct{} // test stub for dynamicconfig.DDL with both DDL modes enabled
+
+func (ddlConfig) DirectEnabled() bool {
+	return true
+}
+
+func (ddlConfig) OnlineEnabled() bool {
+	return true
+}
+
func TestDDL(t *testing.T) {
ddl := &DDL{
DDL: &sqlparser.CreateTable{
Table: sqlparser.NewTableName("a"),
},
- DirectDDLEnabled: true,
- OnlineDDL: &OnlineDDL{},
+ Config: ddlConfig{},
+ OnlineDDL: &OnlineDDL{},
NormalDDL: &Send{
Keyspace: &vindexes.Keyspace{
Name: "ks",
diff --git a/go/vt/vtgate/engine/plan_description.go b/go/vt/vtgate/engine/plan_description.go
index dfcad4e5e6b..e8e763c1ee1 100644
--- a/go/vt/vtgate/engine/plan_description.go
+++ b/go/vt/vtgate/engine/plan_description.go
@@ -126,6 +126,133 @@ func (pd PrimitiveDescription) MarshalJSON() ([]byte, error) {
return buf.Bytes(), nil
}
+// PrimitiveDescriptionFromString creates a PrimitiveDescription from its JSON string form (the output of MarshalJSON).
+func PrimitiveDescriptionFromString(data string) (pd PrimitiveDescription, err error) {
+	resultMap := make(map[string]any)
+	err = json.Unmarshal([]byte(data), &resultMap)
+	if err != nil {
+		return PrimitiveDescription{}, err
+	}
+	return PrimitiveDescriptionFromMap(resultMap)
+}
+
+// PrimitiveDescriptionFromMap populates the fields of a PrimitiveDescription from a map representation.
+func PrimitiveDescriptionFromMap(data map[string]any) (pd PrimitiveDescription, err error) {
+	if opType, isPresent := data["OperatorType"]; isPresent {
+		pd.OperatorType = opType.(string)
+	}
+	if variant, isPresent := data["Variant"]; isPresent {
+		pd.Variant = variant.(string)
+	}
+	if ksMap, isPresent := data["Keyspace"]; isPresent {
+		ksMap := ksMap.(map[string]any)
+		pd.Keyspace = &vindexes.Keyspace{
+			Name:    ksMap["Name"].(string),
+			Sharded: ksMap["Sharded"].(bool),
+		}
+	}
+	if ttt, isPresent := data["TargetTabletType"]; isPresent {
+		pd.TargetTabletType = topodatapb.TabletType(topodatapb.TabletType_value[ttt.(string)]) // enum is marshaled by name (e.g. "PRIMARY"); encoding/json never yields a Go int, so the former ttt.(int) always panicked
+	}
+	if other, isPresent := data["Other"]; isPresent {
+		pd.Other = other.(map[string]any)
+	}
+	if inpName, isPresent := data["InputName"]; isPresent {
+		pd.InputName = inpName.(string)
+	}
+	if avgRows, isPresent := data["AvgNumberOfRows"]; isPresent {
+		pd.RowsReceived = RowsReceived{
+			int(avgRows.(float64)), // json numbers decode as float64
+		}
+	}
+	if sq, isPresent := data["ShardsQueried"]; isPresent {
+		sq := int(sq.(float64))
+		pd.ShardsQueried = (*ShardsQueried)(&sq)
+	}
+	if inputs, isPresent := data["Inputs"]; isPresent {
+		inputs := inputs.([]any)
+		for _, input := range inputs {
+			inputMap := input.(map[string]any)
+			inp, err := PrimitiveDescriptionFromMap(inputMap)
+			if err != nil {
+				return PrimitiveDescription{}, err
+			}
+			pd.Inputs = append(pd.Inputs, inp)
+		}
+	}
+	return pd, nil
+}
+
+// WalkPrimitiveDescription calls f on pd and then, recursively, on every input (pre-order, depth-first).
+func WalkPrimitiveDescription(pd PrimitiveDescription, f func(PrimitiveDescription)) {
+	f(pd)
+	for _, child := range pd.Inputs {
+		WalkPrimitiveDescription(child, f)
+	}
+}
+
+func (pd PrimitiveDescription) Equals(other PrimitiveDescription) string { // returns a human-readable description of the first difference, or "" when equal
+	if pd.Variant != other.Variant {
+		return fmt.Sprintf("Variant: %v != %v", pd.Variant, other.Variant)
+	}
+
+	if pd.OperatorType != other.OperatorType {
+		return fmt.Sprintf("OperatorType: %v != %v", pd.OperatorType, other.OperatorType)
+	}
+
+	// TODO (harshit): enable this to compare keyspace as well
+	// switch {
+	// case pd.Keyspace == nil && other.Keyspace == nil:
+	//	// do nothing
+	// case pd.Keyspace != nil && other.Keyspace != nil:
+	//	if pd.Keyspace.Name != other.Keyspace.Name {
+	//		return fmt.Sprintf("Keyspace.Name: %v != %v", pd.Keyspace.Name, other.Keyspace.Name)
+	//	}
+	// default:
+	//	return "Keyspace is nil in one of the descriptions"
+	// }
+
+	switch {
+	case pd.TargetDestination == nil && other.TargetDestination == nil:
+		// do nothing
+	case pd.TargetDestination != nil && other.TargetDestination != nil:
+		if pd.TargetDestination.String() != other.TargetDestination.String() { // compare via String(): destinations are interface values of differing concrete types
+			return fmt.Sprintf("TargetDestination: %v != %v", pd.TargetDestination, other.TargetDestination)
+		}
+	default:
+		return "TargetDestination is nil in one of the descriptions"
+	}
+
+	if pd.TargetTabletType != other.TargetTabletType {
+		return fmt.Sprintf("TargetTabletType: %v != %v", pd.TargetTabletType, other.TargetTabletType)
+	}
+
+	switch {
+	case pd.Other == nil && other.Other == nil:
+		// do nothing
+	case pd.Other != nil && other.Other != nil:
+		if len(pd.Other) != len(other.Other) {
+			return fmt.Sprintf("Other length did not match: %v != %v", pd.Other, other.Other)
+		}
+		for ky, val := range pd.Other {
+			if other.Other[ky] != val { // NOTE(review): shallow != on any values — panics on uncomparable values (maps/slices); confirm Other only holds scalars
+				return fmt.Sprintf("Other[%v]: %v != %v", ky, val, other.Other[ky])
+			}
+		}
+	default:
+		return "Other is nil in one of the descriptions"
+	}
+	if len(pd.Inputs) != len(other.Inputs) {
+		return fmt.Sprintf("Inputs length did not match: %v != %v", len(pd.Inputs), len(other.Inputs))
+	}
+	for idx, input := range pd.Inputs {
+		if diff := input.Equals(other.Inputs[idx]); diff != "" { // recurse in order; input order is significant
+			return diff
+		}
+	}
+	return ""
+}
+
func average(nums []int) float64 {
total := 0
for _, num := range nums {
diff --git a/go/vt/vtgate/evalengine/eval_result.go b/go/vt/vtgate/evalengine/eval_result.go
index d9916af03be..5c1973d8eb1 100644
--- a/go/vt/vtgate/evalengine/eval_result.go
+++ b/go/vt/vtgate/evalengine/eval_result.go
@@ -62,6 +62,7 @@ func (er EvalResult) String() string {
// TupleValues allows for retrieval of the value we expose for public consumption
func (er EvalResult) TupleValues() []sqltypes.Value {
+ // TODO: Make this collation-aware
switch v := er.v.(type) {
case *evalTuple:
result := make([]sqltypes.Value, 0, len(v.t))
diff --git a/go/vt/vtgate/executor.go b/go/vt/vtgate/executor.go
index 928f42fca30..0bb47361f55 100644
--- a/go/vt/vtgate/executor.go
+++ b/go/vt/vtgate/executor.go
@@ -30,6 +30,8 @@ import (
"github.com/spf13/pflag"
+ vschemapb "vitess.io/vitess/go/vt/proto/vschema"
+
"vitess.io/vitess/go/acl"
"vitess.io/vitess/go/cache/theine"
"vitess.io/vitess/go/mysql/capabilities"
@@ -57,6 +59,7 @@ import (
"vitess.io/vitess/go/vt/vterrors"
"vitess.io/vitess/go/vt/vtgate/engine"
"vitess.io/vitess/go/vt/vtgate/evalengine"
+ econtext "vitess.io/vitess/go/vt/vtgate/executorcontext"
"vitess.io/vitess/go/vt/vtgate/logstats"
"vitess.io/vitess/go/vt/vtgate/planbuilder"
"vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext"
@@ -67,7 +70,6 @@ import (
)
var (
- errNoKeyspace = vterrors.VT09005()
defaultTabletType = topodatapb.TabletType_PRIMARY
// TODO: @rafael - These two counters should be deprecated in favor of the ByTable ones in v17+. They are kept for now for backwards compatibility.
@@ -111,7 +113,6 @@ type Executor struct {
resolver *Resolver
scatterConn *ScatterConn
txConn *TxConn
- pv plancontext.PlannerVersion
mu sync.Mutex
vschema *vindexes.VSchema
@@ -121,8 +122,7 @@ type Executor struct {
plans *PlanCache
epoch atomic.Uint32
- normalize bool
- warnShardedOnly bool
+ normalize bool
vm *VSchemaManager
schemaTracker SchemaInfo
@@ -135,6 +135,8 @@ type Executor struct {
warmingReadsPercent int
warmingReadsChannel chan bool
+
+ vConfig econtext.VCursorConfig
}
var executorOnce sync.Once
@@ -175,17 +177,16 @@ func NewExecutor(
scatterConn: resolver.scatterConn,
txConn: resolver.scatterConn.txConn,
normalize: normalize,
- warnShardedOnly: warnOnShardedOnly,
streamSize: streamSize,
schemaTracker: schemaTracker,
allowScatter: !noScatter,
- pv: pv,
plans: plans,
warmingReadsPercent: warmingReadsPercent,
warmingReadsChannel: make(chan bool, warmingReadsConcurrency),
}
+ // setting the vcursor config.
+ e.initVConfig(warnOnShardedOnly, pv)
- vschemaacl.Init()
// we subscribe to update from the VSchemaManager
e.vm = &VSchemaManager{
subscriber: e.SaveVSchema,
@@ -223,7 +224,7 @@ func NewExecutor(
}
// Execute executes a non-streaming query.
-func (e *Executor) Execute(ctx context.Context, mysqlCtx vtgateservice.MySQLConnection, method string, safeSession *SafeSession, sql string, bindVars map[string]*querypb.BindVariable) (result *sqltypes.Result, err error) {
+func (e *Executor) Execute(ctx context.Context, mysqlCtx vtgateservice.MySQLConnection, method string, safeSession *econtext.SafeSession, sql string, bindVars map[string]*querypb.BindVariable) (result *sqltypes.Result, err error) {
span, ctx := trace.NewSpan(ctx, "executor.Execute")
span.Annotate("method", method)
trace.AnnotateSQL(span, sqlparser.Preview(sql))
@@ -286,7 +287,7 @@ func (e *Executor) StreamExecute(
ctx context.Context,
mysqlCtx vtgateservice.MySQLConnection,
method string,
- safeSession *SafeSession,
+ safeSession *econtext.SafeSession,
sql string,
bindVars map[string]*querypb.BindVariable,
callback func(*sqltypes.Result) error,
@@ -300,7 +301,7 @@ func (e *Executor) StreamExecute(
srr := &streaminResultReceiver{callback: callback}
var err error
- resultHandler := func(ctx context.Context, plan *engine.Plan, vc *vcursorImpl, bindVars map[string]*querypb.BindVariable, execStart time.Time) error {
+ resultHandler := func(ctx context.Context, plan *engine.Plan, vc *econtext.VCursorImpl, bindVars map[string]*querypb.BindVariable, execStart time.Time) error {
var seenResults atomic.Bool
var resultMu sync.Mutex
result := &sqltypes.Result{}
@@ -368,7 +369,7 @@ func (e *Executor) StreamExecute(
logStats.TablesUsed = plan.TablesUsed
logStats.TabletType = vc.TabletType().String()
logStats.ExecuteTime = time.Since(execStart)
- logStats.ActiveKeyspace = vc.keyspace
+ logStats.ActiveKeyspace = vc.GetKeyspace()
e.updateQueryCounts(plan.Instructions.RouteType(), plan.Instructions.GetKeyspaceName(), plan.Instructions.GetTableName(), int64(logStats.ShardQueries))
@@ -411,12 +412,12 @@ func canReturnRows(stmtType sqlparser.StatementType) bool {
}
}
-func saveSessionStats(safeSession *SafeSession, stmtType sqlparser.StatementType, rowsAffected, insertID uint64, rowsReturned int, err error) {
+func saveSessionStats(safeSession *econtext.SafeSession, stmtType sqlparser.StatementType, rowsAffected, insertID uint64, rowsReturned int, err error) {
safeSession.RowCount = -1
if err != nil {
return
}
- if !safeSession.foundRowsHandled {
+ if !safeSession.IsFoundRowsHandled() {
safeSession.FoundRows = uint64(rowsReturned)
}
if insertID > 0 {
@@ -430,11 +431,11 @@ func saveSessionStats(safeSession *SafeSession, stmtType sqlparser.StatementType
}
}
-func (e *Executor) execute(ctx context.Context, mysqlCtx vtgateservice.MySQLConnection, safeSession *SafeSession, sql string, bindVars map[string]*querypb.BindVariable, logStats *logstats.LogStats) (sqlparser.StatementType, *sqltypes.Result, error) {
+func (e *Executor) execute(ctx context.Context, mysqlCtx vtgateservice.MySQLConnection, safeSession *econtext.SafeSession, sql string, bindVars map[string]*querypb.BindVariable, logStats *logstats.LogStats) (sqlparser.StatementType, *sqltypes.Result, error) {
var err error
var qr *sqltypes.Result
var stmtType sqlparser.StatementType
- err = e.newExecute(ctx, mysqlCtx, safeSession, sql, bindVars, logStats, func(ctx context.Context, plan *engine.Plan, vc *vcursorImpl, bindVars map[string]*querypb.BindVariable, time time.Time) error {
+ err = e.newExecute(ctx, mysqlCtx, safeSession, sql, bindVars, logStats, func(ctx context.Context, plan *engine.Plan, vc *econtext.VCursorImpl, bindVars map[string]*querypb.BindVariable, time time.Time) error {
stmtType = plan.Type
qr, err = e.executePlan(ctx, safeSession, plan, vc, bindVars, logStats, time)
return err
@@ -448,7 +449,7 @@ func (e *Executor) execute(ctx context.Context, mysqlCtx vtgateservice.MySQLConn
}
// addNeededBindVars adds bind vars that are needed by the plan
-func (e *Executor) addNeededBindVars(vcursor *vcursorImpl, bindVarNeeds *sqlparser.BindVarNeeds, bindVars map[string]*querypb.BindVariable, session *SafeSession) error {
+func (e *Executor) addNeededBindVars(vcursor *econtext.VCursorImpl, bindVarNeeds *sqlparser.BindVarNeeds, bindVars map[string]*querypb.BindVariable, session *econtext.SafeSession) error {
for _, funcName := range bindVarNeeds.NeedFunctionResult {
switch funcName {
case sqlparser.DBVarName:
@@ -541,7 +542,7 @@ func (e *Executor) addNeededBindVars(vcursor *vcursorImpl, bindVarNeeds *sqlpars
}
evalExpr, err := evalengine.Translate(expr, &evalengine.Config{
- Collation: vcursor.collation,
+ Collation: vcursor.ConnCollation(),
Environment: e.env,
SQLMode: evalengine.ParseSQLMode(vcursor.SQLMode()),
})
@@ -552,7 +553,7 @@ func (e *Executor) addNeededBindVars(vcursor *vcursorImpl, bindVarNeeds *sqlpars
if err != nil {
return err
}
- bindVars[key] = sqltypes.ValueBindVariable(evaluated.Value(vcursor.collation))
+ bindVars[key] = sqltypes.ValueBindVariable(evaluated.Value(vcursor.ConnCollation()))
}
}
}
@@ -572,21 +573,21 @@ func (e *Executor) addNeededBindVars(vcursor *vcursorImpl, bindVarNeeds *sqlpars
return nil
}
-func ifOptionsExist(session *SafeSession, f func(*querypb.ExecuteOptions)) {
+func ifOptionsExist(session *econtext.SafeSession, f func(*querypb.ExecuteOptions)) {
options := session.GetOptions()
if options != nil {
f(options)
}
}
-func ifReadAfterWriteExist(session *SafeSession, f func(*vtgatepb.ReadAfterWrite)) {
+func ifReadAfterWriteExist(session *econtext.SafeSession, f func(*vtgatepb.ReadAfterWrite)) {
raw := session.ReadAfterWrite
if raw != nil {
f(raw)
}
}
-func (e *Executor) handleBegin(ctx context.Context, safeSession *SafeSession, logStats *logstats.LogStats, stmt sqlparser.Statement) (*sqltypes.Result, error) {
+func (e *Executor) handleBegin(ctx context.Context, safeSession *econtext.SafeSession, logStats *logstats.LogStats, stmt sqlparser.Statement) (*sqltypes.Result, error) {
execStart := time.Now()
logStats.PlanTime = execStart.Sub(logStats.StartTime)
@@ -599,7 +600,7 @@ func (e *Executor) handleBegin(ctx context.Context, safeSession *SafeSession, lo
return &sqltypes.Result{}, err
}
-func (e *Executor) handleCommit(ctx context.Context, safeSession *SafeSession, logStats *logstats.LogStats) (*sqltypes.Result, error) {
+func (e *Executor) handleCommit(ctx context.Context, safeSession *econtext.SafeSession, logStats *logstats.LogStats) (*sqltypes.Result, error) {
execStart := time.Now()
logStats.PlanTime = execStart.Sub(logStats.StartTime)
logStats.ShardQueries = uint64(len(safeSession.ShardSessions))
@@ -611,11 +612,11 @@ func (e *Executor) handleCommit(ctx context.Context, safeSession *SafeSession, l
}
// Commit commits the existing transactions
-func (e *Executor) Commit(ctx context.Context, safeSession *SafeSession) error {
+func (e *Executor) Commit(ctx context.Context, safeSession *econtext.SafeSession) error {
return e.txConn.Commit(ctx, safeSession)
}
-func (e *Executor) handleRollback(ctx context.Context, safeSession *SafeSession, logStats *logstats.LogStats) (*sqltypes.Result, error) {
+func (e *Executor) handleRollback(ctx context.Context, safeSession *econtext.SafeSession, logStats *logstats.LogStats) (*sqltypes.Result, error) {
execStart := time.Now()
logStats.PlanTime = execStart.Sub(logStats.StartTime)
logStats.ShardQueries = uint64(len(safeSession.ShardSessions))
@@ -625,7 +626,7 @@ func (e *Executor) handleRollback(ctx context.Context, safeSession *SafeSession,
return &sqltypes.Result{}, err
}
-func (e *Executor) handleSavepoint(ctx context.Context, safeSession *SafeSession, sql string, planType string, logStats *logstats.LogStats, nonTxResponse func(query string) (*sqltypes.Result, error), ignoreMaxMemoryRows bool) (*sqltypes.Result, error) {
+func (e *Executor) handleSavepoint(ctx context.Context, safeSession *econtext.SafeSession, sql string, planType string, logStats *logstats.LogStats, nonTxResponse func(query string) (*sqltypes.Result, error), ignoreMaxMemoryRows bool) (*sqltypes.Result, error) {
execStart := time.Now()
logStats.PlanTime = execStart.Sub(logStats.StartTime)
logStats.ShardQueries = uint64(len(safeSession.ShardSessions))
@@ -637,7 +638,7 @@ func (e *Executor) handleSavepoint(ctx context.Context, safeSession *SafeSession
// If no transaction exists on any of the shard sessions,
// then savepoint does not need to be executed, it will be only stored in the session
// and later will be executed when a transaction is started.
- if !safeSession.isTxOpen() {
+ if !safeSession.IsTxOpen() {
if safeSession.InTransaction() {
// Storing, as this needs to be executed just after starting transaction on the shard.
safeSession.StoreSavepoint(sql)
@@ -645,7 +646,7 @@ func (e *Executor) handleSavepoint(ctx context.Context, safeSession *SafeSession
}
return nonTxResponse(sql)
}
- orig := safeSession.commitOrder
+ orig := safeSession.GetCommitOrder()
qr, err := e.executeSPInAllSessions(ctx, safeSession, sql, ignoreMaxMemoryRows)
safeSession.SetCommitOrder(orig)
if err != nil {
@@ -657,7 +658,7 @@ func (e *Executor) handleSavepoint(ctx context.Context, safeSession *SafeSession
// executeSPInAllSessions function executes the savepoint query in all open shard sessions (pre, normal and post)
// which has non-zero transaction id (i.e. an open transaction on the shard connection).
-func (e *Executor) executeSPInAllSessions(ctx context.Context, safeSession *SafeSession, sql string, ignoreMaxMemoryRows bool) (*sqltypes.Result, error) {
+func (e *Executor) executeSPInAllSessions(ctx context.Context, safeSession *econtext.SafeSession, sql string, ignoreMaxMemoryRows bool) (*sqltypes.Result, error) {
var qr *sqltypes.Result
var errs []error
for _, co := range []vtgatepb.CommitOrder{vtgatepb.CommitOrder_PRE, vtgatepb.CommitOrder_NORMAL, vtgatepb.CommitOrder_POST} {
@@ -665,7 +666,7 @@ func (e *Executor) executeSPInAllSessions(ctx context.Context, safeSession *Safe
var rss []*srvtopo.ResolvedShard
var queries []*querypb.BoundQuery
- for _, shardSession := range safeSession.getSessions() {
+ for _, shardSession := range safeSession.GetSessions() {
// This will avoid executing savepoint on reserved connections
// which has no open transaction.
if shardSession.TransactionId == 0 {
@@ -718,11 +719,11 @@ func (e *Executor) handleKill(ctx context.Context, mysqlCtx vtgateservice.MySQLC
// CloseSession releases the current connection, which rollbacks open transactions and closes reserved connections.
// It is called then the MySQL servers closes the connection to its client.
-func (e *Executor) CloseSession(ctx context.Context, safeSession *SafeSession) error {
+func (e *Executor) CloseSession(ctx context.Context, safeSession *econtext.SafeSession) error {
return e.txConn.ReleaseAll(ctx, safeSession)
}
-func (e *Executor) setVitessMetadata(ctx context.Context, name, value string) error {
+func (e *Executor) SetVitessMetadata(ctx context.Context, name, value string) error {
// TODO(kalfonso): move to its own acl check and consolidate into an acl component that can handle multiple operations (vschema, metadata)
user := callerid.ImmediateCallerIDFromContext(ctx)
allowed := vschemaacl.Authorized(user)
@@ -741,7 +742,7 @@ func (e *Executor) setVitessMetadata(ctx context.Context, name, value string) er
return ts.UpsertMetadata(ctx, name, value)
}
-func (e *Executor) showVitessMetadata(ctx context.Context, filter *sqlparser.ShowFilter) (*sqltypes.Result, error) {
+func (e *Executor) ShowVitessMetadata(ctx context.Context, filter *sqlparser.ShowFilter) (*sqltypes.Result, error) {
ts, err := e.serv.GetTopoServer()
if err != nil {
return nil, err
@@ -774,7 +775,7 @@ func (e *Executor) showVitessMetadata(ctx context.Context, filter *sqlparser.Sho
type tabletFilter func(tablet *topodatapb.Tablet, servingState string, primaryTermStartTime int64) bool
-func (e *Executor) showShards(ctx context.Context, filter *sqlparser.ShowFilter, destTabletType topodatapb.TabletType) (*sqltypes.Result, error) {
+func (e *Executor) ShowShards(ctx context.Context, filter *sqlparser.ShowFilter, destTabletType topodatapb.TabletType) (*sqltypes.Result, error) {
showVitessShardsFilters := func(filter *sqlparser.ShowFilter) ([]func(string) bool, []func(string, *topodatapb.ShardReference) bool) {
keyspaceFilters := []func(string) bool{}
shardFilters := []func(string, *topodatapb.ShardReference) bool{}
@@ -858,7 +859,7 @@ func (e *Executor) showShards(ctx context.Context, filter *sqlparser.ShowFilter,
}, nil
}
-func (e *Executor) showTablets(filter *sqlparser.ShowFilter) (*sqltypes.Result, error) {
+func (e *Executor) ShowTablets(filter *sqlparser.ShowFilter) (*sqltypes.Result, error) {
getTabletFilters := func(filter *sqlparser.ShowFilter) []tabletFilter {
var filters []tabletFilter
@@ -931,7 +932,7 @@ func (e *Executor) showTablets(filter *sqlparser.ShowFilter) (*sqltypes.Result,
}, nil
}
-func (e *Executor) showVitessReplicationStatus(ctx context.Context, filter *sqlparser.ShowFilter) (*sqltypes.Result, error) {
+func (e *Executor) ShowVitessReplicationStatus(ctx context.Context, filter *sqlparser.ShowFilter) (*sqltypes.Result, error) {
ctx, cancel := context.WithTimeout(ctx, healthCheckTimeout)
defer cancel()
rows := [][]sqltypes.Value{}
@@ -1078,26 +1079,14 @@ func (e *Executor) SaveVSchema(vschema *vindexes.VSchema, stats *VSchemaStats) {
// ParseDestinationTarget parses destination target string and sets default keyspace if possible.
func (e *Executor) ParseDestinationTarget(targetString string) (string, topodatapb.TabletType, key.Destination, error) {
- destKeyspace, destTabletType, dest, err := topoproto.ParseDestination(targetString, defaultTabletType)
- // Set default keyspace
- if destKeyspace == "" && len(e.VSchema().Keyspaces) == 1 {
- for k := range e.VSchema().Keyspaces {
- destKeyspace = k
- }
- }
- return destKeyspace, destTabletType, dest, err
-}
-
-type iQueryOption interface {
- cachePlan() bool
- getSelectLimit() int
+ return econtext.ParseDestinationTarget(targetString, defaultTabletType, e.VSchema())
}
// getPlan computes the plan for the given query. If one is in
// the cache, it reuses it.
func (e *Executor) getPlan(
ctx context.Context,
- vcursor *vcursorImpl,
+ vcursor *econtext.VCursorImpl,
query string,
stmt sqlparser.Statement,
comments sqlparser.MarginComments,
@@ -1135,10 +1124,10 @@ func (e *Executor) getPlan(
reservedVars,
bindVars,
parameterize,
- vcursor.keyspace,
- vcursor.safeSession.getSelectLimit(),
+ vcursor.GetKeyspace(),
+ vcursor.SafeSession.GetSelectLimit(),
setVarComment,
- vcursor.safeSession.SystemVariables,
+ vcursor.GetSystemVariablesCopy(),
vcursor.GetForeignKeyChecksState(),
vcursor,
)
@@ -1157,9 +1146,9 @@ func (e *Executor) getPlan(
return e.cacheAndBuildStatement(ctx, vcursor, query, stmt, reservedVars, bindVarNeeds, logStats)
}
-func (e *Executor) hashPlan(ctx context.Context, vcursor *vcursorImpl, query string) PlanCacheKey {
+func (e *Executor) hashPlan(ctx context.Context, vcursor *econtext.VCursorImpl, query string) PlanCacheKey {
hasher := vthash.New256()
- vcursor.keyForPlan(ctx, query, hasher)
+ vcursor.KeyForPlan(ctx, query, hasher)
var planKey PlanCacheKey
hasher.Sum(planKey[:0])
@@ -1168,19 +1157,22 @@ func (e *Executor) hashPlan(ctx context.Context, vcursor *vcursorImpl, query str
func (e *Executor) buildStatement(
ctx context.Context,
- vcursor *vcursorImpl,
+ vcursor *econtext.VCursorImpl,
query string,
stmt sqlparser.Statement,
reservedVars *sqlparser.ReservedVars,
bindVarNeeds *sqlparser.BindVarNeeds,
) (*engine.Plan, error) {
- plan, err := planbuilder.BuildFromStmt(ctx, query, stmt, reservedVars, vcursor, bindVarNeeds, enableOnlineDDL, enableDirectDDL)
+ cfg := &dynamicViperConfig{
+ onlineDDL: enableOnlineDDL,
+ directDDL: enableDirectDDL,
+ }
+ plan, err := planbuilder.BuildFromStmt(ctx, query, stmt, reservedVars, vcursor, bindVarNeeds, cfg)
if err != nil {
return nil, err
}
- plan.Warnings = vcursor.warnings
- vcursor.warnings = nil
+ plan.Warnings = vcursor.GetAndEmptyWarnings()
err = e.checkThatPlanIsValid(stmt, plan)
return plan, err
@@ -1188,14 +1180,14 @@ func (e *Executor) buildStatement(
func (e *Executor) cacheAndBuildStatement(
ctx context.Context,
- vcursor *vcursorImpl,
+ vcursor *econtext.VCursorImpl,
query string,
stmt sqlparser.Statement,
reservedVars *sqlparser.ReservedVars,
bindVarNeeds *sqlparser.BindVarNeeds,
logStats *logstats.LogStats,
) (*engine.Plan, error) {
- planCachable := sqlparser.CachePlan(stmt) && vcursor.safeSession.cachePlan()
+ planCachable := sqlparser.CachePlan(stmt) && vcursor.CachePlan()
if planCachable {
planKey := e.hashPlan(ctx, vcursor, query)
@@ -1213,7 +1205,7 @@ func (e *Executor) canNormalizeStatement(stmt sqlparser.Statement, setVarComment
return sqlparser.CanNormalize(stmt) || setVarComment != ""
}
-func prepareSetVarComment(vcursor *vcursorImpl, stmt sqlparser.Statement) (string, error) {
+func prepareSetVarComment(vcursor *econtext.VCursorImpl, stmt sqlparser.Statement) (string, error) {
if vcursor == nil || vcursor.Session().InReservedConn() {
return "", nil
}
@@ -1354,7 +1346,7 @@ func isValidPayloadSize(query string) bool {
}
// Prepare executes a prepare statements.
-func (e *Executor) Prepare(ctx context.Context, method string, safeSession *SafeSession, sql string, bindVars map[string]*querypb.BindVariable) (fld []*querypb.Field, err error) {
+func (e *Executor) Prepare(ctx context.Context, method string, safeSession *econtext.SafeSession, sql string, bindVars map[string]*querypb.BindVariable) (fld []*querypb.Field, err error) {
logStats := logstats.NewLogStats(ctx, method, sql, safeSession.GetSessionUUID(), bindVars)
fld, err = e.prepare(ctx, safeSession, sql, bindVars, logStats)
logStats.Error = err
@@ -1373,7 +1365,7 @@ func (e *Executor) Prepare(ctx context.Context, method string, safeSession *Safe
return fld, err
}
-func (e *Executor) prepare(ctx context.Context, safeSession *SafeSession, sql string, bindVars map[string]*querypb.BindVariable, logStats *logstats.LogStats) ([]*querypb.Field, error) {
+func (e *Executor) prepare(ctx context.Context, safeSession *econtext.SafeSession, sql string, bindVars map[string]*querypb.BindVariable, logStats *logstats.LogStats) ([]*querypb.Field, error) {
// Start an implicit transaction if necessary.
if !safeSession.Autocommit && !safeSession.InTransaction() {
if err := e.txConn.Begin(ctx, safeSession, nil); err != nil {
@@ -1409,9 +1401,41 @@ func (e *Executor) prepare(ctx context.Context, safeSession *SafeSession, sql st
return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "[BUG] unrecognized prepare statement: %s", sql)
}
-func (e *Executor) handlePrepare(ctx context.Context, safeSession *SafeSession, sql string, bindVars map[string]*querypb.BindVariable, logStats *logstats.LogStats) ([]*querypb.Field, error) {
+func (e *Executor) initVConfig(warnOnShardedOnly bool, pv plancontext.PlannerVersion) {
+ connCollation := collations.Unknown
+ if gw, isTabletGw := e.resolver.resolver.GetGateway().(*TabletGateway); isTabletGw {
+ connCollation = gw.DefaultConnCollation()
+ }
+ if connCollation == collations.Unknown {
+ connCollation = e.env.CollationEnv().DefaultConnectionCharset()
+ }
+
+ e.vConfig = econtext.VCursorConfig{
+ Collation: connCollation,
+ DefaultTabletType: defaultTabletType,
+ PlannerVersion: pv,
+
+ QueryTimeout: queryTimeout,
+ MaxMemoryRows: maxMemoryRows,
+
+ SetVarEnabled: sysVarSetEnabled,
+ EnableViews: enableViews,
+ ForeignKeyMode: fkMode(foreignKeyMode),
+ EnableShardRouting: enableShardRouting,
+ WarnShardedOnly: warnOnShardedOnly,
+
+ DBDDLPlugin: dbDDLPlugin,
+
+ WarmingReadsPercent: e.warmingReadsPercent,
+ WarmingReadsTimeout: warmingReadsQueryTimeout,
+ WarmingReadsChannel: e.warmingReadsChannel,
+ }
+}
+
+func (e *Executor) handlePrepare(ctx context.Context, safeSession *econtext.SafeSession, sql string, bindVars map[string]*querypb.BindVariable, logStats *logstats.LogStats) ([]*querypb.Field, error) {
query, comments := sqlparser.SplitMarginComments(sql)
- vcursor, _ := newVCursorImpl(safeSession, comments, e, logStats, e.vm, e.VSchema(), e.resolver.resolver, e.serv, e.warnShardedOnly, e.pv)
+
+ vcursor, _ := econtext.NewVCursorImpl(safeSession, comments, e, logStats, e.vm, e.VSchema(), e.resolver.resolver, e.serv, nullResultsObserver{}, e.vConfig)
stmt, reservedVars, err := parseAndValidateQuery(query, e.env.Parser())
if err != nil {
@@ -1460,17 +1484,17 @@ func parseAndValidateQuery(query string, parser *sqlparser.Parser) (sqlparser.St
}
// ExecuteMultiShard implements the IExecutor interface
-func (e *Executor) ExecuteMultiShard(ctx context.Context, primitive engine.Primitive, rss []*srvtopo.ResolvedShard, queries []*querypb.BoundQuery, session *SafeSession, autocommit bool, ignoreMaxMemoryRows bool, resultsObserver resultsObserver) (qr *sqltypes.Result, errs []error) {
+func (e *Executor) ExecuteMultiShard(ctx context.Context, primitive engine.Primitive, rss []*srvtopo.ResolvedShard, queries []*querypb.BoundQuery, session *econtext.SafeSession, autocommit bool, ignoreMaxMemoryRows bool, resultsObserver econtext.ResultsObserver) (qr *sqltypes.Result, errs []error) {
return e.scatterConn.ExecuteMultiShard(ctx, primitive, rss, queries, session, autocommit, ignoreMaxMemoryRows, resultsObserver)
}
// StreamExecuteMulti implements the IExecutor interface
-func (e *Executor) StreamExecuteMulti(ctx context.Context, primitive engine.Primitive, query string, rss []*srvtopo.ResolvedShard, vars []map[string]*querypb.BindVariable, session *SafeSession, autocommit bool, callback func(reply *sqltypes.Result) error, resultsObserver resultsObserver) []error {
+func (e *Executor) StreamExecuteMulti(ctx context.Context, primitive engine.Primitive, query string, rss []*srvtopo.ResolvedShard, vars []map[string]*querypb.BindVariable, session *econtext.SafeSession, autocommit bool, callback func(reply *sqltypes.Result) error, resultsObserver econtext.ResultsObserver) []error {
return e.scatterConn.StreamExecuteMulti(ctx, primitive, query, rss, vars, session, autocommit, callback, resultsObserver)
}
// ExecuteLock implements the IExecutor interface
-func (e *Executor) ExecuteLock(ctx context.Context, rs *srvtopo.ResolvedShard, query *querypb.BoundQuery, session *SafeSession, lockFuncType sqlparser.LockingFuncType) (*sqltypes.Result, error) {
+func (e *Executor) ExecuteLock(ctx context.Context, rs *srvtopo.ResolvedShard, query *querypb.BoundQuery, session *econtext.SafeSession, lockFuncType sqlparser.LockingFuncType) (*sqltypes.Result, error) {
return e.scatterConn.ExecuteLock(ctx, rs, query, session, lockFuncType)
}
@@ -1581,25 +1605,25 @@ func getTabletThrottlerStatus(tabletHostPort string) (string, error) {
}
// ReleaseLock implements the IExecutor interface
-func (e *Executor) ReleaseLock(ctx context.Context, session *SafeSession) error {
+func (e *Executor) ReleaseLock(ctx context.Context, session *econtext.SafeSession) error {
return e.txConn.ReleaseLock(ctx, session)
}
-// planPrepareStmt implements the IExecutor interface
-func (e *Executor) planPrepareStmt(ctx context.Context, vcursor *vcursorImpl, query string) (*engine.Plan, sqlparser.Statement, error) {
+// PlanPrepareStmt implements the IExecutor interface
+func (e *Executor) PlanPrepareStmt(ctx context.Context, vcursor *econtext.VCursorImpl, query string) (*engine.Plan, sqlparser.Statement, error) {
stmt, reservedVars, err := parseAndValidateQuery(query, e.env.Parser())
if err != nil {
return nil, nil, err
}
// creating this log stats to not interfere with the original log stats.
- lStats := logstats.NewLogStats(ctx, "prepare", query, vcursor.safeSession.SessionUUID, nil)
+ lStats := logstats.NewLogStats(ctx, "prepare", query, vcursor.Session().GetSessionUUID(), nil)
plan, err := e.getPlan(
ctx,
vcursor,
query,
sqlparser.Clone(stmt),
- vcursor.marginComments,
+ vcursor.GetMarginComments(),
map[string]*querypb.BindVariable{},
reservedVars, /* normalize */
false,
@@ -1621,7 +1645,7 @@ func (e *Executor) Close() {
e.plans.Close()
}
-func (e *Executor) environment() *vtenv.Environment {
+func (e *Executor) Environment() *vtenv.Environment {
return e.env
}
@@ -1633,6 +1657,10 @@ func (e *Executor) UnresolvedTransactions(ctx context.Context, targets []*queryp
return e.txConn.UnresolvedTransactions(ctx, targets)
}
+func (e *Executor) AddWarningCount(name string, count int64) {
+ warnings.Add(name, count)
+}
+
type (
errorTransformer interface {
TransformError(err error) error
@@ -1643,3 +1671,16 @@ type (
func (nullErrorTransformer) TransformError(err error) error {
return err
}
+
+func fkMode(foreignkey string) vschemapb.Keyspace_ForeignKeyMode {
+ switch foreignkey {
+ case "disallow":
+ return vschemapb.Keyspace_disallow
+ case "managed":
+ return vschemapb.Keyspace_managed
+ case "unmanaged":
+ return vschemapb.Keyspace_unmanaged
+
+ }
+ return vschemapb.Keyspace_unspecified
+}
diff --git a/go/vt/vtgate/executor_ddl_test.go b/go/vt/vtgate/executor_ddl_test.go
index 3274fd94475..bf117856e08 100644
--- a/go/vt/vtgate/executor_ddl_test.go
+++ b/go/vt/vtgate/executor_ddl_test.go
@@ -21,14 +21,15 @@ import (
"testing"
vtgatepb "vitess.io/vitess/go/vt/proto/vtgate"
+ econtext "vitess.io/vitess/go/vt/vtgate/executorcontext"
"github.com/stretchr/testify/require"
)
func TestDDLFlags(t *testing.T) {
defer func() {
- enableOnlineDDL = true
- enableDirectDDL = true
+ enableOnlineDDL.Set(true)
+ enableDirectDDL.Set(true)
}()
testcases := []struct {
enableDirectDDL bool
@@ -56,9 +57,9 @@ func TestDDLFlags(t *testing.T) {
for _, testcase := range testcases {
t.Run(fmt.Sprintf("%s-%v-%v", testcase.sql, testcase.enableDirectDDL, testcase.enableOnlineDDL), func(t *testing.T) {
executor, _, _, _, ctx := createExecutorEnv(t)
- session := NewSafeSession(&vtgatepb.Session{TargetString: KsTestUnsharded})
- enableDirectDDL = testcase.enableDirectDDL
- enableOnlineDDL = testcase.enableOnlineDDL
+ session := econtext.NewSafeSession(&vtgatepb.Session{TargetString: KsTestUnsharded})
+ enableDirectDDL.Set(testcase.enableDirectDDL)
+ enableOnlineDDL.Set(testcase.enableOnlineDDL)
_, err := executor.Execute(ctx, nil, "TestDDLFlags", session, testcase.sql, nil)
if testcase.wantErr {
require.EqualError(t, err, testcase.err)
diff --git a/go/vt/vtgate/executor_dml_test.go b/go/vt/vtgate/executor_dml_test.go
index 3dce4e212ef..792e197f48d 100644
--- a/go/vt/vtgate/executor_dml_test.go
+++ b/go/vt/vtgate/executor_dml_test.go
@@ -25,6 +25,8 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
+ econtext "vitess.io/vitess/go/vt/vtgate/executorcontext"
+
"vitess.io/vitess/go/mysql/config"
"vitess.io/vitess/go/mysql/sqlerror"
"vitess.io/vitess/go/sqltypes"
@@ -135,7 +137,6 @@ func TestUpdateEqual(t *testing.T) {
func TestUpdateFromSubQuery(t *testing.T) {
executor, sbc1, sbc2, _, ctx := createExecutorEnv(t)
- executor.pv = querypb.ExecuteOptions_Gen4
logChan := executor.queryLogger.Subscribe("Test")
defer executor.queryLogger.Unsubscribe(logChan)
@@ -234,7 +235,7 @@ func TestUpdateInTransactionLookupDefaultReadLock(t *testing.T) {
)}
executor, sbc1, sbc2, sbcLookup, ctx := createCustomExecutorSetValues(t, executorVSchema, res)
- safeSession := NewSafeSession(&vtgatepb.Session{InTransaction: true})
+ safeSession := econtext.NewSafeSession(&vtgatepb.Session{InTransaction: true})
_, err := executorExecSession(ctx,
executor,
"update t2_lookup set lu_col = 5 where nv_lu_col = 2",
@@ -296,7 +297,7 @@ func TestUpdateInTransactionLookupExclusiveReadLock(t *testing.T) {
)}
executor, sbc1, sbc2, sbcLookup, ctx := createCustomExecutorSetValues(t, executorVSchema, res)
- safeSession := NewSafeSession(&vtgatepb.Session{InTransaction: true})
+ safeSession := econtext.NewSafeSession(&vtgatepb.Session{InTransaction: true})
_, err := executorExecSession(ctx,
executor,
"update t2_lookup set lu_col = 5 where erl_lu_col = 2",
@@ -358,7 +359,7 @@ func TestUpdateInTransactionLookupSharedReadLock(t *testing.T) {
)}
executor, sbc1, sbc2, sbcLookup, ctx := createCustomExecutorSetValues(t, executorVSchema, res)
- safeSession := NewSafeSession(&vtgatepb.Session{InTransaction: true})
+ safeSession := econtext.NewSafeSession(&vtgatepb.Session{InTransaction: true})
_, err := executorExecSession(ctx,
executor,
"update t2_lookup set lu_col = 5 where srl_lu_col = 2",
@@ -420,7 +421,7 @@ func TestUpdateInTransactionLookupNoReadLock(t *testing.T) {
)}
executor, sbc1, sbc2, sbcLookup, ctx := createCustomExecutorSetValues(t, executorVSchema, res)
- safeSession := NewSafeSession(&vtgatepb.Session{InTransaction: true})
+ safeSession := econtext.NewSafeSession(&vtgatepb.Session{InTransaction: true})
_, err := executorExecSession(ctx,
executor,
"update t2_lookup set lu_col = 5 where nrl_lu_col = 2",
@@ -2066,7 +2067,7 @@ func TestInsertPartialFail1(t *testing.T) {
context.Background(),
nil,
"TestExecute",
- NewSafeSession(&vtgatepb.Session{InTransaction: true}),
+ econtext.NewSafeSession(&vtgatepb.Session{InTransaction: true}),
"insert into user(id, v, name) values (1, 2, 'myname')",
nil,
)
@@ -2082,7 +2083,7 @@ func TestInsertPartialFail2(t *testing.T) {
// Make the second DML fail, it should result in a rollback.
sbc1.MustFailExecute[sqlparser.StmtInsert] = 1
- safeSession := NewSafeSession(&vtgatepb.Session{InTransaction: true})
+ safeSession := econtext.NewSafeSession(&vtgatepb.Session{InTransaction: true})
_, err := executor.Execute(
context.Background(),
nil,
@@ -2656,7 +2657,7 @@ func TestReservedConnDML(t *testing.T) {
logChan := executor.queryLogger.Subscribe("TestReservedConnDML")
defer executor.queryLogger.Unsubscribe(logChan)
- session := NewAutocommitSession(&vtgatepb.Session{EnableSystemSettings: true})
+ session := econtext.NewAutocommitSession(&vtgatepb.Session{EnableSystemSettings: true})
_, err := executor.Execute(ctx, nil, "TestReservedConnDML", session, "use "+KsTestUnsharded, nil)
require.NoError(t, err)
@@ -2708,7 +2709,7 @@ func TestStreamingDML(t *testing.T) {
logChan := executor.queryLogger.Subscribe(method)
defer executor.queryLogger.Unsubscribe(logChan)
- session := NewAutocommitSession(&vtgatepb.Session{})
+ session := econtext.NewAutocommitSession(&vtgatepb.Session{})
tcases := []struct {
query string
@@ -2792,7 +2793,7 @@ func TestPartialVindexInsertQueryFailure(t *testing.T) {
logChan := executor.queryLogger.Subscribe("Test")
defer executor.queryLogger.Unsubscribe(logChan)
- session := NewAutocommitSession(&vtgatepb.Session{})
+ session := econtext.NewAutocommitSession(&vtgatepb.Session{})
require.True(t, session.GetAutocommit())
require.False(t, session.InTransaction())
@@ -2845,7 +2846,7 @@ func TestPartialVindexInsertQueryFailureAutoCommit(t *testing.T) {
logChan := executor.queryLogger.Subscribe("Test")
defer executor.queryLogger.Unsubscribe(logChan)
- session := NewAutocommitSession(&vtgatepb.Session{})
+ session := econtext.NewAutocommitSession(&vtgatepb.Session{})
require.True(t, session.GetAutocommit())
require.False(t, session.InTransaction())
@@ -2886,7 +2887,7 @@ func TestPartialVindexInsertQueryFailureAutoCommit(t *testing.T) {
func TestMultiInternalSavepoint(t *testing.T) {
executor, sbc1, sbc2, _, ctx := createExecutorEnv(t)
- session := NewAutocommitSession(&vtgatepb.Session{})
+ session := econtext.NewAutocommitSession(&vtgatepb.Session{})
_, err := executorExecSession(ctx, executor, "begin", nil, session.Session)
require.NoError(t, err)
@@ -2935,7 +2936,7 @@ func TestInsertSelectFromDual(t *testing.T) {
logChan := executor.queryLogger.Subscribe("TestInsertSelect")
defer executor.queryLogger.Unsubscribe(logChan)
- session := NewAutocommitSession(&vtgatepb.Session{})
+ session := econtext.NewAutocommitSession(&vtgatepb.Session{})
query := "insert into user(id, v, name) select 1, 2, 'myname' from dual"
wantQueries := []*querypb.BoundQuery{{
@@ -2990,7 +2991,7 @@ func TestInsertSelectFromTable(t *testing.T) {
logChan := executor.queryLogger.Subscribe("TestInsertSelect")
defer executor.queryLogger.Unsubscribe(logChan)
- session := NewAutocommitSession(&vtgatepb.Session{})
+ session := econtext.NewAutocommitSession(&vtgatepb.Session{})
query := "insert into user(id, name) select c1, c2 from music"
wantQueries := []*querypb.BoundQuery{{
@@ -3140,3 +3141,62 @@ func TestDeleteMultiTable(t *testing.T) {
// delete from `user` where (`user`.id) in ::dml_vals - 1 shard
testQueryLog(t, executor, logChan, "TestExecute", "DELETE", "delete `user` from `user` join music on `user`.col = music.col where music.user_id = 1", 18)
}
+
+// TestSessionRowsAffected test that rowsAffected is set correctly for each shard session.
+func TestSessionRowsAffected(t *testing.T) {
+ method := t.Name()
+ executor, _, sbc4060, _, ctx := createExecutorEnv(t)
+
+ session := econtext.NewAutocommitSession(&vtgatepb.Session{})
+
+ // start the transaction
+ _, err := executor.Execute(ctx, nil, method, session, "begin", nil)
+ require.NoError(t, err)
+
+ // -20 - select query
+ _, err = executor.Execute(ctx, nil, method, session, "select * from user where id = 1", nil)
+ require.NoError(t, err)
+ require.Len(t, session.ShardSessions, 1)
+ require.False(t, session.ShardSessions[0].RowsAffected)
+
+ // -20 - update query (rows affected)
+ _, err = executor.Execute(ctx, nil, method, session, "update user set foo = 41 where id = 1", nil)
+ require.NoError(t, err)
+ require.True(t, session.ShardSessions[0].RowsAffected)
+
+ // e0- - select query
+ _, err = executor.Execute(ctx, nil, method, session, "select * from user where id = 7", nil)
+ require.NoError(t, err)
+ assert.Len(t, session.ShardSessions, 2)
+ require.False(t, session.ShardSessions[1].RowsAffected)
+
+ // c0-e0 - update query (rows affected)
+ _, err = executor.Execute(ctx, nil, method, session, "update user set foo = 42 where id = 5", nil)
+ require.NoError(t, err)
+ require.Len(t, session.ShardSessions, 3)
+ require.True(t, session.ShardSessions[2].RowsAffected)
+
+ // 40-60 - update query (no rows affected)
+ sbc4060.SetResults([]*sqltypes.Result{{RowsAffected: 0}})
+ _, err = executor.Execute(ctx, nil, method, session, "update user set foo = 42 where id = 3", nil)
+ require.NoError(t, err)
+ assert.Len(t, session.ShardSessions, 4)
+ require.False(t, session.ShardSessions[3].RowsAffected)
+
+ // 40-60 - select query
+ _, err = executor.Execute(ctx, nil, method, session, "select * from user where id = 3", nil)
+ require.NoError(t, err)
+ require.False(t, session.ShardSessions[3].RowsAffected)
+
+ // 40-60 - delete query (rows affected)
+ _, err = executor.Execute(ctx, nil, method, session, "delete from user where id = 3", nil)
+ require.NoError(t, err)
+ require.True(t, session.ShardSessions[0].RowsAffected)
+ require.False(t, session.ShardSessions[1].RowsAffected)
+ require.True(t, session.ShardSessions[2].RowsAffected)
+ require.True(t, session.ShardSessions[3].RowsAffected)
+
+ _, err = executor.Execute(ctx, nil, method, session, "commit", nil)
+ require.NoError(t, err)
+ require.Zero(t, session.ShardSessions)
+}
diff --git a/go/vt/vtgate/executor_framework_test.go b/go/vt/vtgate/executor_framework_test.go
index 332139c4a78..2ee3425209f 100644
--- a/go/vt/vtgate/executor_framework_test.go
+++ b/go/vt/vtgate/executor_framework_test.go
@@ -28,6 +28,8 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
+ econtext "vitess.io/vitess/go/vt/vtgate/executorcontext"
+
"vitess.io/vitess/go/cache/theine"
"vitess.io/vitess/go/constants/sidecar"
"vitess.io/vitess/go/sqltypes"
@@ -307,7 +309,7 @@ func executorExecSession(ctx context.Context, executor *Executor, sql string, bv
ctx,
nil,
"TestExecute",
- NewSafeSession(session),
+ econtext.NewSafeSession(session),
sql,
bv)
}
@@ -320,7 +322,7 @@ func executorPrepare(ctx context.Context, executor *Executor, session *vtgatepb.
return executor.Prepare(
ctx,
"TestExecute",
- NewSafeSession(session),
+ econtext.NewSafeSession(session),
sql,
bv)
}
@@ -331,7 +333,7 @@ func executorStream(ctx context.Context, executor *Executor, sql string) (qr *sq
ctx,
nil,
"TestExecuteStream",
- NewSafeSession(nil),
+ econtext.NewSafeSession(nil),
sql,
nil,
func(qr *sqltypes.Result) error {
diff --git a/go/vt/vtgate/executor_scatter_stats_test.go b/go/vt/vtgate/executor_scatter_stats_test.go
index 84dd2744e8b..b665f850a23 100644
--- a/go/vt/vtgate/executor_scatter_stats_test.go
+++ b/go/vt/vtgate/executor_scatter_stats_test.go
@@ -24,12 +24,13 @@ import (
"github.com/stretchr/testify/require"
vtgatepb "vitess.io/vitess/go/vt/proto/vtgate"
+ econtext "vitess.io/vitess/go/vt/vtgate/executorcontext"
)
func TestScatterStatsWithNoScatterQuery(t *testing.T) {
executor, _, _, _, ctx := createExecutorEnv(t)
- session := NewSafeSession(&vtgatepb.Session{TargetString: "@primary"})
+ session := econtext.NewSafeSession(&vtgatepb.Session{TargetString: "@primary"})
_, err := executor.Execute(ctx, nil, "TestExecutorResultsExceeded", session, "select * from main1", nil)
require.NoError(t, err)
@@ -41,7 +42,7 @@ func TestScatterStatsWithNoScatterQuery(t *testing.T) {
func TestScatterStatsWithSingleScatterQuery(t *testing.T) {
executor, _, _, _, ctx := createExecutorEnv(t)
- session := NewSafeSession(&vtgatepb.Session{TargetString: "@primary"})
+ session := econtext.NewSafeSession(&vtgatepb.Session{TargetString: "@primary"})
_, err := executor.Execute(ctx, nil, "TestExecutorResultsExceeded", session, "select * from user", nil)
require.NoError(t, err)
@@ -53,7 +54,7 @@ func TestScatterStatsWithSingleScatterQuery(t *testing.T) {
func TestScatterStatsHttpWriting(t *testing.T) {
executor, _, _, _, ctx := createExecutorEnv(t)
- session := NewSafeSession(&vtgatepb.Session{TargetString: "@primary"})
+ session := econtext.NewSafeSession(&vtgatepb.Session{TargetString: "@primary"})
_, err := executor.Execute(ctx, nil, "TestExecutorResultsExceeded", session, "select * from user", nil)
require.NoError(t, err)
diff --git a/go/vt/vtgate/executor_select_test.go b/go/vt/vtgate/executor_select_test.go
index 8ba89d25daf..86aafaefba4 100644
--- a/go/vt/vtgate/executor_select_test.go
+++ b/go/vt/vtgate/executor_select_test.go
@@ -30,6 +30,8 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
+ econtext "vitess.io/vitess/go/vt/vtgate/executorcontext"
+
_flag "vitess.io/vitess/go/internal/flag"
"vitess.io/vitess/go/mysql/collations"
"vitess.io/vitess/go/sqltypes"
@@ -59,7 +61,7 @@ func TestSelectNext(t *testing.T) {
}}
// Autocommit
- session := NewAutocommitSession(&vtgatepb.Session{})
+ session := econtext.NewAutocommitSession(&vtgatepb.Session{})
_, err := executor.Execute(context.Background(), nil, "TestSelectNext", session, query, bv)
require.NoError(t, err)
@@ -69,7 +71,7 @@ func TestSelectNext(t *testing.T) {
sbclookup.Queries = nil
// Txn
- session = NewAutocommitSession(&vtgatepb.Session{})
+ session = econtext.NewAutocommitSession(&vtgatepb.Session{})
session.Session.InTransaction = true
_, err = executor.Execute(context.Background(), nil, "TestSelectNext", session, query, bv)
require.NoError(t, err)
@@ -80,7 +82,7 @@ func TestSelectNext(t *testing.T) {
sbclookup.Queries = nil
// Reserve
- session = NewAutocommitSession(&vtgatepb.Session{})
+ session = econtext.NewAutocommitSession(&vtgatepb.Session{})
session.Session.InReservedConn = true
_, err = executor.Execute(context.Background(), nil, "TestSelectNext", session, query, bv)
require.NoError(t, err)
@@ -91,7 +93,7 @@ func TestSelectNext(t *testing.T) {
sbclookup.Queries = nil
// Reserve and Txn
- session = NewAutocommitSession(&vtgatepb.Session{})
+ session = econtext.NewAutocommitSession(&vtgatepb.Session{})
session.Session.InReservedConn = true
session.Session.InTransaction = true
_, err = executor.Execute(context.Background(), nil, "TestSelectNext", session, query, bv)
@@ -107,7 +109,7 @@ func TestSelectDBA(t *testing.T) {
query := "select * from INFORMATION_SCHEMA.foo"
_, err := executor.Execute(context.Background(), nil, "TestSelectDBA",
- NewSafeSession(&vtgatepb.Session{TargetString: "TestExecutor"}),
+ econtext.NewSafeSession(&vtgatepb.Session{TargetString: "TestExecutor"}),
query, map[string]*querypb.BindVariable{},
)
require.NoError(t, err)
@@ -117,7 +119,7 @@ func TestSelectDBA(t *testing.T) {
sbc1.Queries = nil
query = "SELECT COUNT(*) FROM INFORMATION_SCHEMA.TABLES ist WHERE ist.table_schema = 'performance_schema' AND ist.table_name = 'foo'"
_, err = executor.Execute(context.Background(), nil, "TestSelectDBA",
- NewSafeSession(&vtgatepb.Session{TargetString: "TestExecutor"}),
+ econtext.NewSafeSession(&vtgatepb.Session{TargetString: "TestExecutor"}),
query, map[string]*querypb.BindVariable{},
)
require.NoError(t, err)
@@ -133,7 +135,7 @@ func TestSelectDBA(t *testing.T) {
sbc1.Queries = nil
query = "select 1 from information_schema.table_constraints where constraint_schema = 'vt_ks' and table_name = 'user'"
_, err = executor.Execute(context.Background(), nil, "TestSelectDBA",
- NewSafeSession(&vtgatepb.Session{TargetString: "TestExecutor"}),
+ econtext.NewSafeSession(&vtgatepb.Session{TargetString: "TestExecutor"}),
query, map[string]*querypb.BindVariable{},
)
require.NoError(t, err)
@@ -149,7 +151,7 @@ func TestSelectDBA(t *testing.T) {
sbc1.Queries = nil
query = "select 1 from information_schema.table_constraints where constraint_schema = 'vt_ks'"
_, err = executor.Execute(context.Background(), nil, "TestSelectDBA",
- NewSafeSession(&vtgatepb.Session{TargetString: "TestExecutor"}),
+ econtext.NewSafeSession(&vtgatepb.Session{TargetString: "TestExecutor"}),
query, map[string]*querypb.BindVariable{},
)
require.NoError(t, err)
@@ -167,7 +169,7 @@ func TestSystemVariablesMySQLBelow80(t *testing.T) {
executor.normalize = true
setVarEnabled = true
- session := NewAutocommitSession(&vtgatepb.Session{EnableSystemSettings: true, TargetString: "TestExecutor"})
+ session := econtext.NewAutocommitSession(&vtgatepb.Session{EnableSystemSettings: true, TargetString: "TestExecutor"})
sbc1.SetResults([]*sqltypes.Result{{
Fields: []*querypb.Field{
@@ -199,12 +201,8 @@ func TestSystemVariablesMySQLBelow80(t *testing.T) {
func TestSystemVariablesWithSetVarDisabled(t *testing.T) {
executor, sbc1, _, _, _ := createCustomExecutor(t, "{}", "8.0.0")
executor.normalize = true
-
- setVarEnabled = false
- defer func() {
- setVarEnabled = true
- }()
- session := NewAutocommitSession(&vtgatepb.Session{EnableSystemSettings: true, TargetString: "TestExecutor"})
+ executor.vConfig.SetVarEnabled = false
+ session := econtext.NewAutocommitSession(&vtgatepb.Session{EnableSystemSettings: true, TargetString: "TestExecutor"})
sbc1.SetResults([]*sqltypes.Result{{
Fields: []*querypb.Field{
@@ -237,7 +235,7 @@ func TestSetSystemVariablesTx(t *testing.T) {
executor, sbc1, _, _, _ := createCustomExecutor(t, "{}", "8.0.1")
executor.normalize = true
- session := NewAutocommitSession(&vtgatepb.Session{EnableSystemSettings: true, TargetString: "TestExecutor"})
+ session := econtext.NewAutocommitSession(&vtgatepb.Session{EnableSystemSettings: true, TargetString: "TestExecutor"})
_, err := executor.Execute(context.Background(), nil, "TestBegin", session, "begin", map[string]*querypb.BindVariable{})
require.NoError(t, err)
@@ -283,7 +281,7 @@ func TestSetSystemVariables(t *testing.T) {
executor, _, _, lookup, _ := createExecutorEnv(t)
executor.normalize = true
- session := NewAutocommitSession(&vtgatepb.Session{EnableSystemSettings: true, TargetString: KsTestUnsharded, SystemVariables: map[string]string{}})
+ session := econtext.NewAutocommitSession(&vtgatepb.Session{EnableSystemSettings: true, TargetString: KsTestUnsharded, SystemVariables: map[string]string{}})
// Set @@sql_mode and execute a select statement. We should have SET_VAR in the select statement
@@ -394,7 +392,7 @@ func TestSetSystemVariablesWithReservedConnection(t *testing.T) {
executor, sbc1, _, _, _ := createExecutorEnv(t)
executor.normalize = true
- session := NewAutocommitSession(&vtgatepb.Session{EnableSystemSettings: true, SystemVariables: map[string]string{}})
+ session := econtext.NewAutocommitSession(&vtgatepb.Session{EnableSystemSettings: true, SystemVariables: map[string]string{}})
sbc1.SetResults([]*sqltypes.Result{{
Fields: []*querypb.Field{
@@ -437,7 +435,7 @@ func TestSelectVindexFunc(t *testing.T) {
executor, _, _, _, _ := createExecutorEnv(t)
query := "select * from hash_index where id = 1"
- session := NewAutocommitSession(&vtgatepb.Session{})
+ session := econtext.NewAutocommitSession(&vtgatepb.Session{})
_, err := executor.Execute(context.Background(), nil, "TestSelectVindexFunc", session, query, nil)
require.ErrorContains(t, err, "VT09005: no database selected")
@@ -450,7 +448,7 @@ func TestCreateTableValidTimestamp(t *testing.T) {
executor, sbc1, _, _, _ := createExecutorEnv(t)
executor.normalize = true
- session := NewSafeSession(&vtgatepb.Session{TargetString: "TestExecutor", SystemVariables: map[string]string{"sql_mode": "ALLOW_INVALID_DATES"}})
+ session := econtext.NewSafeSession(&vtgatepb.Session{TargetString: "TestExecutor", SystemVariables: map[string]string{"sql_mode": "ALLOW_INVALID_DATES"}})
query := "create table aa(t timestamp default 0)"
_, err := executor.Execute(context.Background(), nil, "TestSelect", session, query, map[string]*querypb.BindVariable{})
@@ -468,11 +466,10 @@ func TestCreateTableValidTimestamp(t *testing.T) {
func TestGen4SelectDBA(t *testing.T) {
executor, sbc1, _, _, _ := createExecutorEnv(t)
executor.normalize = true
- executor.pv = querypb.ExecuteOptions_Gen4
query := "select * from INFORMATION_SCHEMA.TABLE_CONSTRAINTS"
_, err := executor.Execute(context.Background(), nil, "TestSelectDBA",
- NewSafeSession(&vtgatepb.Session{TargetString: "TestExecutor"}),
+ econtext.NewSafeSession(&vtgatepb.Session{TargetString: "TestExecutor"}),
query, map[string]*querypb.BindVariable{},
)
require.NoError(t, err)
@@ -483,7 +480,7 @@ func TestGen4SelectDBA(t *testing.T) {
sbc1.Queries = nil
query = "SELECT COUNT(*) FROM INFORMATION_SCHEMA.TABLES ist WHERE ist.table_schema = 'performance_schema' AND ist.table_name = 'foo'"
_, err = executor.Execute(context.Background(), nil, "TestSelectDBA",
- NewSafeSession(&vtgatepb.Session{TargetString: "TestExecutor"}),
+ econtext.NewSafeSession(&vtgatepb.Session{TargetString: "TestExecutor"}),
query, map[string]*querypb.BindVariable{},
)
require.NoError(t, err)
@@ -501,7 +498,7 @@ func TestGen4SelectDBA(t *testing.T) {
sbc1.Queries = nil
query = "select 1 from information_schema.table_constraints where constraint_schema = 'vt_ks' and table_name = 'user'"
_, err = executor.Execute(context.Background(), nil, "TestSelectDBA",
- NewSafeSession(&vtgatepb.Session{TargetString: "TestExecutor"}),
+ econtext.NewSafeSession(&vtgatepb.Session{TargetString: "TestExecutor"}),
query, map[string]*querypb.BindVariable{},
)
require.NoError(t, err)
@@ -519,7 +516,7 @@ func TestGen4SelectDBA(t *testing.T) {
sbc1.Queries = nil
query = "select 1 from information_schema.table_constraints where constraint_schema = 'vt_ks'"
- _, err = executor.Execute(context.Background(), nil, "TestSelectDBA", NewSafeSession(&vtgatepb.Session{TargetString: "TestExecutor"}), query, map[string]*querypb.BindVariable{})
+ _, err = executor.Execute(context.Background(), nil, "TestSelectDBA", econtext.NewSafeSession(&vtgatepb.Session{TargetString: "TestExecutor"}), query, map[string]*querypb.BindVariable{})
require.NoError(t, err)
wantQueries = []*querypb.BoundQuery{{
Sql: "select :vtg1 /* INT64 */ from information_schema.table_constraints where constraint_schema = :__vtschemaname /* VARCHAR */",
@@ -534,7 +531,7 @@ func TestGen4SelectDBA(t *testing.T) {
sbc1.Queries = nil
query = "select t.table_schema,t.table_name,c.column_name,c.column_type from tables t join columns c on c.table_schema = t.table_schema and c.table_name = t.table_name where t.table_schema = 'TestExecutor' and c.table_schema = 'TestExecutor' order by t.table_schema,t.table_name,c.column_name"
_, err = executor.Execute(context.Background(), nil, "TestSelectDBA",
- NewSafeSession(&vtgatepb.Session{TargetString: "information_schema"}),
+ econtext.NewSafeSession(&vtgatepb.Session{TargetString: "information_schema"}),
query, map[string]*querypb.BindVariable{},
)
require.NoError(t, err)
@@ -651,7 +648,7 @@ func TestStreamBuffering(t *testing.T) {
context.Background(),
nil,
"TestStreamBuffering",
- NewSafeSession(session),
+ econtext.NewSafeSession(session),
"select id from music_user_map where id = 1",
nil,
func(qr *sqltypes.Result) error {
@@ -723,7 +720,7 @@ func TestStreamLimitOffset(t *testing.T) {
context.Background(),
nil,
"TestStreamLimitOffset",
- NewSafeSession(session),
+ econtext.NewSafeSession(session),
"select id, textcol from user order by id limit 2 offset 2",
nil,
func(qr *sqltypes.Result) error {
@@ -1083,7 +1080,7 @@ func TestSelectDatabase(t *testing.T) {
newSession := &vtgatepb.Session{
TargetString: "@primary",
}
- session := NewSafeSession(newSession)
+ session := econtext.NewSafeSession(newSession)
session.TargetString = "TestExecutor@primary"
result, err := executor.Execute(
context.Background(),
@@ -1283,7 +1280,6 @@ func TestSelectEqual(t *testing.T) {
func TestSelectINFromOR(t *testing.T) {
executor, sbc1, _, _, ctx := createExecutorEnv(t)
- executor.pv = querypb.ExecuteOptions_Gen4
session := &vtgatepb.Session{
TargetString: "@primary",
@@ -2951,7 +2947,7 @@ func TestSubQueryAndQueryWithLimit(t *testing.T) {
sbc1.SetResults(result1)
sbc2.SetResults(result2)
- exec(executor, NewSafeSession(&vtgatepb.Session{
+ exec(executor, econtext.NewSafeSession(&vtgatepb.Session{
TargetString: "@primary",
}), "select id1, id2 from t1 where id1 >= ( select id1 from t1 order by id1 asc limit 1) limit 100")
require.Equal(t, 2, len(sbc1.Queries))
@@ -3000,7 +2996,7 @@ func TestSelectUsingMultiEqualOnLookupColumn(t *testing.T) {
}},
}})
- result, err := exec(executor, NewSafeSession(&vtgatepb.Session{
+ result, err := exec(executor, econtext.NewSafeSession(&vtgatepb.Session{
TargetString: KsTestSharded,
}), "select nv_lu_col, other from t2_lookup WHERE (nv_lu_col = 1 AND other = 'bar') OR (nv_lu_col = 2 AND other = 'baz') OR (nv_lu_col = 3 AND other = 'qux') OR (nv_lu_col = 4 AND other = 'brz') OR (nv_lu_col = 5 AND other = 'brz')")
@@ -3197,7 +3193,7 @@ func TestSelectWithUnionAll(t *testing.T) {
func TestSelectLock(t *testing.T) {
executor, sbc1, _, _, _ := createExecutorEnv(t)
- session := NewSafeSession(nil)
+ session := econtext.NewSafeSession(nil)
session.Session.InTransaction = true
session.ShardSessions = []*vtgatepb.Session_ShardSession{{
Target: &querypb.Target{
@@ -3265,7 +3261,7 @@ func TestLockReserve(t *testing.T) {
"select release_lock('lock name') from dual",
}
- session := NewAutocommitSession(&vtgatepb.Session{})
+ session := econtext.NewAutocommitSession(&vtgatepb.Session{})
for _, sql := range tcases {
t.Run(sql, func(t *testing.T) {
@@ -3283,7 +3279,7 @@ func TestLockReserve(t *testing.T) {
func TestSelectFromInformationSchema(t *testing.T) {
executor, sbc1, _, _, _ := createExecutorEnv(t)
- session := NewSafeSession(nil)
+ session := econtext.NewSafeSession(nil)
// check failure when trying to query two keyspaces
_, err := exec(executor, session, "SELECT B.TABLE_NAME FROM INFORMATION_SCHEMA.TABLES AS A, INFORMATION_SCHEMA.COLUMNS AS B WHERE A.TABLE_SCHEMA = 'TestExecutor' AND A.TABLE_SCHEMA = 'TestXBadSharding'")
@@ -3410,8 +3406,8 @@ func TestSelectScatterFails(t *testing.T) {
func TestGen4SelectStraightJoin(t *testing.T) {
executor, sbc1, _, _, _ := createExecutorEnv(t)
executor.normalize = true
- executor.pv = querypb.ExecuteOptions_Gen4
- session := NewSafeSession(&vtgatepb.Session{TargetString: "TestExecutor"})
+
+ session := econtext.NewSafeSession(&vtgatepb.Session{TargetString: "TestExecutor"})
query := "select u.id from user u straight_join user2 u2 on u.id = u2.id"
_, err := executor.Execute(context.Background(), nil,
"TestGen4SelectStraightJoin",
@@ -3432,9 +3428,8 @@ func TestGen4SelectStraightJoin(t *testing.T) {
func TestGen4MultiColumnVindexEqual(t *testing.T) {
executor, sbc1, sbc2, _, _ := createExecutorEnv(t)
executor.normalize = true
- executor.pv = querypb.ExecuteOptions_Gen4
- session := NewSafeSession(&vtgatepb.Session{TargetString: "TestExecutor"})
+ session := econtext.NewSafeSession(&vtgatepb.Session{TargetString: "TestExecutor"})
query := "select * from user_region where cola = 1 and colb = 2"
_, err := executor.Execute(context.Background(), nil, "TestGen4MultiColumnVindex", session, query, map[string]*querypb.BindVariable{})
require.NoError(t, err)
@@ -3471,9 +3466,8 @@ func TestGen4MultiColumnVindexEqual(t *testing.T) {
func TestGen4MultiColumnVindexIn(t *testing.T) {
executor, sbc1, sbc2, _, _ := createExecutorEnv(t)
executor.normalize = true
- executor.pv = querypb.ExecuteOptions_Gen4
- session := NewSafeSession(&vtgatepb.Session{TargetString: "TestExecutor"})
+ session := econtext.NewSafeSession(&vtgatepb.Session{TargetString: "TestExecutor"})
query := "select * from user_region where cola IN (1,17984) and colb IN (2,3,4)"
_, err := executor.Execute(context.Background(), nil, "TestGen4MultiColumnVindex", session, query, map[string]*querypb.BindVariable{})
require.NoError(t, err)
@@ -3510,9 +3504,8 @@ func TestGen4MultiColumnVindexIn(t *testing.T) {
func TestGen4MultiColMixedColComparision(t *testing.T) {
executor, sbc1, sbc2, _, _ := createExecutorEnv(t)
executor.normalize = true
- executor.pv = querypb.ExecuteOptions_Gen4
- session := NewSafeSession(&vtgatepb.Session{TargetString: "TestExecutor"})
+ session := econtext.NewSafeSession(&vtgatepb.Session{TargetString: "TestExecutor"})
query := "select * from user_region where colb = 2 and cola IN (1,17984)"
_, err := executor.Execute(context.Background(), nil, "TestGen4MultiColMixedColComparision", session, query, map[string]*querypb.BindVariable{})
require.NoError(t, err)
@@ -3547,9 +3540,8 @@ func TestGen4MultiColMixedColComparision(t *testing.T) {
func TestGen4MultiColBestVindexSel(t *testing.T) {
executor, sbc1, sbc2, _, _ := createExecutorEnv(t)
executor.normalize = true
- executor.pv = querypb.ExecuteOptions_Gen4
- session := NewSafeSession(&vtgatepb.Session{TargetString: "TestExecutor"})
+ session := econtext.NewSafeSession(&vtgatepb.Session{TargetString: "TestExecutor"})
query := "select * from user_region where colb = 2 and cola IN (1,17984) and cola = 1"
_, err := executor.Execute(context.Background(), nil, "TestGen4MultiColBestVindexSel", session, query, map[string]*querypb.BindVariable{})
require.NoError(t, err)
@@ -3593,9 +3585,8 @@ func TestGen4MultiColBestVindexSel(t *testing.T) {
func TestGen4MultiColMultiEqual(t *testing.T) {
executor, sbc1, sbc2, _, _ := createExecutorEnv(t)
executor.normalize = true
- executor.pv = querypb.ExecuteOptions_Gen4
- session := NewSafeSession(&vtgatepb.Session{TargetString: "TestExecutor"})
+ session := econtext.NewSafeSession(&vtgatepb.Session{TargetString: "TestExecutor"})
query := "select * from user_region where (cola,colb) in ((17984,2),(17984,3))"
_, err := executor.Execute(context.Background(), nil, "TestGen4MultiColMultiEqual", session, query, map[string]*querypb.BindVariable{})
require.NoError(t, err)
@@ -3615,7 +3606,6 @@ func TestGen4MultiColMultiEqual(t *testing.T) {
func TestGen4SelectUnqualifiedReferenceTable(t *testing.T) {
executor, sbc1, sbc2, sbclookup, ctx := createExecutorEnv(t)
- executor.pv = querypb.ExecuteOptions_Gen4
query := "select * from zip_detail"
session := &vtgatepb.Session{
@@ -3636,7 +3626,6 @@ func TestGen4SelectUnqualifiedReferenceTable(t *testing.T) {
func TestGen4SelectQualifiedReferenceTable(t *testing.T) {
executor, sbc1, sbc2, sbclookup, ctx := createExecutorEnv(t)
- executor.pv = querypb.ExecuteOptions_Gen4
query := fmt.Sprintf("select * from %s.zip_detail", KsTestSharded)
session := &vtgatepb.Session{
@@ -3657,7 +3646,6 @@ func TestGen4SelectQualifiedReferenceTable(t *testing.T) {
func TestGen4JoinUnqualifiedReferenceTable(t *testing.T) {
executor, sbc1, sbc2, sbclookup, ctx := createExecutorEnv(t)
- executor.pv = querypb.ExecuteOptions_Gen4
query := "select * from user join zip_detail on user.zip_detail_id = zip_detail.id"
session := &vtgatepb.Session{
@@ -3694,7 +3682,6 @@ func TestGen4JoinUnqualifiedReferenceTable(t *testing.T) {
func TestGen4CrossShardJoinQualifiedReferenceTable(t *testing.T) {
executor, sbc1, sbc2, sbclookup, ctx := createExecutorEnv(t)
- executor.pv = querypb.ExecuteOptions_Gen4
query := "select user.id from user join TestUnsharded.zip_detail on user.zip_detail_id = TestUnsharded.zip_detail.id"
session := &vtgatepb.Session{
@@ -3751,7 +3738,6 @@ func TestRegionRange(t *testing.T) {
}
executor := createExecutor(ctx, serv, cell, resolver)
defer executor.Close()
- executor.pv = querypb.ExecuteOptions_Gen4
tcases := []struct {
regionID int
@@ -3769,7 +3755,7 @@ func TestRegionRange(t *testing.T) {
for _, tcase := range tcases {
t.Run(strconv.Itoa(tcase.regionID), func(t *testing.T) {
sql := fmt.Sprintf("select * from user_region where cola = %d", tcase.regionID)
- _, err := executor.Execute(context.Background(), nil, "TestRegionRange", NewAutocommitSession(&vtgatepb.Session{}), sql, nil)
+ _, err := executor.Execute(context.Background(), nil, "TestRegionRange", econtext.NewAutocommitSession(&vtgatepb.Session{}), sql, nil)
require.NoError(t, err)
count := 0
for _, sbc := range conns {
@@ -3801,7 +3787,6 @@ func TestMultiCol(t *testing.T) {
}
executor := createExecutor(ctx, serv, cell, resolver)
defer executor.Close()
- executor.pv = querypb.ExecuteOptions_Gen4
tcases := []struct {
cola, colb, colc int
@@ -3817,7 +3802,7 @@ func TestMultiCol(t *testing.T) {
shards: []string{"20a0-"},
}}
- session := NewAutocommitSession(&vtgatepb.Session{})
+ session := econtext.NewAutocommitSession(&vtgatepb.Session{})
for _, tcase := range tcases {
t.Run(fmt.Sprintf("%d_%d_%d", tcase.cola, tcase.colb, tcase.colc), func(t *testing.T) {
@@ -3882,7 +3867,6 @@ func TestMultiColPartial(t *testing.T) {
}
executor := createExecutor(ctx, serv, cell, resolver)
defer executor.Close()
- executor.pv = querypb.ExecuteOptions_Gen4
tcases := []struct {
where string
@@ -3907,7 +3891,7 @@ func TestMultiColPartial(t *testing.T) {
shards: []string{"20a0c0-"},
}}
- session := NewAutocommitSession(&vtgatepb.Session{})
+ session := econtext.NewAutocommitSession(&vtgatepb.Session{})
for _, tcase := range tcases {
t.Run(tcase.where, func(t *testing.T) {
@@ -3946,7 +3930,6 @@ func TestSelectAggregationNoData(t *testing.T) {
}
executor := createExecutor(ctx, serv, cell, resolver)
defer executor.Close()
- executor.pv = querypb.ExecuteOptions_Gen4
tcases := []struct {
sql string
@@ -4038,7 +4021,6 @@ func TestSelectAggregationData(t *testing.T) {
}
executor := createExecutor(ctx, serv, cell, resolver)
defer executor.Close()
- executor.pv = querypb.ExecuteOptions_Gen4
tcases := []struct {
sql string
@@ -4196,8 +4178,7 @@ func TestSelectAggregationRandom(t *testing.T) {
executor := createExecutor(ctx, serv, cell, resolver)
defer executor.Close()
- executor.pv = querypb.ExecuteOptions_Gen4
- session := NewAutocommitSession(&vtgatepb.Session{})
+ session := econtext.NewAutocommitSession(&vtgatepb.Session{})
rs, err := executor.Execute(context.Background(), nil, "TestSelectCFC", session, "select /*vt+ PLANNER=gen4 */ A.a, A.b, (A.a / A.b) as c from (select sum(a) as a, sum(b) as b from user) A", nil)
require.NoError(t, err)
@@ -4207,7 +4188,7 @@ func TestSelectAggregationRandom(t *testing.T) {
func TestSelectDateTypes(t *testing.T) {
executor, _, _, _, _ := createExecutorEnv(t)
executor.normalize = true
- session := NewAutocommitSession(&vtgatepb.Session{})
+ session := econtext.NewAutocommitSession(&vtgatepb.Session{})
qr, err := executor.Execute(context.Background(), nil, "TestSelectDateTypes", session, "select '2020-01-01' + interval month(date_sub(FROM_UNIXTIME(1234), interval 1 month))-1 month", nil)
require.NoError(t, err)
@@ -4218,7 +4199,7 @@ func TestSelectDateTypes(t *testing.T) {
func TestSelectHexAndBit(t *testing.T) {
executor, _, _, _, _ := createExecutorEnv(t)
executor.normalize = true
- session := NewAutocommitSession(&vtgatepb.Session{})
+ session := econtext.NewAutocommitSession(&vtgatepb.Session{})
qr, err := executor.Execute(context.Background(), nil, "TestSelectHexAndBit", session, "select 0b1001, b'1001', 0x9, x'09'", nil)
require.NoError(t, err)
@@ -4234,7 +4215,7 @@ func TestSelectHexAndBit(t *testing.T) {
func TestSelectCFC(t *testing.T) {
executor, _, _, _, _ := createExecutorEnv(t)
executor.normalize = true
- session := NewAutocommitSession(&vtgatepb.Session{})
+ session := econtext.NewAutocommitSession(&vtgatepb.Session{})
_, err := executor.Execute(context.Background(), nil, "TestSelectCFC", session, "select /*vt+ PLANNER=gen4 */ c2 from tbl_cfc where c1 like 'A%'", nil)
require.NoError(t, err)
@@ -4263,7 +4244,7 @@ func TestSelectView(t *testing.T) {
require.NoError(t, err)
executor.normalize = true
- session := NewAutocommitSession(&vtgatepb.Session{})
+ session := econtext.NewAutocommitSession(&vtgatepb.Session{})
_, err = executor.Execute(context.Background(), nil, "TestSelectView", session, "select * from user_details_view", nil)
require.NoError(t, err)
@@ -4304,7 +4285,7 @@ func TestWarmingReads(t *testing.T) {
executor, primary, replica := createExecutorEnvWithPrimaryReplicaConn(t, ctx, 100)
executor.normalize = true
- session := NewSafeSession(&vtgatepb.Session{TargetString: KsTestUnsharded})
+ session := econtext.NewSafeSession(&vtgatepb.Session{TargetString: KsTestUnsharded})
// Since queries on the replica will run in a separate go-routine, we need synchronization for the Queries field in the sandboxconn.
replica.RequireQueriesLocking()
@@ -4368,6 +4349,7 @@ func TestWarmingReads(t *testing.T) {
// waitUntilQueryCount waits until the number of queries run on the tablet reach the specified count.
func waitUntilQueryCount(t *testing.T, tab *sandboxconn.SandboxConn, count int) {
+ t.Helper()
timeout := time.After(1 * time.Second)
for {
select {
@@ -4428,7 +4410,7 @@ func TestStreamJoinQuery(t *testing.T) {
func TestSysVarGlobalAndSession(t *testing.T) {
executor, sbc1, _, _, _ := createExecutorEnv(t)
executor.normalize = true
- session := NewAutocommitSession(&vtgatepb.Session{EnableSystemSettings: true, SystemVariables: map[string]string{}})
+ session := econtext.NewAutocommitSession(&vtgatepb.Session{EnableSystemSettings: true, SystemVariables: map[string]string{}})
sbc1.SetResults([]*sqltypes.Result{
sqltypes.MakeTestResult(sqltypes.MakeTestFields("innodb_lock_wait_timeout", "uint64"), "20"),
diff --git a/go/vt/vtgate/executor_set_test.go b/go/vt/vtgate/executor_set_test.go
index 12e8e272bd7..f8ed0b558c3 100644
--- a/go/vt/vtgate/executor_set_test.go
+++ b/go/vt/vtgate/executor_set_test.go
@@ -22,6 +22,7 @@ import (
"vitess.io/vitess/go/mysql/sqlerror"
querypb "vitess.io/vitess/go/vt/proto/query"
+ econtext "vitess.io/vitess/go/vt/vtgate/executorcontext"
"vitess.io/vitess/go/test/utils"
@@ -266,7 +267,7 @@ func TestExecutorSet(t *testing.T) {
}}
for i, tcase := range testcases {
t.Run(fmt.Sprintf("%d-%s", i, tcase.in), func(t *testing.T) {
- session := NewSafeSession(&vtgatepb.Session{Autocommit: true})
+ session := econtext.NewSafeSession(&vtgatepb.Session{Autocommit: true})
_, err := executorEnv.Execute(ctx, nil, "TestExecute", session, tcase.in, nil)
if tcase.err == "" {
require.NoError(t, err)
@@ -374,7 +375,7 @@ func TestExecutorSetOp(t *testing.T) {
}}
for _, tcase := range testcases {
t.Run(tcase.in, func(t *testing.T) {
- session := NewAutocommitSession(&vtgatepb.Session{
+ session := econtext.NewAutocommitSession(&vtgatepb.Session{
TargetString: "@primary",
})
session.TargetString = KsTestUnsharded
@@ -392,7 +393,7 @@ func TestExecutorSetMetadata(t *testing.T) {
t.Run("Session 1", func(t *testing.T) {
executor, _, _, _, ctx := createExecutorEnv(t)
- session := NewSafeSession(&vtgatepb.Session{TargetString: "@primary", Autocommit: true})
+ session := econtext.NewSafeSession(&vtgatepb.Session{TargetString: "@primary", Autocommit: true})
set := "set @@vitess_metadata.app_keyspace_v1= '1'"
_, err := executor.Execute(ctx, nil, "TestExecute", session, set, nil)
@@ -400,21 +401,21 @@ func TestExecutorSetMetadata(t *testing.T) {
})
t.Run("Session 2", func(t *testing.T) {
- vschemaacl.AuthorizedDDLUsers = "%"
+ vschemaacl.AuthorizedDDLUsers.Set(vschemaacl.NewAuthorizedDDLUsers("%"))
defer func() {
- vschemaacl.AuthorizedDDLUsers = ""
+ vschemaacl.AuthorizedDDLUsers.Set(vschemaacl.NewAuthorizedDDLUsers(""))
}()
executor, _, _, _, ctx := createExecutorEnv(t)
- session := NewSafeSession(&vtgatepb.Session{TargetString: "@primary", Autocommit: true})
+ session := econtext.NewSafeSession(&vtgatepb.Session{TargetString: "@primary", Autocommit: true})
set := "set @@vitess_metadata.app_keyspace_v1= '1'"
_, err := executor.Execute(ctx, nil, "TestExecute", session, set, nil)
- assert.NoError(t, err, "%s error: %v", set, err)
+ require.NoError(t, err, "%s error: %v", set, err)
show := `show vitess_metadata variables like 'app\\_keyspace\\_v_'`
result, err := executor.Execute(ctx, nil, "TestExecute", session, show, nil)
- assert.NoError(t, err)
+ require.NoError(t, err)
want := "1"
got := result.Rows[0][1].ToString()
@@ -423,11 +424,11 @@ func TestExecutorSetMetadata(t *testing.T) {
// Update metadata
set = "set @@vitess_metadata.app_keyspace_v2='2'"
_, err = executor.Execute(ctx, nil, "TestExecute", session, set, nil)
- assert.NoError(t, err, "%s error: %v", set, err)
+ require.NoError(t, err, "%s error: %v", set, err)
show = `show vitess_metadata variables like 'app\\_keyspace\\_v%'`
gotqr, err := executor.Execute(ctx, nil, "TestExecute", session, show, nil)
- assert.NoError(t, err)
+ require.NoError(t, err)
wantqr := &sqltypes.Result{
Fields: buildVarCharFields("Key", "Value"),
@@ -469,7 +470,7 @@ func TestPlanExecutorSetUDV(t *testing.T) {
}}
for _, tcase := range testcases {
t.Run(tcase.in, func(t *testing.T) {
- session := NewSafeSession(&vtgatepb.Session{Autocommit: true})
+ session := econtext.NewSafeSession(&vtgatepb.Session{Autocommit: true})
_, err := executor.Execute(ctx, nil, "TestExecute", session, tcase.in, nil)
if err != nil {
require.EqualError(t, err, tcase.err)
@@ -515,7 +516,7 @@ func TestSetVar(t *testing.T) {
executor, _, _, sbc, ctx := createCustomExecutor(t, "{}", "8.0.0")
executor.normalize = true
- session := NewAutocommitSession(&vtgatepb.Session{EnableSystemSettings: true, TargetString: KsTestUnsharded})
+ session := econtext.NewAutocommitSession(&vtgatepb.Session{EnableSystemSettings: true, TargetString: KsTestUnsharded})
sbc.SetResults([]*sqltypes.Result{sqltypes.MakeTestResult(
sqltypes.MakeTestFields("orig|new", "varchar|varchar"),
@@ -554,7 +555,7 @@ func TestSetVarShowVariables(t *testing.T) {
executor, _, _, sbc, ctx := createCustomExecutor(t, "{}", "8.0.0")
executor.normalize = true
- session := NewAutocommitSession(&vtgatepb.Session{EnableSystemSettings: true, TargetString: KsTestUnsharded})
+ session := econtext.NewAutocommitSession(&vtgatepb.Session{EnableSystemSettings: true, TargetString: KsTestUnsharded})
sbc.SetResults([]*sqltypes.Result{
// select query result for checking any change in system settings
@@ -597,7 +598,7 @@ func TestExecutorSetAndSelect(t *testing.T) {
sysVar: "tx_isolation",
exp: `[[VARCHAR("READ-UNCOMMITTED")]]`, // this returns the value set in previous query.
}}
- session := NewAutocommitSession(&vtgatepb.Session{TargetString: KsTestUnsharded, EnableSystemSettings: true})
+ session := econtext.NewAutocommitSession(&vtgatepb.Session{TargetString: KsTestUnsharded, EnableSystemSettings: true})
for _, tcase := range testcases {
t.Run(fmt.Sprintf("%s-%s", tcase.sysVar, tcase.val), func(t *testing.T) {
sbc.ExecCount.Store(0) // reset the value
@@ -631,7 +632,7 @@ func TestExecutorSetAndSelect(t *testing.T) {
func TestExecutorTimeZone(t *testing.T) {
e, _, _, _, ctx := createExecutorEnv(t)
- session := NewAutocommitSession(&vtgatepb.Session{TargetString: KsTestUnsharded, EnableSystemSettings: true})
+ session := econtext.NewAutocommitSession(&vtgatepb.Session{TargetString: KsTestUnsharded, EnableSystemSettings: true})
session.SetSystemVariable("time_zone", "'+08:00'")
qr, err := e.Execute(ctx, nil, "TestExecutorSetAndSelect", session, "select now()", nil)
diff --git a/go/vt/vtgate/executor_stream_test.go b/go/vt/vtgate/executor_stream_test.go
index b8cfeaf3cd5..a8500dd59c4 100644
--- a/go/vt/vtgate/executor_stream_test.go
+++ b/go/vt/vtgate/executor_stream_test.go
@@ -31,6 +31,7 @@ import (
topodatapb "vitess.io/vitess/go/vt/proto/topodata"
vtgatepb "vitess.io/vitess/go/vt/proto/vtgate"
"vitess.io/vitess/go/vt/vtenv"
+ econtext "vitess.io/vitess/go/vt/vtgate/executorcontext"
"vitess.io/vitess/go/vt/vtgate/logstats"
_ "vitess.io/vitess/go/vt/vtgate/vindexes"
"vitess.io/vitess/go/vt/vttablet/sandboxconn"
@@ -102,7 +103,7 @@ func executorStreamMessages(executor *Executor, sql string) (qr *sqltypes.Result
ctx,
nil,
"TestExecuteStream",
- NewSafeSession(session),
+ econtext.NewSafeSession(session),
sql,
nil,
func(qr *sqltypes.Result) error {
diff --git a/go/vt/vtgate/executor_test.go b/go/vt/vtgate/executor_test.go
index 3732a37d1d1..d3ab28d6600 100644
--- a/go/vt/vtgate/executor_test.go
+++ b/go/vt/vtgate/executor_test.go
@@ -36,6 +36,8 @@ import (
"github.com/stretchr/testify/require"
"google.golang.org/protobuf/proto"
+ econtext "vitess.io/vitess/go/vt/vtgate/executorcontext"
+
"vitess.io/vitess/go/mysql/collations"
"vitess.io/vitess/go/mysql/sqlerror"
"vitess.io/vitess/go/sqltypes"
@@ -64,7 +66,7 @@ func TestExecutorResultsExceeded(t *testing.T) {
warnMemoryRows = 3
defer func() { warnMemoryRows = save }()
- session := NewSafeSession(&vtgatepb.Session{TargetString: "@primary"})
+ session := econtext.NewSafeSession(&vtgatepb.Session{TargetString: "@primary"})
initial := warnings.Counts()["ResultsExceeded"]
@@ -88,7 +90,7 @@ func TestExecutorMaxMemoryRowsExceeded(t *testing.T) {
maxMemoryRows = 3
defer func() { maxMemoryRows = save }()
- session := NewSafeSession(&vtgatepb.Session{TargetString: "@primary"})
+ session := econtext.NewSafeSession(&vtgatepb.Session{TargetString: "@primary"})
result := sqltypes.MakeTestResult(sqltypes.MakeTestFields("col", "int64"), "1", "2", "3", "4")
fn := func(r *sqltypes.Result) error {
return nil
@@ -122,7 +124,7 @@ func TestExecutorMaxMemoryRowsExceeded(t *testing.T) {
func TestExecutorTransactionsNoAutoCommit(t *testing.T) {
executor, _, _, sbclookup, ctx := createExecutorEnv(t)
- session := NewSafeSession(&vtgatepb.Session{TargetString: "@primary", SessionUUID: "suuid"})
+ session := econtext.NewSafeSession(&vtgatepb.Session{TargetString: "@primary", SessionUUID: "suuid"})
logChan := executor.queryLogger.Subscribe("Test")
defer executor.queryLogger.Unsubscribe(logChan)
@@ -188,7 +190,7 @@ func TestExecutorTransactionsNoAutoCommit(t *testing.T) {
}
// Prevent use of non-primary if in_transaction is on.
- session = NewSafeSession(&vtgatepb.Session{TargetString: "@primary", InTransaction: true})
+ session = econtext.NewSafeSession(&vtgatepb.Session{TargetString: "@primary", InTransaction: true})
_, err = executor.Execute(ctx, nil, "TestExecute", session, "use @replica", nil)
require.EqualError(t, err, `can't execute the given command because you have an active transaction`)
}
@@ -205,7 +207,7 @@ func TestDirectTargetRewrites(t *testing.T) {
}
sql := "select database()"
- _, err := executor.Execute(ctx, nil, "TestExecute", NewSafeSession(session), sql, map[string]*querypb.BindVariable{})
+ _, err := executor.Execute(ctx, nil, "TestExecute", econtext.NewSafeSession(session), sql, map[string]*querypb.BindVariable{})
require.NoError(t, err)
assertQueries(t, sbclookup, []*querypb.BoundQuery{{
Sql: "select :__vtdbname as `database()` from dual",
@@ -216,7 +218,7 @@ func TestDirectTargetRewrites(t *testing.T) {
func TestExecutorTransactionsAutoCommit(t *testing.T) {
executor, _, _, sbclookup, ctx := createExecutorEnv(t)
- session := NewSafeSession(&vtgatepb.Session{TargetString: "@primary", Autocommit: true, SessionUUID: "suuid"})
+ session := econtext.NewSafeSession(&vtgatepb.Session{TargetString: "@primary", Autocommit: true, SessionUUID: "suuid"})
logChan := executor.queryLogger.Subscribe("Test")
defer executor.queryLogger.Unsubscribe(logChan)
@@ -270,7 +272,7 @@ func TestExecutorTransactionsAutoCommitStreaming(t *testing.T) {
executor, _, _, sbclookup, ctx := createExecutorEnv(t)
oltpOptions := &querypb.ExecuteOptions{Workload: querypb.ExecuteOptions_OLTP}
- session := NewSafeSession(&vtgatepb.Session{
+ session := econtext.NewSafeSession(&vtgatepb.Session{
TargetString: "@primary",
Autocommit: true,
Options: oltpOptions,
@@ -333,13 +335,13 @@ func TestExecutorTransactionsAutoCommitStreaming(t *testing.T) {
}
func TestExecutorDeleteMetadata(t *testing.T) {
- vschemaacl.AuthorizedDDLUsers = "%"
+ vschemaacl.AuthorizedDDLUsers.Set(vschemaacl.NewAuthorizedDDLUsers("%"))
defer func() {
- vschemaacl.AuthorizedDDLUsers = ""
+ vschemaacl.AuthorizedDDLUsers.Set(vschemaacl.NewAuthorizedDDLUsers(""))
}()
executor, _, _, _, ctx := createExecutorEnv(t)
- session := NewSafeSession(&vtgatepb.Session{TargetString: "@primary", Autocommit: true})
+ session := econtext.NewSafeSession(&vtgatepb.Session{TargetString: "@primary", Autocommit: true})
set := "set @@vitess_metadata.app_v1= '1'"
_, err := executor.Execute(ctx, nil, "TestExecute", session, set, nil)
@@ -367,7 +369,7 @@ func TestExecutorDeleteMetadata(t *testing.T) {
func TestExecutorAutocommit(t *testing.T) {
executor, _, _, sbclookup, ctx := createExecutorEnv(t)
- session := NewSafeSession(&vtgatepb.Session{TargetString: "@primary"})
+ session := econtext.NewSafeSession(&vtgatepb.Session{TargetString: "@primary"})
logChan := executor.queryLogger.Subscribe("Test")
defer executor.queryLogger.Unsubscribe(logChan)
@@ -446,7 +448,7 @@ func TestExecutorAutocommit(t *testing.T) {
// transition autocommit from 0 to 1 in the middle of a transaction.
startCount = sbclookup.CommitCount.Load()
- session = NewSafeSession(&vtgatepb.Session{TargetString: "@primary"})
+ session = econtext.NewSafeSession(&vtgatepb.Session{TargetString: "@primary"})
_, err = executor.Execute(ctx, nil, "TestExecute", session, "begin", nil)
require.NoError(t, err)
_, err = executor.Execute(ctx, nil, "TestExecute", session, "update main1 set id=1", nil)
@@ -468,7 +470,7 @@ func TestExecutorAutocommit(t *testing.T) {
func TestExecutorShowColumns(t *testing.T) {
executor, sbc1, sbc2, sbclookup, ctx := createExecutorEnv(t)
- session := NewSafeSession(&vtgatepb.Session{TargetString: ""})
+ session := econtext.NewSafeSession(&vtgatepb.Session{TargetString: ""})
queries := []string{
"SHOW COLUMNS FROM `user` in `TestExecutor`",
@@ -520,7 +522,7 @@ func assertMatchesNoOrder(t *testing.T, expected, got string) {
func TestExecutorShow(t *testing.T) {
executor, _, _, sbclookup, ctx := createExecutorEnv(t)
- session := NewSafeSession(&vtgatepb.Session{TargetString: "TestExecutor"})
+ session := econtext.NewSafeSession(&vtgatepb.Session{TargetString: "TestExecutor"})
for _, query := range []string{"show vitess_keyspaces", "show keyspaces"} {
qr, err := executor.Execute(ctx, nil, "TestExecute", session, query, nil)
@@ -545,7 +547,7 @@ func TestExecutorShow(t *testing.T) {
_, err = executor.Execute(ctx, nil, "TestExecute", session, "use @primary", nil)
require.NoError(t, err)
_, err = executor.Execute(ctx, nil, "TestExecute", session, "show tables", nil)
- assert.EqualError(t, err, errNoKeyspace.Error(), "'show tables' should fail without a keyspace")
+ assert.EqualError(t, err, econtext.ErrNoKeyspace.Error(), "'show tables' should fail without a keyspace")
assert.Empty(t, sbclookup.Queries, "sbclookup unexpectedly has queries already")
showResults := &sqltypes.Result{
@@ -920,7 +922,7 @@ func TestExecutorShow(t *testing.T) {
query = "show vschema vindexes on user"
_, err = executor.Execute(ctx, nil, "TestExecute", session, query, nil)
- wantErr := errNoKeyspace.Error()
+ wantErr := econtext.ErrNoKeyspace.Error()
assert.EqualError(t, err, wantErr, query)
query = "show vschema vindexes on TestExecutor.garbage"
@@ -1024,7 +1026,7 @@ func TestExecutorShow(t *testing.T) {
utils.MustMatch(t, wantqr, qr, fmt.Sprintf("%s, with a bad keyspace", query))
query = "show vschema tables"
- session = NewSafeSession(&vtgatepb.Session{TargetString: KsTestUnsharded})
+ session = econtext.NewSafeSession(&vtgatepb.Session{TargetString: KsTestUnsharded})
qr, err = executor.Execute(ctx, nil, "TestExecute", session, query, nil)
require.NoError(t, err)
wantqr = &sqltypes.Result{
@@ -1050,9 +1052,9 @@ func TestExecutorShow(t *testing.T) {
utils.MustMatch(t, wantqr, qr, query)
query = "show vschema tables"
- session = NewSafeSession(&vtgatepb.Session{})
+ session = econtext.NewSafeSession(&vtgatepb.Session{})
_, err = executor.Execute(ctx, nil, "TestExecute", session, query, nil)
- want = errNoKeyspace.Error()
+ want = econtext.ErrNoKeyspace.Error()
assert.EqualError(t, err, want, query)
query = "show 10"
@@ -1061,7 +1063,7 @@ func TestExecutorShow(t *testing.T) {
assert.EqualError(t, err, want, query)
query = "show vschema tables"
- session = NewSafeSession(&vtgatepb.Session{TargetString: "no_such_keyspace"})
+ session = econtext.NewSafeSession(&vtgatepb.Session{TargetString: "no_such_keyspace"})
_, err = executor.Execute(ctx, nil, "TestExecute", session, query, nil)
want = "VT05003: unknown database 'no_such_keyspace' in vschema"
assert.EqualError(t, err, want, query)
@@ -1080,7 +1082,7 @@ func TestExecutorShow(t *testing.T) {
func TestExecutorShowTargeted(t *testing.T) {
executor, _, sbc2, _, ctx := createExecutorEnv(t)
- session := NewSafeSession(&vtgatepb.Session{TargetString: "TestExecutor/40-60"})
+ session := econtext.NewSafeSession(&vtgatepb.Session{TargetString: "TestExecutor/40-60"})
queries := []string{
"show databases",
@@ -1107,7 +1109,7 @@ func TestExecutorShowTargeted(t *testing.T) {
func TestExecutorShowFromSystemSchema(t *testing.T) {
executor, _, _, _, ctx := createExecutorEnv(t)
- session := NewSafeSession(&vtgatepb.Session{TargetString: "mysql"})
+ session := econtext.NewSafeSession(&vtgatepb.Session{TargetString: "mysql"})
_, err := executor.Execute(ctx, nil, "TestExecutorShowFromSystemSchema", session, "show tables", nil)
require.NoError(t, err)
@@ -1116,7 +1118,7 @@ func TestExecutorShowFromSystemSchema(t *testing.T) {
func TestExecutorUse(t *testing.T) {
executor, _, _, _, ctx := createExecutorEnv(t)
- session := NewSafeSession(&vtgatepb.Session{Autocommit: true, TargetString: "@primary"})
+ session := econtext.NewSafeSession(&vtgatepb.Session{Autocommit: true, TargetString: "@primary"})
stmts := []string{
"use TestExecutor",
@@ -1135,13 +1137,13 @@ func TestExecutorUse(t *testing.T) {
utils.MustMatch(t, wantSession, session.Session, "session does not match")
}
- _, err := executor.Execute(ctx, nil, "TestExecute", NewSafeSession(&vtgatepb.Session{}), "use 1", nil)
+ _, err := executor.Execute(ctx, nil, "TestExecute", econtext.NewSafeSession(&vtgatepb.Session{}), "use 1", nil)
wantErr := "syntax error at position 6 near '1'"
if err == nil || err.Error() != wantErr {
t.Errorf("got: %v, want %v", err, wantErr)
}
- _, err = executor.Execute(ctx, nil, "TestExecute", NewSafeSession(&vtgatepb.Session{}), "use UnexistentKeyspace", nil)
+ _, err = executor.Execute(ctx, nil, "TestExecute", econtext.NewSafeSession(&vtgatepb.Session{}), "use UnexistentKeyspace", nil)
require.EqualError(t, err, "VT05003: unknown database 'UnexistentKeyspace' in vschema")
}
@@ -1155,7 +1157,7 @@ func TestExecutorComment(t *testing.T) {
wantResult := &sqltypes.Result{}
for _, stmt := range stmts {
- gotResult, err := executor.Execute(ctx, nil, "TestExecute", NewSafeSession(&vtgatepb.Session{TargetString: KsTestUnsharded}), stmt, nil)
+ gotResult, err := executor.Execute(ctx, nil, "TestExecute", econtext.NewSafeSession(&vtgatepb.Session{TargetString: KsTestUnsharded}), stmt, nil)
if err != nil {
t.Error(err)
}
@@ -1240,9 +1242,9 @@ func TestExecutorDDL(t *testing.T) {
sbc2.ExecCount.Store(0)
sbclookup.ExecCount.Store(0)
stmtType := "DDL"
- _, err := executor.Execute(ctx, nil, "TestExecute", NewSafeSession(&vtgatepb.Session{TargetString: tc.targetStr}), stmt, nil)
+ _, err := executor.Execute(ctx, nil, "TestExecute", econtext.NewSafeSession(&vtgatepb.Session{TargetString: tc.targetStr}), stmt, nil)
if tc.hasNoKeyspaceErr {
- require.EqualError(t, err, errNoKeyspace.Error(), "expect query to fail: %q", stmt)
+ require.EqualError(t, err, econtext.ErrNoKeyspace.Error(), "expect query to fail: %q", stmt)
stmtType = "" // For error case, plan is not generated to query log will not contain any stmtType.
} else {
require.NoError(t, err, "did not expect error for query: %q", stmt)
@@ -1278,9 +1280,9 @@ func TestExecutorDDL(t *testing.T) {
sbc1.ExecCount.Store(0)
sbc2.ExecCount.Store(0)
sbclookup.ExecCount.Store(0)
- _, err := executor.Execute(ctx, nil, "TestExecute", NewSafeSession(&vtgatepb.Session{TargetString: ""}), stmt.input, nil)
+ _, err := executor.Execute(ctx, nil, "TestExecute", econtext.NewSafeSession(&vtgatepb.Session{TargetString: ""}), stmt.input, nil)
if stmt.hasErr {
- require.EqualError(t, err, errNoKeyspace.Error(), "expect query to fail")
+ require.EqualError(t, err, econtext.ErrNoKeyspace.Error(), "expect query to fail")
testQueryLog(t, executor, logChan, "TestExecute", "", stmt.input, 0)
} else {
require.NoError(t, err)
@@ -1297,13 +1299,13 @@ func TestExecutorDDLFk(t *testing.T) {
}
for _, stmt := range stmts {
- for _, fkMode := range []string{"allow", "disallow"} {
- t.Run(stmt+fkMode, func(t *testing.T) {
+ for _, mode := range []string{"allow", "disallow"} {
+ t.Run(stmt+mode, func(t *testing.T) {
executor, _, _, sbc, ctx := createExecutorEnv(t)
sbc.ExecCount.Store(0)
- foreignKeyMode = fkMode
- _, err := executor.Execute(ctx, nil, mName, NewSafeSession(&vtgatepb.Session{TargetString: KsTestUnsharded}), stmt, nil)
- if fkMode == "allow" {
+ executor.vConfig.ForeignKeyMode = fkMode(mode)
+ _, err := executor.Execute(ctx, nil, mName, econtext.NewSafeSession(&vtgatepb.Session{TargetString: KsTestUnsharded}), stmt, nil)
+ if mode == "allow" {
require.NoError(t, err)
require.EqualValues(t, 1, sbc.ExecCount.Load())
} else {
@@ -1316,13 +1318,13 @@ func TestExecutorDDLFk(t *testing.T) {
}
func TestExecutorAlterVSchemaKeyspace(t *testing.T) {
- vschemaacl.AuthorizedDDLUsers = "%"
+ vschemaacl.AuthorizedDDLUsers.Set(vschemaacl.NewAuthorizedDDLUsers("%"))
defer func() {
- vschemaacl.AuthorizedDDLUsers = ""
+ vschemaacl.AuthorizedDDLUsers.Set(vschemaacl.NewAuthorizedDDLUsers(""))
}()
executor, _, _, _, ctx := createExecutorEnv(t)
- session := NewSafeSession(&vtgatepb.Session{TargetString: "@primary", Autocommit: true})
+ session := econtext.NewSafeSession(&vtgatepb.Session{TargetString: "@primary", Autocommit: true})
vschemaUpdates := make(chan *vschemapb.SrvVSchema, 2)
executor.serv.WatchSrvVSchema(ctx, executor.cell, func(vschema *vschemapb.SrvVSchema, err error) bool {
@@ -1345,9 +1347,9 @@ func TestExecutorAlterVSchemaKeyspace(t *testing.T) {
}
func TestExecutorCreateVindexDDL(t *testing.T) {
- vschemaacl.AuthorizedDDLUsers = "%"
+ vschemaacl.AuthorizedDDLUsers.Set(vschemaacl.NewAuthorizedDDLUsers("%"))
defer func() {
- vschemaacl.AuthorizedDDLUsers = ""
+ vschemaacl.AuthorizedDDLUsers.Set(vschemaacl.NewAuthorizedDDLUsers(""))
}()
executor, sbc1, sbc2, sbclookup, ctx := createExecutorEnv(t)
ks := "TestExecutor"
@@ -1364,7 +1366,7 @@ func TestExecutorCreateVindexDDL(t *testing.T) {
t.Fatalf("test_vindex should not exist in original vschema")
}
- session := NewSafeSession(&vtgatepb.Session{TargetString: ks})
+ session := econtext.NewSafeSession(&vtgatepb.Session{TargetString: ks})
stmt := "alter vschema create vindex test_vindex using hash"
_, err := executor.Execute(ctx, nil, "TestExecute", session, stmt, nil)
require.NoError(t, err)
@@ -1388,7 +1390,7 @@ func TestExecutorCreateVindexDDL(t *testing.T) {
// Create a new vschema keyspace implicitly by creating a vindex with a different
// target in the session
// ksNew := "test_new_keyspace"
- session = NewSafeSession(&vtgatepb.Session{TargetString: ks})
+ session = econtext.NewSafeSession(&vtgatepb.Session{TargetString: ks})
stmt = "alter vschema create vindex test_vindex2 using hash"
_, err = executor.Execute(ctx, nil, "TestExecute", session, stmt, nil)
if err != nil {
@@ -1415,9 +1417,9 @@ func TestExecutorCreateVindexDDL(t *testing.T) {
}
func TestExecutorAddDropVschemaTableDDL(t *testing.T) {
- vschemaacl.AuthorizedDDLUsers = "%"
+ vschemaacl.AuthorizedDDLUsers.Set(vschemaacl.NewAuthorizedDDLUsers("%"))
defer func() {
- vschemaacl.AuthorizedDDLUsers = ""
+ vschemaacl.AuthorizedDDLUsers.Set(vschemaacl.NewAuthorizedDDLUsers(""))
}()
executor, sbc1, sbc2, sbclookup, ctx := createExecutorEnv(t)
ks := KsTestUnsharded
@@ -1439,7 +1441,7 @@ func TestExecutorAddDropVschemaTableDDL(t *testing.T) {
vschemaTables = append(vschemaTables, t)
}
- session := NewSafeSession(&vtgatepb.Session{TargetString: ks})
+ session := econtext.NewSafeSession(&vtgatepb.Session{TargetString: ks})
stmt := "alter vschema add table test_table"
_, err := executor.Execute(ctx, nil, "TestExecute", session, stmt, nil)
require.NoError(t, err)
@@ -1451,7 +1453,7 @@ func TestExecutorAddDropVschemaTableDDL(t *testing.T) {
_ = waitForVschemaTables(t, ks, append([]string{"test_table", "test_table2"}, vschemaTables...), executor)
// Should fail adding a table on a sharded keyspace
- session = NewSafeSession(&vtgatepb.Session{TargetString: "TestExecutor"})
+ session = econtext.NewSafeSession(&vtgatepb.Session{TargetString: "TestExecutor"})
stmt = "alter vschema add table test_table"
_, err = executor.Execute(ctx, nil, "TestExecute", session, stmt, nil)
require.EqualError(t, err, "add vschema table: unsupported on sharded keyspace TestExecutor")
@@ -1470,7 +1472,7 @@ func TestExecutorVindexDDLACL(t *testing.T) {
executor, _, _, _, ctx := createExecutorEnv(t)
ks := "TestExecutor"
- session := NewSafeSession(&vtgatepb.Session{TargetString: ks})
+ session := econtext.NewSafeSession(&vtgatepb.Session{TargetString: ks})
ctxRedUser := callerid.NewContext(ctx, &vtrpcpb.CallerID{}, &querypb.VTGateCallerID{Username: "redUser"})
ctxBlueUser := callerid.NewContext(ctx, &vtrpcpb.CallerID{}, &querypb.VTGateCallerID{Username: "blueUser"})
@@ -1484,8 +1486,7 @@ func TestExecutorVindexDDLACL(t *testing.T) {
require.EqualError(t, err, `User 'blueUser' is not authorized to perform vschema operations`)
// test when all users are enabled
- vschemaacl.AuthorizedDDLUsers = "%"
- vschemaacl.Init()
+ vschemaacl.AuthorizedDDLUsers.Set(vschemaacl.NewAuthorizedDDLUsers("%"))
_, err = executor.Execute(ctxRedUser, nil, "TestExecute", session, stmt, nil)
if err != nil {
t.Errorf("unexpected error '%v'", err)
@@ -1497,8 +1498,7 @@ func TestExecutorVindexDDLACL(t *testing.T) {
}
// test when only one user is enabled
- vschemaacl.AuthorizedDDLUsers = "orangeUser, blueUser, greenUser"
- vschemaacl.Init()
+ vschemaacl.AuthorizedDDLUsers.Set(vschemaacl.NewAuthorizedDDLUsers("orangeUser, blueUser, greenUser"))
_, err = executor.Execute(ctxRedUser, nil, "TestExecute", session, stmt, nil)
require.EqualError(t, err, `User 'redUser' is not authorized to perform vschema operations`)
@@ -1509,13 +1509,13 @@ func TestExecutorVindexDDLACL(t *testing.T) {
}
// restore the disallowed state
- vschemaacl.AuthorizedDDLUsers = ""
+ vschemaacl.AuthorizedDDLUsers.Set(vschemaacl.NewAuthorizedDDLUsers(""))
}
func TestExecutorUnrecognized(t *testing.T) {
executor, _, _, _, ctx := createExecutorEnv(t)
- _, err := executor.Execute(ctx, nil, "TestExecute", NewSafeSession(&vtgatepb.Session{}), "invalid statement", nil)
+ _, err := executor.Execute(ctx, nil, "TestExecute", econtext.NewSafeSession(&vtgatepb.Session{}), "invalid statement", nil)
require.Error(t, err, "unrecognized statement: invalid statement'")
}
@@ -1525,7 +1525,7 @@ func TestExecutorDeniedErrorNoBuffer(t *testing.T) {
vschemaWaitTimeout = 500 * time.Millisecond
- session := NewAutocommitSession(&vtgatepb.Session{TargetString: "@primary"})
+ session := econtext.NewAutocommitSession(&vtgatepb.Session{TargetString: "@primary"})
startExec := time.Now()
_, err := executor.Execute(ctx, nil, "TestExecutorDeniedErrorNoBuffer", session, "select * from user", nil)
require.NoError(t, err, "enforce denied tables not buffered")
@@ -1559,9 +1559,8 @@ var pv = querypb.ExecuteOptions_Gen4
func TestGetPlanUnnormalized(t *testing.T) {
r, _, _, _, ctx := createExecutorEnv(t)
-
- emptyvc, _ := newVCursorImpl(NewSafeSession(&vtgatepb.Session{TargetString: "@unknown"}), makeComments(""), r, nil, r.vm, r.VSchema(), r.resolver.resolver, nil, false, pv)
- unshardedvc, _ := newVCursorImpl(NewSafeSession(&vtgatepb.Session{TargetString: KsTestUnsharded + "@unknown"}), makeComments(""), r, nil, r.vm, r.VSchema(), r.resolver.resolver, nil, false, pv)
+ emptyvc, _ := econtext.NewVCursorImpl(econtext.NewSafeSession(&vtgatepb.Session{TargetString: "@unknown"}), makeComments(""), r, nil, r.vm, r.VSchema(), r.resolver.resolver, nil, nullResultsObserver{}, econtext.VCursorConfig{})
+ unshardedvc, _ := econtext.NewVCursorImpl(econtext.NewSafeSession(&vtgatepb.Session{TargetString: KsTestUnsharded + "@unknown"}), makeComments(""), r, nil, r.vm, r.VSchema(), r.resolver.resolver, nil, nullResultsObserver{}, econtext.VCursorConfig{})
query1 := "select * from music_user_map where id = 1"
plan1, logStats1 := getPlanCached(t, ctx, r, emptyvc, query1, makeComments(" /* comment */"), map[string]*querypb.BindVariable{}, false)
@@ -1604,7 +1603,7 @@ func assertCacheSize(t *testing.T, c *PlanCache, expected int) {
}
}
-func assertCacheContains(t *testing.T, e *Executor, vc *vcursorImpl, sql string) *engine.Plan {
+func assertCacheContains(t *testing.T, e *Executor, vc *econtext.VCursorImpl, sql string) *engine.Plan {
t.Helper()
var plan *engine.Plan
@@ -1623,9 +1622,9 @@ func assertCacheContains(t *testing.T, e *Executor, vc *vcursorImpl, sql string)
return plan
}
-func getPlanCached(t *testing.T, ctx context.Context, e *Executor, vcursor *vcursorImpl, sql string, comments sqlparser.MarginComments, bindVars map[string]*querypb.BindVariable, skipQueryPlanCache bool) (*engine.Plan, *logstats.LogStats) {
+func getPlanCached(t *testing.T, ctx context.Context, e *Executor, vcursor *econtext.VCursorImpl, sql string, comments sqlparser.MarginComments, bindVars map[string]*querypb.BindVariable, skipQueryPlanCache bool) (*engine.Plan, *logstats.LogStats) {
logStats := logstats.NewLogStats(ctx, "Test", "", "", nil)
- vcursor.safeSession = &SafeSession{
+ vcursor.SafeSession = &econtext.SafeSession{
Session: &vtgatepb.Session{
Options: &querypb.ExecuteOptions{SkipQueryPlanCache: skipQueryPlanCache}},
}
@@ -1644,7 +1643,7 @@ func TestGetPlanCacheUnnormalized(t *testing.T) {
t.Run("Cache", func(t *testing.T) {
r, _, _, _, ctx := createExecutorEnv(t)
- emptyvc, _ := newVCursorImpl(NewSafeSession(&vtgatepb.Session{TargetString: "@unknown"}), makeComments(""), r, nil, r.vm, r.VSchema(), r.resolver.resolver, nil, false, pv)
+ emptyvc, _ := econtext.NewVCursorImpl(econtext.NewSafeSession(&vtgatepb.Session{TargetString: "@unknown"}), makeComments(""), r, nil, r.vm, r.VSchema(), r.resolver.resolver, nil, nullResultsObserver{}, econtext.VCursorConfig{})
query1 := "select * from music_user_map where id = 1"
_, logStats1 := getPlanCached(t, ctx, r, emptyvc, query1, makeComments(" /* comment */"), map[string]*querypb.BindVariable{}, true)
@@ -1668,7 +1667,7 @@ func TestGetPlanCacheUnnormalized(t *testing.T) {
// Skip cache using directive
r, _, _, _, ctx := createExecutorEnv(t)
- unshardedvc, _ := newVCursorImpl(NewSafeSession(&vtgatepb.Session{TargetString: KsTestUnsharded + "@unknown"}), makeComments(""), r, nil, r.vm, r.VSchema(), r.resolver.resolver, nil, false, pv)
+ unshardedvc, _ := econtext.NewVCursorImpl(econtext.NewSafeSession(&vtgatepb.Session{TargetString: KsTestUnsharded + "@unknown"}), makeComments(""), r, nil, r.vm, r.VSchema(), r.resolver.resolver, nil, nullResultsObserver{}, r.vConfig)
query1 := "insert /*vt+ SKIP_QUERY_PLAN_CACHE=1 */ into user(id) values (1), (2)"
getPlanCached(t, ctx, r, unshardedvc, query1, makeComments(" /* comment */"), map[string]*querypb.BindVariable{}, false)
@@ -1679,12 +1678,12 @@ func TestGetPlanCacheUnnormalized(t *testing.T) {
assertCacheSize(t, r.plans, 1)
// the target string will be resolved and become part of the plan cache key, which adds a new entry
- ksIDVc1, _ := newVCursorImpl(NewSafeSession(&vtgatepb.Session{TargetString: KsTestUnsharded + "[deadbeef]"}), makeComments(""), r, nil, r.vm, r.VSchema(), r.resolver.resolver, nil, false, pv)
+ ksIDVc1, _ := econtext.NewVCursorImpl(econtext.NewSafeSession(&vtgatepb.Session{TargetString: KsTestUnsharded + "[deadbeef]"}), makeComments(""), r, nil, r.vm, r.VSchema(), r.resolver.resolver, nil, nullResultsObserver{}, r.vConfig)
getPlanCached(t, ctx, r, ksIDVc1, query1, makeComments(" /* comment */"), map[string]*querypb.BindVariable{}, false)
assertCacheSize(t, r.plans, 2)
// the target string will be resolved and become part of the plan cache key, as it's an unsharded ks, it will be the same entry as above
- ksIDVc2, _ := newVCursorImpl(NewSafeSession(&vtgatepb.Session{TargetString: KsTestUnsharded + "[beefdead]"}), makeComments(""), r, nil, r.vm, r.VSchema(), r.resolver.resolver, nil, false, pv)
+ ksIDVc2, _ := econtext.NewVCursorImpl(econtext.NewSafeSession(&vtgatepb.Session{TargetString: KsTestUnsharded + "[beefdead]"}), makeComments(""), r, nil, r.vm, r.VSchema(), r.resolver.resolver, nil, nullResultsObserver{}, r.vConfig)
getPlanCached(t, ctx, r, ksIDVc2, query1, makeComments(" /* comment */"), map[string]*querypb.BindVariable{}, false)
assertCacheSize(t, r.plans, 2)
})
@@ -1694,7 +1693,7 @@ func TestGetPlanCacheNormalized(t *testing.T) {
t.Run("Cache", func(t *testing.T) {
r, _, _, _, ctx := createExecutorEnv(t)
r.normalize = true
- emptyvc, _ := newVCursorImpl(NewSafeSession(&vtgatepb.Session{TargetString: "@unknown"}), makeComments(""), r, nil, r.vm, r.VSchema(), r.resolver.resolver, nil, false, pv)
+ emptyvc, _ := econtext.NewVCursorImpl(econtext.NewSafeSession(&vtgatepb.Session{TargetString: "@unknown"}), makeComments(""), r, nil, r.vm, r.VSchema(), r.resolver.resolver, nil, nullResultsObserver{}, r.vConfig)
query1 := "select * from music_user_map where id = 1"
_, logStats1 := getPlanCached(t, ctx, r, emptyvc, query1, makeComments(" /* comment */"), map[string]*querypb.BindVariable{}, true /* skipQueryPlanCache */)
@@ -1711,7 +1710,7 @@ func TestGetPlanCacheNormalized(t *testing.T) {
// Skip cache using directive
r, _, _, _, ctx := createExecutorEnv(t)
r.normalize = true
- unshardedvc, _ := newVCursorImpl(NewSafeSession(&vtgatepb.Session{TargetString: KsTestUnsharded + "@unknown"}), makeComments(""), r, nil, r.vm, r.VSchema(), r.resolver.resolver, nil, false, pv)
+ unshardedvc, _ := econtext.NewVCursorImpl(econtext.NewSafeSession(&vtgatepb.Session{TargetString: KsTestUnsharded + "@unknown"}), makeComments(""), r, nil, r.vm, r.VSchema(), r.resolver.resolver, nil, nullResultsObserver{}, r.vConfig)
query1 := "insert /*vt+ SKIP_QUERY_PLAN_CACHE=1 */ into user(id) values (1), (2)"
getPlanCached(t, ctx, r, unshardedvc, query1, makeComments(" /* comment */"), map[string]*querypb.BindVariable{}, false)
@@ -1722,12 +1721,12 @@ func TestGetPlanCacheNormalized(t *testing.T) {
assertCacheSize(t, r.plans, 1)
// the target string will be resolved and become part of the plan cache key, which adds a new entry
- ksIDVc1, _ := newVCursorImpl(NewSafeSession(&vtgatepb.Session{TargetString: KsTestUnsharded + "[deadbeef]"}), makeComments(""), r, nil, r.vm, r.VSchema(), r.resolver.resolver, nil, false, pv)
+ ksIDVc1, _ := econtext.NewVCursorImpl(econtext.NewSafeSession(&vtgatepb.Session{TargetString: KsTestUnsharded + "[deadbeef]"}), makeComments(""), r, nil, r.vm, r.VSchema(), r.resolver.resolver, nil, nullResultsObserver{}, r.vConfig)
getPlanCached(t, ctx, r, ksIDVc1, query1, makeComments(" /* comment */"), map[string]*querypb.BindVariable{}, false)
assertCacheSize(t, r.plans, 2)
// the target string will be resolved and become part of the plan cache key, as it's an unsharded ks, it will be the same entry as above
- ksIDVc2, _ := newVCursorImpl(NewSafeSession(&vtgatepb.Session{TargetString: KsTestUnsharded + "[beefdead]"}), makeComments(""), r, nil, r.vm, r.VSchema(), r.resolver.resolver, nil, false, pv)
+ ksIDVc2, _ := econtext.NewVCursorImpl(econtext.NewSafeSession(&vtgatepb.Session{TargetString: KsTestUnsharded + "[beefdead]"}), makeComments(""), r, nil, r.vm, r.VSchema(), r.resolver.resolver, nil, nullResultsObserver{}, r.vConfig)
getPlanCached(t, ctx, r, ksIDVc2, query1, makeComments(" /* comment */"), map[string]*querypb.BindVariable{}, false)
assertCacheSize(t, r.plans, 2)
})
@@ -1737,8 +1736,8 @@ func TestGetPlanNormalized(t *testing.T) {
r, _, _, _, ctx := createExecutorEnv(t)
r.normalize = true
- emptyvc, _ := newVCursorImpl(NewSafeSession(&vtgatepb.Session{TargetString: "@unknown"}), makeComments(""), r, nil, r.vm, r.VSchema(), r.resolver.resolver, nil, false, pv)
- unshardedvc, _ := newVCursorImpl(NewSafeSession(&vtgatepb.Session{TargetString: KsTestUnsharded + "@unknown"}), makeComments(""), r, nil, r.vm, r.VSchema(), r.resolver.resolver, nil, false, pv)
+ emptyvc, _ := econtext.NewVCursorImpl(econtext.NewSafeSession(&vtgatepb.Session{TargetString: "@unknown"}), makeComments(""), r, nil, r.vm, r.VSchema(), r.resolver.resolver, nil, nullResultsObserver{}, econtext.VCursorConfig{})
+ unshardedvc, _ := econtext.NewVCursorImpl(econtext.NewSafeSession(&vtgatepb.Session{TargetString: KsTestUnsharded + "@unknown"}), makeComments(""), r, nil, r.vm, r.VSchema(), r.resolver.resolver, nil, nullResultsObserver{}, econtext.VCursorConfig{})
query1 := "select * from music_user_map where id = 1"
query2 := "select * from music_user_map where id = 2"
@@ -1785,7 +1784,7 @@ func TestGetPlanPriority(t *testing.T) {
{name: "empty priority", sql: "select * from music_user_map", expectedPriority: "", expectedError: nil},
}
- session := NewSafeSession(&vtgatepb.Session{TargetString: "@unknown", Options: &querypb.ExecuteOptions{}})
+ session := econtext.NewSafeSession(&vtgatepb.Session{TargetString: "@unknown", Options: &querypb.ExecuteOptions{}})
for _, aTestCase := range testCases {
testCase := aTestCase
@@ -1795,7 +1794,7 @@ func TestGetPlanPriority(t *testing.T) {
r.normalize = true
logStats := logstats.NewLogStats(ctx, "Test", "", "", nil)
- vCursor, err := newVCursorImpl(session, makeComments(""), r, nil, r.vm, r.VSchema(), r.resolver.resolver, nil, false, pv)
+ vCursor, err := econtext.NewVCursorImpl(session, makeComments(""), r, nil, r.vm, r.VSchema(), r.resolver.resolver, nil, nullResultsObserver{}, econtext.VCursorConfig{})
assert.NoError(t, err)
stmt, err := sqlparser.NewTestParser().Parse(testCase.sql)
@@ -1809,7 +1808,7 @@ func TestGetPlanPriority(t *testing.T) {
} else {
assert.NoError(t, err)
assert.Equal(t, testCase.expectedPriority, priorityFromStatement)
- assert.Equal(t, testCase.expectedPriority, vCursor.safeSession.Options.Priority)
+ assert.Equal(t, testCase.expectedPriority, vCursor.SafeSession.Options.Priority)
}
})
}
@@ -1966,7 +1965,7 @@ func TestExecutorMaxPayloadSizeExceeded(t *testing.T) {
executor, _, _, _, _ := createExecutorEnv(t)
- session := NewSafeSession(&vtgatepb.Session{TargetString: "@primary"})
+ session := econtext.NewSafeSession(&vtgatepb.Session{TargetString: "@primary"})
warningCount := warnings.Counts()["WarnPayloadSizeExceeded"]
testMaxPayloadSizeExceeded := []string{
"select * from main1",
@@ -2014,7 +2013,7 @@ func TestOlapSelectDatabase(t *testing.T) {
cbInvoked = true
return nil
}
- err := executor.StreamExecute(context.Background(), nil, "TestExecute", NewSafeSession(session), sql, nil, cb)
+ err := executor.StreamExecute(context.Background(), nil, "TestExecute", econtext.NewSafeSession(session), sql, nil, cb)
assert.NoError(t, err)
assert.True(t, cbInvoked)
}
@@ -2022,7 +2021,7 @@ func TestOlapSelectDatabase(t *testing.T) {
func TestExecutorClearsWarnings(t *testing.T) {
executor, _, _, _, _ := createExecutorEnv(t)
- session := NewSafeSession(&vtgatepb.Session{
+ session := econtext.NewSafeSession(&vtgatepb.Session{
Warnings: []*querypb.QueryWarning{{Code: 234, Message: "oh noes"}},
})
_, err := executor.Execute(context.Background(), nil, "TestExecute", session, "select 42", nil)
@@ -2039,7 +2038,6 @@ func TestServingKeyspaces(t *testing.T) {
executor, sbc1, _, sbclookup, ctx := createExecutorEnv(t)
- executor.pv = querypb.ExecuteOptions_Gen4
gw, ok := executor.resolver.resolver.GetGateway().(*TabletGateway)
require.True(t, ok)
hc := gw.hc.(*discovery.FakeHealthCheck)
@@ -2058,7 +2056,7 @@ func TestServingKeyspaces(t *testing.T) {
})
require.ElementsMatch(t, []string{"TestExecutor", "TestUnsharded"}, gw.GetServingKeyspaces())
- result, err := executor.Execute(ctx, nil, "TestServingKeyspaces", NewSafeSession(&vtgatepb.Session{}), "select keyspace_name from dual", nil)
+ result, err := executor.Execute(ctx, nil, "TestServingKeyspaces", econtext.NewSafeSession(&vtgatepb.Session{}), "select keyspace_name from dual", nil)
require.NoError(t, err)
require.Equal(t, `[[VARCHAR("TestExecutor")]]`, fmt.Sprintf("%v", result.Rows))
@@ -2074,7 +2072,7 @@ func TestServingKeyspaces(t *testing.T) {
// Clear plan cache, to force re-planning of the query.
executor.ClearPlans()
require.ElementsMatch(t, []string{"TestUnsharded"}, gw.GetServingKeyspaces())
- result, err = executor.Execute(ctx, nil, "TestServingKeyspaces", NewSafeSession(&vtgatepb.Session{}), "select keyspace_name from dual", nil)
+ result, err = executor.Execute(ctx, nil, "TestServingKeyspaces", econtext.NewSafeSession(&vtgatepb.Session{}), "select keyspace_name from dual", nil)
require.NoError(t, err)
require.Equal(t, `[[VARCHAR("TestUnsharded")]]`, fmt.Sprintf("%v", result.Rows))
}
@@ -2150,9 +2148,9 @@ func TestExecutorOther(t *testing.T) {
sbc2.ExecCount.Store(0)
sbclookup.ExecCount.Store(0)
- _, err := executor.Execute(ctx, nil, "TestExecute", NewSafeSession(&vtgatepb.Session{TargetString: tc.targetStr}), stmt, nil)
+ _, err := executor.Execute(ctx, nil, "TestExecute", econtext.NewSafeSession(&vtgatepb.Session{TargetString: tc.targetStr}), stmt, nil)
if tc.hasNoKeyspaceErr {
- assert.Error(t, err, errNoKeyspace)
+ assert.Error(t, err, econtext.ErrNoKeyspace.Error())
} else if tc.hasDestinationShardErr {
assert.Errorf(t, err, "Destination can only be a single shard for statement: %s", stmt)
} else {
@@ -2206,7 +2204,7 @@ func TestExecutorAnalyze(t *testing.T) {
sbc2.ExecCount.Store(0)
sbclookup.ExecCount.Store(0)
- _, err := executor.Execute(context.Background(), nil, "TestExecute", NewSafeSession(&vtgatepb.Session{TargetString: tc.targetStr}), stmt, nil)
+ _, err := executor.Execute(context.Background(), nil, "TestExecute", econtext.NewSafeSession(&vtgatepb.Session{TargetString: tc.targetStr}), stmt, nil)
require.NoError(t, err)
utils.MustMatch(t, tc.wantCnts, cnts{
@@ -2270,7 +2268,7 @@ func TestExecutorExplainStmt(t *testing.T) {
sbc2.ExecCount.Store(0)
sbclookup.ExecCount.Store(0)
- _, err := executor.Execute(ctx, nil, "TestExecute", NewSafeSession(&vtgatepb.Session{TargetString: tc.targetStr}), stmt, nil)
+ _, err := executor.Execute(ctx, nil, "TestExecute", econtext.NewSafeSession(&vtgatepb.Session{TargetString: tc.targetStr}), stmt, nil)
assert.NoError(t, err)
utils.MustMatch(t, tc.wantCnts, cnts{
@@ -2360,9 +2358,9 @@ func TestExecutorOtherAdmin(t *testing.T) {
sbc2.ExecCount.Store(0)
sbclookup.ExecCount.Store(0)
- _, err := executor.Execute(context.Background(), nil, "TestExecute", NewSafeSession(&vtgatepb.Session{TargetString: tc.targetStr}), stmt, nil)
+ _, err := executor.Execute(context.Background(), nil, "TestExecute", econtext.NewSafeSession(&vtgatepb.Session{TargetString: tc.targetStr}), stmt, nil)
if tc.hasNoKeyspaceErr {
- assert.Error(t, err, errNoKeyspace)
+ assert.Error(t, err, econtext.ErrNoKeyspace.Error())
} else if tc.hasDestinationShardErr {
assert.Errorf(t, err, "Destination can only be a single shard for statement: %s, got: DestinationExactKeyRange(-)", stmt)
} else {
@@ -2387,7 +2385,7 @@ func TestExecutorSavepointInTx(t *testing.T) {
logChan := executor.queryLogger.Subscribe("TestExecutorSavepoint")
defer executor.queryLogger.Unsubscribe(logChan)
- session := NewSafeSession(&vtgatepb.Session{Autocommit: false, TargetString: "@primary"})
+ session := econtext.NewSafeSession(&vtgatepb.Session{Autocommit: false, TargetString: "@primary"})
_, err := exec(executor, session, "savepoint a")
require.NoError(t, err)
_, err = exec(executor, session, "rollback to a")
@@ -2470,7 +2468,7 @@ func TestExecutorSavepointInTxWithReservedConn(t *testing.T) {
logChan := executor.queryLogger.Subscribe("TestExecutorSavepoint")
defer executor.queryLogger.Unsubscribe(logChan)
- session := NewSafeSession(&vtgatepb.Session{Autocommit: true, TargetString: "TestExecutor", EnableSystemSettings: true})
+ session := econtext.NewSafeSession(&vtgatepb.Session{Autocommit: true, TargetString: "TestExecutor", EnableSystemSettings: true})
sbc1.SetResults([]*sqltypes.Result{
sqltypes.MakeTestResult(sqltypes.MakeTestFields("orig|new", "varchar|varchar"), "a|"),
})
@@ -2537,7 +2535,7 @@ func TestExecutorSavepointWithoutTx(t *testing.T) {
logChan := executor.queryLogger.Subscribe("TestExecutorSavepoint")
defer executor.queryLogger.Unsubscribe(logChan)
- session := NewSafeSession(&vtgatepb.Session{Autocommit: true, TargetString: "@primary", InTransaction: false})
+ session := econtext.NewSafeSession(&vtgatepb.Session{Autocommit: true, TargetString: "@primary", InTransaction: false})
_, err := exec(executor, session, "savepoint a")
require.NoError(t, err)
_, err = exec(executor, session, "rollback to a")
@@ -2622,9 +2620,9 @@ func TestExecutorCallProc(t *testing.T) {
sbc2.ExecCount.Store(0)
sbcUnsharded.ExecCount.Store(0)
- _, err := executor.Execute(context.Background(), nil, "TestExecute", NewSafeSession(&vtgatepb.Session{TargetString: tc.targetStr}), "CALL proc()", nil)
+ _, err := executor.Execute(context.Background(), nil, "TestExecute", econtext.NewSafeSession(&vtgatepb.Session{TargetString: tc.targetStr}), "CALL proc()", nil)
if tc.hasNoKeyspaceErr {
- assert.EqualError(t, err, errNoKeyspace.Error())
+ assert.EqualError(t, err, econtext.ErrNoKeyspace.Error())
} else if tc.unshardedOnlyErr {
require.EqualError(t, err, "CALL is not supported for sharded keyspace")
} else {
@@ -2644,9 +2642,9 @@ func TestExecutorTempTable(t *testing.T) {
executor, _, _, sbcUnsharded, ctx := createExecutorEnv(t)
initialWarningsCount := warnings.Counts()["WarnUnshardedOnly"]
- executor.warnShardedOnly = true
+ executor.vConfig.WarnShardedOnly = true
creatQuery := "create temporary table temp_t(id bigint primary key)"
- session := NewSafeSession(&vtgatepb.Session{TargetString: KsTestUnsharded})
+ session := econtext.NewSafeSession(&vtgatepb.Session{TargetString: KsTestUnsharded})
_, err := executor.Execute(ctx, nil, "TestExecutorTempTable", session, creatQuery, nil)
require.NoError(t, err)
assert.EqualValues(t, 1, sbcUnsharded.ExecCount.Load())
@@ -2665,7 +2663,7 @@ func TestExecutorShowVitessMigrations(t *testing.T) {
executor, sbc1, sbc2, _, ctx := createExecutorEnv(t)
showQuery := "show vitess_migrations"
- session := NewSafeSession(&vtgatepb.Session{TargetString: "TestExecutor"})
+ session := econtext.NewSafeSession(&vtgatepb.Session{TargetString: "TestExecutor"})
_, err := executor.Execute(ctx, nil, "", session, showQuery, nil)
require.NoError(t, err)
assert.Contains(t, sbc1.StringQueries(), "show vitess_migrations")
@@ -2676,7 +2674,7 @@ func TestExecutorDescHash(t *testing.T) {
executor, _, _, _, ctx := createExecutorEnv(t)
showQuery := "desc hash_index"
- session := NewSafeSession(&vtgatepb.Session{TargetString: "TestExecutor"})
+ session := econtext.NewSafeSession(&vtgatepb.Session{TargetString: "TestExecutor"})
_, err := executor.Execute(ctx, nil, "", session, showQuery, nil)
require.NoError(t, err)
}
@@ -2684,7 +2682,7 @@ func TestExecutorDescHash(t *testing.T) {
func TestExecutorVExplainQueries(t *testing.T) {
executor, _, _, sbclookup, ctx := createExecutorEnv(t)
- session := NewAutocommitSession(&vtgatepb.Session{})
+ session := econtext.NewAutocommitSession(&vtgatepb.Session{})
sbclookup.SetResults([]*sqltypes.Result{
sqltypes.MakeTestResult(sqltypes.MakeTestFields("name|user_id", "varchar|int64"), "apa|1", "apa|2"),
@@ -2697,7 +2695,7 @@ func TestExecutorVExplainQueries(t *testing.T) {
// Test the streaming side as well
var results []sqltypes.Row
- session = NewAutocommitSession(&vtgatepb.Session{})
+ session = econtext.NewAutocommitSession(&vtgatepb.Session{})
err = executor.StreamExecute(ctx, nil, "TestExecutorVExplainQueries", session, "vexplain queries select * from user where name = 'apa'", nil, func(result *sqltypes.Result) error {
results = append(results, result.Rows...)
return nil
@@ -2710,7 +2708,7 @@ func TestExecutorVExplainQueries(t *testing.T) {
func TestExecutorStartTxnStmt(t *testing.T) {
executor, _, _, _, ctx := createExecutorEnv(t)
- session := NewAutocommitSession(&vtgatepb.Session{})
+ session := econtext.NewAutocommitSession(&vtgatepb.Session{})
tcases := []struct {
beginSQL string
@@ -2757,7 +2755,7 @@ func TestExecutorPrepareExecute(t *testing.T) {
executor, _, _, _, _ := createExecutorEnv(t)
executor.normalize = true
- session := NewAutocommitSession(&vtgatepb.Session{})
+ session := econtext.NewAutocommitSession(&vtgatepb.Session{})
// prepare statement.
_, err := executor.Execute(context.Background(), nil, "TestExecutorPrepareExecute", session, "prepare prep_user from 'select * from user where id = ?'", nil)
@@ -2834,7 +2832,7 @@ func TestExecutorSettingsInTwoPC(t *testing.T) {
sbc2.SetResults(tcase.testRes)
// create a new session
- session := NewSafeSession(&vtgatepb.Session{
+ session := econtext.NewSafeSession(&vtgatepb.Session{
TargetString: KsTestSharded,
TransactionMode: vtgatepb.TransactionMode_TWOPC,
EnableSystemSettings: true,
@@ -2892,7 +2890,7 @@ func TestExecutorRejectTwoPC(t *testing.T) {
sbc2.SetResults(tcase.testRes)
// create a new session
- session := NewSafeSession(&vtgatepb.Session{
+ session := econtext.NewSafeSession(&vtgatepb.Session{
TargetString: KsTestSharded,
TransactionMode: vtgatepb.TransactionMode_TWOPC,
EnableSystemSettings: true,
@@ -2922,7 +2920,7 @@ func TestExecutorTruncateErrors(t *testing.T) {
truncateErrorLen = 32
defer func() { truncateErrorLen = save }()
- session := NewSafeSession(&vtgatepb.Session{})
+ session := econtext.NewSafeSession(&vtgatepb.Session{})
fn := func(r *sqltypes.Result) error {
return nil
}
@@ -2982,7 +2980,7 @@ func TestExecutorFlushStmt(t *testing.T) {
for _, tc := range tcs {
t.Run(tc.query+tc.targetStr, func(t *testing.T) {
- _, err := executor.Execute(context.Background(), nil, "TestExecutorFlushStmt", NewSafeSession(&vtgatepb.Session{TargetString: tc.targetStr}), tc.query, nil)
+ _, err := executor.Execute(context.Background(), nil, "TestExecutorFlushStmt", econtext.NewSafeSession(&vtgatepb.Session{TargetString: tc.targetStr}), tc.query, nil)
if tc.expectedErr == "" {
require.NoError(t, err)
} else {
@@ -3029,7 +3027,7 @@ func TestExecutorKillStmt(t *testing.T) {
allowKillStmt = !tc.disallow
t.Run("execute:"+tc.query+tc.errStr, func(t *testing.T) {
mysqlCtx := &fakeMysqlConnection{ErrMsg: tc.errStr}
- _, err := executor.Execute(context.Background(), mysqlCtx, "TestExecutorKillStmt", NewAutocommitSession(&vtgatepb.Session{}), tc.query, nil)
+ _, err := executor.Execute(context.Background(), mysqlCtx, "TestExecutorKillStmt", econtext.NewAutocommitSession(&vtgatepb.Session{}), tc.query, nil)
if tc.errStr != "" {
require.ErrorContains(t, err, tc.errStr)
} else {
@@ -3039,7 +3037,7 @@ func TestExecutorKillStmt(t *testing.T) {
})
t.Run("stream:"+tc.query+tc.errStr, func(t *testing.T) {
mysqlCtx := &fakeMysqlConnection{ErrMsg: tc.errStr}
- err := executor.StreamExecute(context.Background(), mysqlCtx, "TestExecutorKillStmt", NewAutocommitSession(&vtgatepb.Session{}), tc.query, nil, func(result *sqltypes.Result) error {
+ err := executor.StreamExecute(context.Background(), mysqlCtx, "TestExecutorKillStmt", econtext.NewAutocommitSession(&vtgatepb.Session{}), tc.query, nil, func(result *sqltypes.Result) error {
return nil
})
if tc.errStr != "" {
@@ -3075,7 +3073,7 @@ func (f *fakeMysqlConnection) KillConnection(ctx context.Context, connID uint32)
var _ vtgateservice.MySQLConnection = (*fakeMysqlConnection)(nil)
-func exec(executor *Executor, session *SafeSession, sql string) (*sqltypes.Result, error) {
+func exec(executor *Executor, session *econtext.SafeSession, sql string) (*sqltypes.Result, error) {
return executor.Execute(context.Background(), nil, "TestExecute", session, sql, nil)
}
diff --git a/go/vt/vtgate/executor_vexplain_test.go b/go/vt/vtgate/executor_vexplain_test.go
index 99eb77c7ed4..a9516492f1b 100644
--- a/go/vt/vtgate/executor_vexplain_test.go
+++ b/go/vt/vtgate/executor_vexplain_test.go
@@ -26,6 +26,8 @@ import (
"github.com/stretchr/testify/assert"
+ econtext "vitess.io/vitess/go/vt/vtgate/executorcontext"
+
"github.com/stretchr/testify/require"
"vitess.io/vitess/go/sqltypes"
@@ -135,7 +137,7 @@ func TestVExplainKeys(t *testing.T) {
for _, tt := range tests {
t.Run(tt.Query, func(t *testing.T) {
executor, _, _, _, _ := createExecutorEnv(t)
- session := NewSafeSession(&vtgatepb.Session{TargetString: "@primary"})
+ session := econtext.NewSafeSession(&vtgatepb.Session{TargetString: "@primary"})
gotResult, err := executor.Execute(context.Background(), nil, "Execute", session, "vexplain keys "+tt.Query, nil)
require.NoError(t, err)
diff --git a/go/vt/vtgate/executor_vschema_ddl_test.go b/go/vt/vtgate/executor_vschema_ddl_test.go
index 1c912ed0d62..1acc1ba2362 100644
--- a/go/vt/vtgate/executor_vschema_ddl_test.go
+++ b/go/vt/vtgate/executor_vschema_ddl_test.go
@@ -25,6 +25,8 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
+ econtext "vitess.io/vitess/go/vt/vtgate/executorcontext"
+
"vitess.io/vitess/go/sqltypes"
"vitess.io/vitess/go/test/utils"
"vitess.io/vitess/go/vt/callerid"
@@ -133,12 +135,12 @@ func waitForColVindexes(t *testing.T, ks, table string, names []string, executor
}
func TestPlanExecutorAlterVSchemaKeyspace(t *testing.T) {
- vschemaacl.AuthorizedDDLUsers = "%"
+ vschemaacl.AuthorizedDDLUsers.Set(vschemaacl.NewAuthorizedDDLUsers("%"))
defer func() {
- vschemaacl.AuthorizedDDLUsers = ""
+ vschemaacl.AuthorizedDDLUsers.Set(vschemaacl.NewAuthorizedDDLUsers(""))
}()
executor, _, _, _, ctx := createExecutorEnv(t)
- session := NewSafeSession(&vtgatepb.Session{TargetString: "@primary", Autocommit: true})
+ session := econtext.NewSafeSession(&vtgatepb.Session{TargetString: "@primary", Autocommit: true})
vschemaUpdates := make(chan *vschemapb.SrvVSchema, 2)
executor.serv.WatchSrvVSchema(ctx, "aa", func(vschema *vschemapb.SrvVSchema, err error) bool {
@@ -161,9 +163,9 @@ func TestPlanExecutorAlterVSchemaKeyspace(t *testing.T) {
}
func TestPlanExecutorCreateVindexDDL(t *testing.T) {
- vschemaacl.AuthorizedDDLUsers = "%"
+ vschemaacl.AuthorizedDDLUsers.Set(vschemaacl.NewAuthorizedDDLUsers("%"))
defer func() {
- vschemaacl.AuthorizedDDLUsers = ""
+ vschemaacl.AuthorizedDDLUsers.Set(vschemaacl.NewAuthorizedDDLUsers(""))
}()
executor, _, _, _, ctx := createExecutorEnv(t)
ks := "TestExecutor"
@@ -180,7 +182,7 @@ func TestPlanExecutorCreateVindexDDL(t *testing.T) {
t.Fatalf("test_vindex should not exist in original vschema")
}
- session := NewSafeSession(&vtgatepb.Session{TargetString: ks})
+ session := econtext.NewSafeSession(&vtgatepb.Session{TargetString: ks})
stmt := "alter vschema create vindex test_vindex using hash"
_, err := executor.Execute(ctx, nil, "TestExecute", session, stmt, nil)
require.NoError(t, err)
@@ -203,9 +205,9 @@ func TestPlanExecutorCreateVindexDDL(t *testing.T) {
}
func TestPlanExecutorDropVindexDDL(t *testing.T) {
- vschemaacl.AuthorizedDDLUsers = "%"
+ vschemaacl.AuthorizedDDLUsers.Set(vschemaacl.NewAuthorizedDDLUsers("%"))
defer func() {
- vschemaacl.AuthorizedDDLUsers = ""
+ vschemaacl.AuthorizedDDLUsers.Set(vschemaacl.NewAuthorizedDDLUsers(""))
}()
executor, _, _, _, ctx := createExecutorEnv(t)
ks := "TestExecutor"
@@ -222,7 +224,7 @@ func TestPlanExecutorDropVindexDDL(t *testing.T) {
t.Fatalf("test_vindex should not exist in original vschema")
}
- session := NewSafeSession(&vtgatepb.Session{TargetString: ks})
+ session := econtext.NewSafeSession(&vtgatepb.Session{TargetString: ks})
stmt := "alter vschema drop vindex test_vindex"
_, err := executor.Execute(ctx, nil, "TestExecute", session, stmt, nil)
wantErr := "vindex test_vindex does not exists in keyspace TestExecutor"
@@ -272,9 +274,9 @@ func TestPlanExecutorDropVindexDDL(t *testing.T) {
}
func TestPlanExecutorAddDropVschemaTableDDL(t *testing.T) {
- vschemaacl.AuthorizedDDLUsers = "%"
+ vschemaacl.AuthorizedDDLUsers.Set(vschemaacl.NewAuthorizedDDLUsers("%"))
defer func() {
- vschemaacl.AuthorizedDDLUsers = ""
+ vschemaacl.AuthorizedDDLUsers.Set(vschemaacl.NewAuthorizedDDLUsers(""))
}()
executor, sbc1, sbc2, sbclookup, ctx := createExecutorEnv(t)
ks := KsTestUnsharded
@@ -296,7 +298,7 @@ func TestPlanExecutorAddDropVschemaTableDDL(t *testing.T) {
vschemaTables = append(vschemaTables, t)
}
- session := NewSafeSession(&vtgatepb.Session{TargetString: ks})
+ session := econtext.NewSafeSession(&vtgatepb.Session{TargetString: ks})
stmt := "alter vschema add table test_table"
_, err := executor.Execute(ctx, nil, "TestExecute", session, stmt, nil)
require.NoError(t, err)
@@ -308,7 +310,7 @@ func TestPlanExecutorAddDropVschemaTableDDL(t *testing.T) {
_ = waitForVschemaTables(t, ks, append([]string{"test_table", "test_table2"}, vschemaTables...), executor)
// Should fail adding a table on a sharded keyspace
- session = NewSafeSession(&vtgatepb.Session{TargetString: "TestExecutor"})
+ session = econtext.NewSafeSession(&vtgatepb.Session{TargetString: "TestExecutor"})
stmt = "alter vschema add table test_table"
_, err = executor.Execute(ctx, nil, "TestExecute", session, stmt, nil)
wantErr := "add vschema table: unsupported on sharded keyspace TestExecutor"
@@ -329,9 +331,9 @@ func TestPlanExecutorAddDropVschemaTableDDL(t *testing.T) {
}
func TestExecutorAddSequenceDDL(t *testing.T) {
- vschemaacl.AuthorizedDDLUsers = "%"
+ vschemaacl.AuthorizedDDLUsers.Set(vschemaacl.NewAuthorizedDDLUsers("%"))
defer func() {
- vschemaacl.AuthorizedDDLUsers = ""
+ vschemaacl.AuthorizedDDLUsers.Set(vschemaacl.NewAuthorizedDDLUsers(""))
}()
executor, _, _, _, ctx := createExecutorEnv(t)
ks := KsTestUnsharded
@@ -343,7 +345,7 @@ func TestExecutorAddSequenceDDL(t *testing.T) {
vschemaTables = append(vschemaTables, t)
}
- session := NewSafeSession(&vtgatepb.Session{TargetString: ks})
+ session := econtext.NewSafeSession(&vtgatepb.Session{TargetString: ks})
stmt := "alter vschema add sequence test_seq"
_, err := executor.Execute(ctx, nil, "TestExecute", session, stmt, nil)
require.NoError(t, err)
@@ -357,7 +359,7 @@ func TestExecutorAddSequenceDDL(t *testing.T) {
// Should fail adding a table on a sharded keyspace
ksSharded := "TestExecutor"
- session = NewSafeSession(&vtgatepb.Session{TargetString: ksSharded})
+ session = econtext.NewSafeSession(&vtgatepb.Session{TargetString: ksSharded})
stmt = "alter vschema add sequence sequence_table"
_, err = executor.Execute(ctx, nil, "TestExecute", session, stmt, nil)
@@ -389,9 +391,9 @@ func TestExecutorAddSequenceDDL(t *testing.T) {
}
func TestExecutorDropSequenceDDL(t *testing.T) {
- vschemaacl.AuthorizedDDLUsers = "%"
+ vschemaacl.AuthorizedDDLUsers.Set(vschemaacl.NewAuthorizedDDLUsers("%"))
defer func() {
- vschemaacl.AuthorizedDDLUsers = ""
+ vschemaacl.AuthorizedDDLUsers.Set(vschemaacl.NewAuthorizedDDLUsers(""))
}()
executor, _, _, _, ctx := createExecutorEnv(t)
ks := KsTestUnsharded
@@ -403,7 +405,7 @@ func TestExecutorDropSequenceDDL(t *testing.T) {
t.Fatalf("test_seq should not exist in original vschema")
}
- session := NewSafeSession(&vtgatepb.Session{TargetString: ks})
+ session := econtext.NewSafeSession(&vtgatepb.Session{TargetString: ks})
// add test sequence
stmt := "alter vschema add sequence test_seq"
@@ -428,7 +430,7 @@ func TestExecutorDropSequenceDDL(t *testing.T) {
}
// Should fail dropping a non-existing test sequence
- session = NewSafeSession(&vtgatepb.Session{TargetString: ks})
+ session = econtext.NewSafeSession(&vtgatepb.Session{TargetString: ks})
stmt = "alter vschema drop sequence test_seq"
_, err = executor.Execute(ctx, nil, "TestExecute", session, stmt, nil)
@@ -440,14 +442,14 @@ func TestExecutorDropSequenceDDL(t *testing.T) {
}
func TestExecutorDropAutoIncDDL(t *testing.T) {
- vschemaacl.AuthorizedDDLUsers = "%"
+ vschemaacl.AuthorizedDDLUsers.Set(vschemaacl.NewAuthorizedDDLUsers("%"))
defer func() {
- vschemaacl.AuthorizedDDLUsers = ""
+ vschemaacl.AuthorizedDDLUsers.Set(vschemaacl.NewAuthorizedDDLUsers(""))
}()
executor, _, _, _, ctx := createExecutorEnv(t)
ks := KsTestUnsharded
- session := NewSafeSession(&vtgatepb.Session{TargetString: ks})
+ session := econtext.NewSafeSession(&vtgatepb.Session{TargetString: ks})
stmt := "alter vschema add table test_table"
_, err := executor.Execute(ctx, nil, "TestExecute", session, stmt, nil)
@@ -482,13 +484,13 @@ func TestExecutorDropAutoIncDDL(t *testing.T) {
}
func TestExecutorAddDropVindexDDL(t *testing.T) {
- vschemaacl.AuthorizedDDLUsers = "%"
+ vschemaacl.AuthorizedDDLUsers.Set(vschemaacl.NewAuthorizedDDLUsers("%"))
defer func() {
- vschemaacl.AuthorizedDDLUsers = ""
+ vschemaacl.AuthorizedDDLUsers.Set(vschemaacl.NewAuthorizedDDLUsers(""))
}()
executor, sbc1, sbc2, sbclookup, ctx := createExecutorEnv(t)
ks := "TestExecutor"
- session := NewSafeSession(&vtgatepb.Session{TargetString: ks})
+ session := econtext.NewSafeSession(&vtgatepb.Session{TargetString: ks})
vschemaUpdates := make(chan *vschemapb.SrvVSchema, 4)
executor.serv.WatchSrvVSchema(ctx, "aa", func(vschema *vschemapb.SrvVSchema, err error) bool {
vschemaUpdates <- vschema
@@ -706,7 +708,7 @@ func TestExecutorAddDropVindexDDL(t *testing.T) {
require.EqualError(t, err, "table TestExecutor.nonexistent not defined in vschema")
stmt = "alter vschema on nonexistent drop vindex test_lookup"
- _, err = executor.Execute(ctx, nil, "TestExecute", NewSafeSession(&vtgatepb.Session{TargetString: "InvalidKeyspace"}), stmt, nil)
+ _, err = executor.Execute(ctx, nil, "TestExecute", econtext.NewSafeSession(&vtgatepb.Session{TargetString: "InvalidKeyspace"}), stmt, nil)
require.EqualError(t, err, "VT05003: unknown database 'InvalidKeyspace' in vschema")
stmt = "alter vschema on nowhere.nohow drop vindex test_lookup"
@@ -731,7 +733,7 @@ func TestPlanExecutorVindexDDLACL(t *testing.T) {
// t.Skip("not yet planned")
executor, _, _, _, ctx := createExecutorEnv(t)
ks := "TestExecutor"
- session := NewSafeSession(&vtgatepb.Session{TargetString: ks})
+ session := econtext.NewSafeSession(&vtgatepb.Session{TargetString: ks})
ctxRedUser := callerid.NewContext(ctx, &vtrpcpb.CallerID{}, &querypb.VTGateCallerID{Username: "redUser"})
ctxBlueUser := callerid.NewContext(ctx, &vtrpcpb.CallerID{}, &querypb.VTGateCallerID{Username: "blueUser"})
@@ -745,8 +747,7 @@ func TestPlanExecutorVindexDDLACL(t *testing.T) {
require.EqualError(t, err, `User 'blueUser' is not authorized to perform vschema operations`)
// test when all users are enabled
- vschemaacl.AuthorizedDDLUsers = "%"
- vschemaacl.Init()
+ vschemaacl.AuthorizedDDLUsers.Set(vschemaacl.NewAuthorizedDDLUsers("%"))
_, err = executor.Execute(ctxRedUser, nil, "TestExecute", session, stmt, nil)
if err != nil {
t.Errorf("unexpected error '%v'", err)
@@ -758,8 +759,7 @@ func TestPlanExecutorVindexDDLACL(t *testing.T) {
}
// test when only one user is enabled
- vschemaacl.AuthorizedDDLUsers = "orangeUser, blueUser, greenUser"
- vschemaacl.Init()
+ vschemaacl.AuthorizedDDLUsers.Set(vschemaacl.NewAuthorizedDDLUsers("orangeUser, blueUser, greenUser"))
_, err = executor.Execute(ctxRedUser, nil, "TestExecute", session, stmt, nil)
require.EqualError(t, err, `User 'redUser' is not authorized to perform vschema operations`)
@@ -770,5 +770,5 @@ func TestPlanExecutorVindexDDLACL(t *testing.T) {
}
// restore the disallowed state
- vschemaacl.AuthorizedDDLUsers = ""
+ vschemaacl.AuthorizedDDLUsers.Set(vschemaacl.NewAuthorizedDDLUsers(""))
}
diff --git a/go/vt/vtgate/executor_vstream_test.go b/go/vt/vtgate/executor_vstream_test.go
index 5466e9e8f3f..22fb7ee1034 100644
--- a/go/vt/vtgate/executor_vstream_test.go
+++ b/go/vt/vtgate/executor_vstream_test.go
@@ -21,6 +21,7 @@ import (
"time"
"vitess.io/vitess/go/vt/vtgate/engine"
+ econtext "vitess.io/vitess/go/vt/vtgate/executorcontext"
querypb "vitess.io/vitess/go/vt/proto/query"
@@ -76,7 +77,7 @@ func TestVStreamSQLUnsharded(t *testing.T) {
results := make(chan *sqltypes.Result, 20)
go func() {
- err := executor.StreamExecute(ctx, nil, "TestExecuteStream", NewAutocommitSession(&vtgatepb.Session{TargetString: KsTestUnsharded}), sql, nil, func(qr *sqltypes.Result) error {
+ err := executor.StreamExecute(ctx, nil, "TestExecuteStream", econtext.NewAutocommitSession(&vtgatepb.Session{TargetString: KsTestUnsharded}), sql, nil, func(qr *sqltypes.Result) error {
results <- qr
return nil
})
diff --git a/go/vt/vtgate/executorcontext/faketopo.go b/go/vt/vtgate/executorcontext/faketopo.go
new file mode 100644
index 00000000000..f61119dce15
--- /dev/null
+++ b/go/vt/vtgate/executorcontext/faketopo.go
@@ -0,0 +1,68 @@
+/*
+Copyright 2024 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package executorcontext
+
+import (
+ "context"
+ "encoding/hex"
+
+ topodatapb "vitess.io/vitess/go/vt/proto/topodata"
+ vschemapb "vitess.io/vitess/go/vt/proto/vschema"
+ "vitess.io/vitess/go/vt/topo"
+)
+
+type FakeTopoServer struct{}
+
+// GetTopoServer returns the full topo.Server instance.
+func (f *FakeTopoServer) GetTopoServer() (*topo.Server, error) {
+ return nil, nil
+}
+
+// GetSrvKeyspaceNames returns the list of keyspaces served in
+// the provided cell.
+func (f *FakeTopoServer) GetSrvKeyspaceNames(ctx context.Context, cell string, staleOK bool) ([]string, error) {
+ return []string{"ks1"}, nil
+}
+
+// GetSrvKeyspace returns the SrvKeyspace for a cell/keyspace.
+func (f *FakeTopoServer) GetSrvKeyspace(ctx context.Context, cell, keyspace string) (*topodatapb.SrvKeyspace, error) {
+ zeroHexBytes, _ := hex.DecodeString("")
+ eightyHexBytes, _ := hex.DecodeString("80")
+ ks := &topodatapb.SrvKeyspace{
+ Partitions: []*topodatapb.SrvKeyspace_KeyspacePartition{
+ {
+ ServedType: topodatapb.TabletType_PRIMARY,
+ ShardReferences: []*topodatapb.ShardReference{
+ {Name: "-80", KeyRange: &topodatapb.KeyRange{Start: zeroHexBytes, End: eightyHexBytes}},
+ {Name: "80-", KeyRange: &topodatapb.KeyRange{Start: eightyHexBytes, End: zeroHexBytes}},
+ },
+ },
+ },
+ }
+ return ks, nil
+}
+
+func (f *FakeTopoServer) WatchSrvKeyspace(ctx context.Context, cell, keyspace string, callback func(*topodatapb.SrvKeyspace, error) bool) {
+ ks, err := f.GetSrvKeyspace(ctx, cell, keyspace)
+ callback(ks, err)
+}
+
+// WatchSrvVSchema starts watching the SrvVSchema object for
+// the provided cell. It will call the callback when
+// a new value or an error occurs.
+func (f *FakeTopoServer) WatchSrvVSchema(ctx context.Context, cell string, callback func(*vschemapb.SrvVSchema, error) bool) {
+}
diff --git a/go/vt/vtgate/safe_session.go b/go/vt/vtgate/executorcontext/safe_session.go
similarity index 78%
rename from go/vt/vtgate/safe_session.go
rename to go/vt/vtgate/executorcontext/safe_session.go
index 1d57c63ef35..c77bba76ff8 100644
--- a/go/vt/vtgate/safe_session.go
+++ b/go/vt/vtgate/executorcontext/safe_session.go
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package vtgate
+package executorcontext
import (
"fmt"
@@ -23,22 +23,19 @@ import (
"sync"
"time"
- "vitess.io/vitess/go/sqltypes"
-
"google.golang.org/protobuf/proto"
"vitess.io/vitess/go/mysql/datetime"
-
+ "vitess.io/vitess/go/sqltypes"
+ querypb "vitess.io/vitess/go/vt/proto/query"
+ topodatapb "vitess.io/vitess/go/vt/proto/topodata"
+ vtgatepb "vitess.io/vitess/go/vt/proto/vtgate"
+ vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc"
"vitess.io/vitess/go/vt/sqlparser"
"vitess.io/vitess/go/vt/srvtopo"
"vitess.io/vitess/go/vt/sysvars"
"vitess.io/vitess/go/vt/vterrors"
"vitess.io/vitess/go/vt/vtgate/engine"
-
- querypb "vitess.io/vitess/go/vt/proto/query"
- topodatapb "vitess.io/vitess/go/vt/proto/topodata"
- vtgatepb "vitess.io/vitess/go/vt/proto/vtgate"
- vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc"
)
type (
@@ -58,7 +55,7 @@ type (
rollbackOnPartialExec string
savepointName string
- // this is a signal that found_rows has already been handles by the primitives,
+ // this is a signal that found_rows has already been handled by the primitives,
// and doesn't have to be updated by the executor
foundRowsHandled bool
@@ -66,12 +63,12 @@ type (
// as the query that started a new transaction on the shard belong to a vindex.
queryFromVindex bool
- logging *executeLogger
+ logging *ExecuteLogger
*vtgatepb.Session
}
- executeLogger struct {
+ ExecuteLogger struct {
mu sync.Mutex
entries []engine.ExecuteEntry
lastID int
@@ -127,6 +124,8 @@ const (
savepointRollback
)
+const TxRollback = "Rollback Transaction"
+
// NewSafeSession returns a new SafeSession based on the Session
func NewSafeSession(sessn *vtgatepb.Session) *SafeSession {
if sessn == nil {
@@ -205,6 +204,50 @@ func (session *SafeSession) resetCommonLocked() {
}
}
+// NewAutocommitSession returns a SafeSession based on the original
+// session, but with autocommit enabled.
+func (session *SafeSession) NewAutocommitSession() *SafeSession {
+ ss := NewAutocommitSession(session.Session)
+ ss.logging = session.logging
+ return ss
+}
+
+// IsFoundRowsHandled returns the foundRowsHandled.
+func (session *SafeSession) IsFoundRowsHandled() bool {
+ session.mu.Lock()
+ defer session.mu.Unlock()
+ return session.foundRowsHandled
+}
+
+// SetFoundRows set the found rows value.
+func (session *SafeSession) SetFoundRows(value uint64) {
+ session.mu.Lock()
+ defer session.mu.Unlock()
+ session.FoundRows = value
+ session.foundRowsHandled = true
+}
+
+// GetRollbackOnPartialExec returns the rollbackOnPartialExec value.
+func (session *SafeSession) GetRollbackOnPartialExec() string {
+ session.mu.Lock()
+ defer session.mu.Unlock()
+ return session.rollbackOnPartialExec
+}
+
+// SetQueryFromVindex set the queryFromVindex value.
+func (session *SafeSession) SetQueryFromVindex(value bool) {
+ session.mu.Lock()
+ defer session.mu.Unlock()
+ session.queryFromVindex = value
+}
+
+// GetQueryFromVindex returns the queryFromVindex value.
+func (session *SafeSession) GetQueryFromVindex() bool {
+ session.mu.Lock()
+ defer session.mu.Unlock()
+ return session.queryFromVindex
+}
+
// SetQueryTimeout sets the query timeout
func (session *SafeSession) SetQueryTimeout(queryTimeout int64) {
session.mu.Lock()
@@ -312,7 +355,7 @@ func (session *SafeSession) SetRollbackCommand() {
if session.savepointState == savepointSet {
session.rollbackOnPartialExec = fmt.Sprintf("rollback to %s", session.savepointName)
} else {
- session.rollbackOnPartialExec = txRollback
+ session.rollbackOnPartialExec = TxRollback
}
session.savepointState = savepointRollbackSet
}
@@ -340,6 +383,18 @@ func (session *SafeSession) SetCommitOrder(co vtgatepb.CommitOrder) {
session.commitOrder = co
}
+// GetCommitOrder returns the commit order.
+func (session *SafeSession) GetCommitOrder() vtgatepb.CommitOrder {
+ session.mu.Lock()
+ defer session.mu.Unlock()
+ return session.commitOrder
+}
+
+// GetLogger returns executor logger.
+func (session *SafeSession) GetLogger() *ExecuteLogger {
+ return session.logging
+}
+
// InTransaction returns true if we are in a transaction
func (session *SafeSession) InTransaction() bool {
session.mu.Lock()
@@ -347,73 +402,88 @@ func (session *SafeSession) InTransaction() bool {
return session.Session.InTransaction
}
-// FindAndChangeSessionIfInSingleTxMode returns the transactionId and tabletAlias, if any, for a session
-// modifies the shard session in a specific case for single mode transaction.
-func (session *SafeSession) FindAndChangeSessionIfInSingleTxMode(keyspace, shard string, tabletType topodatapb.TabletType, txMode vtgatepb.TransactionMode) (int64, int64, *topodatapb.TabletAlias, error) {
+// FindAndChangeSessionIfInSingleTxMode retrieves the ShardSession matching the given keyspace, shard, and tablet type.
+// It performs additional checks and may modify the ShardSession in specific cases for single-mode transactions.
+//
+// Key behavior:
+// 1. Retrieves the appropriate list of sessions (PreSessions, PostSessions, or default ShardSessions) based on the commit order.
+// 2. Identifies a matching session by keyspace, shard, and tablet type.
+// 3. If the session meets specific conditions (e.g., non-vindex-only, single transaction mode), it updates the session state:
+// - Converts a vindex-only session to a standard session if required by the transaction type.
+// - If a multi-shard transaction is detected in Single mode, marks the session for rollback and returns an error.
+//
+// Parameters:
+// - keyspace: The keyspace of the target shard.
+// - shard: The shard name of the target.
+// - tabletType: The type of the tablet for the shard session.
+// - txMode: The transaction mode (e.g., Single, Multi).
+//
+// Returns:
+// - The matching ShardSession, if found and valid for the operation.
+// - An error if a Single-mode transaction attempts to span multiple shards.
+func (session *SafeSession) FindAndChangeSessionIfInSingleTxMode(keyspace, shard string, tabletType topodatapb.TabletType, txMode vtgatepb.TransactionMode) (*vtgatepb.Session_ShardSession, error) {
session.mu.Lock()
defer session.mu.Unlock()
- sessions := session.ShardSessions
+
+ shardSession := session.findSessionLocked(keyspace, shard, tabletType)
+
+ if shardSession == nil {
+ return nil, nil
+ }
+
+ if !shardSession.VindexOnly {
+ return shardSession, nil
+ }
+
+ if err := session.singleModeErrorOnCrossShard(txMode, 0); err != nil {
+ return nil, err
+ }
+
+ // the shard session is now used by non-vindex query as well,
+ // so it is not an exclusive vindex only shard session anymore.
+ shardSession.VindexOnly = false
+ return shardSession, nil
+}
+
+func (session *SafeSession) findSessionLocked(keyspace, shard string, tabletType topodatapb.TabletType) *vtgatepb.Session_ShardSession {
+ // Select the appropriate session list based on the commit order.
+ var sessions []*vtgatepb.Session_ShardSession
switch session.commitOrder {
case vtgatepb.CommitOrder_PRE:
sessions = session.PreSessions
case vtgatepb.CommitOrder_POST:
sessions = session.PostSessions
+ default:
+ sessions = session.ShardSessions
}
+
+ // Find and return the matching shard session.
for _, shardSession := range sessions {
- if keyspace == shardSession.Target.Keyspace && tabletType == shardSession.Target.TabletType && shard == shardSession.Target.Shard {
- if txMode != vtgatepb.TransactionMode_SINGLE || !shardSession.VindexOnly || session.queryFromVindex {
- return shardSession.TransactionId, shardSession.ReservedId, shardSession.TabletAlias, nil
- }
- count := actualNoOfShardSession(session.ShardSessions)
- // If the count of shard session which are non vindex only is greater than 0, then it is a
- if count > 0 {
- session.mustRollback = true
- return 0, 0, nil, vterrors.Errorf(vtrpcpb.Code_ABORTED, "multi-db transaction attempted: %v", session.ShardSessions)
- }
- // the shard session is now used by non-vindex query as well,
- // so it is not an exclusive vindex only shard session anymore.
- shardSession.VindexOnly = false
- return shardSession.TransactionId, shardSession.ReservedId, shardSession.TabletAlias, nil
+ if shardSession.Target.Keyspace == keyspace &&
+ shardSession.Target.Shard == shard &&
+ shardSession.Target.TabletType == tabletType {
+ return shardSession
}
}
- return 0, 0, nil, nil
-}
-
-func addOrUpdate(shardSession *vtgatepb.Session_ShardSession, sessions []*vtgatepb.Session_ShardSession) ([]*vtgatepb.Session_ShardSession, error) {
- appendSession := true
- for i, sess := range sessions {
- targetedAtSameTablet := sess.Target.Keyspace == shardSession.Target.Keyspace &&
- sess.Target.TabletType == shardSession.Target.TabletType &&
- sess.Target.Shard == shardSession.Target.Shard
- if targetedAtSameTablet {
- if !proto.Equal(sess.TabletAlias, shardSession.TabletAlias) {
- errorDetails := fmt.Sprintf("got non-matching aliases (%v vs %v) for the same target (keyspace: %v, tabletType: %v, shard: %v)",
- sess.TabletAlias, shardSession.TabletAlias,
- sess.Target.Keyspace, sess.Target.TabletType, sess.Target.Shard)
- return nil, vterrors.New(vtrpcpb.Code_FAILED_PRECONDITION, errorDetails)
- }
- // replace the old info with the new one
- sessions[i] = shardSession
- appendSession = false
- break
- }
- }
- if appendSession {
- sessions = append(sessions, shardSession)
- }
+ return nil
+}
- return sessions, nil
+type ShardActionInfo interface {
+ TransactionID() int64
+ ReservedID() int64
+ RowsAffected() bool
+ Alias() *topodatapb.TabletAlias
}
// AppendOrUpdate adds a new ShardSession, or updates an existing one if one already exists for the given shard session
-func (session *SafeSession) AppendOrUpdate(shardSession *vtgatepb.Session_ShardSession, txMode vtgatepb.TransactionMode) error {
+func (session *SafeSession) AppendOrUpdate(target *querypb.Target, info ShardActionInfo, existingSession *vtgatepb.Session_ShardSession, txMode vtgatepb.TransactionMode) error {
session.mu.Lock()
defer session.mu.Unlock()
// additional check of transaction id is required
// as now in autocommit mode there can be session due to reserved connection
// that needs to be stored as shard session.
- if session.autocommitState == autocommitted && shardSession.TransactionId != 0 {
+ if session.autocommitState == autocommitted && info.TransactionID() != 0 {
// Should be unreachable
return vterrors.VT13001("unexpected 'autocommitted' state in transaction")
}
@@ -423,45 +493,62 @@ func (session *SafeSession) AppendOrUpdate(shardSession *vtgatepb.Session_ShardS
}
session.autocommitState = notAutocommittable
- // Always append, in order for rollback to succeed.
- switch session.commitOrder {
- case vtgatepb.CommitOrder_NORMAL:
- if session.queryFromVindex {
- shardSession.VindexOnly = true
+ if existingSession != nil {
+ existingSession.TransactionId = info.TransactionID()
+ existingSession.ReservedId = info.ReservedID()
+ if !existingSession.RowsAffected {
+ existingSession.RowsAffected = info.RowsAffected()
}
- newSessions, err := addOrUpdate(shardSession, session.ShardSessions)
- if err != nil {
+ if existingSession.VindexOnly {
+ existingSession.VindexOnly = session.queryFromVindex
+ }
+ if err := session.singleModeErrorOnCrossShard(txMode, 1); err != nil {
return err
}
- session.ShardSessions = newSessions
+ return nil
+ }
+ newSession := &vtgatepb.Session_ShardSession{
+ Target: target,
+ TabletAlias: info.Alias(),
+ TransactionId: info.TransactionID(),
+ ReservedId: info.ReservedID(),
+ RowsAffected: info.RowsAffected(),
+ VindexOnly: session.queryFromVindex,
+ }
- if session.queryFromVindex {
- break
- }
- // isSingle is enforced only for normal commit order operations.
- if session.isSingleDB(txMode) && len(session.ShardSessions) > 1 {
- count := actualNoOfShardSession(session.ShardSessions)
- if count <= 1 {
- break
- }
- session.mustRollback = true
- return vterrors.Errorf(vtrpcpb.Code_ABORTED, "multi-db transaction attempted: %v", session.ShardSessions)
- }
- case vtgatepb.CommitOrder_PRE:
- newSessions, err := addOrUpdate(shardSession, session.PreSessions)
- if err != nil {
+ // Always append, in order for rollback to succeed.
+ switch session.commitOrder {
+ case vtgatepb.CommitOrder_NORMAL:
+ session.ShardSessions = append(session.ShardSessions, newSession)
+ if err := session.singleModeErrorOnCrossShard(txMode, 1); err != nil {
return err
}
- session.PreSessions = newSessions
+ case vtgatepb.CommitOrder_PRE:
+ session.PreSessions = append(session.PreSessions, newSession)
case vtgatepb.CommitOrder_POST:
- newSessions, err := addOrUpdate(shardSession, session.PostSessions)
- if err != nil {
- return err
- }
- session.PostSessions = newSessions
+ session.PostSessions = append(session.PostSessions, newSession)
default:
// Should be unreachable
- return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "[BUG] SafeSession.AppendOrUpdate: unexpected commitOrder")
+ return vterrors.VT13001(fmt.Sprintf("unexpected commitOrder to append shard session: %v", session.commitOrder))
+ }
+
+ return nil
+}
+
+// singleModeErrorOnCrossShard checks if a transaction violates the Single mode constraint by spanning multiple shards.
+func (session *SafeSession) singleModeErrorOnCrossShard(txMode vtgatepb.TransactionMode, exceedsCrossShard int) error {
+ // Skip the check if:
+ // 1. The query comes from a lookup vindex.
+ // 2. The transaction mode is not Single.
+ // 3. The transaction is not in the normal shard session.
+ if session.queryFromVindex || session.commitOrder != vtgatepb.CommitOrder_NORMAL || !session.isSingleDB(txMode) {
+ return nil
+ }
+
+ // If the transaction spans multiple shards, abort it.
+ if actualNoOfShardSession(session.ShardSessions) > exceedsCrossShard {
+ session.mustRollback = true // Mark the session for rollback.
+ return vterrors.Errorf(vtrpcpb.Code_ABORTED, "multi-db transaction attempted: %v", session.ShardSessions)
}
return nil
@@ -678,12 +765,11 @@ func (session *SafeSession) UpdateLockHeartbeat() {
session.LastLockHeartbeat = time.Now().Unix()
}
-// TriggerLockHeartBeat returns if it time to trigger next lock heartbeat
-func (session *SafeSession) TriggerLockHeartBeat() bool {
+// GetLockHeartbeat returns last time the lock heartbeat was sent.
+func (session *SafeSession) GetLockHeartbeat() int64 {
session.mu.Lock()
defer session.mu.Unlock()
- now := time.Now().Unix()
- return now-session.LastLockHeartbeat >= int64(lockHeartbeatTime.Seconds())
+ return session.LastLockHeartbeat
}
// InLockSession returns whether locking is used on this session.
@@ -836,9 +922,7 @@ func (session *SafeSession) GetOrCreateOptions() *querypb.ExecuteOptions {
return session.Session.Options
}
-var _ iQueryOption = (*SafeSession)(nil)
-
-func (session *SafeSession) cachePlan() bool {
+func (session *SafeSession) CachePlan() bool {
if session == nil || session.Options == nil {
return true
}
@@ -849,7 +933,7 @@ func (session *SafeSession) cachePlan() bool {
return !(session.Options.SkipQueryPlanCache || session.Options.HasCreatedTempTables)
}
-func (session *SafeSession) getSelectLimit() int {
+func (session *SafeSession) GetSelectLimit() int {
if session == nil || session.Options == nil {
return -1
}
@@ -860,16 +944,16 @@ func (session *SafeSession) getSelectLimit() int {
return int(session.Options.SqlSelectLimit)
}
-// isTxOpen returns true if there is open connection to any of the shard.
-func (session *SafeSession) isTxOpen() bool {
+// IsTxOpen returns true if there is open connection to any of the shard.
+func (session *SafeSession) IsTxOpen() bool {
session.mu.Lock()
defer session.mu.Unlock()
return len(session.ShardSessions) > 0 || len(session.PreSessions) > 0 || len(session.PostSessions) > 0
}
-// getSessions returns the shard session for the current commit order.
-func (session *SafeSession) getSessions() []*vtgatepb.Session_ShardSession {
+// GetSessions returns the shard session for the current commit order.
+func (session *SafeSession) GetSessions() []*vtgatepb.Session_ShardSession {
session.mu.Lock()
defer session.mu.Unlock()
@@ -956,7 +1040,7 @@ func (session *SafeSession) EnableLogging(parser *sqlparser.Parser) {
session.mu.Lock()
defer session.mu.Unlock()
- session.logging = &executeLogger{
+ session.logging = &ExecuteLogger{
parser: parser,
}
}
@@ -994,7 +1078,15 @@ func (session *SafeSession) GetPrepareData(name string) *vtgatepb.PrepareData {
return session.PrepareStatement[name]
}
-func (l *executeLogger) log(primitive engine.Primitive, target *querypb.Target, gateway srvtopo.Gateway, query string, begin bool, bv map[string]*querypb.BindVariable) {
+func (session *SafeSession) Log(primitive engine.Primitive, target *querypb.Target, gateway srvtopo.Gateway, query string, begin bool, bv map[string]*querypb.BindVariable) {
+ session.logging.Log(primitive, target, gateway, query, begin, bv)
+}
+
+func (session *SafeSession) GetLogs() []engine.ExecuteEntry {
+ return session.logging.GetLogs()
+}
+
+func (l *ExecuteLogger) Log(primitive engine.Primitive, target *querypb.Target, gateway srvtopo.Gateway, query string, begin bool, bv map[string]*querypb.BindVariable) {
if l == nil {
return
}
@@ -1033,7 +1125,10 @@ func (l *executeLogger) log(primitive engine.Primitive, target *querypb.Target,
})
}
-func (l *executeLogger) GetLogs() []engine.ExecuteEntry {
+func (l *ExecuteLogger) GetLogs() []engine.ExecuteEntry {
+ if l == nil {
+ return nil
+ }
l.mu.Lock()
defer l.mu.Unlock()
result := make([]engine.ExecuteEntry, len(l.entries))
diff --git a/go/vt/vtgate/executorcontext/safe_session_test.go b/go/vt/vtgate/executorcontext/safe_session_test.go
new file mode 100644
index 00000000000..14ea2ad9dac
--- /dev/null
+++ b/go/vt/vtgate/executorcontext/safe_session_test.go
@@ -0,0 +1,201 @@
+/*
+Copyright 2020 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package executorcontext
+
+import (
+ "reflect"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
+ querypb "vitess.io/vitess/go/vt/proto/query"
+ topodatapb "vitess.io/vitess/go/vt/proto/topodata"
+ vtgatepb "vitess.io/vitess/go/vt/proto/vtgate"
+)
+
+type fakeInfo struct {
+ transactionID int64
+ alias *topodatapb.TabletAlias
+}
+
+func (s *fakeInfo) TransactionID() int64 {
+ return s.transactionID
+}
+
+func (s *fakeInfo) ReservedID() int64 {
+ return 0
+}
+
+func (s *fakeInfo) RowsAffected() bool {
+ return false
+}
+
+func (s *fakeInfo) Alias() *topodatapb.TabletAlias {
+ return s.alias
+}
+
+func info(txId, uid int) ShardActionInfo {
+ return &fakeInfo{transactionID: int64(txId), alias: &topodatapb.TabletAlias{Cell: "cell", Uid: uint32(uid)}}
+}
+
+// TestFailToMultiShardWhenSetToSingleDb tests that single db transactions fails on going multi shard.
+func TestFailToMultiShardWhenSetToSingleDb(t *testing.T) {
+ session := NewSafeSession(&vtgatepb.Session{
+ InTransaction: true, TransactionMode: vtgatepb.TransactionMode_SINGLE,
+ })
+
+ err := session.AppendOrUpdate(
+ &querypb.Target{Keyspace: "keyspace", Shard: "0"},
+ info(1, 0),
+ nil,
+ vtgatepb.TransactionMode_SINGLE)
+ require.NoError(t, err)
+ err = session.AppendOrUpdate(
+ &querypb.Target{Keyspace: "keyspace", Shard: "1"},
+ info(1, 1),
+ nil,
+ vtgatepb.TransactionMode_SINGLE)
+ require.Error(t, err)
+}
+
+// TestSingleDbUpdateToMultiShard tests that a single db transaction cannot be updated to multi shard.
+func TestSingleDbUpdateToMultiShard(t *testing.T) {
+ session := NewSafeSession(&vtgatepb.Session{
+ InTransaction: true, TransactionMode: vtgatepb.TransactionMode_SINGLE,
+ })
+
+ // shard session s0 due to a vindex query
+ session.queryFromVindex = true
+ err := session.AppendOrUpdate(
+ &querypb.Target{Keyspace: "keyspace", Shard: "0"},
+ info(1, 0),
+ nil,
+ vtgatepb.TransactionMode_SINGLE)
+ require.NoError(t, err)
+ session.queryFromVindex = false
+
+ // shard session s1
+ err = session.AppendOrUpdate(
+ &querypb.Target{Keyspace: "keyspace", Shard: "1"},
+ info(1, 1),
+ nil,
+ vtgatepb.TransactionMode_SINGLE)
+ require.NoError(t, err)
+
+ // shard session s0 with normal query
+ err = session.AppendOrUpdate(
+ &querypb.Target{Keyspace: "keyspace", Shard: "0"},
+ info(1, 1),
+ session.ShardSessions[0],
+ vtgatepb.TransactionMode_SINGLE)
+ require.Error(t, err)
+}
+
+// TestSingleDbPreFailOnFind tests that finding a shard session fails
+// if already shard session exists on another shard and the query is not from vindex.
+func TestSingleDbPreFailOnFind(t *testing.T) {
+ session := NewSafeSession(&vtgatepb.Session{
+ InTransaction: true, TransactionMode: vtgatepb.TransactionMode_SINGLE,
+ })
+
+ // shard session s0 due to a vindex query
+ session.queryFromVindex = true
+ err := session.AppendOrUpdate(
+ &querypb.Target{Keyspace: "keyspace", Shard: "0"},
+ info(1, 0),
+ nil,
+ vtgatepb.TransactionMode_SINGLE)
+ require.NoError(t, err)
+ session.queryFromVindex = false
+
+ // shard session s1
+ err = session.AppendOrUpdate(
+ &querypb.Target{Keyspace: "keyspace", Shard: "1"},
+ info(1, 1),
+ nil,
+ vtgatepb.TransactionMode_SINGLE)
+ require.NoError(t, err)
+
+ // shard session s1 for normal query again - should not fail as already part of the session.
+ ss, err := session.FindAndChangeSessionIfInSingleTxMode(
+ "keyspace",
+ "1",
+ topodatapb.TabletType_UNKNOWN,
+ vtgatepb.TransactionMode_SINGLE)
+ require.NoError(t, err)
+ require.NotNil(t, ss)
+ require.False(t, ss.VindexOnly)
+ require.EqualValues(t, 1, ss.TabletAlias.Uid)
+
+ // shard session s0 for normal query
+ _, err = session.FindAndChangeSessionIfInSingleTxMode(
+ "keyspace",
+ "0",
+ topodatapb.TabletType_UNKNOWN,
+ vtgatepb.TransactionMode_SINGLE)
+ require.Error(t, err)
+}
+
+func TestPrequeries(t *testing.T) {
+ session := NewSafeSession(&vtgatepb.Session{
+ SystemVariables: map[string]string{
+ "s1": "'apa'",
+ "s2": "42",
+ },
+ })
+
+ want := []string{"set s1 = 'apa', s2 = 42"}
+ preQueries := session.SetPreQueries()
+
+ if !reflect.DeepEqual(want, preQueries) {
+ t.Errorf("got %v but wanted %v", preQueries, want)
+ }
+}
+
+func TestTimeZone(t *testing.T) {
+ testCases := []struct {
+ tz string
+ want string
+ }{
+ {
+ tz: "'Europe/Amsterdam'",
+ want: "Europe/Amsterdam",
+ },
+ {
+ tz: "'+02:00'",
+ want: "UTC+02:00",
+ },
+ {
+ tz: "foo",
+ want: (*time.Location)(nil).String(),
+ },
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.tz, func(t *testing.T) {
+ session := NewSafeSession(&vtgatepb.Session{
+ SystemVariables: map[string]string{
+ "time_zone": tc.tz,
+ },
+ })
+
+ assert.Equal(t, tc.want, session.TimeZone().String())
+ })
+ }
+}
diff --git a/go/vt/vtgate/vcursor_impl.go b/go/vt/vtgate/executorcontext/vcursor_impl.go
similarity index 65%
rename from go/vt/vtgate/vcursor_impl.go
rename to go/vt/vtgate/executorcontext/vcursor_impl.go
index e9b1d3d7712..c1f341b38cf 100644
--- a/go/vt/vtgate/vcursor_impl.go
+++ b/go/vt/vtgate/executorcontext/vcursor_impl.go
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package vtgate
+package executorcontext
import (
"context"
@@ -26,10 +26,12 @@ import (
"time"
"github.com/google/uuid"
+ "golang.org/x/exp/maps"
"vitess.io/vitess/go/mysql/collations"
"vitess.io/vitess/go/mysql/config"
"vitess.io/vitess/go/mysql/sqlerror"
+ "vitess.io/vitess/go/protoutil"
"vitess.io/vitess/go/sqltypes"
"vitess.io/vitess/go/vt/callerid"
"vitess.io/vitess/go/vt/discovery"
@@ -59,38 +61,62 @@ import (
)
var (
- _ engine.VCursor = (*vcursorImpl)(nil)
- _ plancontext.VSchema = (*vcursorImpl)(nil)
- _ iExecute = (*Executor)(nil)
- _ vindexes.VCursor = (*vcursorImpl)(nil)
+ _ engine.VCursor = (*VCursorImpl)(nil)
+ _ plancontext.VSchema = (*VCursorImpl)(nil)
+ _ vindexes.VCursor = (*VCursorImpl)(nil)
)
+var ErrNoKeyspace = vterrors.VT09005()
+
type (
+ ResultsObserver interface {
+ Observe(*sqltypes.Result)
+ }
+
+ VCursorConfig struct {
+ Collation collations.ID
+
+ MaxMemoryRows int
+ EnableShardRouting bool
+ DefaultTabletType topodatapb.TabletType
+ QueryTimeout int
+ DBDDLPlugin string
+ ForeignKeyMode vschemapb.Keyspace_ForeignKeyMode
+ SetVarEnabled bool
+ EnableViews bool
+ WarnShardedOnly bool
+ PlannerVersion plancontext.PlannerVersion
+
+ WarmingReadsPercent int
+ WarmingReadsTimeout time.Duration
+ WarmingReadsChannel chan bool
+ }
+
// vcursor_impl needs these facilities to be able to be able to execute queries for vindexes
iExecute interface {
Execute(ctx context.Context, mysqlCtx vtgateservice.MySQLConnection, method string, session *SafeSession, s string, vars map[string]*querypb.BindVariable) (*sqltypes.Result, error)
- ExecuteMultiShard(ctx context.Context, primitive engine.Primitive, rss []*srvtopo.ResolvedShard, queries []*querypb.BoundQuery, session *SafeSession, autocommit bool, ignoreMaxMemoryRows bool, resultsObserver resultsObserver) (qr *sqltypes.Result, errs []error)
- StreamExecuteMulti(ctx context.Context, primitive engine.Primitive, query string, rss []*srvtopo.ResolvedShard, vars []map[string]*querypb.BindVariable, session *SafeSession, autocommit bool, callback func(reply *sqltypes.Result) error, observer resultsObserver) []error
+ ExecuteMultiShard(ctx context.Context, primitive engine.Primitive, rss []*srvtopo.ResolvedShard, queries []*querypb.BoundQuery, session *SafeSession, autocommit bool, ignoreMaxMemoryRows bool, resultsObserver ResultsObserver) (qr *sqltypes.Result, errs []error)
+ StreamExecuteMulti(ctx context.Context, primitive engine.Primitive, query string, rss []*srvtopo.ResolvedShard, vars []map[string]*querypb.BindVariable, session *SafeSession, autocommit bool, callback func(reply *sqltypes.Result) error, observer ResultsObserver) []error
ExecuteLock(ctx context.Context, rs *srvtopo.ResolvedShard, query *querypb.BoundQuery, session *SafeSession, lockFuncType sqlparser.LockingFuncType) (*sqltypes.Result, error)
Commit(ctx context.Context, safeSession *SafeSession) error
ExecuteMessageStream(ctx context.Context, rss []*srvtopo.ResolvedShard, name string, callback func(*sqltypes.Result) error) error
ExecuteVStream(ctx context.Context, rss []*srvtopo.ResolvedShard, filter *binlogdatapb.Filter, gtid string, callback func(evs []*binlogdatapb.VEvent) error) error
ReleaseLock(ctx context.Context, session *SafeSession) error
- showVitessReplicationStatus(ctx context.Context, filter *sqlparser.ShowFilter) (*sqltypes.Result, error)
- showShards(ctx context.Context, filter *sqlparser.ShowFilter, destTabletType topodatapb.TabletType) (*sqltypes.Result, error)
- showTablets(filter *sqlparser.ShowFilter) (*sqltypes.Result, error)
- showVitessMetadata(ctx context.Context, filter *sqlparser.ShowFilter) (*sqltypes.Result, error)
- setVitessMetadata(ctx context.Context, name, value string) error
+ ShowVitessReplicationStatus(ctx context.Context, filter *sqlparser.ShowFilter) (*sqltypes.Result, error)
+ ShowShards(ctx context.Context, filter *sqlparser.ShowFilter, destTabletType topodatapb.TabletType) (*sqltypes.Result, error)
+ ShowTablets(filter *sqlparser.ShowFilter) (*sqltypes.Result, error)
+ ShowVitessMetadata(ctx context.Context, filter *sqlparser.ShowFilter) (*sqltypes.Result, error)
+ SetVitessMetadata(ctx context.Context, name, value string) error
// TODO: remove when resolver is gone
- ParseDestinationTarget(targetString string) (string, topodatapb.TabletType, key.Destination, error)
VSchema() *vindexes.VSchema
- planPrepareStmt(ctx context.Context, vcursor *vcursorImpl, query string) (*engine.Plan, sqlparser.Statement, error)
+ PlanPrepareStmt(ctx context.Context, vcursor *VCursorImpl, query string) (*engine.Plan, sqlparser.Statement, error)
- environment() *vtenv.Environment
+ Environment() *vtenv.Environment
ReadTransaction(ctx context.Context, transactionID string) (*querypb.TransactionMetadata, error)
UnresolvedTransactions(ctx context.Context, targets []*querypb.Target) ([]*querypb.TransactionMetadata, error)
+ AddWarningCount(name string, value int64)
}
// VSchemaOperator is an interface to Vschema Operations
@@ -99,10 +125,11 @@ type (
UpdateVSchema(ctx context.Context, ksName string, vschema *vschemapb.SrvVSchema) error
}
- // vcursorImpl implements the VCursor functionality used by dependent
+ // VCursorImpl implements the VCursor functionality used by dependent
// packages to call back into VTGate.
- vcursorImpl struct {
- safeSession *SafeSession
+ VCursorImpl struct {
+ config VCursorConfig
+ SafeSession *SafeSession
keyspace string
tabletType topodatapb.TabletType
destination key.Destination
@@ -111,7 +138,6 @@ type (
resolver *srvtopo.Resolver
topoServer *topo.Server
logStats *logstats.LogStats
- collation collations.ID
// fkChecksState stores the state of foreign key checks variable.
// This state is meant to be the final fk checks state after consulting the
@@ -122,16 +148,11 @@ type (
vschema *vindexes.VSchema
vm VSchemaOperator
semTable *semantics.SemTable
- warnShardedOnly bool // when using sharded only features, a warning will be warnings field
queryTimeout time.Duration
warnings []*querypb.QueryWarning // any warnings that are accumulated during the planning phase are stored here
- pv plancontext.PlannerVersion
- warmingReadsPercent int
- warmingReadsChannel chan bool
-
- resultsObserver resultsObserver
+ observer ResultsObserver
// this is a map of the number of rows that every primitive has returned
// if this field is nil, it means that we are not logging operator traffic
@@ -140,23 +161,23 @@ type (
}
)
-// newVcursorImpl creates a vcursorImpl. Before creating this object, you have to separate out any marginComments that came with
+// NewVCursorImpl creates a VCursorImpl. Before creating this object, you have to separate out any marginComments that came with
// the query and supply it here. Trailing comments are typically sent by the application for various reasons,
// including as identifying markers. So, they have to be added back to all queries that are executed
// on behalf of the original query.
-func newVCursorImpl(
+func NewVCursorImpl(
safeSession *SafeSession,
marginComments sqlparser.MarginComments,
- executor *Executor,
+ executor iExecute,
logStats *logstats.LogStats,
vm VSchemaOperator,
vschema *vindexes.VSchema,
resolver *srvtopo.Resolver,
serv srvtopo.Server,
- warnShardedOnly bool,
- pv plancontext.PlannerVersion,
-) (*vcursorImpl, error) {
- keyspace, tabletType, destination, err := parseDestinationTarget(safeSession.TargetString, vschema)
+ observer ResultsObserver,
+ cfg VCursorConfig,
+) (*VCursorImpl, error) {
+ keyspace, tabletType, destination, err := ParseDestinationTarget(safeSession.TargetString, cfg.DefaultTabletType, vschema)
if err != nil {
return nil, err
}
@@ -171,107 +192,175 @@ func newVCursorImpl(
}
}
- // we only support collations for the new TabletGateway implementation
- var connCollation collations.ID
- if executor != nil {
- if gw, isTabletGw := executor.resolver.resolver.GetGateway().(*TabletGateway); isTabletGw {
- connCollation = gw.DefaultConnCollation()
- }
- }
- if connCollation == collations.Unknown {
- connCollation = executor.env.CollationEnv().DefaultConnectionCharset()
- }
-
- warmingReadsPct := 0
- var warmingReadsChan chan bool
- if executor != nil {
- warmingReadsPct = executor.warmingReadsPercent
- warmingReadsChan = executor.warmingReadsChannel
- }
- return &vcursorImpl{
- safeSession: safeSession,
- keyspace: keyspace,
- tabletType: tabletType,
- destination: destination,
- marginComments: marginComments,
- executor: executor,
- logStats: logStats,
- collation: connCollation,
- resolver: resolver,
- vschema: vschema,
- vm: vm,
- topoServer: ts,
- warnShardedOnly: warnShardedOnly,
- pv: pv,
- warmingReadsPercent: warmingReadsPct,
- warmingReadsChannel: warmingReadsChan,
- resultsObserver: nullResultsObserver{},
+ return &VCursorImpl{
+ config: cfg,
+ SafeSession: safeSession,
+ keyspace: keyspace,
+ tabletType: tabletType,
+ destination: destination,
+ marginComments: marginComments,
+ executor: executor,
+ logStats: logStats,
+ resolver: resolver,
+ vschema: vschema,
+ vm: vm,
+ topoServer: ts,
+
+ observer: observer,
}, nil
}
+func (vc *VCursorImpl) CloneForMirroring(ctx context.Context) engine.VCursor {
+ callerId := callerid.EffectiveCallerIDFromContext(ctx)
+ immediateCallerId := callerid.ImmediateCallerIDFromContext(ctx)
+
+ clonedCtx := callerid.NewContext(ctx, callerId, immediateCallerId)
+
+ v := &VCursorImpl{
+ config: vc.config,
+ SafeSession: NewAutocommitSession(vc.SafeSession.Session),
+ keyspace: vc.keyspace,
+ tabletType: vc.tabletType,
+ destination: vc.destination,
+ marginComments: vc.marginComments,
+ executor: vc.executor,
+ resolver: vc.resolver,
+ topoServer: vc.topoServer,
+ logStats: &logstats.LogStats{Ctx: clonedCtx},
+ ignoreMaxMemoryRows: vc.ignoreMaxMemoryRows,
+ vschema: vc.vschema,
+ vm: vc.vm,
+ semTable: vc.semTable,
+ warnings: vc.warnings,
+ observer: vc.observer,
+ }
+
+ v.marginComments.Trailing += "/* mirror query */"
+
+ return v
+}
+
+func (vc *VCursorImpl) CloneForReplicaWarming(ctx context.Context) engine.VCursor {
+ callerId := callerid.EffectiveCallerIDFromContext(ctx)
+ immediateCallerId := callerid.ImmediateCallerIDFromContext(ctx)
+
+ timedCtx, _ := context.WithTimeout(context.Background(), vc.config.WarmingReadsTimeout) // nolint
+ clonedCtx := callerid.NewContext(timedCtx, callerId, immediateCallerId)
+
+ v := &VCursorImpl{
+ config: vc.config,
+ SafeSession: NewAutocommitSession(vc.SafeSession.Session),
+ keyspace: vc.keyspace,
+ tabletType: topodatapb.TabletType_REPLICA,
+ destination: vc.destination,
+ marginComments: vc.marginComments,
+ executor: vc.executor,
+ resolver: vc.resolver,
+ topoServer: vc.topoServer,
+ logStats: &logstats.LogStats{Ctx: clonedCtx},
+
+ ignoreMaxMemoryRows: vc.ignoreMaxMemoryRows,
+ vschema: vc.vschema,
+ vm: vc.vm,
+ semTable: vc.semTable,
+ warnings: vc.warnings,
+ observer: vc.observer,
+ }
+
+ v.marginComments.Trailing += "/* warming read */"
+
+ return v
+}
+
+func (vc *VCursorImpl) cloneWithAutocommitSession() *VCursorImpl {
+ safeSession := vc.SafeSession.NewAutocommitSession()
+ return &VCursorImpl{
+ config: vc.config,
+ SafeSession: safeSession,
+ keyspace: vc.keyspace,
+ tabletType: vc.tabletType,
+ destination: vc.destination,
+ marginComments: vc.marginComments,
+ executor: vc.executor,
+ logStats: vc.logStats,
+ resolver: vc.resolver,
+ vschema: vc.vschema,
+ vm: vc.vm,
+ topoServer: vc.topoServer,
+ observer: vc.observer,
+ }
+}
+
// HasSystemVariables returns whether the session has set system variables or not
-func (vc *vcursorImpl) HasSystemVariables() bool {
- return vc.safeSession.HasSystemVariables()
+func (vc *VCursorImpl) HasSystemVariables() bool {
+ return vc.SafeSession.HasSystemVariables()
}
// GetSystemVariables takes a visitor function that will save each system variables of the session
-func (vc *vcursorImpl) GetSystemVariables(f func(k string, v string)) {
- vc.safeSession.GetSystemVariables(f)
+func (vc *VCursorImpl) GetSystemVariables(f func(k string, v string)) {
+ vc.SafeSession.GetSystemVariables(f)
+}
+
+// GetSystemVariablesCopy returns a copy of the system variables of the session. Changes to the original map will not affect the session.
+func (vc *VCursorImpl) GetSystemVariablesCopy() map[string]string {
+ vc.SafeSession.mu.Lock()
+ defer vc.SafeSession.mu.Unlock()
+ return maps.Clone(vc.SafeSession.SystemVariables)
}
// ConnCollation returns the collation of this session
-func (vc *vcursorImpl) ConnCollation() collations.ID {
- return vc.collation
+func (vc *VCursorImpl) ConnCollation() collations.ID {
+ return vc.config.Collation
}
// Environment returns the vtenv associated with this session
-func (vc *vcursorImpl) Environment() *vtenv.Environment {
- return vc.executor.environment()
+func (vc *VCursorImpl) Environment() *vtenv.Environment {
+ return vc.executor.Environment()
}
-func (vc *vcursorImpl) TimeZone() *time.Location {
- return vc.safeSession.TimeZone()
+func (vc *VCursorImpl) TimeZone() *time.Location {
+ return vc.SafeSession.TimeZone()
}
-func (vc *vcursorImpl) SQLMode() string {
+func (vc *VCursorImpl) SQLMode() string {
// TODO: Implement return the current sql_mode.
// This is currently hardcoded to the default in MySQL 8.0.
return config.DefaultSQLMode
}
// MaxMemoryRows returns the maxMemoryRows flag value.
-func (vc *vcursorImpl) MaxMemoryRows() int {
- return maxMemoryRows
+func (vc *VCursorImpl) MaxMemoryRows() int {
+ return vc.config.MaxMemoryRows
}
// ExceedsMaxMemoryRows returns a boolean indicating whether the maxMemoryRows value has been exceeded.
// Returns false if the max memory rows override directive is set to true.
-func (vc *vcursorImpl) ExceedsMaxMemoryRows(numRows int) bool {
- return !vc.ignoreMaxMemoryRows && numRows > maxMemoryRows
+func (vc *VCursorImpl) ExceedsMaxMemoryRows(numRows int) bool {
+ return !vc.ignoreMaxMemoryRows && numRows > vc.config.MaxMemoryRows
}
// SetIgnoreMaxMemoryRows sets the ignoreMaxMemoryRows value.
-func (vc *vcursorImpl) SetIgnoreMaxMemoryRows(ignoreMaxMemoryRows bool) {
+func (vc *VCursorImpl) SetIgnoreMaxMemoryRows(ignoreMaxMemoryRows bool) {
vc.ignoreMaxMemoryRows = ignoreMaxMemoryRows
}
// RecordWarning stores the given warning in the current session
-func (vc *vcursorImpl) RecordWarning(warning *querypb.QueryWarning) {
- vc.safeSession.RecordWarning(warning)
+func (vc *VCursorImpl) RecordWarning(warning *querypb.QueryWarning) {
+ vc.SafeSession.RecordWarning(warning)
}
// IsShardRoutingEnabled implements the VCursor interface.
-func (vc *vcursorImpl) IsShardRoutingEnabled() bool {
- return enableShardRouting
+func (vc *VCursorImpl) IsShardRoutingEnabled() bool {
+ return vc.config.EnableShardRouting
}
-func (vc *vcursorImpl) ReadTransaction(ctx context.Context, transactionID string) (*querypb.TransactionMetadata, error) {
+func (vc *VCursorImpl) ReadTransaction(ctx context.Context, transactionID string) (*querypb.TransactionMetadata, error) {
return vc.executor.ReadTransaction(ctx, transactionID)
}
// UnresolvedTransactions gets the unresolved transactions for the given keyspace. If the keyspace is not given,
// then we use the default keyspace.
-func (vc *vcursorImpl) UnresolvedTransactions(ctx context.Context, keyspace string) ([]*querypb.TransactionMetadata, error) {
+func (vc *VCursorImpl) UnresolvedTransactions(ctx context.Context, keyspace string) ([]*querypb.TransactionMetadata, error) {
if keyspace == "" {
keyspace = vc.GetKeyspace()
}
@@ -286,7 +375,7 @@ func (vc *vcursorImpl) UnresolvedTransactions(ctx context.Context, keyspace stri
return vc.executor.UnresolvedTransactions(ctx, targets)
}
-func (vc *vcursorImpl) StartPrimitiveTrace() func() engine.Stats {
+func (vc *VCursorImpl) StartPrimitiveTrace() func() engine.Stats {
vc.interOpStats = make(map[engine.Primitive]engine.RowsReceived)
vc.shardsStats = make(map[engine.Primitive]engine.ShardsQueried)
return func() engine.Stats {
@@ -299,8 +388,8 @@ func (vc *vcursorImpl) StartPrimitiveTrace() func() engine.Stats {
// FindTable finds the specified table. If the keyspace what specified in the input, it gets used as qualifier.
// Otherwise, the keyspace from the request is used, if one was provided.
-func (vc *vcursorImpl) FindTable(name sqlparser.TableName) (*vindexes.Table, string, topodatapb.TabletType, key.Destination, error) {
- destKeyspace, destTabletType, dest, err := vc.executor.ParseDestinationTarget(name.Qualifier.String())
+func (vc *VCursorImpl) FindTable(name sqlparser.TableName) (*vindexes.Table, string, topodatapb.TabletType, key.Destination, error) {
+ destKeyspace, destTabletType, dest, err := vc.ParseDestinationTarget(name.Qualifier.String())
if err != nil {
return nil, "", destTabletType, nil, err
}
@@ -314,8 +403,8 @@ func (vc *vcursorImpl) FindTable(name sqlparser.TableName) (*vindexes.Table, str
return table, destKeyspace, destTabletType, dest, err
}
-func (vc *vcursorImpl) FindView(name sqlparser.TableName) sqlparser.SelectStatement {
- ks, _, _, err := vc.executor.ParseDestinationTarget(name.Qualifier.String())
+func (vc *VCursorImpl) FindView(name sqlparser.TableName) sqlparser.SelectStatement {
+ ks, _, _, err := vc.ParseDestinationTarget(name.Qualifier.String())
if err != nil {
return nil
}
@@ -325,8 +414,8 @@ func (vc *vcursorImpl) FindView(name sqlparser.TableName) sqlparser.SelectStatem
return vc.vschema.FindView(ks, name.Name.String())
}
-func (vc *vcursorImpl) FindRoutedTable(name sqlparser.TableName) (*vindexes.Table, error) {
- destKeyspace, destTabletType, _, err := vc.executor.ParseDestinationTarget(name.Qualifier.String())
+func (vc *VCursorImpl) FindRoutedTable(name sqlparser.TableName) (*vindexes.Table, error) {
+ destKeyspace, destTabletType, _, err := vc.ParseDestinationTarget(name.Qualifier.String())
if err != nil {
return nil, err
}
@@ -343,14 +432,14 @@ func (vc *vcursorImpl) FindRoutedTable(name sqlparser.TableName) (*vindexes.Tabl
}
// FindTableOrVindex finds the specified table or vindex.
-func (vc *vcursorImpl) FindTableOrVindex(name sqlparser.TableName) (*vindexes.Table, vindexes.Vindex, string, topodatapb.TabletType, key.Destination, error) {
+func (vc *VCursorImpl) FindTableOrVindex(name sqlparser.TableName) (*vindexes.Table, vindexes.Vindex, string, topodatapb.TabletType, key.Destination, error) {
if name.Qualifier.IsEmpty() && name.Name.String() == "dual" {
// The magical MySQL dual table should only be resolved
// when it is not qualified by a database name.
return vc.getDualTable()
}
- destKeyspace, destTabletType, dest, err := vc.executor.ParseDestinationTarget(name.Qualifier.String())
+ destKeyspace, destTabletType, dest, err := ParseDestinationTarget(name.Qualifier.String(), vc.tabletType, vc.vschema)
if err != nil {
return nil, nil, "", destTabletType, nil, err
}
@@ -364,7 +453,23 @@ func (vc *vcursorImpl) FindTableOrVindex(name sqlparser.TableName) (*vindexes.Ta
return table, vindex, destKeyspace, destTabletType, dest, nil
}
-func (vc *vcursorImpl) getDualTable() (*vindexes.Table, vindexes.Vindex, string, topodatapb.TabletType, key.Destination, error) {
+func (vc *VCursorImpl) ParseDestinationTarget(targetString string) (string, topodatapb.TabletType, key.Destination, error) {
+ return ParseDestinationTarget(targetString, vc.tabletType, vc.vschema)
+}
+
+// ParseDestinationTarget parses destination target string and provides a keyspace if possible.
+func ParseDestinationTarget(targetString string, tablet topodatapb.TabletType, vschema *vindexes.VSchema) (string, topodatapb.TabletType, key.Destination, error) {
+ destKeyspace, destTabletType, dest, err := topoprotopb.ParseDestination(targetString, tablet)
+ // If the keyspace is not specified, and there is only one keyspace in the VSchema, use that.
+ if destKeyspace == "" && len(vschema.Keyspaces) == 1 {
+ for k := range vschema.Keyspaces {
+ destKeyspace = k
+ }
+ }
+ return destKeyspace, destTabletType, dest, err
+}
+
+func (vc *VCursorImpl) getDualTable() (*vindexes.Table, vindexes.Vindex, string, topodatapb.TabletType, key.Destination, error) {
ksName := vc.getActualKeyspace()
var ks *vindexes.Keyspace
if ksName == "" {
@@ -381,7 +486,7 @@ func (vc *vcursorImpl) getDualTable() (*vindexes.Table, vindexes.Vindex, string,
return tbl, nil, ksName, topodatapb.TabletType_PRIMARY, nil, nil
}
-func (vc *vcursorImpl) getActualKeyspace() string {
+func (vc *VCursorImpl) getActualKeyspace() string {
if !sqlparser.SystemSchema(vc.keyspace) {
return vc.keyspace
}
@@ -392,12 +497,12 @@ func (vc *vcursorImpl) getActualKeyspace() string {
return ks.Name
}
-// DefaultKeyspace returns the default keyspace of the current request
+// SelectedKeyspace returns the selected keyspace of the current request
// if there is one. If the keyspace specified in the target cannot be
// identified, it returns an error.
-func (vc *vcursorImpl) DefaultKeyspace() (*vindexes.Keyspace, error) {
+func (vc *VCursorImpl) SelectedKeyspace() (*vindexes.Keyspace, error) {
if ignoreKeyspace(vc.keyspace) {
- return nil, errNoKeyspace
+ return nil, ErrNoKeyspace
}
ks, ok := vc.vschema.Keyspaces[vc.keyspace]
if !ok {
@@ -408,12 +513,12 @@ func (vc *vcursorImpl) DefaultKeyspace() (*vindexes.Keyspace, error) {
var errNoDbAvailable = vterrors.NewErrorf(vtrpcpb.Code_FAILED_PRECONDITION, vterrors.NoDB, "no database available")
-func (vc *vcursorImpl) AnyKeyspace() (*vindexes.Keyspace, error) {
- keyspace, err := vc.DefaultKeyspace()
+func (vc *VCursorImpl) AnyKeyspace() (*vindexes.Keyspace, error) {
+ keyspace, err := vc.SelectedKeyspace()
if err == nil {
return keyspace, nil
}
- if err != errNoKeyspace {
+ if err != ErrNoKeyspace {
return nil, err
}
@@ -434,7 +539,7 @@ func (vc *vcursorImpl) AnyKeyspace() (*vindexes.Keyspace, error) {
}
// getSortedServingKeyspaces gets the sorted serving keyspaces
-func (vc *vcursorImpl) getSortedServingKeyspaces() []*vindexes.Keyspace {
+func (vc *VCursorImpl) getSortedServingKeyspaces() []*vindexes.Keyspace {
var keyspaces []*vindexes.Keyspace
if vc.resolver != nil && vc.resolver.GetGateway() != nil {
@@ -458,7 +563,7 @@ func (vc *vcursorImpl) getSortedServingKeyspaces() []*vindexes.Keyspace {
return keyspaces
}
-func (vc *vcursorImpl) FirstSortedKeyspace() (*vindexes.Keyspace, error) {
+func (vc *VCursorImpl) FirstSortedKeyspace() (*vindexes.Keyspace, error) {
if len(vc.vschema.Keyspaces) == 0 {
return nil, errNoDbAvailable
}
@@ -468,17 +573,17 @@ func (vc *vcursorImpl) FirstSortedKeyspace() (*vindexes.Keyspace, error) {
}
// SysVarSetEnabled implements the ContextVSchema interface
-func (vc *vcursorImpl) SysVarSetEnabled() bool {
+func (vc *VCursorImpl) SysVarSetEnabled() bool {
return vc.GetSessionEnableSystemSettings()
}
// KeyspaceExists provides whether the keyspace exists or not.
-func (vc *vcursorImpl) KeyspaceExists(ks string) bool {
+func (vc *VCursorImpl) KeyspaceExists(ks string) bool {
return vc.vschema.Keyspaces[ks] != nil
}
// AllKeyspace implements the ContextVSchema interface
-func (vc *vcursorImpl) AllKeyspace() ([]*vindexes.Keyspace, error) {
+func (vc *VCursorImpl) AllKeyspace() ([]*vindexes.Keyspace, error) {
if len(vc.vschema.Keyspaces) == 0 {
return nil, errNoDbAvailable
}
@@ -490,7 +595,7 @@ func (vc *vcursorImpl) AllKeyspace() ([]*vindexes.Keyspace, error) {
}
// FindKeyspace implements the VSchema interface
-func (vc *vcursorImpl) FindKeyspace(keyspace string) (*vindexes.Keyspace, error) {
+func (vc *VCursorImpl) FindKeyspace(keyspace string) (*vindexes.Keyspace, error) {
if len(vc.vschema.Keyspaces) == 0 {
return nil, errNoDbAvailable
}
@@ -503,28 +608,28 @@ func (vc *vcursorImpl) FindKeyspace(keyspace string) (*vindexes.Keyspace, error)
}
// Planner implements the ContextVSchema interface
-func (vc *vcursorImpl) Planner() plancontext.PlannerVersion {
- if vc.safeSession.Options != nil &&
- vc.safeSession.Options.PlannerVersion != querypb.ExecuteOptions_DEFAULT_PLANNER {
- return vc.safeSession.Options.PlannerVersion
+func (vc *VCursorImpl) Planner() plancontext.PlannerVersion {
+ if vc.SafeSession.Options != nil &&
+ vc.SafeSession.Options.PlannerVersion != querypb.ExecuteOptions_DEFAULT_PLANNER {
+ return vc.SafeSession.Options.PlannerVersion
}
- return vc.pv
+ return vc.config.PlannerVersion
}
// GetSemTable implements the ContextVSchema interface
-func (vc *vcursorImpl) GetSemTable() *semantics.SemTable {
+func (vc *VCursorImpl) GetSemTable() *semantics.SemTable {
return vc.semTable
}
// TargetString returns the current TargetString of the session.
-func (vc *vcursorImpl) TargetString() string {
- return vc.safeSession.TargetString
+func (vc *VCursorImpl) TargetString() string {
+ return vc.SafeSession.TargetString
}
// MaxBufferingRetries is to represent max retries on buffering.
const MaxBufferingRetries = 3
-func (vc *vcursorImpl) ExecutePrimitive(ctx context.Context, primitive engine.Primitive, bindVars map[string]*querypb.BindVariable, wantfields bool) (*sqltypes.Result, error) {
+func (vc *VCursorImpl) ExecutePrimitive(ctx context.Context, primitive engine.Primitive, bindVars map[string]*querypb.BindVariable, wantfields bool) (*sqltypes.Result, error) {
for try := 0; try < MaxBufferingRetries; try++ {
res, err := primitive.TryExecute(ctx, vc, bindVars, wantfields)
if err != nil && vterrors.RootCause(err) == buffer.ShardMissingError {
@@ -536,7 +641,7 @@ func (vc *vcursorImpl) ExecutePrimitive(ctx context.Context, primitive engine.Pr
return nil, vterrors.New(vtrpcpb.Code_UNAVAILABLE, "upstream shards are not available")
}
-func (vc *vcursorImpl) logOpTraffic(primitive engine.Primitive, res *sqltypes.Result) {
+func (vc *VCursorImpl) logOpTraffic(primitive engine.Primitive, res *sqltypes.Result) {
if vc.interOpStats != nil {
rows := vc.interOpStats[primitive]
if res == nil {
@@ -548,14 +653,14 @@ func (vc *vcursorImpl) logOpTraffic(primitive engine.Primitive, res *sqltypes.Re
}
}
-func (vc *vcursorImpl) logShardsQueried(primitive engine.Primitive, shardsNb int) {
+func (vc *VCursorImpl) logShardsQueried(primitive engine.Primitive, shardsNb int) {
if vc.shardsStats != nil {
vc.shardsStats[primitive] += engine.ShardsQueried(shardsNb)
}
}
-func (vc *vcursorImpl) ExecutePrimitiveStandalone(ctx context.Context, primitive engine.Primitive, bindVars map[string]*querypb.BindVariable, wantfields bool) (*sqltypes.Result, error) {
- // clone the vcursorImpl with a new session.
+func (vc *VCursorImpl) ExecutePrimitiveStandalone(ctx context.Context, primitive engine.Primitive, bindVars map[string]*querypb.BindVariable, wantfields bool) (*sqltypes.Result, error) {
+ // clone the VCursorImpl with a new session.
newVC := vc.cloneWithAutocommitSession()
for try := 0; try < MaxBufferingRetries; try++ {
res, err := primitive.TryExecute(ctx, newVC, bindVars, wantfields)
@@ -568,7 +673,7 @@ func (vc *vcursorImpl) ExecutePrimitiveStandalone(ctx context.Context, primitive
return nil, vterrors.New(vtrpcpb.Code_UNAVAILABLE, "upstream shards are not available")
}
-func (vc *vcursorImpl) wrapCallback(callback func(*sqltypes.Result) error, primitive engine.Primitive) func(*sqltypes.Result) error {
+func (vc *VCursorImpl) wrapCallback(callback func(*sqltypes.Result) error, primitive engine.Primitive) func(*sqltypes.Result) error {
if vc.interOpStats == nil {
return callback
}
@@ -579,7 +684,7 @@ func (vc *vcursorImpl) wrapCallback(callback func(*sqltypes.Result) error, primi
}
}
-func (vc *vcursorImpl) StreamExecutePrimitive(ctx context.Context, primitive engine.Primitive, bindVars map[string]*querypb.BindVariable, wantfields bool, callback func(*sqltypes.Result) error) error {
+func (vc *VCursorImpl) StreamExecutePrimitive(ctx context.Context, primitive engine.Primitive, bindVars map[string]*querypb.BindVariable, wantfields bool, callback func(*sqltypes.Result) error) error {
callback = vc.wrapCallback(callback, primitive)
for try := 0; try < MaxBufferingRetries; try++ {
@@ -592,10 +697,10 @@ func (vc *vcursorImpl) StreamExecutePrimitive(ctx context.Context, primitive eng
return vterrors.New(vtrpcpb.Code_UNAVAILABLE, "upstream shards are not available")
}
-func (vc *vcursorImpl) StreamExecutePrimitiveStandalone(ctx context.Context, primitive engine.Primitive, bindVars map[string]*querypb.BindVariable, wantfields bool, callback func(result *sqltypes.Result) error) error {
+func (vc *VCursorImpl) StreamExecutePrimitiveStandalone(ctx context.Context, primitive engine.Primitive, bindVars map[string]*querypb.BindVariable, wantfields bool, callback func(result *sqltypes.Result) error) error {
callback = vc.wrapCallback(callback, primitive)
- // clone the vcursorImpl with a new session.
+ // clone the VCursorImpl with a new session.
newVC := vc.cloneWithAutocommitSession()
for try := 0; try < MaxBufferingRetries; try++ {
err := primitive.TryStreamExecute(ctx, newVC, bindVars, wantfields, callback)
@@ -608,12 +713,11 @@ func (vc *vcursorImpl) StreamExecutePrimitiveStandalone(ctx context.Context, pri
}
// Execute is part of the engine.VCursor interface.
-func (vc *vcursorImpl) Execute(ctx context.Context, method string, query string, bindVars map[string]*querypb.BindVariable, rollbackOnError bool, co vtgatepb.CommitOrder) (*sqltypes.Result, error) {
- session := vc.safeSession
+func (vc *VCursorImpl) Execute(ctx context.Context, method string, query string, bindVars map[string]*querypb.BindVariable, rollbackOnError bool, co vtgatepb.CommitOrder) (*sqltypes.Result, error) {
+ session := vc.SafeSession
if co == vtgatepb.CommitOrder_AUTOCOMMIT {
// For autocommit, we have to create an independent session.
- session = NewAutocommitSession(vc.safeSession.Session)
- session.logging = vc.safeSession.logging
+ session = vc.SafeSession.NewAutocommitSession()
rollbackOnError = false
} else {
session.SetCommitOrder(co)
@@ -634,24 +738,22 @@ func (vc *vcursorImpl) Execute(ctx context.Context, method string, query string,
// markSavepoint opens an internal savepoint before executing the original query.
// This happens only when rollback is allowed and no other savepoint was executed
// and the query is executed in an explicit transaction (i.e. started by the client).
-func (vc *vcursorImpl) markSavepoint(ctx context.Context, needsRollbackOnParialExec bool, bindVars map[string]*querypb.BindVariable) error {
- if !needsRollbackOnParialExec || !vc.safeSession.CanAddSavepoint() {
+func (vc *VCursorImpl) markSavepoint(ctx context.Context, needsRollbackOnPartialExec bool, bindVars map[string]*querypb.BindVariable) error {
+ if !needsRollbackOnPartialExec || !vc.SafeSession.CanAddSavepoint() {
return nil
}
uID := fmt.Sprintf("_vt%s", strings.ReplaceAll(uuid.NewString(), "-", "_"))
spQuery := fmt.Sprintf("%ssavepoint %s%s", vc.marginComments.Leading, uID, vc.marginComments.Trailing)
- _, err := vc.executor.Execute(ctx, nil, "MarkSavepoint", vc.safeSession, spQuery, bindVars)
+ _, err := vc.executor.Execute(ctx, nil, "MarkSavepoint", vc.SafeSession, spQuery, bindVars)
if err != nil {
return err
}
- vc.safeSession.SetSavepoint(uID)
+ vc.SafeSession.SetSavepoint(uID)
return nil
}
-const txRollback = "Rollback Transaction"
-
// ExecuteMultiShard is part of the engine.VCursor interface.
-func (vc *vcursorImpl) ExecuteMultiShard(ctx context.Context, primitive engine.Primitive, rss []*srvtopo.ResolvedShard, queries []*querypb.BoundQuery, rollbackOnError, canAutocommit bool) (*sqltypes.Result, []error) {
+func (vc *VCursorImpl) ExecuteMultiShard(ctx context.Context, primitive engine.Primitive, rss []*srvtopo.ResolvedShard, queries []*querypb.BoundQuery, rollbackOnError, canAutocommit bool) (*sqltypes.Result, []error) {
noOfShards := len(rss)
atomic.AddUint64(&vc.logStats.ShardQueries, uint64(noOfShards))
err := vc.markSavepoint(ctx, rollbackOnError && (noOfShards > 1), map[string]*querypb.BindVariable{})
@@ -659,14 +761,14 @@ func (vc *vcursorImpl) ExecuteMultiShard(ctx context.Context, primitive engine.P
return nil, []error{err}
}
- qr, errs := vc.executor.ExecuteMultiShard(ctx, primitive, rss, commentedShardQueries(queries, vc.marginComments), vc.safeSession, canAutocommit, vc.ignoreMaxMemoryRows, vc.resultsObserver)
+ qr, errs := vc.executor.ExecuteMultiShard(ctx, primitive, rss, commentedShardQueries(queries, vc.marginComments), vc.SafeSession, canAutocommit, vc.ignoreMaxMemoryRows, vc.observer)
vc.setRollbackOnPartialExecIfRequired(len(errs) != len(rss), rollbackOnError)
vc.logShardsQueried(primitive, len(rss))
return qr, errs
}
// StreamExecuteMulti is the streaming version of ExecuteMultiShard.
-func (vc *vcursorImpl) StreamExecuteMulti(ctx context.Context, primitive engine.Primitive, query string, rss []*srvtopo.ResolvedShard, bindVars []map[string]*querypb.BindVariable, rollbackOnError bool, autocommit bool, callback func(reply *sqltypes.Result) error) []error {
+func (vc *VCursorImpl) StreamExecuteMulti(ctx context.Context, primitive engine.Primitive, query string, rss []*srvtopo.ResolvedShard, bindVars []map[string]*querypb.BindVariable, rollbackOnError bool, autocommit bool, callback func(reply *sqltypes.Result) error) []error {
callback = vc.wrapCallback(callback, primitive)
noOfShards := len(rss)
@@ -676,20 +778,20 @@ func (vc *vcursorImpl) StreamExecuteMulti(ctx context.Context, primitive engine.
return []error{err}
}
- errs := vc.executor.StreamExecuteMulti(ctx, primitive, vc.marginComments.Leading+query+vc.marginComments.Trailing, rss, bindVars, vc.safeSession, autocommit, callback, vc.resultsObserver)
+ errs := vc.executor.StreamExecuteMulti(ctx, primitive, vc.marginComments.Leading+query+vc.marginComments.Trailing, rss, bindVars, vc.SafeSession, autocommit, callback, vc.observer)
vc.setRollbackOnPartialExecIfRequired(len(errs) != len(rss), rollbackOnError)
return errs
}
// ExecuteLock is for executing advisory lock statements.
-func (vc *vcursorImpl) ExecuteLock(ctx context.Context, rs *srvtopo.ResolvedShard, query *querypb.BoundQuery, lockFuncType sqlparser.LockingFuncType) (*sqltypes.Result, error) {
+func (vc *VCursorImpl) ExecuteLock(ctx context.Context, rs *srvtopo.ResolvedShard, query *querypb.BoundQuery, lockFuncType sqlparser.LockingFuncType) (*sqltypes.Result, error) {
query.Sql = vc.marginComments.Leading + query.Sql + vc.marginComments.Trailing
- return vc.executor.ExecuteLock(ctx, rs, query, vc.safeSession, lockFuncType)
+ return vc.executor.ExecuteLock(ctx, rs, query, vc.SafeSession, lockFuncType)
}
// ExecuteStandalone is part of the engine.VCursor interface.
-func (vc *vcursorImpl) ExecuteStandalone(ctx context.Context, primitive engine.Primitive, query string, bindVars map[string]*querypb.BindVariable, rs *srvtopo.ResolvedShard) (*sqltypes.Result, error) {
+func (vc *VCursorImpl) ExecuteStandalone(ctx context.Context, primitive engine.Primitive, query string, bindVars map[string]*querypb.BindVariable, rs *srvtopo.ResolvedShard) (*sqltypes.Result, error) {
rss := []*srvtopo.ResolvedShard{rs}
bqs := []*querypb.BoundQuery{
{
@@ -699,13 +801,13 @@ func (vc *vcursorImpl) ExecuteStandalone(ctx context.Context, primitive engine.P
}
// The autocommit flag is always set to false because we currently don't
// execute DMLs through ExecuteStandalone.
- qr, errs := vc.executor.ExecuteMultiShard(ctx, primitive, rss, bqs, NewAutocommitSession(vc.safeSession.Session), false /* autocommit */, vc.ignoreMaxMemoryRows, vc.resultsObserver)
+ qr, errs := vc.executor.ExecuteMultiShard(ctx, primitive, rss, bqs, NewAutocommitSession(vc.SafeSession.Session), false /* autocommit */, vc.ignoreMaxMemoryRows, vc.observer)
vc.logShardsQueried(primitive, len(rss))
return qr, vterrors.Aggregate(errs)
}
// ExecuteKeyspaceID is part of the engine.VCursor interface.
-func (vc *vcursorImpl) ExecuteKeyspaceID(ctx context.Context, keyspace string, ksid []byte, query string, bindVars map[string]*querypb.BindVariable, rollbackOnError, autocommit bool) (*sqltypes.Result, error) {
+func (vc *VCursorImpl) ExecuteKeyspaceID(ctx context.Context, keyspace string, ksid []byte, query string, bindVars map[string]*querypb.BindVariable, rollbackOnError, autocommit bool) (*sqltypes.Result, error) {
atomic.AddUint64(&vc.logStats.ShardQueries, 1)
rss, _, err := vc.ResolveDestinations(ctx, keyspace, nil, []key.Destination{key.DestinationKeyspaceID(ksid)})
if err != nil {
@@ -722,17 +824,17 @@ func (vc *vcursorImpl) ExecuteKeyspaceID(ctx context.Context, keyspace string, k
// This creates a transaction but that transaction is for locking purpose only and should not cause multi-db transaction error.
// This fields helps in to ignore multi-db transaction error when it states `queryFromVindex`.
if !rollbackOnError {
- vc.safeSession.queryFromVindex = true
+ vc.SafeSession.SetQueryFromVindex(true)
defer func() {
- vc.safeSession.queryFromVindex = false
+ vc.SafeSession.SetQueryFromVindex(false)
}()
}
qr, errs := vc.ExecuteMultiShard(ctx, nil, rss, queries, rollbackOnError, autocommit)
return qr, vterrors.Aggregate(errs)
}
-func (vc *vcursorImpl) InTransactionAndIsDML() bool {
- if !vc.safeSession.InTransaction() {
+func (vc *VCursorImpl) InTransactionAndIsDML() bool {
+ if !vc.SafeSession.InTransaction() {
return false
}
switch vc.logStats.StmtType {
@@ -742,7 +844,7 @@ func (vc *vcursorImpl) InTransactionAndIsDML() bool {
return false
}
-func (vc *vcursorImpl) LookupRowLockShardSession() vtgatepb.CommitOrder {
+func (vc *VCursorImpl) LookupRowLockShardSession() vtgatepb.CommitOrder {
switch vc.logStats.StmtType {
case "DELETE", "UPDATE":
return vtgatepb.CommitOrder_POST
@@ -751,23 +853,23 @@ func (vc *vcursorImpl) LookupRowLockShardSession() vtgatepb.CommitOrder {
}
// AutocommitApproval is part of the engine.VCursor interface.
-func (vc *vcursorImpl) AutocommitApproval() bool {
- return vc.safeSession.AutocommitApproval()
+func (vc *VCursorImpl) AutocommitApproval() bool {
+ return vc.SafeSession.AutocommitApproval()
}
// setRollbackOnPartialExecIfRequired sets the value on SafeSession.rollbackOnPartialExec
// when the query gets successfully executed on at least one shard,
// there does not exist any old savepoint for which rollback is already set
// and rollback on error is allowed.
-func (vc *vcursorImpl) setRollbackOnPartialExecIfRequired(atleastOneSuccess bool, rollbackOnError bool) {
- if atleastOneSuccess && rollbackOnError && !vc.safeSession.IsRollbackSet() {
- vc.safeSession.SetRollbackCommand()
+func (vc *VCursorImpl) setRollbackOnPartialExecIfRequired(atleastOneSuccess bool, rollbackOnError bool) {
+ if atleastOneSuccess && rollbackOnError && !vc.SafeSession.IsRollbackSet() {
+ vc.SafeSession.SetRollbackCommand()
}
}
// fixupPartiallyMovedShards checks if any of the shards in the route has a ShardRoutingRule (true when a keyspace
// is in the middle of being moved to another keyspace using MoveTables moving a subset of shards at a time
-func (vc *vcursorImpl) fixupPartiallyMovedShards(rss []*srvtopo.ResolvedShard) ([]*srvtopo.ResolvedShard, error) {
+func (vc *VCursorImpl) fixupPartiallyMovedShards(rss []*srvtopo.ResolvedShard) ([]*srvtopo.ResolvedShard, error) {
if vc.vschema.ShardRoutingRules == nil {
return rss, nil
}
@@ -784,12 +886,12 @@ func (vc *vcursorImpl) fixupPartiallyMovedShards(rss []*srvtopo.ResolvedShard) (
return rss, nil
}
-func (vc *vcursorImpl) ResolveDestinations(ctx context.Context, keyspace string, ids []*querypb.Value, destinations []key.Destination) ([]*srvtopo.ResolvedShard, [][]*querypb.Value, error) {
+func (vc *VCursorImpl) ResolveDestinations(ctx context.Context, keyspace string, ids []*querypb.Value, destinations []key.Destination) ([]*srvtopo.ResolvedShard, [][]*querypb.Value, error) {
rss, values, err := vc.resolver.ResolveDestinations(ctx, keyspace, vc.tabletType, ids, destinations)
if err != nil {
return nil, nil, err
}
- if enableShardRouting {
+ if vc.config.EnableShardRouting {
rss, err = vc.fixupPartiallyMovedShards(rss)
if err != nil {
return nil, nil, err
@@ -798,12 +900,12 @@ func (vc *vcursorImpl) ResolveDestinations(ctx context.Context, keyspace string,
return rss, values, err
}
-func (vc *vcursorImpl) ResolveDestinationsMultiCol(ctx context.Context, keyspace string, ids [][]sqltypes.Value, destinations []key.Destination) ([]*srvtopo.ResolvedShard, [][][]sqltypes.Value, error) {
+func (vc *VCursorImpl) ResolveDestinationsMultiCol(ctx context.Context, keyspace string, ids [][]sqltypes.Value, destinations []key.Destination) ([]*srvtopo.ResolvedShard, [][][]sqltypes.Value, error) {
rss, values, err := vc.resolver.ResolveDestinationsMultiCol(ctx, keyspace, vc.tabletType, ids, destinations)
if err != nil {
return nil, nil, err
}
- if enableShardRouting {
+ if vc.config.EnableShardRouting {
rss, err = vc.fixupPartiallyMovedShards(rss)
if err != nil {
return nil, nil, err
@@ -812,12 +914,12 @@ func (vc *vcursorImpl) ResolveDestinationsMultiCol(ctx context.Context, keyspace
return rss, values, err
}
-func (vc *vcursorImpl) Session() engine.SessionActions {
+func (vc *VCursorImpl) Session() engine.SessionActions {
return vc
}
-func (vc *vcursorImpl) SetTarget(target string) error {
- keyspace, tabletType, _, err := topoprotopb.ParseDestination(target, defaultTabletType)
+func (vc *VCursorImpl) SetTarget(target string) error {
+ keyspace, tabletType, _, err := topoprotopb.ParseDestination(target, vc.config.DefaultTabletType)
if err != nil {
return err
}
@@ -825,10 +927,12 @@ func (vc *vcursorImpl) SetTarget(target string) error {
return vterrors.VT05003(keyspace)
}
- if vc.safeSession.InTransaction() && tabletType != topodatapb.TabletType_PRIMARY {
+ if vc.SafeSession.InTransaction() && tabletType != topodatapb.TabletType_PRIMARY {
return vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.LockOrActiveTransaction, "can't execute the given command because you have an active transaction")
}
- vc.safeSession.SetTargetString(target)
+ vc.SafeSession.SetTargetString(target)
+ vc.keyspace = keyspace
+ vc.tabletType = tabletType
return nil
}
@@ -836,30 +940,30 @@ func ignoreKeyspace(keyspace string) bool {
return keyspace == "" || sqlparser.SystemSchema(keyspace)
}
-func (vc *vcursorImpl) SetUDV(key string, value any) error {
+func (vc *VCursorImpl) SetUDV(key string, value any) error {
bindValue, err := sqltypes.BuildBindVariable(value)
if err != nil {
return err
}
- vc.safeSession.SetUserDefinedVariable(key, bindValue)
+ vc.SafeSession.SetUserDefinedVariable(key, bindValue)
return nil
}
-func (vc *vcursorImpl) SetSysVar(name string, expr string) {
- vc.safeSession.SetSystemVariable(name, expr)
+func (vc *VCursorImpl) SetSysVar(name string, expr string) {
+ vc.SafeSession.SetSystemVariable(name, expr)
}
// NeedsReservedConn implements the SessionActions interface
-func (vc *vcursorImpl) NeedsReservedConn() {
- vc.safeSession.SetReservedConn(true)
+func (vc *VCursorImpl) NeedsReservedConn() {
+ vc.SafeSession.SetReservedConn(true)
}
-func (vc *vcursorImpl) InReservedConn() bool {
- return vc.safeSession.InReservedConn()
+func (vc *VCursorImpl) InReservedConn() bool {
+ return vc.SafeSession.InReservedConn()
}
-func (vc *vcursorImpl) ShardSession() []*srvtopo.ResolvedShard {
- ss := vc.safeSession.GetShardSessions()
+func (vc *VCursorImpl) ShardSession() []*srvtopo.ResolvedShard {
+ ss := vc.SafeSession.GetShardSessions()
if len(ss) == 0 {
return nil
}
@@ -874,12 +978,12 @@ func (vc *vcursorImpl) ShardSession() []*srvtopo.ResolvedShard {
}
// Destination implements the ContextVSchema interface
-func (vc *vcursorImpl) Destination() key.Destination {
+func (vc *VCursorImpl) Destination() key.Destination {
return vc.destination
}
// TabletType implements the ContextVSchema interface
-func (vc *vcursorImpl) TabletType() topodatapb.TabletType {
+func (vc *VCursorImpl) TabletType() topodatapb.TabletType {
return vc.tabletType
}
@@ -898,13 +1002,13 @@ func commentedShardQueries(shardQueries []*querypb.BoundQuery, marginComments sq
}
// TargetDestination implements the ContextVSchema interface
-func (vc *vcursorImpl) TargetDestination(qualifier string) (key.Destination, *vindexes.Keyspace, topodatapb.TabletType, error) {
+func (vc *VCursorImpl) TargetDestination(qualifier string) (key.Destination, *vindexes.Keyspace, topodatapb.TabletType, error) {
keyspaceName := vc.getActualKeyspace()
if vc.destination == nil && qualifier != "" {
keyspaceName = qualifier
}
if keyspaceName == "" {
- return nil, nil, 0, errNoKeyspace
+ return nil, nil, 0, ErrNoKeyspace
}
keyspace := vc.vschema.Keyspaces[keyspaceName]
if keyspace == nil {
@@ -914,63 +1018,63 @@ func (vc *vcursorImpl) TargetDestination(qualifier string) (key.Destination, *vi
}
// SetAutocommit implements the SessionActions interface
-func (vc *vcursorImpl) SetAutocommit(ctx context.Context, autocommit bool) error {
- if autocommit && vc.safeSession.InTransaction() {
- if err := vc.executor.Commit(ctx, vc.safeSession); err != nil {
+func (vc *VCursorImpl) SetAutocommit(ctx context.Context, autocommit bool) error {
+ if autocommit && vc.SafeSession.InTransaction() {
+ if err := vc.executor.Commit(ctx, vc.SafeSession); err != nil {
return err
}
}
- vc.safeSession.Autocommit = autocommit
+ vc.SafeSession.Autocommit = autocommit
return nil
}
// SetQueryTimeout implements the SessionActions interface
-func (vc *vcursorImpl) SetQueryTimeout(maxExecutionTime int64) {
- vc.safeSession.QueryTimeout = maxExecutionTime
+func (vc *VCursorImpl) SetQueryTimeout(maxExecutionTime int64) {
+ vc.SafeSession.QueryTimeout = maxExecutionTime
}
// SetClientFoundRows implements the SessionActions interface
-func (vc *vcursorImpl) SetClientFoundRows(_ context.Context, clientFoundRows bool) error {
- vc.safeSession.GetOrCreateOptions().ClientFoundRows = clientFoundRows
+func (vc *VCursorImpl) SetClientFoundRows(_ context.Context, clientFoundRows bool) error {
+ vc.SafeSession.GetOrCreateOptions().ClientFoundRows = clientFoundRows
return nil
}
// SetSkipQueryPlanCache implements the SessionActions interface
-func (vc *vcursorImpl) SetSkipQueryPlanCache(_ context.Context, skipQueryPlanCache bool) error {
- vc.safeSession.GetOrCreateOptions().SkipQueryPlanCache = skipQueryPlanCache
+func (vc *VCursorImpl) SetSkipQueryPlanCache(_ context.Context, skipQueryPlanCache bool) error {
+ vc.SafeSession.GetOrCreateOptions().SkipQueryPlanCache = skipQueryPlanCache
return nil
}
// SetSQLSelectLimit implements the SessionActions interface
-func (vc *vcursorImpl) SetSQLSelectLimit(limit int64) error {
- vc.safeSession.GetOrCreateOptions().SqlSelectLimit = limit
+func (vc *VCursorImpl) SetSQLSelectLimit(limit int64) error {
+ vc.SafeSession.GetOrCreateOptions().SqlSelectLimit = limit
return nil
}
// SetTransactionMode implements the SessionActions interface
-func (vc *vcursorImpl) SetTransactionMode(mode vtgatepb.TransactionMode) {
- vc.safeSession.TransactionMode = mode
+func (vc *VCursorImpl) SetTransactionMode(mode vtgatepb.TransactionMode) {
+ vc.SafeSession.TransactionMode = mode
}
// SetWorkload implements the SessionActions interface
-func (vc *vcursorImpl) SetWorkload(workload querypb.ExecuteOptions_Workload) {
- vc.safeSession.GetOrCreateOptions().Workload = workload
+func (vc *VCursorImpl) SetWorkload(workload querypb.ExecuteOptions_Workload) {
+ vc.SafeSession.GetOrCreateOptions().Workload = workload
}
// SetPlannerVersion implements the SessionActions interface
-func (vc *vcursorImpl) SetPlannerVersion(v plancontext.PlannerVersion) {
- vc.safeSession.GetOrCreateOptions().PlannerVersion = v
+func (vc *VCursorImpl) SetPlannerVersion(v plancontext.PlannerVersion) {
+ vc.SafeSession.GetOrCreateOptions().PlannerVersion = v
}
-func (vc *vcursorImpl) SetPriority(priority string) {
+func (vc *VCursorImpl) SetPriority(priority string) {
if priority != "" {
- vc.safeSession.GetOrCreateOptions().Priority = priority
- } else if vc.safeSession.Options != nil && vc.safeSession.Options.Priority != "" {
- vc.safeSession.Options.Priority = ""
+ vc.SafeSession.GetOrCreateOptions().Priority = priority
+ } else if vc.SafeSession.Options != nil && vc.SafeSession.Options.Priority != "" {
+ vc.SafeSession.Options.Priority = ""
}
}
-func (vc *vcursorImpl) SetExecQueryTimeout(timeout *int) {
+func (vc *VCursorImpl) SetExecQueryTimeout(timeout *int) {
// Determine the effective timeout: use passed timeout if non-nil, otherwise use session's query timeout if available
var execTimeout *int
if timeout != nil {
@@ -981,153 +1085,152 @@ func (vc *vcursorImpl) SetExecQueryTimeout(timeout *int) {
// If no effective timeout and no session options, return early
if execTimeout == nil {
- if vc.safeSession.GetOptions() == nil {
+ if vc.SafeSession.GetOptions() == nil {
return
}
- vc.safeSession.GetOrCreateOptions().Timeout = nil
+ vc.SafeSession.GetOrCreateOptions().Timeout = nil
return
}
vc.queryTimeout = time.Duration(*execTimeout) * time.Millisecond
// Set the authoritative timeout using the determined execTimeout
- vc.safeSession.GetOrCreateOptions().Timeout = &querypb.ExecuteOptions_AuthoritativeTimeout{
+ vc.SafeSession.GetOrCreateOptions().Timeout = &querypb.ExecuteOptions_AuthoritativeTimeout{
AuthoritativeTimeout: int64(*execTimeout),
}
}
// getQueryTimeout returns timeout based on the priority
// session setting > global default specified by a flag.
-func (vc *vcursorImpl) getQueryTimeout() int {
- sessionQueryTimeout := int(vc.safeSession.GetQueryTimeout())
+func (vc *VCursorImpl) getQueryTimeout() int {
+ sessionQueryTimeout := int(vc.SafeSession.GetQueryTimeout())
if sessionQueryTimeout != 0 {
return sessionQueryTimeout
}
- return queryTimeout
+ return vc.config.QueryTimeout
}
// SetConsolidator implements the SessionActions interface
-func (vc *vcursorImpl) SetConsolidator(consolidator querypb.ExecuteOptions_Consolidator) {
+func (vc *VCursorImpl) SetConsolidator(consolidator querypb.ExecuteOptions_Consolidator) {
// Avoid creating session Options when they do not yet exist and the
// consolidator is unspecified.
- if consolidator == querypb.ExecuteOptions_CONSOLIDATOR_UNSPECIFIED && vc.safeSession.GetOptions() == nil {
+ if consolidator == querypb.ExecuteOptions_CONSOLIDATOR_UNSPECIFIED && vc.SafeSession.GetOptions() == nil {
return
}
- vc.safeSession.GetOrCreateOptions().Consolidator = consolidator
+ vc.SafeSession.GetOrCreateOptions().Consolidator = consolidator
}
-func (vc *vcursorImpl) SetWorkloadName(workloadName string) {
+func (vc *VCursorImpl) SetWorkloadName(workloadName string) {
if workloadName != "" {
- vc.safeSession.GetOrCreateOptions().WorkloadName = workloadName
+ vc.SafeSession.GetOrCreateOptions().WorkloadName = workloadName
}
}
// SetFoundRows implements the SessionActions interface
-func (vc *vcursorImpl) SetFoundRows(foundRows uint64) {
- vc.safeSession.FoundRows = foundRows
- vc.safeSession.foundRowsHandled = true
+func (vc *VCursorImpl) SetFoundRows(foundRows uint64) {
+ vc.SafeSession.SetFoundRows(foundRows)
}
// SetDDLStrategy implements the SessionActions interface
-func (vc *vcursorImpl) SetDDLStrategy(strategy string) {
- vc.safeSession.SetDDLStrategy(strategy)
+func (vc *VCursorImpl) SetDDLStrategy(strategy string) {
+ vc.SafeSession.SetDDLStrategy(strategy)
}
// GetDDLStrategy implements the SessionActions interface
-func (vc *vcursorImpl) GetDDLStrategy() string {
- return vc.safeSession.GetDDLStrategy()
+func (vc *VCursorImpl) GetDDLStrategy() string {
+ return vc.SafeSession.GetDDLStrategy()
}
// SetMigrationContext implements the SessionActions interface
-func (vc *vcursorImpl) SetMigrationContext(migrationContext string) {
- vc.safeSession.SetMigrationContext(migrationContext)
+func (vc *VCursorImpl) SetMigrationContext(migrationContext string) {
+ vc.SafeSession.SetMigrationContext(migrationContext)
}
// GetMigrationContext implements the SessionActions interface
-func (vc *vcursorImpl) GetMigrationContext() string {
- return vc.safeSession.GetMigrationContext()
+func (vc *VCursorImpl) GetMigrationContext() string {
+ return vc.SafeSession.GetMigrationContext()
}
// GetSessionUUID implements the SessionActions interface
-func (vc *vcursorImpl) GetSessionUUID() string {
- return vc.safeSession.GetSessionUUID()
+func (vc *VCursorImpl) GetSessionUUID() string {
+ return vc.SafeSession.GetSessionUUID()
}
// SetSessionEnableSystemSettings implements the SessionActions interface
-func (vc *vcursorImpl) SetSessionEnableSystemSettings(_ context.Context, allow bool) error {
- vc.safeSession.SetSessionEnableSystemSettings(allow)
+func (vc *VCursorImpl) SetSessionEnableSystemSettings(_ context.Context, allow bool) error {
+ vc.SafeSession.SetSessionEnableSystemSettings(allow)
return nil
}
// GetSessionEnableSystemSettings implements the SessionActions interface
-func (vc *vcursorImpl) GetSessionEnableSystemSettings() bool {
- return vc.safeSession.GetSessionEnableSystemSettings()
+func (vc *VCursorImpl) GetSessionEnableSystemSettings() bool {
+ return vc.SafeSession.GetSessionEnableSystemSettings()
}
// SetReadAfterWriteGTID implements the SessionActions interface
-func (vc *vcursorImpl) SetReadAfterWriteGTID(vtgtid string) {
- vc.safeSession.SetReadAfterWriteGTID(vtgtid)
+func (vc *VCursorImpl) SetReadAfterWriteGTID(vtgtid string) {
+ vc.SafeSession.SetReadAfterWriteGTID(vtgtid)
}
// SetReadAfterWriteTimeout implements the SessionActions interface
-func (vc *vcursorImpl) SetReadAfterWriteTimeout(timeout float64) {
- vc.safeSession.SetReadAfterWriteTimeout(timeout)
+func (vc *VCursorImpl) SetReadAfterWriteTimeout(timeout float64) {
+ vc.SafeSession.SetReadAfterWriteTimeout(timeout)
}
// SetSessionTrackGTIDs implements the SessionActions interface
-func (vc *vcursorImpl) SetSessionTrackGTIDs(enable bool) {
- vc.safeSession.SetSessionTrackGtids(enable)
+func (vc *VCursorImpl) SetSessionTrackGTIDs(enable bool) {
+ vc.SafeSession.SetSessionTrackGtids(enable)
}
// HasCreatedTempTable implements the SessionActions interface
-func (vc *vcursorImpl) HasCreatedTempTable() {
- vc.safeSession.GetOrCreateOptions().HasCreatedTempTables = true
+func (vc *VCursorImpl) HasCreatedTempTable() {
+ vc.SafeSession.GetOrCreateOptions().HasCreatedTempTables = true
}
// GetWarnings implements the SessionActions interface
-func (vc *vcursorImpl) GetWarnings() []*querypb.QueryWarning {
- return vc.safeSession.GetWarnings()
+func (vc *VCursorImpl) GetWarnings() []*querypb.QueryWarning {
+ return vc.SafeSession.GetWarnings()
}
// AnyAdvisoryLockTaken implements the SessionActions interface
-func (vc *vcursorImpl) AnyAdvisoryLockTaken() bool {
- return vc.safeSession.HasAdvisoryLock()
+func (vc *VCursorImpl) AnyAdvisoryLockTaken() bool {
+ return vc.SafeSession.HasAdvisoryLock()
}
// AddAdvisoryLock implements the SessionActions interface
-func (vc *vcursorImpl) AddAdvisoryLock(name string) {
- vc.safeSession.AddAdvisoryLock(name)
+func (vc *VCursorImpl) AddAdvisoryLock(name string) {
+ vc.SafeSession.AddAdvisoryLock(name)
}
// RemoveAdvisoryLock implements the SessionActions interface
-func (vc *vcursorImpl) RemoveAdvisoryLock(name string) {
- vc.safeSession.RemoveAdvisoryLock(name)
+func (vc *VCursorImpl) RemoveAdvisoryLock(name string) {
+ vc.SafeSession.RemoveAdvisoryLock(name)
}
-func (vc *vcursorImpl) SetCommitOrder(co vtgatepb.CommitOrder) {
- vc.safeSession.SetCommitOrder(co)
+func (vc *VCursorImpl) SetCommitOrder(co vtgatepb.CommitOrder) {
+ vc.SafeSession.SetCommitOrder(co)
}
-func (vc *vcursorImpl) InTransaction() bool {
- return vc.safeSession.InTransaction()
+func (vc *VCursorImpl) InTransaction() bool {
+ return vc.SafeSession.InTransaction()
}
-func (vc *vcursorImpl) Commit(ctx context.Context) error {
- return vc.executor.Commit(ctx, vc.safeSession)
+func (vc *VCursorImpl) Commit(ctx context.Context) error {
+ return vc.executor.Commit(ctx, vc.SafeSession)
}
// GetDBDDLPluginName implements the VCursor interface
-func (vc *vcursorImpl) GetDBDDLPluginName() string {
- return dbDDLPlugin
+func (vc *VCursorImpl) GetDBDDLPluginName() string {
+ return vc.config.DBDDLPlugin
}
// KeyspaceAvailable implements the VCursor interface
-func (vc *vcursorImpl) KeyspaceAvailable(ks string) bool {
+func (vc *VCursorImpl) KeyspaceAvailable(ks string) bool {
_, exists := vc.executor.VSchema().Keyspaces[ks]
return exists
}
// ErrorIfShardedF implements the VCursor interface
-func (vc *vcursorImpl) ErrorIfShardedF(ks *vindexes.Keyspace, warn, errFormat string, params ...any) error {
+func (vc *VCursorImpl) ErrorIfShardedF(ks *vindexes.Keyspace, warn, errFormat string, params ...any) error {
if ks.Sharded {
return vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, errFormat, params...)
}
@@ -1136,19 +1239,25 @@ func (vc *vcursorImpl) ErrorIfShardedF(ks *vindexes.Keyspace, warn, errFormat st
return nil
}
+func (vc *VCursorImpl) GetAndEmptyWarnings() []*querypb.QueryWarning {
+ w := vc.warnings
+ vc.warnings = nil
+ return w
+}
+
// WarnUnshardedOnly implements the VCursor interface
-func (vc *vcursorImpl) WarnUnshardedOnly(format string, params ...any) {
- if vc.warnShardedOnly {
+func (vc *VCursorImpl) WarnUnshardedOnly(format string, params ...any) {
+ if vc.config.WarnShardedOnly {
vc.warnings = append(vc.warnings, &querypb.QueryWarning{
Code: uint32(sqlerror.ERNotSupportedYet),
Message: fmt.Sprintf(format, params...),
})
- warnings.Add("WarnUnshardedOnly", 1)
+ vc.executor.AddWarningCount("WarnUnshardedOnly", 1)
}
}
// PlannerWarning implements the VCursor interface
-func (vc *vcursorImpl) PlannerWarning(message string) {
+func (vc *VCursorImpl) PlannerWarning(message string) {
if message == "" {
return
}
@@ -1159,8 +1268,8 @@ func (vc *vcursorImpl) PlannerWarning(message string) {
}
// ForeignKeyMode implements the VCursor interface
-func (vc *vcursorImpl) ForeignKeyMode(keyspace string) (vschemapb.Keyspace_ForeignKeyMode, error) {
- if strings.ToLower(foreignKeyMode) == "disallow" {
+func (vc *VCursorImpl) ForeignKeyMode(keyspace string) (vschemapb.Keyspace_ForeignKeyMode, error) {
+ if vc.config.ForeignKeyMode == vschemapb.Keyspace_disallow {
return vschemapb.Keyspace_disallow, nil
}
ks := vc.vschema.Keyspaces[keyspace]
@@ -1170,7 +1279,7 @@ func (vc *vcursorImpl) ForeignKeyMode(keyspace string) (vschemapb.Keyspace_Forei
return ks.ForeignKeyMode, nil
}
-func (vc *vcursorImpl) KeyspaceError(keyspace string) error {
+func (vc *VCursorImpl) KeyspaceError(keyspace string) error {
ks := vc.vschema.Keyspaces[keyspace]
if ks == nil {
return vterrors.VT14004(keyspace)
@@ -1178,14 +1287,14 @@ func (vc *vcursorImpl) KeyspaceError(keyspace string) error {
return ks.Error
}
-func (vc *vcursorImpl) GetAggregateUDFs() []string {
+func (vc *VCursorImpl) GetAggregateUDFs() []string {
return vc.vschema.GetAggregateUDFs()
}
// FindMirrorRule finds the mirror rule for the requested table name and
// VSchema tablet type.
-func (vc *vcursorImpl) FindMirrorRule(name sqlparser.TableName) (*vindexes.MirrorRule, error) {
- destKeyspace, destTabletType, _, err := vc.executor.ParseDestinationTarget(name.Qualifier.String())
+func (vc *VCursorImpl) FindMirrorRule(name sqlparser.TableName) (*vindexes.MirrorRule, error) {
+ destKeyspace, destTabletType, _, err := vc.ParseDestinationTarget(name.Qualifier.String())
if err != nil {
return nil, err
}
@@ -1199,23 +1308,11 @@ func (vc *vcursorImpl) FindMirrorRule(name sqlparser.TableName) (*vindexes.Mirro
return mirrorRule, err
}
-// ParseDestinationTarget parses destination target string and sets default keyspace if possible.
-func parseDestinationTarget(targetString string, vschema *vindexes.VSchema) (string, topodatapb.TabletType, key.Destination, error) {
- destKeyspace, destTabletType, dest, err := topoprotopb.ParseDestination(targetString, defaultTabletType)
- // Set default keyspace
- if destKeyspace == "" && len(vschema.Keyspaces) == 1 {
- for k := range vschema.Keyspaces {
- destKeyspace = k
- }
- }
- return destKeyspace, destTabletType, dest, err
-}
-
-func (vc *vcursorImpl) keyForPlan(ctx context.Context, query string, buf io.StringWriter) {
+func (vc *VCursorImpl) KeyForPlan(ctx context.Context, query string, buf io.StringWriter) {
_, _ = buf.WriteString(vc.keyspace)
_, _ = buf.WriteString(vindexes.TabletTypeSuffix[vc.tabletType])
_, _ = buf.WriteString("+Collate:")
- _, _ = buf.WriteString(vc.Environment().CollationEnv().LookupName(vc.collation))
+ _, _ = buf.WriteString(vc.Environment().CollationEnv().LookupName(vc.config.Collation))
if vc.destination != nil {
switch vc.destination.(type) {
@@ -1245,11 +1342,11 @@ func (vc *vcursorImpl) keyForPlan(ctx context.Context, query string, buf io.Stri
_, _ = buf.WriteString(query)
}
-func (vc *vcursorImpl) GetKeyspace() string {
+func (vc *VCursorImpl) GetKeyspace() string {
return vc.keyspace
}
-func (vc *vcursorImpl) ExecuteVSchema(ctx context.Context, keyspace string, vschemaDDL *sqlparser.AlterVschema) error {
+func (vc *VCursorImpl) ExecuteVSchema(ctx context.Context, keyspace string, vschemaDDL *sqlparser.AlterVschema) error {
srvVschema := vc.vm.GetCurrentSrvVschema()
if srvVschema == nil {
return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "vschema not loaded")
@@ -1270,7 +1367,7 @@ func (vc *vcursorImpl) ExecuteVSchema(ctx context.Context, keyspace string, vsch
ksName = keyspace
}
if ksName == "" {
- return errNoKeyspace
+ return ErrNoKeyspace
}
ks := srvVschema.Keyspaces[ksName]
@@ -1284,43 +1381,43 @@ func (vc *vcursorImpl) ExecuteVSchema(ctx context.Context, keyspace string, vsch
return vc.vm.UpdateVSchema(ctx, ksName, srvVschema)
}
-func (vc *vcursorImpl) MessageStream(ctx context.Context, rss []*srvtopo.ResolvedShard, tableName string, callback func(*sqltypes.Result) error) error {
+func (vc *VCursorImpl) MessageStream(ctx context.Context, rss []*srvtopo.ResolvedShard, tableName string, callback func(*sqltypes.Result) error) error {
atomic.AddUint64(&vc.logStats.ShardQueries, uint64(len(rss)))
return vc.executor.ExecuteMessageStream(ctx, rss, tableName, callback)
}
-func (vc *vcursorImpl) VStream(ctx context.Context, rss []*srvtopo.ResolvedShard, filter *binlogdatapb.Filter, gtid string, callback func(evs []*binlogdatapb.VEvent) error) error {
+func (vc *VCursorImpl) VStream(ctx context.Context, rss []*srvtopo.ResolvedShard, filter *binlogdatapb.Filter, gtid string, callback func(evs []*binlogdatapb.VEvent) error) error {
return vc.executor.ExecuteVStream(ctx, rss, filter, gtid, callback)
}
-func (vc *vcursorImpl) ShowExec(ctx context.Context, command sqlparser.ShowCommandType, filter *sqlparser.ShowFilter) (*sqltypes.Result, error) {
+func (vc *VCursorImpl) ShowExec(ctx context.Context, command sqlparser.ShowCommandType, filter *sqlparser.ShowFilter) (*sqltypes.Result, error) {
switch command {
case sqlparser.VitessReplicationStatus:
- return vc.executor.showVitessReplicationStatus(ctx, filter)
+ return vc.executor.ShowVitessReplicationStatus(ctx, filter)
case sqlparser.VitessShards:
- return vc.executor.showShards(ctx, filter, vc.tabletType)
+ return vc.executor.ShowShards(ctx, filter, vc.tabletType)
case sqlparser.VitessTablets:
- return vc.executor.showTablets(filter)
+ return vc.executor.ShowTablets(filter)
case sqlparser.VitessVariables:
- return vc.executor.showVitessMetadata(ctx, filter)
+ return vc.executor.ShowVitessMetadata(ctx, filter)
default:
return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "bug: unexpected show command: %v", command)
}
}
-func (vc *vcursorImpl) GetVSchema() *vindexes.VSchema {
+func (vc *VCursorImpl) GetVSchema() *vindexes.VSchema {
return vc.vschema
}
-func (vc *vcursorImpl) GetSrvVschema() *vschemapb.SrvVSchema {
+func (vc *VCursorImpl) GetSrvVschema() *vschemapb.SrvVSchema {
return vc.vm.GetCurrentSrvVschema()
}
-func (vc *vcursorImpl) SetExec(ctx context.Context, name string, value string) error {
- return vc.executor.setVitessMetadata(ctx, name, value)
+func (vc *VCursorImpl) SetExec(ctx context.Context, name string, value string) error {
+ return vc.executor.SetVitessMetadata(ctx, name, value)
}
-func (vc *vcursorImpl) ThrottleApp(ctx context.Context, throttledAppRule *topodatapb.ThrottledAppRule) (err error) {
+func (vc *VCursorImpl) ThrottleApp(ctx context.Context, throttledAppRule *topodatapb.ThrottledAppRule) (err error) {
if throttledAppRule == nil {
return vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "ThrottleApp: nil rule")
}
@@ -1343,14 +1440,12 @@ func (vc *vcursorImpl) ThrottleApp(ctx context.Context, throttledAppRule *topoda
throttlerConfig.ThrottledApps = make(map[string]*topodatapb.ThrottledAppRule)
}
if req.ThrottledApp != nil && req.ThrottledApp.Name != "" {
- // TODO(shlomi) in v22: replace the following line with the commented out block
- throttlerConfig.ThrottledApps[req.ThrottledApp.Name] = req.ThrottledApp
- // timeNow := time.Now()
- // if protoutil.TimeFromProto(req.ThrottledApp.ExpiresAt).After(timeNow) {
- // throttlerConfig.ThrottledApps[req.ThrottledApp.Name] = req.ThrottledApp
- // } else {
- // delete(throttlerConfig.ThrottledApps, req.ThrottledApp.Name)
- // }
+ timeNow := time.Now()
+ if protoutil.TimeFromProto(req.ThrottledApp.ExpiresAt).After(timeNow) {
+ throttlerConfig.ThrottledApps[req.ThrottledApp.Name] = req.ThrottledApp
+ } else {
+ delete(throttlerConfig.ThrottledApps, req.ThrottledApp.Name)
+ }
}
return throttlerConfig
}
@@ -1378,147 +1473,60 @@ func (vc *vcursorImpl) ThrottleApp(ctx context.Context, throttledAppRule *topoda
return err
}
-func (vc *vcursorImpl) CanUseSetVar() bool {
- return vc.Environment().Parser().IsMySQL80AndAbove() && setVarEnabled
+func (vc *VCursorImpl) CanUseSetVar() bool {
+ return vc.Environment().Parser().IsMySQL80AndAbove() && vc.config.SetVarEnabled
}
-func (vc *vcursorImpl) ReleaseLock(ctx context.Context) error {
- return vc.executor.ReleaseLock(ctx, vc.safeSession)
+func (vc *VCursorImpl) ReleaseLock(ctx context.Context) error {
+ return vc.executor.ReleaseLock(ctx, vc.SafeSession)
}
-func (vc *vcursorImpl) cloneWithAutocommitSession() *vcursorImpl {
- safeSession := NewAutocommitSession(vc.safeSession.Session)
- safeSession.logging = vc.safeSession.logging
- return &vcursorImpl{
- safeSession: safeSession,
- keyspace: vc.keyspace,
- tabletType: vc.tabletType,
- destination: vc.destination,
- marginComments: vc.marginComments,
- executor: vc.executor,
- logStats: vc.logStats,
- collation: vc.collation,
- resolver: vc.resolver,
- vschema: vc.vschema,
- vm: vc.vm,
- topoServer: vc.topoServer,
- warnShardedOnly: vc.warnShardedOnly,
- pv: vc.pv,
- resultsObserver: vc.resultsObserver,
- }
-}
-
-func (vc *vcursorImpl) VExplainLogging() {
- vc.safeSession.EnableLogging(vc.Environment().Parser())
+func (vc *VCursorImpl) VExplainLogging() {
+ vc.SafeSession.EnableLogging(vc.Environment().Parser())
}
-func (vc *vcursorImpl) GetVExplainLogs() []engine.ExecuteEntry {
- return vc.safeSession.logging.GetLogs()
+func (vc *VCursorImpl) GetVExplainLogs() []engine.ExecuteEntry {
+ return vc.SafeSession.GetLogs()
}
-func (vc *vcursorImpl) FindRoutedShard(keyspace, shard string) (keyspaceName string, err error) {
+func (vc *VCursorImpl) FindRoutedShard(keyspace, shard string) (keyspaceName string, err error) {
return vc.vschema.FindRoutedShard(keyspace, shard)
}
-func (vc *vcursorImpl) IsViewsEnabled() bool {
- return enableViews
-}
-
-func (vc *vcursorImpl) GetUDV(name string) *querypb.BindVariable {
- return vc.safeSession.GetUDV(name)
+func (vc *VCursorImpl) IsViewsEnabled() bool {
+ return vc.config.EnableViews
}
-func (vc *vcursorImpl) PlanPrepareStatement(ctx context.Context, query string) (*engine.Plan, sqlparser.Statement, error) {
- return vc.executor.planPrepareStmt(ctx, vc, query)
+func (vc *VCursorImpl) GetUDV(name string) *querypb.BindVariable {
+ return vc.SafeSession.GetUDV(name)
}
-func (vc *vcursorImpl) ClearPrepareData(name string) {
- delete(vc.safeSession.PrepareStatement, name)
+func (vc *VCursorImpl) PlanPrepareStatement(ctx context.Context, query string) (*engine.Plan, sqlparser.Statement, error) {
+ return vc.executor.PlanPrepareStmt(ctx, vc, query)
}
-func (vc *vcursorImpl) StorePrepareData(stmtName string, prepareData *vtgatepb.PrepareData) {
- vc.safeSession.StorePrepareData(stmtName, prepareData)
+func (vc *VCursorImpl) ClearPrepareData(name string) {
+ delete(vc.SafeSession.PrepareStatement, name)
}
-func (vc *vcursorImpl) GetPrepareData(stmtName string) *vtgatepb.PrepareData {
- return vc.safeSession.GetPrepareData(stmtName)
+func (vc *VCursorImpl) StorePrepareData(stmtName string, prepareData *vtgatepb.PrepareData) {
+ vc.SafeSession.StorePrepareData(stmtName, prepareData)
}
-func (vc *vcursorImpl) GetWarmingReadsPercent() int {
- return vc.warmingReadsPercent
+func (vc *VCursorImpl) GetPrepareData(stmtName string) *vtgatepb.PrepareData {
+ return vc.SafeSession.GetPrepareData(stmtName)
}
-func (vc *vcursorImpl) GetWarmingReadsChannel() chan bool {
- return vc.warmingReadsChannel
-}
-
-func (vc *vcursorImpl) CloneForReplicaWarming(ctx context.Context) engine.VCursor {
- callerId := callerid.EffectiveCallerIDFromContext(ctx)
- immediateCallerId := callerid.ImmediateCallerIDFromContext(ctx)
-
- timedCtx, _ := context.WithTimeout(context.Background(), warmingReadsQueryTimeout) // nolint
- clonedCtx := callerid.NewContext(timedCtx, callerId, immediateCallerId)
-
- v := &vcursorImpl{
- safeSession: NewAutocommitSession(vc.safeSession.Session),
- keyspace: vc.keyspace,
- tabletType: topodatapb.TabletType_REPLICA,
- destination: vc.destination,
- marginComments: vc.marginComments,
- executor: vc.executor,
- resolver: vc.resolver,
- topoServer: vc.topoServer,
- logStats: &logstats.LogStats{Ctx: clonedCtx},
- collation: vc.collation,
- ignoreMaxMemoryRows: vc.ignoreMaxMemoryRows,
- vschema: vc.vschema,
- vm: vc.vm,
- semTable: vc.semTable,
- warnShardedOnly: vc.warnShardedOnly,
- warnings: vc.warnings,
- pv: vc.pv,
- resultsObserver: nullResultsObserver{},
- }
-
- v.marginComments.Trailing += "/* warming read */"
-
- return v
+func (vc *VCursorImpl) GetWarmingReadsPercent() int {
+ return vc.config.WarmingReadsPercent
}
-func (vc *vcursorImpl) CloneForMirroring(ctx context.Context) engine.VCursor {
- callerId := callerid.EffectiveCallerIDFromContext(ctx)
- immediateCallerId := callerid.ImmediateCallerIDFromContext(ctx)
-
- clonedCtx := callerid.NewContext(ctx, callerId, immediateCallerId)
-
- v := &vcursorImpl{
- safeSession: NewAutocommitSession(vc.safeSession.Session),
- keyspace: vc.keyspace,
- tabletType: vc.tabletType,
- destination: vc.destination,
- marginComments: vc.marginComments,
- executor: vc.executor,
- resolver: vc.resolver,
- topoServer: vc.topoServer,
- logStats: &logstats.LogStats{Ctx: clonedCtx},
- collation: vc.collation,
- ignoreMaxMemoryRows: vc.ignoreMaxMemoryRows,
- vschema: vc.vschema,
- vm: vc.vm,
- semTable: vc.semTable,
- warnShardedOnly: vc.warnShardedOnly,
- warnings: vc.warnings,
- pv: vc.pv,
- resultsObserver: nullResultsObserver{},
- }
-
- v.marginComments.Trailing += "/* mirror query */"
-
- return v
+func (vc *VCursorImpl) GetWarmingReadsChannel() chan bool {
+ return vc.config.WarmingReadsChannel
}
// UpdateForeignKeyChecksState updates the foreign key checks state of the vcursor.
-func (vc *vcursorImpl) UpdateForeignKeyChecksState(fkStateFromQuery *bool) {
+func (vc *VCursorImpl) UpdateForeignKeyChecksState(fkStateFromQuery *bool) {
// Initialize the state to unspecified.
vc.fkChecksState = nil
// If the query has a SET_VAR optimizer hint that explicitly sets the foreign key checks state,
@@ -1528,17 +1536,36 @@ func (vc *vcursorImpl) UpdateForeignKeyChecksState(fkStateFromQuery *bool) {
return
}
// If the query doesn't have anything, then we consult the session state.
- vc.fkChecksState = vc.safeSession.ForeignKeyChecks()
+ vc.fkChecksState = vc.SafeSession.ForeignKeyChecks()
}
// GetForeignKeyChecksState gets the stored foreign key checks state in the vcursor.
-func (vc *vcursorImpl) GetForeignKeyChecksState() *bool {
+func (vc *VCursorImpl) GetForeignKeyChecksState() *bool {
return vc.fkChecksState
}
// RecordMirrorStats is used to record stats about a mirror query.
-func (vc *vcursorImpl) RecordMirrorStats(sourceExecTime, targetExecTime time.Duration, targetErr error) {
+func (vc *VCursorImpl) RecordMirrorStats(sourceExecTime, targetExecTime time.Duration, targetErr error) {
vc.logStats.MirrorSourceExecuteTime = sourceExecTime
vc.logStats.MirrorTargetExecuteTime = targetExecTime
vc.logStats.MirrorTargetError = targetErr
}
+
+func (vc *VCursorImpl) GetMarginComments() sqlparser.MarginComments {
+ return vc.marginComments
+}
+
+func (vc *VCursorImpl) CachePlan() bool {
+ return vc.SafeSession.CachePlan()
+}
+
+func (vc *VCursorImpl) GetContextWithTimeOut(ctx context.Context) (context.Context, context.CancelFunc) {
+ if vc.queryTimeout == 0 {
+ return ctx, func() {}
+ }
+ return context.WithTimeout(ctx, vc.queryTimeout)
+}
+
+func (vc *VCursorImpl) IgnoreMaxMemoryRows() bool {
+ return vc.ignoreMaxMemoryRows
+}
diff --git a/go/vt/vtgate/vcursor_impl_test.go b/go/vt/vtgate/executorcontext/vcursor_impl_test.go
similarity index 60%
rename from go/vt/vtgate/vcursor_impl_test.go
rename to go/vt/vtgate/executorcontext/vcursor_impl_test.go
index 95d9a18078d..16d2c03bf1c 100644
--- a/go/vt/vtgate/vcursor_impl_test.go
+++ b/go/vt/vtgate/executorcontext/vcursor_impl_test.go
@@ -1,8 +1,23 @@
-package vtgate
+/*
+Copyright 2024 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package executorcontext
import (
"context"
- "encoding/hex"
"errors"
"fmt"
"strconv"
@@ -12,10 +27,16 @@ import (
"github.com/stretchr/testify/require"
+ "vitess.io/vitess/go/mysql/collations"
+ "vitess.io/vitess/go/sqltypes"
+ binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata"
+ "vitess.io/vitess/go/vt/vtenv"
+ "vitess.io/vitess/go/vt/vtgate/engine"
+ "vitess.io/vitess/go/vt/vtgate/vtgateservice"
+
"vitess.io/vitess/go/vt/key"
"vitess.io/vitess/go/vt/sqlparser"
"vitess.io/vitess/go/vt/srvtopo"
- "vitess.io/vitess/go/vt/topo"
"vitess.io/vitess/go/vt/vtgate/logstats"
"vitess.io/vitess/go/vt/vtgate/vindexes"
@@ -39,48 +60,6 @@ func (f fakeVSchemaOperator) UpdateVSchema(ctx context.Context, ksName string, v
panic("implement me")
}
-type fakeTopoServer struct{}
-
-// GetTopoServer returns the full topo.Server instance.
-func (f *fakeTopoServer) GetTopoServer() (*topo.Server, error) {
- return nil, nil
-}
-
-// GetSrvKeyspaceNames returns the list of keyspaces served in
-// the provided cell.
-func (f *fakeTopoServer) GetSrvKeyspaceNames(ctx context.Context, cell string, staleOK bool) ([]string, error) {
- return []string{"ks1"}, nil
-}
-
-// GetSrvKeyspace returns the SrvKeyspace for a cell/keyspace.
-func (f *fakeTopoServer) GetSrvKeyspace(ctx context.Context, cell, keyspace string) (*topodatapb.SrvKeyspace, error) {
- zeroHexBytes, _ := hex.DecodeString("")
- eightyHexBytes, _ := hex.DecodeString("80")
- ks := &topodatapb.SrvKeyspace{
- Partitions: []*topodatapb.SrvKeyspace_KeyspacePartition{
- {
- ServedType: topodatapb.TabletType_PRIMARY,
- ShardReferences: []*topodatapb.ShardReference{
- {Name: "-80", KeyRange: &topodatapb.KeyRange{Start: zeroHexBytes, End: eightyHexBytes}},
- {Name: "80-", KeyRange: &topodatapb.KeyRange{Start: eightyHexBytes, End: zeroHexBytes}},
- },
- },
- },
- }
- return ks, nil
-}
-
-func (f *fakeTopoServer) WatchSrvKeyspace(ctx context.Context, cell, keyspace string, callback func(*topodatapb.SrvKeyspace, error) bool) {
- ks, err := f.GetSrvKeyspace(ctx, cell, keyspace)
- callback(ks, err)
-}
-
-// WatchSrvVSchema starts watching the SrvVSchema object for
-// the provided cell. It will call the callback when
-// a new value or an error occurs.
-func (f *fakeTopoServer) WatchSrvVSchema(ctx context.Context, cell string, callback func(*vschemapb.SrvVSchema, error) bool) {
-}
-
func TestDestinationKeyspace(t *testing.T) {
ks1 := &vindexes.Keyspace{
Name: "ks1",
@@ -184,13 +163,17 @@ func TestDestinationKeyspace(t *testing.T) {
}, {
vschema: vschemaWith2KS,
targetString: "",
- expectedError: errNoKeyspace.Error(),
+ expectedError: ErrNoKeyspace.Error(),
}}
- r, _, _, _, _ := createExecutorEnv(t)
for i, tc := range tests {
t.Run(strconv.Itoa(i)+tc.targetString, func(t *testing.T) {
- impl, _ := newVCursorImpl(NewSafeSession(&vtgatepb.Session{TargetString: tc.targetString}), sqlparser.MarginComments{}, r, nil, &fakeVSchemaOperator{vschema: tc.vschema}, tc.vschema, nil, nil, false, querypb.ExecuteOptions_Gen4)
+ session := NewSafeSession(&vtgatepb.Session{TargetString: tc.targetString})
+ impl, _ := NewVCursorImpl(session, sqlparser.MarginComments{}, nil, nil,
+ &fakeVSchemaOperator{vschema: tc.vschema}, tc.vschema, nil, nil,
+ fakeObserver{}, VCursorConfig{
+ DefaultTabletType: topodatapb.TabletType_PRIMARY,
+ })
impl.vschema = tc.vschema
dest, keyspace, tabletType, err := impl.TargetDestination(tc.qualifier)
if tc.expectedError == "" {
@@ -250,15 +233,15 @@ func TestSetTarget(t *testing.T) {
expectedError: "can't execute the given command because you have an active transaction",
}}
- r, _, _, _, _ := createExecutorEnv(t)
for i, tc := range tests {
t.Run(fmt.Sprintf("%d#%s", i, tc.targetString), func(t *testing.T) {
- vc, _ := newVCursorImpl(NewSafeSession(&vtgatepb.Session{InTransaction: true}), sqlparser.MarginComments{}, r, nil, &fakeVSchemaOperator{vschema: tc.vschema}, tc.vschema, nil, nil, false, querypb.ExecuteOptions_Gen4)
+ cfg := VCursorConfig{DefaultTabletType: topodatapb.TabletType_PRIMARY}
+ vc, _ := NewVCursorImpl(NewSafeSession(&vtgatepb.Session{InTransaction: true}), sqlparser.MarginComments{}, nil, nil, &fakeVSchemaOperator{vschema: tc.vschema}, tc.vschema, nil, nil, fakeObserver{}, cfg)
vc.vschema = tc.vschema
err := vc.SetTarget(tc.targetString)
if tc.expectedError == "" {
require.NoError(t, err)
- require.Equal(t, vc.safeSession.TargetString, tc.targetString)
+ require.Equal(t, vc.SafeSession.TargetString, tc.targetString)
} else {
require.EqualError(t, err, tc.expectedError)
}
@@ -299,17 +282,20 @@ func TestKeyForPlan(t *testing.T) {
expectedPlanPrefixKey: "ks1@replica+Collate:utf8mb4_0900_ai_ci+Query:SELECT 1",
}}
- r, _, _, _, _ := createExecutorEnv(t)
for i, tc := range tests {
t.Run(fmt.Sprintf("%d#%s", i, tc.targetString), func(t *testing.T) {
ss := NewSafeSession(&vtgatepb.Session{InTransaction: false})
ss.SetTargetString(tc.targetString)
- vc, err := newVCursorImpl(ss, sqlparser.MarginComments{}, r, nil, &fakeVSchemaOperator{vschema: tc.vschema}, tc.vschema, srvtopo.NewResolver(&fakeTopoServer{}, nil, ""), nil, false, querypb.ExecuteOptions_Gen4)
+ cfg := VCursorConfig{
+ Collation: collations.CollationUtf8mb4ID,
+ DefaultTabletType: topodatapb.TabletType_PRIMARY,
+ }
+ vc, err := NewVCursorImpl(ss, sqlparser.MarginComments{}, &fakeExecutor{}, nil, &fakeVSchemaOperator{vschema: tc.vschema}, tc.vschema, srvtopo.NewResolver(&FakeTopoServer{}, nil, ""), nil, fakeObserver{}, cfg)
require.NoError(t, err)
vc.vschema = tc.vschema
var buf strings.Builder
- vc.keyForPlan(context.Background(), "SELECT 1", &buf)
+ vc.KeyForPlan(context.Background(), "SELECT 1", &buf)
require.Equal(t, tc.expectedPlanPrefixKey, buf.String())
})
}
@@ -327,8 +313,7 @@ func TestFirstSortedKeyspace(t *testing.T) {
},
}
- r, _, _, _, _ := createExecutorEnv(t)
- vc, err := newVCursorImpl(NewSafeSession(nil), sqlparser.MarginComments{}, r, nil, &fakeVSchemaOperator{vschema: vschemaWith2KS}, vschemaWith2KS, srvtopo.NewResolver(&fakeTopoServer{}, nil, ""), nil, false, querypb.ExecuteOptions_Gen4)
+ vc, err := NewVCursorImpl(NewSafeSession(nil), sqlparser.MarginComments{}, nil, nil, &fakeVSchemaOperator{vschema: vschemaWith2KS}, vschemaWith2KS, srvtopo.NewResolver(&FakeTopoServer{}, nil, ""), nil, fakeObserver{}, VCursorConfig{})
require.NoError(t, err)
ks, err := vc.FirstSortedKeyspace()
require.NoError(t, err)
@@ -338,13 +323,13 @@ func TestFirstSortedKeyspace(t *testing.T) {
// TestSetExecQueryTimeout tests the SetExecQueryTimeout method.
// Validates the timeout value is set based on override rule.
func TestSetExecQueryTimeout(t *testing.T) {
- executor, _, _, _, _ := createExecutorEnv(t)
safeSession := NewSafeSession(nil)
- vc, err := newVCursorImpl(safeSession, sqlparser.MarginComments{}, executor, nil, nil, &vindexes.VSchema{}, nil, nil, false, querypb.ExecuteOptions_Gen4)
+ vc, err := NewVCursorImpl(safeSession, sqlparser.MarginComments{}, nil, nil, nil, &vindexes.VSchema{}, nil, nil, fakeObserver{}, VCursorConfig{
+ // flag timeout
+ QueryTimeout: 20,
+ })
require.NoError(t, err)
- // flag timeout
- queryTimeout = 20
vc.SetExecQueryTimeout(nil)
require.Equal(t, 20*time.Millisecond, vc.queryTimeout)
require.NotNil(t, safeSession.Options.Timeout)
@@ -371,8 +356,8 @@ func TestSetExecQueryTimeout(t *testing.T) {
require.NotNil(t, safeSession.Options.Timeout)
require.EqualValues(t, 0, safeSession.Options.GetAuthoritativeTimeout())
- // reset
- queryTimeout = 0
+ // reset flag timeout
+ vc.config.QueryTimeout = 0
safeSession.SetQueryTimeout(0)
vc.SetExecQueryTimeout(nil)
require.Equal(t, 0*time.Millisecond, vc.queryTimeout)
@@ -381,10 +366,9 @@ func TestSetExecQueryTimeout(t *testing.T) {
}
func TestRecordMirrorStats(t *testing.T) {
- executor, _, _, _, _ := createExecutorEnv(t)
safeSession := NewSafeSession(nil)
logStats := logstats.NewLogStats(context.Background(), t.Name(), "select 1", "", nil)
- vc, err := newVCursorImpl(safeSession, sqlparser.MarginComments{}, executor, logStats, nil, &vindexes.VSchema{}, nil, nil, false, querypb.ExecuteOptions_Gen4)
+ vc, err := NewVCursorImpl(safeSession, sqlparser.MarginComments{}, nil, logStats, nil, &vindexes.VSchema{}, nil, nil, fakeObserver{}, VCursorConfig{})
require.NoError(t, err)
require.Zero(t, logStats.MirrorSourceExecuteTime)
@@ -397,3 +381,113 @@ func TestRecordMirrorStats(t *testing.T) {
require.Equal(t, 20*time.Millisecond, logStats.MirrorTargetExecuteTime)
require.ErrorContains(t, logStats.MirrorTargetError, "test error")
}
+
+type fakeExecutor struct{}
+
+func (f fakeExecutor) Execute(ctx context.Context, mysqlCtx vtgateservice.MySQLConnection, method string, session *SafeSession, s string, vars map[string]*querypb.BindVariable) (*sqltypes.Result, error) {
+ // TODO implement me
+ panic("implement me")
+}
+
+func (f fakeExecutor) ExecuteMultiShard(ctx context.Context, primitive engine.Primitive, rss []*srvtopo.ResolvedShard, queries []*querypb.BoundQuery, session *SafeSession, autocommit bool, ignoreMaxMemoryRows bool, resultsObserver ResultsObserver) (qr *sqltypes.Result, errs []error) {
+ // TODO implement me
+ panic("implement me")
+}
+
+func (f fakeExecutor) StreamExecuteMulti(ctx context.Context, primitive engine.Primitive, query string, rss []*srvtopo.ResolvedShard, vars []map[string]*querypb.BindVariable, session *SafeSession, autocommit bool, callback func(reply *sqltypes.Result) error, observer ResultsObserver) []error {
+ // TODO implement me
+ panic("implement me")
+}
+
+func (f fakeExecutor) ExecuteLock(ctx context.Context, rs *srvtopo.ResolvedShard, query *querypb.BoundQuery, session *SafeSession, lockFuncType sqlparser.LockingFuncType) (*sqltypes.Result, error) {
+ // TODO implement me
+ panic("implement me")
+}
+
+func (f fakeExecutor) Commit(ctx context.Context, safeSession *SafeSession) error {
+ // TODO implement me
+ panic("implement me")
+}
+
+func (f fakeExecutor) ExecuteMessageStream(ctx context.Context, rss []*srvtopo.ResolvedShard, name string, callback func(*sqltypes.Result) error) error {
+ // TODO implement me
+ panic("implement me")
+}
+
+func (f fakeExecutor) ExecuteVStream(ctx context.Context, rss []*srvtopo.ResolvedShard, filter *binlogdatapb.Filter, gtid string, callback func(evs []*binlogdatapb.VEvent) error) error {
+ // TODO implement me
+ panic("implement me")
+}
+
+func (f fakeExecutor) ReleaseLock(ctx context.Context, session *SafeSession) error {
+ // TODO implement me
+ panic("implement me")
+}
+
+func (f fakeExecutor) ShowVitessReplicationStatus(ctx context.Context, filter *sqlparser.ShowFilter) (*sqltypes.Result, error) {
+ // TODO implement me
+ panic("implement me")
+}
+
+func (f fakeExecutor) ShowShards(ctx context.Context, filter *sqlparser.ShowFilter, destTabletType topodatapb.TabletType) (*sqltypes.Result, error) {
+ // TODO implement me
+ panic("implement me")
+}
+
+func (f fakeExecutor) ShowTablets(filter *sqlparser.ShowFilter) (*sqltypes.Result, error) {
+ // TODO implement me
+ panic("implement me")
+}
+
+func (f fakeExecutor) ShowVitessMetadata(ctx context.Context, filter *sqlparser.ShowFilter) (*sqltypes.Result, error) {
+ // TODO implement me
+ panic("implement me")
+}
+
+func (f fakeExecutor) SetVitessMetadata(ctx context.Context, name, value string) error {
+ // TODO implement me
+ panic("implement me")
+}
+
+func (f fakeExecutor) ParseDestinationTarget(targetString string) (string, topodatapb.TabletType, key.Destination, error) {
+ // TODO implement me
+ panic("implement me")
+}
+
+func (f fakeExecutor) VSchema() *vindexes.VSchema {
+ // TODO implement me
+ panic("implement me")
+}
+
+func (f fakeExecutor) PlanPrepareStmt(ctx context.Context, vcursor *VCursorImpl, query string) (*engine.Plan, sqlparser.Statement, error) {
+ // TODO implement me
+ panic("implement me")
+}
+
+func (f fakeExecutor) Environment() *vtenv.Environment {
+ return vtenv.NewTestEnv()
+}
+
+func (f fakeExecutor) ReadTransaction(ctx context.Context, transactionID string) (*querypb.TransactionMetadata, error) {
+ // TODO implement me
+ panic("implement me")
+}
+
+func (f fakeExecutor) UnresolvedTransactions(ctx context.Context, targets []*querypb.Target) ([]*querypb.TransactionMetadata, error) {
+ // TODO implement me
+ panic("implement me")
+}
+
+func (f fakeExecutor) AddWarningCount(name string, value int64) {
+ // TODO implement me
+ panic("implement me")
+}
+
+var _ iExecute = (*fakeExecutor)(nil)
+
+type fakeObserver struct{}
+
+func (f fakeObserver) Observe(*sqltypes.Result) {
+}
+
+var _ ResultsObserver = (*fakeObserver)(nil)
diff --git a/go/vt/vtgate/legacy_scatter_conn_test.go b/go/vt/vtgate/legacy_scatter_conn_test.go
index 4512fc0724e..0d49e7b7bd9 100644
--- a/go/vt/vtgate/legacy_scatter_conn_test.go
+++ b/go/vt/vtgate/legacy_scatter_conn_test.go
@@ -26,6 +26,8 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
+ econtext "vitess.io/vitess/go/vt/vtgate/executorcontext"
+
"vitess.io/vitess/go/mysql/collations"
"vitess.io/vitess/go/sqltypes"
@@ -99,7 +101,7 @@ func TestLegacyExecuteFailOnAutocommit(t *testing.T) {
},
Autocommit: false,
}
- _, errs := sc.ExecuteMultiShard(ctx, nil, rss, queries, NewSafeSession(session), true /*autocommit*/, false, nullResultsObserver{})
+ _, errs := sc.ExecuteMultiShard(ctx, nil, rss, queries, econtext.NewSafeSession(session), true /*autocommit*/, false, nullResultsObserver{})
err := vterrors.Aggregate(errs)
require.Error(t, err)
require.Contains(t, err.Error(), "in autocommit mode, transactionID should be zero but was: 123")
@@ -123,7 +125,7 @@ func TestScatterConnExecuteMulti(t *testing.T) {
}
}
- qr, errs := sc.ExecuteMultiShard(ctx, nil, rss, queries, NewSafeSession(nil), false /*autocommit*/, false, nullResultsObserver{})
+ qr, errs := sc.ExecuteMultiShard(ctx, nil, rss, queries, econtext.NewSafeSession(nil), false /*autocommit*/, false, nullResultsObserver{})
return qr, vterrors.Aggregate(errs)
})
}
@@ -138,7 +140,7 @@ func TestScatterConnStreamExecuteMulti(t *testing.T) {
bvs := make([]map[string]*querypb.BindVariable, len(rss))
qr := new(sqltypes.Result)
var mu sync.Mutex
- errors := sc.StreamExecuteMulti(ctx, nil, "query", rss, bvs, NewSafeSession(&vtgatepb.Session{InTransaction: true}), true /* autocommit */, func(r *sqltypes.Result) error {
+ errors := sc.StreamExecuteMulti(ctx, nil, "query", rss, bvs, econtext.NewSafeSession(&vtgatepb.Session{InTransaction: true}), true /* autocommit */, func(r *sqltypes.Result) error {
mu.Lock()
defer mu.Unlock()
qr.AppendResult(r)
@@ -280,7 +282,7 @@ func TestMaxMemoryRows(t *testing.T) {
[]key.Destination{key.DestinationShard("0"), key.DestinationShard("1")})
require.NoError(t, err)
- session := NewSafeSession(&vtgatepb.Session{InTransaction: true})
+ session := econtext.NewSafeSession(&vtgatepb.Session{InTransaction: true})
queries := []*querypb.BoundQuery{{
Sql: "query1",
BindVariables: map[string]*querypb.BindVariable{},
@@ -328,7 +330,7 @@ func TestLegaceHealthCheckFailsOnReservedConnections(t *testing.T) {
res := srvtopo.NewResolver(newSandboxForCells(ctx, []string{"aa"}), sc.gateway, "aa")
- session := NewSafeSession(&vtgatepb.Session{InTransaction: false, InReservedConn: true})
+ session := econtext.NewSafeSession(&vtgatepb.Session{InTransaction: false, InReservedConn: true})
destinations := []key.Destination{key.DestinationShard("0")}
rss, _, err := res.ResolveDestinations(ctx, keyspace, topodatapb.TabletType_REPLICA, nil, destinations)
require.NoError(t, err)
@@ -346,12 +348,12 @@ func TestLegaceHealthCheckFailsOnReservedConnections(t *testing.T) {
require.Error(t, vterrors.Aggregate(errs))
}
-func executeOnShards(t *testing.T, ctx context.Context, res *srvtopo.Resolver, keyspace string, sc *ScatterConn, session *SafeSession, destinations []key.Destination) {
+func executeOnShards(t *testing.T, ctx context.Context, res *srvtopo.Resolver, keyspace string, sc *ScatterConn, session *econtext.SafeSession, destinations []key.Destination) {
t.Helper()
require.Empty(t, executeOnShardsReturnsErr(t, ctx, res, keyspace, sc, session, destinations))
}
-func executeOnShardsReturnsErr(t *testing.T, ctx context.Context, res *srvtopo.Resolver, keyspace string, sc *ScatterConn, session *SafeSession, destinations []key.Destination) error {
+func executeOnShardsReturnsErr(t *testing.T, ctx context.Context, res *srvtopo.Resolver, keyspace string, sc *ScatterConn, session *econtext.SafeSession, destinations []key.Destination) error {
t.Helper()
rss, _, err := res.ResolveDestinations(ctx, keyspace, topodatapb.TabletType_REPLICA, nil, destinations)
require.NoError(t, err)
@@ -374,7 +376,7 @@ type recordingResultsObserver struct {
recorded []*sqltypes.Result
}
-func (o *recordingResultsObserver) observe(result *sqltypes.Result) {
+func (o *recordingResultsObserver) Observe(result *sqltypes.Result) {
mu.Lock()
o.recorded = append(o.recorded, result)
mu.Unlock()
@@ -429,7 +431,7 @@ func TestMultiExecs(t *testing.T) {
observer := recordingResultsObserver{}
- session := NewSafeSession(&vtgatepb.Session{})
+ session := econtext.NewSafeSession(&vtgatepb.Session{})
_, err := sc.ExecuteMultiShard(ctx, nil, rss, queries, session, false, false, &observer)
require.NoError(t, vterrors.Aggregate(err))
if len(sbc0.Queries) == 0 || len(sbc1.Queries) == 0 {
@@ -511,7 +513,7 @@ func TestScatterConnSingleDB(t *testing.T) {
want := "multi-db transaction attempted"
// TransactionMode_SINGLE in session
- session := NewSafeSession(&vtgatepb.Session{InTransaction: true, TransactionMode: vtgatepb.TransactionMode_SINGLE})
+ session := econtext.NewSafeSession(&vtgatepb.Session{InTransaction: true, TransactionMode: vtgatepb.TransactionMode_SINGLE})
queries := []*querypb.BoundQuery{{Sql: "query1"}}
_, errors := sc.ExecuteMultiShard(ctx, nil, rss0, queries, session, false, false, nullResultsObserver{})
require.Empty(t, errors)
@@ -521,7 +523,7 @@ func TestScatterConnSingleDB(t *testing.T) {
// TransactionMode_SINGLE in txconn
sc.txConn.mode = vtgatepb.TransactionMode_SINGLE
- session = NewSafeSession(&vtgatepb.Session{InTransaction: true})
+ session = econtext.NewSafeSession(&vtgatepb.Session{InTransaction: true})
_, errors = sc.ExecuteMultiShard(ctx, nil, rss0, queries, session, false, false, nullResultsObserver{})
require.Empty(t, errors)
_, errors = sc.ExecuteMultiShard(ctx, nil, rss1, queries, session, false, false, nullResultsObserver{})
@@ -530,7 +532,7 @@ func TestScatterConnSingleDB(t *testing.T) {
// TransactionMode_MULTI in txconn. Should not fail.
sc.txConn.mode = vtgatepb.TransactionMode_MULTI
- session = NewSafeSession(&vtgatepb.Session{InTransaction: true})
+ session = econtext.NewSafeSession(&vtgatepb.Session{InTransaction: true})
_, errors = sc.ExecuteMultiShard(ctx, nil, rss0, queries, session, false, false, nullResultsObserver{})
require.Empty(t, errors)
_, errors = sc.ExecuteMultiShard(ctx, nil, rss1, queries, session, false, false, nullResultsObserver{})
@@ -601,7 +603,7 @@ func TestReservePrequeries(t *testing.T) {
res := srvtopo.NewResolver(newSandboxForCells(ctx, []string{"aa"}), sc.gateway, "aa")
- session := NewSafeSession(&vtgatepb.Session{
+ session := econtext.NewSafeSession(&vtgatepb.Session{
InTransaction: false,
InReservedConn: true,
SystemVariables: map[string]string{
diff --git a/go/vt/vtgate/plan_execute.go b/go/vt/vtgate/plan_execute.go
index 1c0915470ef..db7923c09f0 100644
--- a/go/vt/vtgate/plan_execute.go
+++ b/go/vt/vtgate/plan_execute.go
@@ -29,11 +29,12 @@ import (
"vitess.io/vitess/go/vt/sqlparser"
"vitess.io/vitess/go/vt/vterrors"
"vitess.io/vitess/go/vt/vtgate/engine"
+ econtext "vitess.io/vitess/go/vt/vtgate/executorcontext"
"vitess.io/vitess/go/vt/vtgate/logstats"
"vitess.io/vitess/go/vt/vtgate/vtgateservice"
)
-type planExec func(ctx context.Context, plan *engine.Plan, vc *vcursorImpl, bindVars map[string]*querypb.BindVariable, startTime time.Time) error
+type planExec func(ctx context.Context, plan *engine.Plan, vc *econtext.VCursorImpl, bindVars map[string]*querypb.BindVariable, startTime time.Time) error
type txResult func(sqlparser.StatementType, *sqltypes.Result) error
var vschemaWaitTimeout = 30 * time.Second
@@ -56,10 +57,12 @@ func waitForNewerVSchema(ctx context.Context, e *Executor, lastVSchemaCreated ti
}
}
+const MaxBufferingRetries = 3
+
func (e *Executor) newExecute(
ctx context.Context,
mysqlCtx vtgateservice.MySQLConnection,
- safeSession *SafeSession,
+ safeSession *econtext.SafeSession,
sql string,
bindVars map[string]*querypb.BindVariable,
logStats *logstats.LogStats,
@@ -116,7 +119,7 @@ func (e *Executor) newExecute(
}
}
- vcursor, err := newVCursorImpl(safeSession, comments, e, logStats, e.vm, vs, e.resolver.resolver, e.serv, e.warnShardedOnly, e.pv)
+ vcursor, err := econtext.NewVCursorImpl(safeSession, comments, e, logStats, e.vm, vs, e.resolver.resolver, e.serv, nullResultsObserver{}, e.vConfig)
if err != nil {
return err
}
@@ -146,10 +149,8 @@ func (e *Executor) newExecute(
}
// set the overall query timeout if it is not already set
- if vcursor.queryTimeout > 0 && cancel == nil {
- ctx, cancel = context.WithTimeout(ctx, vcursor.queryTimeout)
- defer cancel()
- }
+ ctx, cancel = vcursor.GetContextWithTimeOut(ctx)
+ defer cancel()
result, err = e.handleTransactions(ctx, mysqlCtx, safeSession, plan, logStats, vcursor, stmt)
if err != nil {
@@ -225,10 +226,10 @@ func (e *Executor) newExecute(
func (e *Executor) handleTransactions(
ctx context.Context,
mysqlCtx vtgateservice.MySQLConnection,
- safeSession *SafeSession,
+ safeSession *econtext.SafeSession,
plan *engine.Plan,
logStats *logstats.LogStats,
- vcursor *vcursorImpl,
+ vcursor *econtext.VCursorImpl,
stmt sqlparser.Statement,
) (*sqltypes.Result, error) {
// We need to explicitly handle errors, and begin/commit/rollback, since these control transactions. Everything else
@@ -247,19 +248,19 @@ func (e *Executor) handleTransactions(
qr, err := e.handleSavepoint(ctx, safeSession, plan.Original, "Savepoint", logStats, func(_ string) (*sqltypes.Result, error) {
// Safely to ignore as there is no transaction.
return &sqltypes.Result{}, nil
- }, vcursor.ignoreMaxMemoryRows)
+ }, vcursor.IgnoreMaxMemoryRows())
return qr, err
case sqlparser.StmtSRollback:
qr, err := e.handleSavepoint(ctx, safeSession, plan.Original, "Rollback Savepoint", logStats, func(query string) (*sqltypes.Result, error) {
// Error as there is no transaction, so there is no savepoint that exists.
return nil, vterrors.NewErrorf(vtrpcpb.Code_NOT_FOUND, vterrors.SPDoesNotExist, "SAVEPOINT does not exist: %s", query)
- }, vcursor.ignoreMaxMemoryRows)
+ }, vcursor.IgnoreMaxMemoryRows())
return qr, err
case sqlparser.StmtRelease:
qr, err := e.handleSavepoint(ctx, safeSession, plan.Original, "Release Savepoint", logStats, func(query string) (*sqltypes.Result, error) {
// Error as there is no transaction, so there is no savepoint that exists.
return nil, vterrors.NewErrorf(vtrpcpb.Code_NOT_FOUND, vterrors.SPDoesNotExist, "SAVEPOINT does not exist: %s", query)
- }, vcursor.ignoreMaxMemoryRows)
+ }, vcursor.IgnoreMaxMemoryRows())
return qr, err
case sqlparser.StmtKill:
return e.handleKill(ctx, mysqlCtx, stmt, logStats)
@@ -267,7 +268,7 @@ func (e *Executor) handleTransactions(
return nil, nil
}
-func (e *Executor) startTxIfNecessary(ctx context.Context, safeSession *SafeSession) error {
+func (e *Executor) startTxIfNecessary(ctx context.Context, safeSession *econtext.SafeSession) error {
if !safeSession.Autocommit && !safeSession.InTransaction() {
if err := e.txConn.Begin(ctx, safeSession, nil); err != nil {
return err
@@ -276,7 +277,7 @@ func (e *Executor) startTxIfNecessary(ctx context.Context, safeSession *SafeSess
return nil
}
-func (e *Executor) insideTransaction(ctx context.Context, safeSession *SafeSession, logStats *logstats.LogStats, execPlan func() error) error {
+func (e *Executor) insideTransaction(ctx context.Context, safeSession *econtext.SafeSession, logStats *logstats.LogStats, execPlan func() error) error {
mustCommit := false
if safeSession.Autocommit && !safeSession.InTransaction() {
mustCommit = true
@@ -320,9 +321,9 @@ func (e *Executor) insideTransaction(ctx context.Context, safeSession *SafeSessi
func (e *Executor) executePlan(
ctx context.Context,
- safeSession *SafeSession,
+ safeSession *econtext.SafeSession,
plan *engine.Plan,
- vcursor *vcursorImpl,
+ vcursor *econtext.VCursorImpl,
bindVars map[string]*querypb.BindVariable,
logStats *logstats.LogStats,
execStart time.Time,
@@ -342,7 +343,7 @@ func (e *Executor) executePlan(
}
// rollbackExecIfNeeded rollbacks the partial execution if earlier it was detected that it needs partial query execution to be rolled back.
-func (e *Executor) rollbackExecIfNeeded(ctx context.Context, safeSession *SafeSession, bindVars map[string]*querypb.BindVariable, logStats *logstats.LogStats, err error) error {
+func (e *Executor) rollbackExecIfNeeded(ctx context.Context, safeSession *econtext.SafeSession, bindVars map[string]*querypb.BindVariable, logStats *logstats.LogStats, err error) error {
if safeSession.InTransaction() && safeSession.IsRollbackSet() {
rErr := e.rollbackPartialExec(ctx, safeSession, bindVars, logStats)
return vterrors.Wrap(err, rErr.Error())
@@ -353,7 +354,7 @@ func (e *Executor) rollbackExecIfNeeded(ctx context.Context, safeSession *SafeSe
// rollbackPartialExec rollbacks to the savepoint or rollbacks transaction based on the value set on SafeSession.rollbackOnPartialExec.
// Once, it is used the variable is reset.
// If it fails to rollback to the previous savepoint then, the transaction is forced to be rolled back.
-func (e *Executor) rollbackPartialExec(ctx context.Context, safeSession *SafeSession, bindVars map[string]*querypb.BindVariable, logStats *logstats.LogStats) error {
+func (e *Executor) rollbackPartialExec(ctx context.Context, safeSession *econtext.SafeSession, bindVars map[string]*querypb.BindVariable, logStats *logstats.LogStats) error {
var err error
var errMsg strings.Builder
@@ -367,8 +368,8 @@ func (e *Executor) rollbackPartialExec(ctx context.Context, safeSession *SafeSes
}
// needs to rollback only once.
- rQuery := safeSession.rollbackOnPartialExec
- if rQuery != txRollback {
+ rQuery := safeSession.GetRollbackOnPartialExec()
+ if rQuery != econtext.TxRollback {
safeSession.SavepointRollback()
_, _, err = e.execute(ctx, nil, safeSession, rQuery, bindVars, logStats)
// If no error, the revert is successful with the savepoint. Notify the reason as error to the client.
@@ -388,9 +389,9 @@ func (e *Executor) rollbackPartialExec(ctx context.Context, safeSession *SafeSes
return vterrors.New(vtrpcpb.Code_ABORTED, errMsg.String())
}
-func (e *Executor) setLogStats(logStats *logstats.LogStats, plan *engine.Plan, vcursor *vcursorImpl, execStart time.Time, err error, qr *sqltypes.Result) {
+func (e *Executor) setLogStats(logStats *logstats.LogStats, plan *engine.Plan, vcursor *econtext.VCursorImpl, execStart time.Time, err error, qr *sqltypes.Result) {
logStats.StmtType = plan.Type.String()
- logStats.ActiveKeyspace = vcursor.keyspace
+ logStats.ActiveKeyspace = vcursor.GetKeyspace()
logStats.TablesUsed = plan.TablesUsed
logStats.TabletType = vcursor.TabletType().String()
errCount := e.logExecutionEnd(logStats, execStart, plan, err, qr)
diff --git a/go/vt/vtgate/planbuilder/builder.go b/go/vt/vtgate/planbuilder/builder.go
index 27b994b1730..ca4ccb7ac5a 100644
--- a/go/vt/vtgate/planbuilder/builder.go
+++ b/go/vt/vtgate/planbuilder/builder.go
@@ -28,6 +28,7 @@ import (
topodatapb "vitess.io/vitess/go/vt/proto/topodata"
"vitess.io/vitess/go/vt/sqlparser"
"vitess.io/vitess/go/vt/vterrors"
+ "vitess.io/vitess/go/vt/vtgate/dynamicconfig"
"vitess.io/vitess/go/vt/vtgate/engine"
"vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext"
"vitess.io/vitess/go/vt/vtgate/vindexes"
@@ -63,6 +64,16 @@ func singleTable(ks, tbl string) string {
return fmt.Sprintf("%s.%s", ks, tbl)
}
+type staticConfig struct{}
+
+func (staticConfig) OnlineEnabled() bool {
+ return true
+}
+
+func (staticConfig) DirectEnabled() bool {
+ return true
+}
+
// TestBuilder builds a plan for a query based on the specified vschema.
// This method is only used from tests
func TestBuilder(query string, vschema plancontext.VSchema, keyspace string) (*engine.Plan, error) {
@@ -92,12 +103,12 @@ func TestBuilder(query string, vschema plancontext.VSchema, keyspace string) (*e
}
reservedVars := sqlparser.NewReservedVars("vtg", reserved)
- return BuildFromStmt(context.Background(), query, result.AST, reservedVars, vschema, result.BindVarNeeds, true, true)
+ return BuildFromStmt(context.Background(), query, result.AST, reservedVars, vschema, result.BindVarNeeds, staticConfig{})
}
// BuildFromStmt builds a plan based on the AST provided.
-func BuildFromStmt(ctx context.Context, query string, stmt sqlparser.Statement, reservedVars *sqlparser.ReservedVars, vschema plancontext.VSchema, bindVarNeeds *sqlparser.BindVarNeeds, enableOnlineDDL, enableDirectDDL bool) (*engine.Plan, error) {
- planResult, err := createInstructionFor(ctx, query, stmt, reservedVars, vschema, enableOnlineDDL, enableDirectDDL)
+func BuildFromStmt(ctx context.Context, query string, stmt sqlparser.Statement, reservedVars *sqlparser.ReservedVars, vschema plancontext.VSchema, bindVarNeeds *sqlparser.BindVarNeeds, cfg dynamicconfig.DDL) (*engine.Plan, error) {
+ planResult, err := createInstructionFor(ctx, query, stmt, reservedVars, vschema, cfg)
if err != nil {
return nil, err
}
@@ -154,7 +165,7 @@ func buildRoutePlan(stmt sqlparser.Statement, reservedVars *sqlparser.ReservedVa
return f(stmt, reservedVars, vschema)
}
-func createInstructionFor(ctx context.Context, query string, stmt sqlparser.Statement, reservedVars *sqlparser.ReservedVars, vschema plancontext.VSchema, enableOnlineDDL, enableDirectDDL bool) (*planResult, error) {
+func createInstructionFor(ctx context.Context, query string, stmt sqlparser.Statement, reservedVars *sqlparser.ReservedVars, vschema plancontext.VSchema, cfg dynamicconfig.DDL) (*planResult, error) {
switch stmt := stmt.(type) {
case *sqlparser.Select, *sqlparser.Insert, *sqlparser.Update, *sqlparser.Delete:
configuredPlanner, err := getConfiguredPlanner(vschema, stmt, query)
@@ -169,13 +180,13 @@ func createInstructionFor(ctx context.Context, query string, stmt sqlparser.Stat
}
return buildRoutePlan(stmt, reservedVars, vschema, configuredPlanner)
case sqlparser.DDLStatement:
- return buildGeneralDDLPlan(ctx, query, stmt, reservedVars, vschema, enableOnlineDDL, enableDirectDDL)
+ return buildGeneralDDLPlan(ctx, query, stmt, reservedVars, vschema, cfg)
case *sqlparser.AlterMigration:
- return buildAlterMigrationPlan(query, stmt, vschema, enableOnlineDDL)
+ return buildAlterMigrationPlan(query, stmt, vschema, cfg)
case *sqlparser.RevertMigration:
- return buildRevertMigrationPlan(query, stmt, vschema, enableOnlineDDL)
+ return buildRevertMigrationPlan(query, stmt, vschema, cfg)
case *sqlparser.ShowMigrationLogs:
- return buildShowMigrationLogsPlan(query, vschema, enableOnlineDDL)
+ return buildShowMigrationLogsPlan(query, vschema, cfg)
case *sqlparser.ShowThrottledApps:
return buildShowThrottledAppsPlan(query, vschema)
case *sqlparser.ShowThrottlerStatus:
@@ -189,7 +200,7 @@ func createInstructionFor(ctx context.Context, query string, stmt sqlparser.Stat
case *sqlparser.ExplainStmt:
return buildRoutePlan(stmt, reservedVars, vschema, buildExplainStmtPlan)
case *sqlparser.VExplainStmt:
- return buildVExplainPlan(ctx, stmt, reservedVars, vschema, enableOnlineDDL, enableDirectDDL)
+ return buildVExplainPlan(ctx, stmt, reservedVars, vschema, cfg)
case *sqlparser.OtherAdmin:
return buildOtherReadAndAdmin(query, vschema)
case *sqlparser.Analyze:
@@ -275,7 +286,7 @@ func buildDBDDLPlan(stmt sqlparser.Statement, _ *sqlparser.ReservedVars, vschema
dbDDLstmt := stmt.(sqlparser.DBDDLStatement)
ksName := dbDDLstmt.GetDatabaseName()
if ksName == "" {
- ks, err := vschema.DefaultKeyspace()
+ ks, err := vschema.SelectedKeyspace()
if err != nil {
return nil, err
}
@@ -310,7 +321,7 @@ func buildDBDDLPlan(stmt sqlparser.Statement, _ *sqlparser.ReservedVars, vschema
}
func buildLoadPlan(query string, vschema plancontext.VSchema) (*planResult, error) {
- keyspace, err := vschema.DefaultKeyspace()
+ keyspace, err := vschema.SelectedKeyspace()
if err != nil {
return nil, err
}
@@ -355,7 +366,7 @@ func buildFlushOptions(stmt *sqlparser.Flush, vschema plancontext.VSchema) (*pla
return nil, vterrors.VT09012("FLUSH", vschema.TabletType().String())
}
- keyspace, err := vschema.DefaultKeyspace()
+ keyspace, err := vschema.SelectedKeyspace()
if err != nil {
return nil, err
}
diff --git a/go/vt/vtgate/planbuilder/bypass.go b/go/vt/vtgate/planbuilder/bypass.go
index 62cae9655b1..d3384d509c1 100644
--- a/go/vt/vtgate/planbuilder/bypass.go
+++ b/go/vt/vtgate/planbuilder/bypass.go
@@ -26,7 +26,7 @@ import (
)
func buildPlanForBypass(stmt sqlparser.Statement, _ *sqlparser.ReservedVars, vschema plancontext.VSchema) (*planResult, error) {
- keyspace, err := vschema.DefaultKeyspace()
+ keyspace, err := vschema.SelectedKeyspace()
if err != nil {
return nil, err
}
diff --git a/go/vt/vtgate/planbuilder/collations_test.go b/go/vt/vtgate/planbuilder/collations_test.go
index b393e186679..0595039e673 100644
--- a/go/vt/vtgate/planbuilder/collations_test.go
+++ b/go/vt/vtgate/planbuilder/collations_test.go
@@ -41,15 +41,13 @@ type collationTestCase struct {
}
func (tc *collationTestCase) run(t *testing.T) {
- vschemaWrapper := &vschemawrapper.VSchemaWrapper{
- V: loadSchema(t, "vschemas/schema.json", false),
- SysVarEnabled: true,
- Version: Gen4,
- Env: vtenv.NewTestEnv(),
- }
+ env := vtenv.NewTestEnv()
+ vschema := loadSchema(t, "vschemas/schema.json", true)
+ vw, err := vschemawrapper.NewVschemaWrapper(env, vschema, TestBuilder)
+ require.NoError(t, err)
- tc.addCollationsToSchema(vschemaWrapper)
- plan, err := TestBuilder(tc.query, vschemaWrapper, vschemaWrapper.CurrentDb())
+ tc.addCollationsToSchema(vw)
+ plan, err := TestBuilder(tc.query, vw, vw.CurrentDb())
require.NoError(t, err)
tc.check(t, tc.collations, plan.Instructions)
}
diff --git a/go/vt/vtgate/planbuilder/ddl.go b/go/vt/vtgate/planbuilder/ddl.go
index f4b8ab6976f..a0045cec060 100644
--- a/go/vt/vtgate/planbuilder/ddl.go
+++ b/go/vt/vtgate/planbuilder/ddl.go
@@ -9,6 +9,7 @@ import (
vschemapb "vitess.io/vitess/go/vt/proto/vschema"
"vitess.io/vitess/go/vt/sqlparser"
"vitess.io/vitess/go/vt/vterrors"
+ "vitess.io/vitess/go/vt/vtgate/dynamicconfig"
"vitess.io/vitess/go/vt/vtgate/engine"
"vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext"
"vitess.io/vitess/go/vt/vtgate/vindexes"
@@ -43,11 +44,11 @@ func (fk *fkContraint) FkWalk(node sqlparser.SQLNode) (kontinue bool, err error)
// a session context. It's only when we Execute() the primitive that we have that context.
// This is why we return a compound primitive (DDL) which contains fully populated primitives (Send & OnlineDDL),
// and which chooses which of the two to invoke at runtime.
-func buildGeneralDDLPlan(ctx context.Context, sql string, ddlStatement sqlparser.DDLStatement, reservedVars *sqlparser.ReservedVars, vschema plancontext.VSchema, enableOnlineDDL, enableDirectDDL bool) (*planResult, error) {
+func buildGeneralDDLPlan(ctx context.Context, sql string, ddlStatement sqlparser.DDLStatement, reservedVars *sqlparser.ReservedVars, vschema plancontext.VSchema, cfg dynamicconfig.DDL) (*planResult, error) {
if vschema.Destination() != nil {
return buildByPassPlan(sql, vschema, true)
}
- normalDDLPlan, onlineDDLPlan, err := buildDDLPlans(ctx, sql, ddlStatement, reservedVars, vschema, enableOnlineDDL, enableDirectDDL)
+ normalDDLPlan, onlineDDLPlan, err := buildDDLPlans(ctx, sql, ddlStatement, reservedVars, vschema, cfg)
if err != nil {
return nil, err
}
@@ -61,15 +62,12 @@ func buildGeneralDDLPlan(ctx context.Context, sql string, ddlStatement sqlparser
}
eddl := &engine.DDL{
- Keyspace: normalDDLPlan.Keyspace,
- SQL: normalDDLPlan.Query,
- DDL: ddlStatement,
- NormalDDL: normalDDLPlan,
- OnlineDDL: onlineDDLPlan,
-
- DirectDDLEnabled: enableDirectDDL,
- OnlineDDLEnabled: enableOnlineDDL,
-
+ Keyspace: normalDDLPlan.Keyspace,
+ SQL: normalDDLPlan.Query,
+ DDL: ddlStatement,
+ NormalDDL: normalDDLPlan,
+ OnlineDDL: onlineDDLPlan,
+ Config: cfg,
CreateTempTable: ddlStatement.IsTemporary(),
}
tc := &tableCollector{}
@@ -81,7 +79,7 @@ func buildGeneralDDLPlan(ctx context.Context, sql string, ddlStatement sqlparser
}
func buildByPassPlan(sql string, vschema plancontext.VSchema, isDDL bool) (*planResult, error) {
- keyspace, err := vschema.DefaultKeyspace()
+ keyspace, err := vschema.SelectedKeyspace()
if err != nil {
return nil, err
}
@@ -94,7 +92,7 @@ func buildByPassPlan(sql string, vschema plancontext.VSchema, isDDL bool) (*plan
return newPlanResult(send), nil
}
-func buildDDLPlans(ctx context.Context, sql string, ddlStatement sqlparser.DDLStatement, reservedVars *sqlparser.ReservedVars, vschema plancontext.VSchema, enableOnlineDDL, enableDirectDDL bool) (*engine.Send, *engine.OnlineDDL, error) {
+func buildDDLPlans(ctx context.Context, sql string, ddlStatement sqlparser.DDLStatement, reservedVars *sqlparser.ReservedVars, vschema plancontext.VSchema, cfg dynamicconfig.DDL) (*engine.Send, *engine.OnlineDDL, error) {
var destination key.Destination
var keyspace *vindexes.Keyspace
var err error
@@ -113,9 +111,9 @@ func buildDDLPlans(ctx context.Context, sql string, ddlStatement sqlparser.DDLSt
}
err = checkFKError(vschema, ddlStatement, keyspace)
case *sqlparser.CreateView:
- destination, keyspace, err = buildCreateViewCommon(ctx, vschema, reservedVars, enableOnlineDDL, enableDirectDDL, ddl.Select, ddl)
+ destination, keyspace, err = buildCreateViewCommon(ctx, vschema, reservedVars, cfg, ddl.Select, ddl)
case *sqlparser.AlterView:
- destination, keyspace, err = buildCreateViewCommon(ctx, vschema, reservedVars, enableOnlineDDL, enableDirectDDL, ddl.Select, ddl)
+ destination, keyspace, err = buildCreateViewCommon(ctx, vschema, reservedVars, cfg, ddl.Select, ddl)
case *sqlparser.DropView:
destination, keyspace, err = buildDropView(vschema, ddlStatement)
case *sqlparser.DropTable:
@@ -197,7 +195,7 @@ func buildCreateViewCommon(
ctx context.Context,
vschema plancontext.VSchema,
reservedVars *sqlparser.ReservedVars,
- enableOnlineDDL, enableDirectDDL bool,
+ cfg dynamicconfig.DDL,
ddlSelect sqlparser.SelectStatement,
ddl sqlparser.DDLStatement,
) (key.Destination, *vindexes.Keyspace, error) {
@@ -214,7 +212,7 @@ func buildCreateViewCommon(
expressions = append(expressions, sqlparser.Clone(p.SelectExprs))
return nil
})
- selectPlan, err := createInstructionFor(ctx, sqlparser.String(ddlSelect), ddlSelect, reservedVars, vschema, enableOnlineDDL, enableDirectDDL)
+ selectPlan, err := createInstructionFor(ctx, sqlparser.String(ddlSelect), ddlSelect, reservedVars, vschema, cfg)
if err != nil {
return nil, nil, err
}
diff --git a/go/vt/vtgate/planbuilder/migration.go b/go/vt/vtgate/planbuilder/migration.go
index 6fb73a9039d..e64b990aa6b 100644
--- a/go/vt/vtgate/planbuilder/migration.go
+++ b/go/vt/vtgate/planbuilder/migration.go
@@ -27,6 +27,7 @@ import (
"vitess.io/vitess/go/vt/schema"
"vitess.io/vitess/go/vt/sqlparser"
"vitess.io/vitess/go/vt/vterrors"
+ "vitess.io/vitess/go/vt/vtgate/dynamicconfig"
"vitess.io/vitess/go/vt/vtgate/engine"
"vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext"
"vitess.io/vitess/go/vt/vtgate/vindexes"
@@ -80,8 +81,8 @@ func buildAlterMigrationThrottleAppPlan(query string, alterMigration *sqlparser.
}), nil
}
-func buildAlterMigrationPlan(query string, alterMigration *sqlparser.AlterMigration, vschema plancontext.VSchema, enableOnlineDDL bool) (*planResult, error) {
- if !enableOnlineDDL {
+func buildAlterMigrationPlan(query string, alterMigration *sqlparser.AlterMigration, vschema plancontext.VSchema, cfg dynamicconfig.DDL) (*planResult, error) {
+ if !cfg.OnlineEnabled() {
return nil, schema.ErrOnlineDDLDisabled
}
@@ -118,8 +119,8 @@ func buildAlterMigrationPlan(query string, alterMigration *sqlparser.AlterMigrat
return newPlanResult(send), nil
}
-func buildRevertMigrationPlan(query string, stmt *sqlparser.RevertMigration, vschema plancontext.VSchema, enableOnlineDDL bool) (*planResult, error) {
- if !enableOnlineDDL {
+func buildRevertMigrationPlan(query string, stmt *sqlparser.RevertMigration, vschema plancontext.VSchema, cfg dynamicconfig.DDL) (*planResult, error) {
+ if !cfg.OnlineEnabled() {
return nil, schema.ErrOnlineDDLDisabled
}
dest, ks, tabletType, err := vschema.TargetDestination("")
@@ -147,8 +148,8 @@ func buildRevertMigrationPlan(query string, stmt *sqlparser.RevertMigration, vsc
return newPlanResult(emig), nil
}
-func buildShowMigrationLogsPlan(query string, vschema plancontext.VSchema, enableOnlineDDL bool) (*planResult, error) {
- if !enableOnlineDDL {
+func buildShowMigrationLogsPlan(query string, vschema plancontext.VSchema, cfg dynamicconfig.DDL) (*planResult, error) {
+ if !cfg.OnlineEnabled() {
return nil, schema.ErrOnlineDDLDisabled
}
dest, ks, tabletType, err := vschema.TargetDestination("")
diff --git a/go/vt/vtgate/planbuilder/operator_transformers.go b/go/vt/vtgate/planbuilder/operator_transformers.go
index a22719b4489..df14745e6b2 100644
--- a/go/vt/vtgate/planbuilder/operator_transformers.go
+++ b/go/vt/vtgate/planbuilder/operator_transformers.go
@@ -545,7 +545,7 @@ func routeToEngineRoute(ctx *plancontext.PlanningContext, op *operators.Route, h
}
func newRoutingParams(ctx *plancontext.PlanningContext, opCode engine.Opcode) *engine.RoutingParameters {
- ks, _ := ctx.VSchema.DefaultKeyspace()
+ ks, _ := ctx.VSchema.SelectedKeyspace()
if ks == nil {
// if we don't have a selected keyspace, any keyspace will do
// this is used by operators that do not set the keyspace
diff --git a/go/vt/vtgate/planbuilder/operators/delete.go b/go/vt/vtgate/planbuilder/operators/delete.go
index 4d30d9b9cc1..81e36d54315 100644
--- a/go/vt/vtgate/planbuilder/operators/delete.go
+++ b/go/vt/vtgate/planbuilder/operators/delete.go
@@ -328,7 +328,7 @@ func updateQueryGraphWithSource(ctx *plancontext.PlanningContext, input Operator
if tbl.ID != tblID {
continue
}
- tbl.Alias = sqlparser.NewAliasedTableExpr(sqlparser.NewTableName(vTbl.Name.String()), tbl.Alias.As.String())
+ tbl.Alias = sqlparser.NewAliasedTableExpr(sqlparser.NewTableNameWithQualifier(vTbl.Name.String(), vTbl.Keyspace.Name), tbl.Alias.As.String())
tbl.Table, _ = tbl.Alias.TableName()
}
return op, Rewrote("change query table point to source table")
diff --git a/go/vt/vtgate/planbuilder/plan_test.go b/go/vt/vtgate/planbuilder/plan_test.go
index 9cf92a91ddf..7135f4dff29 100644
--- a/go/vt/vtgate/planbuilder/plan_test.go
+++ b/go/vt/vtgate/planbuilder/plan_test.go
@@ -74,17 +74,16 @@ func TestPlanTestSuite(t *testing.T) {
func (s *planTestSuite) TestPlan() {
defer utils.EnsureNoLeaks(s.T())
- vschemaWrapper := &vschemawrapper.VSchemaWrapper{
- V: loadSchema(s.T(), "vschemas/schema.json", true),
- TabletType_: topodatapb.TabletType_PRIMARY,
- SysVarEnabled: true,
- TestBuilder: TestBuilder,
- Env: vtenv.NewTestEnv(),
- }
- s.addPKs(vschemaWrapper.V, "user", []string{"user", "music"})
- s.addPKsProvided(vschemaWrapper.V, "user", []string{"user_extra"}, []string{"id", "user_id"})
- s.addPKsProvided(vschemaWrapper.V, "ordering", []string{"order"}, []string{"oid", "region_id"})
- s.addPKsProvided(vschemaWrapper.V, "ordering", []string{"order_event"}, []string{"oid", "ename"})
+
+ env := vtenv.NewTestEnv()
+ vschema := loadSchema(s.T(), "vschemas/schema.json", true)
+ vw, err := vschemawrapper.NewVschemaWrapper(env, vschema, TestBuilder)
+ require.NoError(s.T(), err)
+
+ s.addPKs(vschema, "user", []string{"user", "music"})
+ s.addPKsProvided(vschema, "user", []string{"user_extra"}, []string{"id", "user_id"})
+ s.addPKsProvided(vschema, "ordering", []string{"order"}, []string{"oid", "region_id"})
+ s.addPKsProvided(vschema, "ordering", []string{"order_event"}, []string{"oid", "ename"})
// You will notice that some tests expect user.Id instead of user.id.
// This is because we now pre-create vindex columns in the symbol
@@ -92,77 +91,73 @@ func (s *planTestSuite) TestPlan() {
// the column is named as Id. This is to make sure that
// column names are case-preserved, but treated as
// case-insensitive even if they come from the vschema.
- s.testFile("aggr_cases.json", vschemaWrapper, false)
- s.testFile("dml_cases.json", vschemaWrapper, false)
- s.testFile("from_cases.json", vschemaWrapper, false)
- s.testFile("filter_cases.json", vschemaWrapper, false)
- s.testFile("postprocess_cases.json", vschemaWrapper, false)
- s.testFile("select_cases.json", vschemaWrapper, false)
- s.testFile("symtab_cases.json", vschemaWrapper, false)
- s.testFile("unsupported_cases.json", vschemaWrapper, false)
- s.testFile("unknown_schema_cases.json", vschemaWrapper, false)
- s.testFile("vindex_func_cases.json", vschemaWrapper, false)
- s.testFile("wireup_cases.json", vschemaWrapper, false)
- s.testFile("memory_sort_cases.json", vschemaWrapper, false)
- s.testFile("use_cases.json", vschemaWrapper, false)
- s.testFile("set_cases.json", vschemaWrapper, false)
- s.testFile("union_cases.json", vschemaWrapper, false)
- s.testFile("large_union_cases.json", vschemaWrapper, false)
- s.testFile("transaction_cases.json", vschemaWrapper, false)
- s.testFile("lock_cases.json", vschemaWrapper, false)
- s.testFile("large_cases.json", vschemaWrapper, false)
- s.testFile("ddl_cases_no_default_keyspace.json", vschemaWrapper, false)
- s.testFile("flush_cases_no_default_keyspace.json", vschemaWrapper, false)
- s.testFile("show_cases_no_default_keyspace.json", vschemaWrapper, false)
- s.testFile("stream_cases.json", vschemaWrapper, false)
- s.testFile("info_schema80_cases.json", vschemaWrapper, false)
- s.testFile("reference_cases.json", vschemaWrapper, false)
- s.testFile("vexplain_cases.json", vschemaWrapper, false)
- s.testFile("misc_cases.json", vschemaWrapper, false)
- s.testFile("cte_cases.json", vschemaWrapper, false)
+ s.testFile("aggr_cases.json", vw, false)
+ s.testFile("dml_cases.json", vw, false)
+ s.testFile("from_cases.json", vw, false)
+ s.testFile("filter_cases.json", vw, false)
+ s.testFile("postprocess_cases.json", vw, false)
+ s.testFile("select_cases.json", vw, false)
+ s.testFile("symtab_cases.json", vw, false)
+ s.testFile("unsupported_cases.json", vw, false)
+ s.testFile("unknown_schema_cases.json", vw, false)
+ s.testFile("vindex_func_cases.json", vw, false)
+ s.testFile("wireup_cases.json", vw, false)
+ s.testFile("memory_sort_cases.json", vw, false)
+ s.testFile("use_cases.json", vw, false)
+ s.testFile("set_cases.json", vw, false)
+ s.testFile("union_cases.json", vw, false)
+ s.testFile("large_union_cases.json", vw, false)
+ s.testFile("transaction_cases.json", vw, false)
+ s.testFile("lock_cases.json", vw, false)
+ s.testFile("large_cases.json", vw, false)
+ s.testFile("ddl_cases_no_default_keyspace.json", vw, false)
+ s.testFile("flush_cases_no_default_keyspace.json", vw, false)
+ s.testFile("show_cases_no_default_keyspace.json", vw, false)
+ s.testFile("stream_cases.json", vw, false)
+ s.testFile("info_schema80_cases.json", vw, false)
+ s.testFile("reference_cases.json", vw, false)
+ s.testFile("vexplain_cases.json", vw, false)
+ s.testFile("misc_cases.json", vw, false)
+ s.testFile("cte_cases.json", vw, false)
}
// TestForeignKeyPlanning tests the planning of foreign keys in a managed mode by Vitess.
func (s *planTestSuite) TestForeignKeyPlanning() {
+ env := vtenv.NewTestEnv()
vschema := loadSchema(s.T(), "vschemas/schema.json", true)
- s.setFks(vschema)
- vschemaWrapper := &vschemawrapper.VSchemaWrapper{
- V: vschema,
- TestBuilder: TestBuilder,
- Env: vtenv.NewTestEnv(),
- }
+ vw, err := vschemawrapper.NewVschemaWrapper(env, vschema, TestBuilder)
+ require.NoError(s.T(), err)
- s.testFile("foreignkey_cases.json", vschemaWrapper, false)
+ s.setFks(vschema)
+ s.testFile("foreignkey_cases.json", vw, false)
}
// TestForeignKeyChecksOn tests the planning when the session variable for foreign_key_checks is set to ON.
func (s *planTestSuite) TestForeignKeyChecksOn() {
+ env := vtenv.NewTestEnv()
vschema := loadSchema(s.T(), "vschemas/schema.json", true)
- s.setFks(vschema)
+ vw, err := vschemawrapper.NewVschemaWrapper(env, vschema, TestBuilder)
+ require.NoError(s.T(), err)
+
fkChecksState := true
- vschemaWrapper := &vschemawrapper.VSchemaWrapper{
- V: vschema,
- TestBuilder: TestBuilder,
- ForeignKeyChecksState: &fkChecksState,
- Env: vtenv.NewTestEnv(),
- }
+ vw.ForeignKeyChecksState = &fkChecksState
- s.testFile("foreignkey_checks_on_cases.json", vschemaWrapper, false)
+ s.setFks(vschema)
+ s.testFile("foreignkey_checks_on_cases.json", vw, false)
}
// TestForeignKeyChecksOff tests the planning when the session variable for foreign_key_checks is set to OFF.
func (s *planTestSuite) TestForeignKeyChecksOff() {
+ env := vtenv.NewTestEnv()
vschema := loadSchema(s.T(), "vschemas/schema.json", true)
- s.setFks(vschema)
+ vw, err := vschemawrapper.NewVschemaWrapper(env, vschema, TestBuilder)
+ require.NoError(s.T(), err)
+
fkChecksState := false
- vschemaWrapper := &vschemawrapper.VSchemaWrapper{
- V: vschema,
- TestBuilder: TestBuilder,
- ForeignKeyChecksState: &fkChecksState,
- Env: vtenv.NewTestEnv(),
- }
+ vw.ForeignKeyChecksState = &fkChecksState
- s.testFile("foreignkey_checks_off_cases.json", vschemaWrapper, false)
+ s.setFks(vschema)
+ s.testFile("foreignkey_checks_off_cases.json", vw, false)
}
func (s *planTestSuite) setFks(vschema *vindexes.VSchema) {
@@ -266,120 +261,127 @@ func (s *planTestSuite) TestSystemTables57() {
MySQLServerVersion: "5.7.9",
})
require.NoError(s.T(), err)
- vschemaWrapper := &vschemawrapper.VSchemaWrapper{
- V: loadSchema(s.T(), "vschemas/schema.json", true),
- Env: env,
- }
- s.testFile("info_schema57_cases.json", vschemaWrapper, false)
+ vschema := loadSchema(s.T(), "vschemas/schema.json", true)
+ vw, err := vschemawrapper.NewVschemaWrapper(env, vschema, TestBuilder)
+ require.NoError(s.T(), err)
+
+ s.testFile("info_schema57_cases.json", vw, false)
}
func (s *planTestSuite) TestSysVarSetDisabled() {
- vschemaWrapper := &vschemawrapper.VSchemaWrapper{
- V: loadSchema(s.T(), "vschemas/schema.json", true),
- SysVarEnabled: false,
- Env: vtenv.NewTestEnv(),
- }
+ env := vtenv.NewTestEnv()
+ vschema := loadSchema(s.T(), "vschemas/schema.json", true)
+ vw, err := vschemawrapper.NewVschemaWrapper(env, vschema, TestBuilder)
+ require.NoError(s.T(), err)
+
+ vw.SysVarEnabled = false
- s.testFile("set_sysvar_disabled_cases.json", vschemaWrapper, false)
+ s.testFile("set_sysvar_disabled_cases.json", vw, false)
}
func (s *planTestSuite) TestViews() {
- vschemaWrapper := &vschemawrapper.VSchemaWrapper{
- V: loadSchema(s.T(), "vschemas/schema.json", true),
- EnableViews: true,
- Env: vtenv.NewTestEnv(),
- }
+ env := vtenv.NewTestEnv()
+ vschema := loadSchema(s.T(), "vschemas/schema.json", true)
+ vw, err := vschemawrapper.NewVschemaWrapper(env, vschema, TestBuilder)
+ require.NoError(s.T(), err)
+
+ vw.EnableViews = true
- s.testFile("view_cases.json", vschemaWrapper, false)
+ s.testFile("view_cases.json", vw, false)
}
func (s *planTestSuite) TestOne() {
reset := operators.EnableDebugPrinting()
defer reset()
- lv := loadSchema(s.T(), "vschemas/schema.json", true)
- s.setFks(lv)
- s.addPKs(lv, "user", []string{"user", "music"})
- s.addPKs(lv, "main", []string{"unsharded"})
- s.addPKsProvided(lv, "user", []string{"user_extra"}, []string{"id", "user_id"})
- s.addPKsProvided(lv, "ordering", []string{"order"}, []string{"oid", "region_id"})
- s.addPKsProvided(lv, "ordering", []string{"order_event"}, []string{"oid", "ename"})
- vschema := &vschemawrapper.VSchemaWrapper{
- V: lv,
- TestBuilder: TestBuilder,
- Env: vtenv.NewTestEnv(),
- }
+ env := vtenv.NewTestEnv()
+ vschema := loadSchema(s.T(), "vschemas/schema.json", true)
+ vw, err := vschemawrapper.NewVschemaWrapper(env, vschema, TestBuilder)
+ require.NoError(s.T(), err)
- s.testFile("onecase.json", vschema, false)
+ s.setFks(vschema)
+ s.addPKs(vschema, "user", []string{"user", "music"})
+ s.addPKs(vschema, "main", []string{"unsharded"})
+ s.addPKsProvided(vschema, "user", []string{"user_extra"}, []string{"id", "user_id"})
+ s.addPKsProvided(vschema, "ordering", []string{"order"}, []string{"oid", "region_id"})
+ s.addPKsProvided(vschema, "ordering", []string{"order_event"}, []string{"oid", "ename"})
+
+ s.testFile("onecase.json", vw, false)
}
func (s *planTestSuite) TestOneTPCC() {
reset := operators.EnableDebugPrinting()
defer reset()
- vschema := &vschemawrapper.VSchemaWrapper{
- V: loadSchema(s.T(), "vschemas/tpcc_schema.json", true),
- Env: vtenv.NewTestEnv(),
- }
+ env := vtenv.NewTestEnv()
+ vschema := loadSchema(s.T(), "vschemas/tpcc_schema.json", true)
+ vw, err := vschemawrapper.NewVschemaWrapper(env, vschema, TestBuilder)
+ require.NoError(s.T(), err)
- s.testFile("onecase.json", vschema, false)
+ s.testFile("onecase.json", vw, false)
}
func (s *planTestSuite) TestOneWithMainAsDefault() {
reset := operators.EnableDebugPrinting()
defer reset()
- vschema := &vschemawrapper.VSchemaWrapper{
- V: loadSchema(s.T(), "vschemas/schema.json", true),
- Keyspace: &vindexes.Keyspace{
- Name: "main",
- Sharded: false,
- },
- Env: vtenv.NewTestEnv(),
- }
- s.testFile("onecase.json", vschema, false)
+ env := vtenv.NewTestEnv()
+ vschema := loadSchema(s.T(), "vschemas/schema.json", true)
+ vw, err := vschemawrapper.NewVschemaWrapper(env, vschema, TestBuilder)
+ require.NoError(s.T(), err)
+
+ vw.Vcursor.SetTarget("main")
+ vw.Keyspace = &vindexes.Keyspace{Name: "main"}
+
+ s.testFile("onecase.json", vw, false)
}
func (s *planTestSuite) TestOneWithSecondUserAsDefault() {
reset := operators.EnableDebugPrinting()
defer reset()
- vschema := &vschemawrapper.VSchemaWrapper{
- V: loadSchema(s.T(), "vschemas/schema.json", true),
- Keyspace: &vindexes.Keyspace{
- Name: "second_user",
- Sharded: true,
- },
- Env: vtenv.NewTestEnv(),
+
+ env := vtenv.NewTestEnv()
+ vschema := loadSchema(s.T(), "vschemas/schema.json", true)
+ vw, err := vschemawrapper.NewVschemaWrapper(env, vschema, TestBuilder)
+ require.NoError(s.T(), err)
+
+ vw.Vcursor.SetTarget("second_user")
+ vw.Keyspace = &vindexes.Keyspace{
+ Name: "second_user",
+ Sharded: true,
}
- s.testFile("onecase.json", vschema, false)
+ s.testFile("onecase.json", vw, false)
}
func (s *planTestSuite) TestOneWithUserAsDefault() {
reset := operators.EnableDebugPrinting()
defer reset()
- vschema := &vschemawrapper.VSchemaWrapper{
- V: loadSchema(s.T(), "vschemas/schema.json", true),
- Keyspace: &vindexes.Keyspace{
- Name: "user",
- Sharded: true,
- },
- Env: vtenv.NewTestEnv(),
+
+ env := vtenv.NewTestEnv()
+ vschema := loadSchema(s.T(), "vschemas/schema.json", true)
+ vw, err := vschemawrapper.NewVschemaWrapper(env, vschema, TestBuilder)
+ require.NoError(s.T(), err)
+
+ vw.Vcursor.SetTarget("user")
+ vw.Keyspace = &vindexes.Keyspace{
+ Name: "user",
+ Sharded: true,
}
- s.testFile("onecase.json", vschema, false)
+ s.testFile("onecase.json", vw, false)
}
func (s *planTestSuite) TestOneWithTPCHVSchema() {
reset := operators.EnableDebugPrinting()
defer reset()
- vschema := &vschemawrapper.VSchemaWrapper{
- V: loadSchema(s.T(), "vschemas/tpch_schema.json", true),
- SysVarEnabled: true,
- Env: vtenv.NewTestEnv(),
- }
- s.testFile("onecase.json", vschema, false)
+ env := vtenv.NewTestEnv()
+ vschema := loadSchema(s.T(), "vschemas/schema.json", true)
+ vw, err := vschemawrapper.NewVschemaWrapper(env, vschema, TestBuilder)
+ require.NoError(s.T(), err)
+
+ s.testFile("onecase.json", vw, false)
}
func (s *planTestSuite) TestOneWith57Version() {
@@ -390,52 +392,47 @@ func (s *planTestSuite) TestOneWith57Version() {
MySQLServerVersion: "5.7.9",
})
require.NoError(s.T(), err)
- vschema := &vschemawrapper.VSchemaWrapper{
- V: loadSchema(s.T(), "vschemas/schema.json", true),
- Env: env,
- }
+ vschema := loadSchema(s.T(), "vschemas/schema.json", true)
+ vw, err := vschemawrapper.NewVschemaWrapper(env, vschema, TestBuilder)
+ require.NoError(s.T(), err)
- s.testFile("onecase.json", vschema, false)
+ s.testFile("onecase.json", vw, false)
}
func (s *planTestSuite) TestRubyOnRailsQueries() {
- vschemaWrapper := &vschemawrapper.VSchemaWrapper{
- V: loadSchema(s.T(), "vschemas/rails_schema.json", true),
- SysVarEnabled: true,
- Env: vtenv.NewTestEnv(),
- }
+ env := vtenv.NewTestEnv()
+ vschema := loadSchema(s.T(), "vschemas/rails_schema.json", true)
+ vw, err := vschemawrapper.NewVschemaWrapper(env, vschema, TestBuilder)
+ require.NoError(s.T(), err)
- s.testFile("rails_cases.json", vschemaWrapper, false)
+ s.testFile("rails_cases.json", vw, false)
}
func (s *planTestSuite) TestOLTP() {
- vschemaWrapper := &vschemawrapper.VSchemaWrapper{
- V: loadSchema(s.T(), "vschemas/oltp_schema.json", true),
- SysVarEnabled: true,
- Env: vtenv.NewTestEnv(),
- }
+ env := vtenv.NewTestEnv()
+ vschema := loadSchema(s.T(), "vschemas/oltp_schema.json", true)
+ vw, err := vschemawrapper.NewVschemaWrapper(env, vschema, TestBuilder)
+ require.NoError(s.T(), err)
- s.testFile("oltp_cases.json", vschemaWrapper, false)
+ s.testFile("oltp_cases.json", vw, false)
}
func (s *planTestSuite) TestTPCC() {
- vschemaWrapper := &vschemawrapper.VSchemaWrapper{
- V: loadSchema(s.T(), "vschemas/tpcc_schema.json", true),
- SysVarEnabled: true,
- Env: vtenv.NewTestEnv(),
- }
+ env := vtenv.NewTestEnv()
+ vschema := loadSchema(s.T(), "vschemas/tpcc_schema.json", true)
+ vw, err := vschemawrapper.NewVschemaWrapper(env, vschema, TestBuilder)
+ require.NoError(s.T(), err)
- s.testFile("tpcc_cases.json", vschemaWrapper, false)
+ s.testFile("tpcc_cases.json", vw, false)
}
func (s *planTestSuite) TestTPCH() {
- vschemaWrapper := &vschemawrapper.VSchemaWrapper{
- V: loadSchema(s.T(), "vschemas/tpch_schema.json", true),
- SysVarEnabled: true,
- Env: vtenv.NewTestEnv(),
- }
+ env := vtenv.NewTestEnv()
+ vschema := loadSchema(s.T(), "vschemas/tpch_schema.json", true)
+ vw, err := vschemawrapper.NewVschemaWrapper(env, vschema, TestBuilder)
+ require.NoError(s.T(), err)
- s.testFile("tpch_cases.json", vschemaWrapper, false)
+ s.testFile("tpch_cases.json", vw, false)
}
func BenchmarkOLTP(b *testing.B) {
@@ -451,15 +448,14 @@ func BenchmarkTPCH(b *testing.B) {
}
func benchmarkWorkload(b *testing.B, name string) {
- vschemaWrapper := &vschemawrapper.VSchemaWrapper{
- V: loadSchema(b, "vschemas/"+name+"_schema.json", true),
- SysVarEnabled: true,
- Env: vtenv.NewTestEnv(),
- }
+ env := vtenv.NewTestEnv()
+ vschema := loadSchema(b, "vschemas/"+name+"_schema.json", true)
+ vw, err := vschemawrapper.NewVschemaWrapper(env, vschema, TestBuilder)
+ require.NoError(b, err)
testCases := readJSONTests(name + "_cases.json")
b.ResetTimer()
- benchmarkPlanner(b, Gen4, testCases, vschemaWrapper)
+ benchmarkPlanner(b, Gen4, testCases, vw)
}
func (s *planTestSuite) TestBypassPlanningShardTargetFromFile() {
@@ -478,35 +474,33 @@ func (s *planTestSuite) TestBypassPlanningShardTargetFromFile() {
}
func (s *planTestSuite) TestBypassPlanningKeyrangeTargetFromFile() {
+ env := vtenv.NewTestEnv()
+ vschema := loadSchema(s.T(), "vschemas/schema.json", true)
+ vw, err := vschemawrapper.NewVschemaWrapper(env, vschema, TestBuilder)
+ require.NoError(s.T(), err)
+
keyRange, _ := key.ParseShardingSpec("-")
+ vw.Dest = key.DestinationExactKeyRange{KeyRange: keyRange[0]}
- vschema := &vschemawrapper.VSchemaWrapper{
- V: loadSchema(s.T(), "vschemas/schema.json", true),
- Keyspace: &vindexes.Keyspace{
- Name: "main",
- Sharded: false,
- },
- TabletType_: topodatapb.TabletType_PRIMARY,
- Dest: key.DestinationExactKeyRange{KeyRange: keyRange[0]},
- Env: vtenv.NewTestEnv(),
- }
+ vw.Vcursor.SetTarget("main")
+ vw.Keyspace = &vindexes.Keyspace{Name: "main"}
- s.testFile("bypass_keyrange_cases.json", vschema, false)
+ s.testFile("bypass_keyrange_cases.json", vw, false)
}
func (s *planTestSuite) TestWithDefaultKeyspaceFromFile() {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
+
// We are testing this separately so we can set a default keyspace
- vschema := &vschemawrapper.VSchemaWrapper{
- V: loadSchema(s.T(), "vschemas/schema.json", true),
- Keyspace: &vindexes.Keyspace{
- Name: "main",
- Sharded: false,
- },
- TabletType_: topodatapb.TabletType_PRIMARY,
- Env: vtenv.NewTestEnv(),
- }
+ env := vtenv.NewTestEnv()
+ vschema := loadSchema(s.T(), "vschemas/schema.json", true)
+ vw, err := vschemawrapper.NewVschemaWrapper(env, vschema, TestBuilder)
+ require.NoError(s.T(), err)
+
+ vw.Vcursor.SetTarget("main")
+ vw.Keyspace = &vindexes.Keyspace{Name: "main"}
+
ts := memorytopo.NewServer(ctx, "cell1")
ts.CreateKeyspace(ctx, "main", &topodatapb.Keyspace{})
ts.CreateKeyspace(ctx, "user", &topodatapb.Keyspace{})
@@ -521,96 +515,92 @@ func (s *planTestSuite) TestWithDefaultKeyspaceFromFile() {
})
require.True(s.T(), created)
- s.testFile("alterVschema_cases.json", vschema, false)
- s.testFile("ddl_cases.json", vschema, false)
- s.testFile("migration_cases.json", vschema, false)
- s.testFile("flush_cases.json", vschema, false)
- s.testFile("show_cases.json", vschema, false)
- s.testFile("call_cases.json", vschema, false)
+ s.testFile("alterVschema_cases.json", vw, false)
+ s.testFile("ddl_cases.json", vw, false)
+ s.testFile("migration_cases.json", vw, false)
+ s.testFile("flush_cases.json", vw, false)
+ s.testFile("show_cases.json", vw, false)
+ s.testFile("call_cases.json", vw, false)
}
func (s *planTestSuite) TestWithDefaultKeyspaceFromFileSharded() {
// We are testing this separately so we can set a default keyspace
- vschema := &vschemawrapper.VSchemaWrapper{
- V: loadSchema(s.T(), "vschemas/schema.json", true),
- Keyspace: &vindexes.Keyspace{
- Name: "second_user",
- Sharded: true,
- },
- TabletType_: topodatapb.TabletType_PRIMARY,
- Env: vtenv.NewTestEnv(),
+ env := vtenv.NewTestEnv()
+ vschema := loadSchema(s.T(), "vschemas/schema.json", true)
+ vw, err := vschemawrapper.NewVschemaWrapper(env, vschema, TestBuilder)
+ require.NoError(s.T(), err)
+
+ vw.Vcursor.SetTarget("second_user")
+ vw.Keyspace = &vindexes.Keyspace{
+ Name: "second_user",
+ Sharded: true,
}
- s.testFile("select_cases_with_default.json", vschema, false)
+ s.testFile("select_cases_with_default.json", vw, false)
}
func (s *planTestSuite) TestWithUserDefaultKeyspaceFromFileSharded() {
// We are testing this separately so we can set a default keyspace
- vschema := &vschemawrapper.VSchemaWrapper{
- V: loadSchema(s.T(), "vschemas/schema.json", true),
- Keyspace: &vindexes.Keyspace{
- Name: "user",
- Sharded: true,
- },
- TabletType_: topodatapb.TabletType_PRIMARY,
- Env: vtenv.NewTestEnv(),
+ env := vtenv.NewTestEnv()
+ vschema := loadSchema(s.T(), "vschemas/schema.json", true)
+ vw, err := vschemawrapper.NewVschemaWrapper(env, vschema, TestBuilder)
+ require.NoError(s.T(), err)
+
+ vw.Vcursor.SetTarget("user")
+ vw.Keyspace = &vindexes.Keyspace{
+ Name: "user",
+ Sharded: true,
}
- s.testFile("select_cases_with_user_as_default.json", vschema, false)
+ s.testFile("select_cases_with_user_as_default.json", vw, false)
+ s.testFile("dml_cases_with_user_as_default.json", vw, false)
}
func (s *planTestSuite) TestWithSystemSchemaAsDefaultKeyspace() {
// We are testing this separately so we can set a default keyspace
- vschema := &vschemawrapper.VSchemaWrapper{
- V: loadSchema(s.T(), "vschemas/schema.json", true),
- Keyspace: &vindexes.Keyspace{Name: "information_schema"},
- TabletType_: topodatapb.TabletType_PRIMARY,
- Env: vtenv.NewTestEnv(),
- }
+ env := vtenv.NewTestEnv()
+ vschema := loadSchema(s.T(), "vschemas/schema.json", true)
+ vw, err := vschemawrapper.NewVschemaWrapper(env, vschema, TestBuilder)
+ require.NoError(s.T(), err)
- s.testFile("sysschema_default.json", vschema, false)
+ vw.Keyspace = &vindexes.Keyspace{Name: "information_schema"}
+
+ s.testFile("sysschema_default.json", vw, false)
}
func (s *planTestSuite) TestOtherPlanningFromFile() {
// We are testing this separately so we can set a default keyspace
- vschema := &vschemawrapper.VSchemaWrapper{
- V: loadSchema(s.T(), "vschemas/schema.json", true),
- Keyspace: &vindexes.Keyspace{
- Name: "main",
- Sharded: false,
- },
- TabletType_: topodatapb.TabletType_PRIMARY,
- Env: vtenv.NewTestEnv(),
- }
+ env := vtenv.NewTestEnv()
+ vschema := loadSchema(s.T(), "vschemas/schema.json", true)
+ vw, err := vschemawrapper.NewVschemaWrapper(env, vschema, TestBuilder)
+ require.NoError(s.T(), err)
- s.testFile("other_read_cases.json", vschema, false)
- s.testFile("other_admin_cases.json", vschema, false)
+ vw.Vcursor.SetTarget("main")
+ vw.Keyspace = &vindexes.Keyspace{Name: "main"}
+
+ s.testFile("other_read_cases.json", vw, false)
+ s.testFile("other_admin_cases.json", vw, false)
}
func (s *planTestSuite) TestMirrorPlanning() {
- vschema := &vschemawrapper.VSchemaWrapper{
- V: loadSchema(s.T(), "vschemas/mirror_schema.json", true),
- TabletType_: topodatapb.TabletType_PRIMARY,
- SysVarEnabled: true,
- TestBuilder: TestBuilder,
- Env: vtenv.NewTestEnv(),
- }
+ env := vtenv.NewTestEnv()
+ vschema := loadSchema(s.T(), "vschemas/mirror_schema.json", true)
+ vw, err := vschemawrapper.NewVschemaWrapper(env, vschema, TestBuilder)
+ require.NoError(s.T(), err)
- s.testFile("mirror_cases.json", vschema, false)
+ s.testFile("mirror_cases.json", vw, false)
}
func (s *planTestSuite) TestOneMirror() {
reset := operators.EnableDebugPrinting()
defer reset()
- vschema := &vschemawrapper.VSchemaWrapper{
- V: loadSchema(s.T(), "vschemas/mirror_schema.json", true),
- TabletType_: topodatapb.TabletType_PRIMARY,
- SysVarEnabled: true,
- TestBuilder: TestBuilder,
- Env: vtenv.NewTestEnv(),
- }
- s.testFile("onecase.json", vschema, false)
+ env := vtenv.NewTestEnv()
+ vschema := loadSchema(s.T(), "vschemas/schema.json", true)
+ vw, err := vschemawrapper.NewVschemaWrapper(env, vschema, TestBuilder)
+ require.NoError(s.T(), err)
+
+ s.testFile("onecase.json", vw, false)
}
func loadSchema(t testing.TB, filename string, setCollation bool) *vindexes.VSchema {
@@ -659,21 +649,12 @@ func createFkDefinition(childCols []string, parentTableName string, parentCols [
}
}
-type (
- planTest struct {
- Comment string `json:"comment,omitempty"`
- Query string `json:"query,omitempty"`
- Plan json.RawMessage `json:"plan,omitempty"`
- Skip bool `json:"skip,omitempty"`
- }
-)
-
func (s *planTestSuite) testFile(filename string, vschema *vschemawrapper.VSchemaWrapper, render bool) {
opts := jsondiff.DefaultConsoleOptions()
s.T().Run(filename, func(t *testing.T) {
failed := false
- var expected []planTest
+ var expected []PlanTest
for _, tcase := range readJSONTests(filename) {
testName := tcase.Comment
if testName == "" {
@@ -682,9 +663,10 @@ func (s *planTestSuite) testFile(filename string, vschema *vschemawrapper.VSchem
if tcase.Query == "" {
continue
}
- current := planTest{
- Comment: testName,
+ current := PlanTest{
+ Comment: tcase.Comment,
Query: tcase.Query,
+ SkipE2E: tcase.SkipE2E,
}
vschema.Version = Gen4
out := getPlanOutput(tcase, vschema, render)
@@ -730,8 +712,8 @@ func (s *planTestSuite) testFile(filename string, vschema *vschemawrapper.VSchem
})
}
-func readJSONTests(filename string) []planTest {
- var output []planTest
+func readJSONTests(filename string) []PlanTest {
+ var output []PlanTest
file, err := os.Open(locateFile(filename))
if err != nil {
panic(err)
@@ -745,7 +727,7 @@ func readJSONTests(filename string) []planTest {
return output
}
-func getPlanOutput(tcase planTest, vschema *vschemawrapper.VSchemaWrapper, render bool) (out string) {
+func getPlanOutput(tcase PlanTest, vschema *vschemawrapper.VSchemaWrapper, render bool) (out string) {
defer func() {
if r := recover(); r != nil {
out = fmt.Sprintf("panicked: %v\n%s", r, string(debug.Stack()))
@@ -783,30 +765,29 @@ func locateFile(name string) string {
var benchMarkFiles = []string{"from_cases.json", "filter_cases.json", "large_cases.json", "aggr_cases.json", "select_cases.json", "union_cases.json"}
func BenchmarkPlanner(b *testing.B) {
- vschema := &vschemawrapper.VSchemaWrapper{
- V: loadSchema(b, "vschemas/schema.json", true),
- SysVarEnabled: true,
- Env: vtenv.NewTestEnv(),
- }
+ env := vtenv.NewTestEnv()
+ vschema := loadSchema(b, "vschemas/schema.json", true)
+ vw, err := vschemawrapper.NewVschemaWrapper(env, vschema, TestBuilder)
+ require.NoError(b, err)
+
for _, filename := range benchMarkFiles {
testCases := readJSONTests(filename)
b.Run(filename+"-gen4", func(b *testing.B) {
- benchmarkPlanner(b, Gen4, testCases, vschema)
+ benchmarkPlanner(b, Gen4, testCases, vw)
})
}
}
func BenchmarkSemAnalysis(b *testing.B) {
- vschema := &vschemawrapper.VSchemaWrapper{
- V: loadSchema(b, "vschemas/schema.json", true),
- SysVarEnabled: true,
- Env: vtenv.NewTestEnv(),
- }
+ env := vtenv.NewTestEnv()
+ vschema := loadSchema(b, "vschemas/schema.json", true)
+ vw, err := vschemawrapper.NewVschemaWrapper(env, vschema, TestBuilder)
+ require.NoError(b, err)
for i := 0; i < b.N; i++ {
for _, filename := range benchMarkFiles {
for _, tc := range readJSONTests(filename) {
- exerciseAnalyzer(tc.Query, vschema.CurrentDb(), vschema)
+ exerciseAnalyzer(tc.Query, vw.CurrentDb(), vw)
}
}
}
@@ -831,12 +812,10 @@ func exerciseAnalyzer(query, database string, s semantics.SchemaInformation) {
}
func BenchmarkSelectVsDML(b *testing.B) {
- vschema := &vschemawrapper.VSchemaWrapper{
- V: loadSchema(b, "vschemas/schema.json", true),
- SysVarEnabled: true,
- Version: Gen4,
- Env: vtenv.NewTestEnv(),
- }
+ env := vtenv.NewTestEnv()
+ vschema := loadSchema(b, "vschemas/schema.json", true)
+ vw, err := vschemawrapper.NewVschemaWrapper(env, vschema, TestBuilder)
+ require.NoError(b, err)
dmlCases := readJSONTests("dml_cases.json")
selectCases := readJSONTests("select_cases.json")
@@ -850,44 +829,37 @@ func BenchmarkSelectVsDML(b *testing.B) {
})
b.Run("DML (random sample, N=32)", func(b *testing.B) {
- benchmarkPlanner(b, Gen4, dmlCases[:32], vschema)
+ benchmarkPlanner(b, Gen4, dmlCases[:32], vw)
})
b.Run("Select (random sample, N=32)", func(b *testing.B) {
- benchmarkPlanner(b, Gen4, selectCases[:32], vschema)
+ benchmarkPlanner(b, Gen4, selectCases[:32], vw)
})
}
func BenchmarkBaselineVsMirrored(b *testing.B) {
+ env := vtenv.NewTestEnv()
baseline := loadSchema(b, "vschemas/mirror_schema.json", true)
baseline.MirrorRules = map[string]*vindexes.MirrorRule{}
- baselineVschema := &vschemawrapper.VSchemaWrapper{
- V: baseline,
- SysVarEnabled: true,
- Version: Gen4,
- Env: vtenv.NewTestEnv(),
- }
+ bvw, err := vschemawrapper.NewVschemaWrapper(env, baseline, TestBuilder)
+ require.NoError(b, err)
mirroredSchema := loadSchema(b, "vschemas/mirror_schema.json", true)
- mirroredVschema := &vschemawrapper.VSchemaWrapper{
- V: mirroredSchema,
- SysVarEnabled: true,
- Version: Gen4,
- Env: vtenv.NewTestEnv(),
- }
+ mvw, err := vschemawrapper.NewVschemaWrapper(env, mirroredSchema, TestBuilder)
+ require.NoError(b, err)
cases := readJSONTests("mirror_cases.json")
b.Run("Baseline", func(b *testing.B) {
- benchmarkPlanner(b, Gen4, cases, baselineVschema)
+ benchmarkPlanner(b, Gen4, cases, bvw)
})
b.Run("Mirrored", func(b *testing.B) {
- benchmarkPlanner(b, Gen4, cases, mirroredVschema)
+ benchmarkPlanner(b, Gen4, cases, mvw)
})
}
-func benchmarkPlanner(b *testing.B, version plancontext.PlannerVersion, testCases []planTest, vschema *vschemawrapper.VSchemaWrapper) {
+func benchmarkPlanner(b *testing.B, version plancontext.PlannerVersion, testCases []PlanTest, vschema *vschemawrapper.VSchemaWrapper) {
b.ReportAllocs()
for n := 0; n < b.N; n++ {
for _, tcase := range testCases {
diff --git a/go/vt/vtgate/planbuilder/plancontext/planning_context.go b/go/vt/vtgate/planbuilder/plancontext/planning_context.go
index 607ca83aa31..016f5c877cf 100644
--- a/go/vt/vtgate/planbuilder/plancontext/planning_context.go
+++ b/go/vt/vtgate/planbuilder/plancontext/planning_context.go
@@ -91,7 +91,7 @@ func CreatePlanningContext(stmt sqlparser.Statement,
version querypb.ExecuteOptions_PlannerVersion,
) (*PlanningContext, error) {
ksName := ""
- if ks, _ := vschema.DefaultKeyspace(); ks != nil {
+ if ks, _ := vschema.SelectedKeyspace(); ks != nil {
ksName = ks.Name
}
diff --git a/go/vt/vtgate/planbuilder/plancontext/planning_context_test.go b/go/vt/vtgate/planbuilder/plancontext/planning_context_test.go
index d7315f376b6..e5e96b0a4be 100644
--- a/go/vt/vtgate/planbuilder/plancontext/planning_context_test.go
+++ b/go/vt/vtgate/planbuilder/plancontext/planning_context_test.go
@@ -201,7 +201,7 @@ func (v *vschema) FindTableOrVindex(tablename sqlparser.TableName) (*vindexes.Ta
panic("implement me")
}
-func (v *vschema) DefaultKeyspace() (*vindexes.Keyspace, error) {
+func (v *vschema) SelectedKeyspace() (*vindexes.Keyspace, error) {
// TODO implement me
panic("implement me")
}
diff --git a/go/vt/vtgate/planbuilder/plancontext/vschema.go b/go/vt/vtgate/planbuilder/plancontext/vschema.go
index 6e92ad0d83b..b4560424718 100644
--- a/go/vt/vtgate/planbuilder/plancontext/vschema.go
+++ b/go/vt/vtgate/planbuilder/plancontext/vschema.go
@@ -27,7 +27,9 @@ type VSchema interface {
FindTable(tablename sqlparser.TableName) (*vindexes.Table, string, topodatapb.TabletType, key.Destination, error)
FindView(name sqlparser.TableName) sqlparser.SelectStatement
FindTableOrVindex(tablename sqlparser.TableName) (*vindexes.Table, vindexes.Vindex, string, topodatapb.TabletType, key.Destination, error)
- DefaultKeyspace() (*vindexes.Keyspace, error)
+
+ // SelectedKeyspace returns the current keyspace if set, otherwise returns an error
+ SelectedKeyspace() (*vindexes.Keyspace, error)
TargetString() string
Destination() key.Destination
TabletType() topodatapb.TabletType
diff --git a/go/vt/vtgate/planbuilder/select.go b/go/vt/vtgate/planbuilder/select.go
index 9cc1c8efe06..409343f2760 100644
--- a/go/vt/vtgate/planbuilder/select.go
+++ b/go/vt/vtgate/planbuilder/select.go
@@ -46,7 +46,7 @@ func gen4SelectStmtPlanner(
}
if p != nil {
used := "dual"
- keyspace, ksErr := vschema.DefaultKeyspace()
+ keyspace, ksErr := vschema.SelectedKeyspace()
if ksErr == nil {
// we are just getting the ks to log the correct table use.
// no need to fail this if we can't find the default keyspace
@@ -101,7 +101,7 @@ func gen4SelectStmtPlanner(
func gen4planSQLCalcFoundRows(vschema plancontext.VSchema, sel *sqlparser.Select, query string, reservedVars *sqlparser.ReservedVars) (*planResult, error) {
ksName := ""
- if ks, _ := vschema.DefaultKeyspace(); ks != nil {
+ if ks, _ := vschema.SelectedKeyspace(); ks != nil {
ksName = ks.Name
}
semTable, err := semantics.Analyze(sel, ksName, vschema)
diff --git a/go/vt/vtgate/planbuilder/show.go b/go/vt/vtgate/planbuilder/show.go
index 82035adaa87..40cf7b2411f 100644
--- a/go/vt/vtgate/planbuilder/show.go
+++ b/go/vt/vtgate/planbuilder/show.go
@@ -676,7 +676,7 @@ func buildVschemaKeyspacesPlan(vschema plancontext.VSchema) (engine.Primitive, e
func buildVschemaTablesPlan(vschema plancontext.VSchema) (engine.Primitive, error) {
vs := vschema.GetVSchema()
- ks, err := vschema.DefaultKeyspace()
+ ks, err := vschema.SelectedKeyspace()
if err != nil {
return nil, err
}
diff --git a/go/vt/vtgate/planbuilder/show_test.go b/go/vt/vtgate/planbuilder/show_test.go
index bfdb9a623a0..c3651aaa1cd 100644
--- a/go/vt/vtgate/planbuilder/show_test.go
+++ b/go/vt/vtgate/planbuilder/show_test.go
@@ -32,10 +32,13 @@ import (
)
func TestBuildDBPlan(t *testing.T) {
- vschema := &vschemawrapper.VSchemaWrapper{
- Keyspace: &vindexes.Keyspace{Name: "main"},
- Env: vtenv.NewTestEnv(),
- }
+ env := vtenv.NewTestEnv()
+ vschema := loadSchema(t, "vschemas/schema.json", true)
+ vw, err := vschemawrapper.NewVschemaWrapper(env, vschema, TestBuilder)
+ require.NoError(t, err)
+
+ vw.Vcursor.SetTarget("main")
+ vw.Keyspace = &vindexes.Keyspace{Name: "main"}
testCases := []struct {
query string
@@ -54,7 +57,7 @@ func TestBuildDBPlan(t *testing.T) {
require.NoError(t, err)
show := parserOut.(*sqlparser.Show)
- primitive, err := buildDBPlan(show.Internal.(*sqlparser.ShowBasic), vschema)
+ primitive, err := buildDBPlan(show.Internal.(*sqlparser.ShowBasic), vw)
require.NoError(t, err)
result, err := primitive.TryExecute(context.Background(), nil, nil, false)
diff --git a/go/vt/vtgate/planbuilder/simplifier_test.go b/go/vt/vtgate/planbuilder/simplifier_test.go
index 305c18896e3..dce21b3e175 100644
--- a/go/vt/vtgate/planbuilder/simplifier_test.go
+++ b/go/vt/vtgate/planbuilder/simplifier_test.go
@@ -38,21 +38,21 @@ func TestSimplifyBuggyQuery(t *testing.T) {
query := "select distinct count(distinct a), count(distinct 4) from user left join unsharded on 0 limit 5"
// select 0 from unsharded union select 0 from `user` union select 0 from unsharded
// select 0 from unsharded union (select 0 from `user` union select 0 from unsharded)
- vschema := &vschemawrapper.VSchemaWrapper{
- V: loadSchema(t, "vschemas/schema.json", true),
- Version: Gen4,
- Env: vtenv.NewTestEnv(),
- }
+ env := vtenv.NewTestEnv()
+ vschema := loadSchema(t, "vschemas/schema.json", true)
+ vw, err := vschemawrapper.NewVschemaWrapper(env, vschema, TestBuilder)
+ require.NoError(t, err)
+
stmt, reserved, err := sqlparser.NewTestParser().Parse2(query)
require.NoError(t, err)
- rewritten, _ := sqlparser.RewriteAST(sqlparser.Clone(stmt), vschema.CurrentDb(), sqlparser.SQLSelectLimitUnset, "", nil, nil, nil)
+ rewritten, _ := sqlparser.RewriteAST(sqlparser.Clone(stmt), vw.CurrentDb(), sqlparser.SQLSelectLimitUnset, "", nil, nil, nil)
reservedVars := sqlparser.NewReservedVars("vtg", reserved)
simplified := simplifier.SimplifyStatement(
stmt.(sqlparser.SelectStatement),
- vschema.CurrentDb(),
- vschema,
- keepSameError(query, reservedVars, vschema, rewritten.BindVarNeeds),
+ vw.CurrentDb(),
+ vw,
+ keepSameError(query, reservedVars, vw, rewritten.BindVarNeeds),
)
fmt.Println(sqlparser.String(simplified))
@@ -61,21 +61,22 @@ func TestSimplifyBuggyQuery(t *testing.T) {
func TestSimplifyPanic(t *testing.T) {
t.Skip("not needed to run")
query := "(select id from unsharded union select id from unsharded_auto) union (select id from unsharded_auto union select name from unsharded)"
- vschema := &vschemawrapper.VSchemaWrapper{
- V: loadSchema(t, "vschemas/schema.json", true),
- Version: Gen4,
- Env: vtenv.NewTestEnv(),
- }
+
+ env := vtenv.NewTestEnv()
+ vschema := loadSchema(t, "vschemas/schema.json", true)
+ vw, err := vschemawrapper.NewVschemaWrapper(env, vschema, TestBuilder)
+ require.NoError(t, err)
+
stmt, reserved, err := sqlparser.NewTestParser().Parse2(query)
require.NoError(t, err)
- rewritten, _ := sqlparser.RewriteAST(sqlparser.Clone(stmt), vschema.CurrentDb(), sqlparser.SQLSelectLimitUnset, "", nil, nil, nil)
+ rewritten, _ := sqlparser.RewriteAST(sqlparser.Clone(stmt), vw.CurrentDb(), sqlparser.SQLSelectLimitUnset, "", nil, nil, nil)
reservedVars := sqlparser.NewReservedVars("vtg", reserved)
simplified := simplifier.SimplifyStatement(
stmt.(sqlparser.SelectStatement),
- vschema.CurrentDb(),
- vschema,
- keepPanicking(query, reservedVars, vschema, rewritten.BindVarNeeds),
+ vw.CurrentDb(),
+ vw,
+ keepPanicking(query, reservedVars, vw, rewritten.BindVarNeeds),
)
fmt.Println(sqlparser.String(simplified))
@@ -83,11 +84,11 @@ func TestSimplifyPanic(t *testing.T) {
func TestUnsupportedFile(t *testing.T) {
t.Skip("run manually to see if any queries can be simplified")
- vschema := &vschemawrapper.VSchemaWrapper{
- V: loadSchema(t, "vschemas/schema.json", true),
- Version: Gen4,
- Env: vtenv.NewTestEnv(),
- }
+ env := vtenv.NewTestEnv()
+ vschema := loadSchema(t, "vschemas/schema.json", true)
+ vw, err := vschemawrapper.NewVschemaWrapper(env, vschema, TestBuilder)
+ require.NoError(t, err)
+
fmt.Println(vschema)
for _, tcase := range readJSONTests("unsupported_cases.txt") {
t.Run(tcase.Query, func(t *testing.T) {
@@ -99,11 +100,10 @@ func TestUnsupportedFile(t *testing.T) {
t.Skip()
return
}
- rewritten, err := sqlparser.RewriteAST(stmt, vschema.CurrentDb(), sqlparser.SQLSelectLimitUnset, "", nil, nil, nil)
+ rewritten, err := sqlparser.RewriteAST(stmt, vw.CurrentDb(), sqlparser.SQLSelectLimitUnset, "", nil, nil, nil)
if err != nil {
t.Skip()
}
- vschema.CurrentDb()
reservedVars := sqlparser.NewReservedVars("vtg", reserved)
ast := rewritten.AST
@@ -111,9 +111,9 @@ func TestUnsupportedFile(t *testing.T) {
stmt, _, _ = sqlparser.NewTestParser().Parse2(tcase.Query)
simplified := simplifier.SimplifyStatement(
stmt.(sqlparser.SelectStatement),
- vschema.CurrentDb(),
- vschema,
- keepSameError(tcase.Query, reservedVars, vschema, rewritten.BindVarNeeds),
+ vw.CurrentDb(),
+ vw,
+ keepSameError(tcase.Query, reservedVars, vw, rewritten.BindVarNeeds),
)
if simplified == nil {
@@ -135,12 +135,12 @@ func keepSameError(query string, reservedVars *sqlparser.ReservedVars, vschema *
}
rewritten, _ := sqlparser.RewriteAST(stmt, vschema.CurrentDb(), sqlparser.SQLSelectLimitUnset, "", nil, nil, nil)
ast := rewritten.AST
- _, expected := BuildFromStmt(context.Background(), query, ast, reservedVars, vschema, rewritten.BindVarNeeds, true, true)
+ _, expected := BuildFromStmt(context.Background(), query, ast, reservedVars, vschema, rewritten.BindVarNeeds, staticConfig{})
if expected == nil {
panic("query does not fail to plan")
}
return func(statement sqlparser.SelectStatement) bool {
- _, myErr := BuildFromStmt(context.Background(), query, statement, reservedVars, vschema, needs, true, true)
+ _, myErr := BuildFromStmt(context.Background(), query, statement, reservedVars, vschema, needs, staticConfig{})
if myErr == nil {
return false
}
@@ -162,7 +162,7 @@ func keepPanicking(query string, reservedVars *sqlparser.ReservedVars, vschema *
}
}()
log.Errorf("trying %s", sqlparser.String(statement))
- _, _ = BuildFromStmt(context.Background(), query, statement, reservedVars, vschema, needs, true, true)
+ _, _ = BuildFromStmt(context.Background(), query, statement, reservedVars, vschema, needs, staticConfig{})
log.Errorf("did not panic")
return false
diff --git a/go/vt/vtgate/planbuilder/test_helper.go b/go/vt/vtgate/planbuilder/test_helper.go
new file mode 100644
index 00000000000..25d6b7306d1
--- /dev/null
+++ b/go/vt/vtgate/planbuilder/test_helper.go
@@ -0,0 +1,27 @@
+/*
+Copyright 2024 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package planbuilder
+
+import "encoding/json"
+
+type PlanTest struct {
+ Comment string `json:"comment,omitempty"`
+ Query string `json:"query,omitempty"`
+ Plan json.RawMessage `json:"plan,omitempty"`
+ Skip bool `json:"skip,omitempty"`
+ SkipE2E bool `json:"skip_e2e,omitempty"`
+}
diff --git a/go/vt/vtgate/planbuilder/testdata/aggr_cases.json b/go/vt/vtgate/planbuilder/testdata/aggr_cases.json
index 8b268e367dd..49a03a8f05a 100644
--- a/go/vt/vtgate/planbuilder/testdata/aggr_cases.json
+++ b/go/vt/vtgate/planbuilder/testdata/aggr_cases.json
@@ -940,19 +940,44 @@
"Table": "`user`, user_extra"
},
{
- "OperatorType": "Route",
+ "OperatorType": "VindexLookup",
"Variant": "EqualUnique",
"Keyspace": {
"Name": "user",
"Sharded": true
},
- "FieldQuery": "select music.`name` from music where 1 != 1",
- "Query": "select music.`name` from music where music.id = :user_id",
- "Table": "music",
"Values": [
":user_id"
],
- "Vindex": "music_user_map"
+ "Vindex": "music_user_map",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1",
+ "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals",
+ "Table": "name_user_vdx",
+ "Values": [
+ "::name"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "ByDestination",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.`name` from music where 1 != 1",
+ "Query": "select music.`name` from music where music.id = :user_id",
+ "Table": "music"
+ }
+ ]
}
]
}
@@ -2992,19 +3017,44 @@
]
},
{
- "OperatorType": "Route",
+ "OperatorType": "VindexLookup",
"Variant": "EqualUnique",
"Keyspace": {
"Name": "user",
"Sharded": true
},
- "FieldQuery": "select 1 from music as m where 1 != 1",
- "Query": "select 1 from music as m where m.id = :u2_val2",
- "Table": "music",
"Values": [
":u2_val2"
],
- "Vindex": "music_user_map"
+ "Vindex": "music_user_map",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1",
+ "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals",
+ "Table": "name_user_vdx",
+ "Values": [
+ "::name"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "ByDestination",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from music as m where 1 != 1",
+ "Query": "select 1 from music as m where m.id = :u2_val2",
+ "Table": "music"
+ }
+ ]
}
]
}
diff --git a/go/vt/vtgate/planbuilder/testdata/dml_cases_with_user_as_default.json b/go/vt/vtgate/planbuilder/testdata/dml_cases_with_user_as_default.json
new file mode 100644
index 00000000000..ff66967c2ce
--- /dev/null
+++ b/go/vt/vtgate/planbuilder/testdata/dml_cases_with_user_as_default.json
@@ -0,0 +1,24 @@
+[
+ {
+ "comment": "Update reference table from sharded keyspace to unsharded keyspace",
+ "query": "update ambiguous_ref_with_source set done = true where id = 1;",
+ "plan": {
+ "QueryType": "UPDATE",
+ "Original": "update ambiguous_ref_with_source set done = true where id = 1;",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetTabletType": "PRIMARY",
+ "Query": "update ambiguous_ref_with_source set done = true where id = 1",
+ "Table": "ambiguous_ref_with_source"
+ },
+ "TablesUsed": [
+ "main.ambiguous_ref_with_source"
+ ]
+ }
+ }
+]
\ No newline at end of file
diff --git a/go/vt/vtgate/planbuilder/testdata/filter_cases.json b/go/vt/vtgate/planbuilder/testdata/filter_cases.json
index 4194a369bd6..edce4ebd0cb 100644
--- a/go/vt/vtgate/planbuilder/testdata/filter_cases.json
+++ b/go/vt/vtgate/planbuilder/testdata/filter_cases.json
@@ -3404,19 +3404,44 @@
"QueryType": "SELECT",
"Original": "select * from multicolvin where column_b = 1",
"Instructions": {
- "OperatorType": "Route",
+ "OperatorType": "VindexLookup",
"Variant": "EqualUnique",
"Keyspace": {
"Name": "user",
"Sharded": true
},
- "FieldQuery": "select * from multicolvin where 1 != 1",
- "Query": "select * from multicolvin where column_b = 1",
- "Table": "multicolvin",
"Values": [
"1"
],
- "Vindex": "colb_colc_map"
+ "Vindex": "colb_colc_map",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select colb, keyspace_id from colb_colc_map where 1 != 1",
+ "Query": "select colb, keyspace_id from colb_colc_map where colb in ::__vals",
+ "Table": "colb_colc_map",
+ "Values": [
+ "::colb"
+ ],
+ "Vindex": "hash"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "ByDestination",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from multicolvin where 1 != 1",
+ "Query": "select * from multicolvin where column_b = 1",
+ "Table": "multicolvin"
+ }
+ ]
},
"TablesUsed": [
"user.multicolvin"
@@ -3430,19 +3455,44 @@
"QueryType": "SELECT",
"Original": "select * from multicolvin where column_b = 1 and column_c = 2",
"Instructions": {
- "OperatorType": "Route",
+ "OperatorType": "VindexLookup",
"Variant": "EqualUnique",
"Keyspace": {
"Name": "user",
"Sharded": true
},
- "FieldQuery": "select * from multicolvin where 1 != 1",
- "Query": "select * from multicolvin where column_b = 1 and column_c = 2",
- "Table": "multicolvin",
"Values": [
"1"
],
- "Vindex": "colb_colc_map"
+ "Vindex": "colb_colc_map",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select colb, keyspace_id from colb_colc_map where 1 != 1",
+ "Query": "select colb, keyspace_id from colb_colc_map where colb in ::__vals",
+ "Table": "colb_colc_map",
+ "Values": [
+ "::colb"
+ ],
+ "Vindex": "hash"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "ByDestination",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from multicolvin where 1 != 1",
+ "Query": "select * from multicolvin where column_b = 1 and column_c = 2",
+ "Table": "multicolvin"
+ }
+ ]
},
"TablesUsed": [
"user.multicolvin"
@@ -3456,19 +3506,44 @@
"QueryType": "SELECT",
"Original": "select * from multicolvin where column_b = 1 and column_c = 2 and column_a = 3",
"Instructions": {
- "OperatorType": "Route",
+ "OperatorType": "VindexLookup",
"Variant": "EqualUnique",
"Keyspace": {
"Name": "user",
"Sharded": true
},
- "FieldQuery": "select * from multicolvin where 1 != 1",
- "Query": "select * from multicolvin where column_b = 1 and column_c = 2 and column_a = 3",
- "Table": "multicolvin",
"Values": [
"1"
],
- "Vindex": "colb_colc_map"
+ "Vindex": "colb_colc_map",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select colb, keyspace_id from colb_colc_map where 1 != 1",
+ "Query": "select colb, keyspace_id from colb_colc_map where colb in ::__vals",
+ "Table": "colb_colc_map",
+ "Values": [
+ "::colb"
+ ],
+ "Vindex": "hash"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "ByDestination",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from multicolvin where 1 != 1",
+ "Query": "select * from multicolvin where column_b = 1 and column_c = 2 and column_a = 3",
+ "Table": "multicolvin"
+ }
+ ]
},
"TablesUsed": [
"user.multicolvin"
@@ -3482,19 +3557,44 @@
"QueryType": "SELECT",
"Original": "select * from multicolvin where column_a = 3 and column_b = 1",
"Instructions": {
- "OperatorType": "Route",
+ "OperatorType": "VindexLookup",
"Variant": "EqualUnique",
"Keyspace": {
"Name": "user",
"Sharded": true
},
- "FieldQuery": "select * from multicolvin where 1 != 1",
- "Query": "select * from multicolvin where column_a = 3 and column_b = 1",
- "Table": "multicolvin",
"Values": [
"1"
],
- "Vindex": "colb_colc_map"
+ "Vindex": "colb_colc_map",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select colb, keyspace_id from colb_colc_map where 1 != 1",
+ "Query": "select colb, keyspace_id from colb_colc_map where colb in ::__vals",
+ "Table": "colb_colc_map",
+ "Values": [
+ "::colb"
+ ],
+ "Vindex": "hash"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "ByDestination",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from multicolvin where 1 != 1",
+ "Query": "select * from multicolvin where column_a = 3 and column_b = 1",
+ "Table": "multicolvin"
+ }
+ ]
},
"TablesUsed": [
"user.multicolvin"
diff --git a/go/vt/vtgate/planbuilder/testdata/from_cases.json b/go/vt/vtgate/planbuilder/testdata/from_cases.json
index 2e0fe429c1f..bec64fd7b1e 100644
--- a/go/vt/vtgate/planbuilder/testdata/from_cases.json
+++ b/go/vt/vtgate/planbuilder/testdata/from_cases.json
@@ -4709,19 +4709,44 @@
]
},
{
- "OperatorType": "Route",
+ "OperatorType": "VindexLookup",
"Variant": "EqualUnique",
"Keyspace": {
"Name": "user",
"Sharded": true
},
- "FieldQuery": "select 1 from music as m where 1 != 1",
- "Query": "select 1 from music as m where m.user_id = 5 and m.id = 20 and m.col = :u_col /* INT16 */",
- "Table": "music",
"Values": [
"20"
],
- "Vindex": "music_user_map"
+ "Vindex": "music_user_map",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1",
+ "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals",
+ "Table": "name_user_vdx",
+ "Values": [
+ "::name"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "ByDestination",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from music as m where 1 != 1",
+ "Query": "select 1 from music as m where m.user_id = 5 and m.id = 20 and m.col = :u_col /* INT16 */",
+ "Table": "music"
+ }
+ ]
}
]
},
diff --git a/go/vt/vtgate/planbuilder/testdata/memory_sort_cases.json b/go/vt/vtgate/planbuilder/testdata/memory_sort_cases.json
index 060f073a366..a35949cd4c1 100644
--- a/go/vt/vtgate/planbuilder/testdata/memory_sort_cases.json
+++ b/go/vt/vtgate/planbuilder/testdata/memory_sort_cases.json
@@ -318,19 +318,44 @@
"Vindex": "user_index"
},
{
- "OperatorType": "Route",
+ "OperatorType": "VindexLookup",
"Variant": "EqualUnique",
"Keyspace": {
"Name": "user",
"Sharded": true
},
- "FieldQuery": "select music.col3 as c, weight_string(music.col3) from music where 1 != 1",
- "Query": "select music.col3 as c, weight_string(music.col3) from music where music.id = :user_id",
- "Table": "music",
"Values": [
":user_id"
],
- "Vindex": "music_user_map"
+ "Vindex": "music_user_map",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1",
+ "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals",
+ "Table": "name_user_vdx",
+ "Values": [
+ "::name"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "ByDestination",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.col3 as c, weight_string(music.col3) from music where 1 != 1",
+ "Query": "select music.col3 as c, weight_string(music.col3) from music where music.id = :user_id",
+ "Table": "music"
+ }
+ ]
}
]
}
@@ -379,19 +404,44 @@
"Vindex": "user_index"
},
{
- "OperatorType": "Route",
+ "OperatorType": "VindexLookup",
"Variant": "EqualUnique",
"Keyspace": {
"Name": "user",
"Sharded": true
},
- "FieldQuery": "select music.col3, weight_string(music.col3) from music where 1 != 1",
- "Query": "select music.col3, weight_string(music.col3) from music where music.id = :user_id",
- "Table": "music",
"Values": [
":user_id"
],
- "Vindex": "music_user_map"
+ "Vindex": "music_user_map",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1",
+ "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals",
+ "Table": "name_user_vdx",
+ "Values": [
+ "::name"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "ByDestination",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.col3, weight_string(music.col3) from music where 1 != 1",
+ "Query": "select music.col3, weight_string(music.col3) from music where music.id = :user_id",
+ "Table": "music"
+ }
+ ]
}
]
}
diff --git a/go/vt/vtgate/planbuilder/testdata/postprocess_cases.json b/go/vt/vtgate/planbuilder/testdata/postprocess_cases.json
index 36f1472007d..6a8e94c0241 100644
--- a/go/vt/vtgate/planbuilder/testdata/postprocess_cases.json
+++ b/go/vt/vtgate/planbuilder/testdata/postprocess_cases.json
@@ -544,19 +544,44 @@
"Vindex": "user_index"
},
{
- "OperatorType": "Route",
+ "OperatorType": "VindexLookup",
"Variant": "EqualUnique",
"Keyspace": {
"Name": "user",
"Sharded": true
},
- "FieldQuery": "select music.col3 from music where 1 != 1",
- "Query": "select music.col3 from music where music.id = :user_id",
- "Table": "music",
"Values": [
":user_id"
],
- "Vindex": "music_user_map"
+ "Vindex": "music_user_map",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1",
+ "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals",
+ "Table": "name_user_vdx",
+ "Values": [
+ "::name"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "ByDestination",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.col3 from music where 1 != 1",
+ "Query": "select music.col3 from music where music.id = :user_id",
+ "Table": "music"
+ }
+ ]
}
]
},
@@ -597,19 +622,44 @@
"Vindex": "user_index"
},
{
- "OperatorType": "Route",
+ "OperatorType": "VindexLookup",
"Variant": "EqualUnique",
"Keyspace": {
"Name": "user",
"Sharded": true
},
- "FieldQuery": "select music.col3 from music where 1 != 1",
- "Query": "select music.col3 from music where music.id = :user_id",
- "Table": "music",
"Values": [
":user_id"
],
- "Vindex": "music_user_map"
+ "Vindex": "music_user_map",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1",
+ "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals",
+ "Table": "name_user_vdx",
+ "Values": [
+ "::name"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "ByDestination",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.col3 from music where 1 != 1",
+ "Query": "select music.col3 from music where music.id = :user_id",
+ "Table": "music"
+ }
+ ]
}
]
},
@@ -650,19 +700,44 @@
"Vindex": "user_index"
},
{
- "OperatorType": "Route",
+ "OperatorType": "VindexLookup",
"Variant": "EqualUnique",
"Keyspace": {
"Name": "user",
"Sharded": true
},
- "FieldQuery": "select music.col3 from music where 1 != 1",
- "Query": "select music.col3 from music where music.id = :user_id",
- "Table": "music",
"Values": [
":user_id"
],
- "Vindex": "music_user_map"
+ "Vindex": "music_user_map",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1",
+ "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals",
+ "Table": "name_user_vdx",
+ "Values": [
+ "::name"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "ByDestination",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.col3 from music where 1 != 1",
+ "Query": "select music.col3 from music where music.id = :user_id",
+ "Table": "music"
+ }
+ ]
}
]
},
@@ -770,19 +845,44 @@
"Vindex": "user_index"
},
{
- "OperatorType": "Route",
+ "OperatorType": "VindexLookup",
"Variant": "EqualUnique",
"Keyspace": {
"Name": "user",
"Sharded": true
},
- "FieldQuery": "select music.col3 from music where 1 != 1",
- "Query": "select music.col3 from music where music.id = :user_id",
- "Table": "music",
"Values": [
":user_id"
],
- "Vindex": "music_user_map"
+ "Vindex": "music_user_map",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1",
+ "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals",
+ "Table": "name_user_vdx",
+ "Values": [
+ "::name"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "ByDestination",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.col3 from music where 1 != 1",
+ "Query": "select music.col3 from music where music.id = :user_id",
+ "Table": "music"
+ }
+ ]
}
]
},
diff --git a/go/vt/vtgate/planbuilder/testdata/schemas/main.sql b/go/vt/vtgate/planbuilder/testdata/schemas/main.sql
new file mode 100644
index 00000000000..8c15b99218c
--- /dev/null
+++ b/go/vt/vtgate/planbuilder/testdata/schemas/main.sql
@@ -0,0 +1,12 @@
+CREATE TABLE `unsharded` (
+ `id` INT NOT NULL PRIMARY KEY,
+ `col1` VARCHAR(255) DEFAULT NULL,
+ `col2` VARCHAR(255) DEFAULT NULL,
+ `name` VARCHAR(255) DEFAULT NULL
+);
+
+CREATE TABLE `unsharded_auto` (
+ `id` INT NOT NULL PRIMARY KEY,
+ `col1` VARCHAR(255) DEFAULT NULL,
+ `col2` VARCHAR(255) DEFAULT NULL
+);
\ No newline at end of file
diff --git a/go/vt/vtgate/planbuilder/testdata/schemas/user.sql b/go/vt/vtgate/planbuilder/testdata/schemas/user.sql
new file mode 100644
index 00000000000..55f4078557a
--- /dev/null
+++ b/go/vt/vtgate/planbuilder/testdata/schemas/user.sql
@@ -0,0 +1,100 @@
+CREATE TABLE user
+(
+ id INT PRIMARY KEY,
+ col BIGINT,
+ predef1 VARCHAR(255),
+ predef2 VARCHAR(255),
+ textcol1 VARCHAR(255),
+ intcol BIGINT,
+ textcol2 VARCHAR(255)
+);
+
+CREATE TABLE user_metadata
+(
+ user_id INT,
+ email VARCHAR(255),
+ address VARCHAR(255),
+ md5 VARCHAR(255),
+ non_planable VARCHAR(255),
+ PRIMARY KEY (user_id)
+);
+
+CREATE TABLE music
+(
+ user_id INT,
+ id INT,
+ PRIMARY KEY (user_id)
+);
+
+CREATE TABLE samecolvin
+(
+ col VARCHAR(255),
+ PRIMARY KEY (col)
+);
+
+CREATE TABLE multicolvin
+(
+ kid INT,
+ column_a VARCHAR(255),
+ column_b VARCHAR(255),
+ column_c VARCHAR(255),
+ PRIMARY KEY (kid)
+);
+
+CREATE TABLE customer
+(
+ id INT,
+ email VARCHAR(255),
+ phone VARCHAR(255),
+ PRIMARY KEY (id)
+);
+
+CREATE TABLE multicol_tbl
+(
+ cola VARCHAR(255),
+ colb VARCHAR(255),
+ colc VARCHAR(255),
+ name VARCHAR(255),
+ PRIMARY KEY (cola, colb)
+);
+
+CREATE TABLE mixed_tbl
+(
+ shard_key VARCHAR(255),
+ lkp_key VARCHAR(255),
+ PRIMARY KEY (shard_key)
+);
+
+CREATE TABLE pin_test
+(
+ id INT PRIMARY KEY
+);
+
+CREATE TABLE cfc_vindex_col
+(
+ c1 VARCHAR(255),
+ c2 VARCHAR(255),
+ PRIMARY KEY (c1)
+);
+
+CREATE TABLE unq_lkp_idx
+(
+ unq_key INT PRIMARY KEY,
+ keyspace_id VARCHAR(255)
+);
+
+CREATE TABLE t1
+(
+ c1 INT,
+ c2 INT,
+ c3 INT,
+ PRIMARY KEY (c1)
+);
+
+CREATE TABLE authoritative
+(
+ user_id bigint NOT NULL,
+ col1 VARCHAR(255),
+ col2 bigint,
+ PRIMARY KEY (user_id)
+) ENGINE=InnoDB;
\ No newline at end of file
diff --git a/go/vt/vtgate/planbuilder/testdata/select_cases.json b/go/vt/vtgate/planbuilder/testdata/select_cases.json
index ab69df2cc47..eac13216380 100644
--- a/go/vt/vtgate/planbuilder/testdata/select_cases.json
+++ b/go/vt/vtgate/planbuilder/testdata/select_cases.json
@@ -92,7 +92,8 @@
"user.user",
"user.user_metadata"
]
- }
+ },
+ "skip_e2e": true
},
{
"comment": "select with timeout directive sets QueryTimeout in the route",
@@ -197,7 +198,8 @@
"TablesUsed": [
"main.unsharded"
]
- }
+ },
+ "skip_e2e": true
},
{
"comment": "select with partial scatter directive",
@@ -402,7 +404,8 @@
{
"comment": "test table lookup failure for authoritative code path",
"query": "select a.* from authoritative",
- "plan": "Unknown table 'a'"
+ "plan": "Unknown table 'a'",
+ "skip_e2e": true
},
{
"comment": "select * from qualified authoritative table",
@@ -470,7 +473,8 @@
"user.authoritative",
"user.user"
]
- }
+ },
+ "skip_e2e": true
},
{
"comment": "auto-resolve anonymous columns for simple route",
@@ -493,7 +497,8 @@
"user.user",
"user.user_extra"
]
- }
+ },
+ "skip_e2e": true
},
{
"comment": "json_arrayagg in single sharded query",
@@ -519,7 +524,8 @@
"TablesUsed": [
"user.user"
]
- }
+ },
+ "skip_e2e": true
},
{
"comment": "json_objectagg in single sharded query",
@@ -545,17 +551,20 @@
"TablesUsed": [
"user.user"
]
- }
+ },
+ "skip_e2e": true
},
{
"comment": "unsupported json aggregation expressions in scatter query",
"query": "select count(1) from user where cola = 'abc' group by n_id having json_arrayagg(a_id) = '[]'",
- "plan": "VT12001: unsupported: in scatter query: aggregation function 'json_arrayagg(a_id)'"
+ "plan": "VT12001: unsupported: in scatter query: aggregation function 'json_arrayagg(a_id)'",
+ "skip_e2e": true
},
{
"comment": "Cannot auto-resolve for cross-shard joins",
"query": "select col from user join user_extra",
- "plan": "Column 'col' in field list is ambiguous"
+ "plan": "Column 'col' in field list is ambiguous",
+ "skip_e2e": true
},
{
"comment": "Auto-resolve should work if unique vindex columns are referenced",
@@ -597,7 +606,8 @@
"user.user",
"user.user_extra"
]
- }
+ },
+ "skip_e2e": true
},
{
"comment": "database calls should be substituted",
@@ -619,7 +629,8 @@
"TablesUsed": [
"main.dual"
]
- }
+ },
+ "skip_e2e": true
},
{
"comment": "last_insert_id for unsharded route",
@@ -641,7 +652,8 @@
"TablesUsed": [
"main.unsharded"
]
- }
+ },
+ "skip_e2e": true
},
{
"comment": "select from dual on unqualified keyspace",
@@ -694,7 +706,8 @@
{
"comment": "prefixing dual with a keyspace should not work",
"query": "select 1 from user.dual",
- "plan": "table dual not found"
+ "plan": "table dual not found",
+ "skip_e2e": true
},
{
"comment": "RHS route referenced",
@@ -736,7 +749,8 @@
"user.user",
"user.user_extra"
]
- }
+ },
+ "skip_e2e": true
},
{
"comment": "Both routes referenced",
@@ -778,7 +792,8 @@
"user.user",
"user.user_extra"
]
- }
+ },
+ "skip_e2e": true
},
{
"comment": "Expression with single-route reference",
@@ -820,7 +835,8 @@
"user.user",
"user.user_extra"
]
- }
+ },
+ "skip_e2e": true
},
{
"comment": "subquery with an aggregation in order by that can be merged into a single route",
@@ -847,7 +863,8 @@
"user.user",
"user.user_extra"
]
- }
+ },
+ "skip_e2e": true
},
{
"comment": "Jumbled references",
@@ -889,7 +906,8 @@
"user.user",
"user.user_extra"
]
- }
+ },
+ "skip_e2e": true
},
{
"comment": "Comments",
@@ -931,7 +949,8 @@
"user.user",
"user.user_extra"
]
- }
+ },
+ "skip_e2e": true
},
{
"comment": "for update",
@@ -973,7 +992,8 @@
"user.user",
"user.user_extra"
]
- }
+ },
+ "skip_e2e": true
},
{
"comment": "Field query should work for joins select bind vars",
@@ -1018,7 +1038,8 @@
"main.unsharded",
"user.user"
]
- }
+ },
+ "skip_e2e": true
},
{
"comment": "Case preservation",
@@ -1060,12 +1081,14 @@
"user.user",
"user.user_extra"
]
- }
+ },
+ "skip_e2e": true
},
{
"comment": "syntax error",
"query": "the quick brown fox",
- "plan": "syntax error at position 4 near 'the'"
+ "plan": "syntax error at position 4 near 'the'",
+ "skip_e2e": true
},
{
"comment": "Hex number is not treated as a simple value",
@@ -1113,7 +1136,8 @@
"TablesUsed": [
"user.user"
]
- }
+ },
+ "skip_e2e": true
},
{
"comment": "Selection but make the planner explicitly use a vindex",
@@ -1164,12 +1188,14 @@
"TablesUsed": [
"user.user"
]
- }
+ },
+ "skip_e2e": true
},
{
"comment": "Vindex hint on a non-existing vindex",
"query": "select * from user use vindex (does_not_exist) where id = 1",
- "plan": "VT09021: Vindex 'does_not_exist' does not exist in table 'user.user'"
+ "plan": "VT09021: Vindex 'does_not_exist' does not exist in table 'user.user'",
+ "skip_e2e": true
},
{
"comment": "sharded limit offset",
@@ -1231,7 +1257,8 @@
"TablesUsed": [
"user.music"
]
- }
+ },
+ "skip_e2e": true
},
{
"comment": "Sharding Key Condition in Parenthesis",
@@ -1257,7 +1284,8 @@
"TablesUsed": [
"user.user"
]
- }
+ },
+ "skip_e2e": true
},
{
"comment": "Multiple parenthesized expressions",
@@ -1283,7 +1311,8 @@
"TablesUsed": [
"user.user"
]
- }
+ },
+ "skip_e2e": true
},
{
"comment": "Multiple parenthesized expressions",
@@ -1309,7 +1338,8 @@
"TablesUsed": [
"user.user"
]
- }
+ },
+ "skip_e2e": true
},
{
"comment": "Column Aliasing with Table.Column",
@@ -1387,7 +1417,8 @@
"TablesUsed": [
"user.user"
]
- }
+ },
+ "skip_e2e": true
},
{
"comment": "Column as boolean-ish",
@@ -1413,7 +1444,8 @@
"TablesUsed": [
"user.user"
]
- }
+ },
+ "skip_e2e": true
},
{
"comment": "PK as fake boolean, and column as boolean-ish",
@@ -1439,7 +1471,8 @@
"TablesUsed": [
"user.user"
]
- }
+ },
+ "skip_e2e": true
},
{
"comment": "top level subquery in select",
@@ -1484,7 +1517,8 @@
"main.unsharded",
"user.user"
]
- }
+ },
+ "skip_e2e": true
},
{
"comment": "sub-expression subquery in select",
@@ -1529,7 +1563,8 @@
"main.unsharded",
"user.user"
]
- }
+ },
+ "skip_e2e": true
},
{
"comment": "select * from derived table expands specific columns",
@@ -1571,17 +1606,20 @@
"user.user",
"user.user_extra"
]
- }
+ },
+ "skip_e2e": true
},
{
"comment": "duplicate columns not allowed in derived table",
"query": "select * from (select user.id, user_extra.id from user join user_extra) as t",
- "plan": "Duplicate column name 'id'"
+ "plan": "Duplicate column name 'id'",
+ "skip_e2e": true
},
{
"comment": "non-existent symbol in cross-shard derived table",
"query": "select t.col from (select user.id from user join user_extra) as t",
- "plan": "column 't.col' not found"
+ "plan": "column 't.col' not found",
+ "skip_e2e": true
},
{
"comment": "union with the same target shard",
@@ -1608,7 +1646,8 @@
"user.music",
"user.user"
]
- }
+ },
+ "skip_e2e": true
},
{
"comment": "union with the same target shard last_insert_id",
@@ -1635,7 +1674,8 @@
"user.music",
"user.user"
]
- }
+ },
+ "skip_e2e": true
},
{
"comment": "unsharded union in derived table",
@@ -1793,7 +1833,8 @@
"TablesUsed": [
"main.unsharded"
]
- }
+ },
+ "skip_e2e": true
},
{
"comment": "routing table on music",
@@ -1815,7 +1856,8 @@
"TablesUsed": [
"user.music"
]
- }
+ },
+ "skip_e2e": true
},
{
"comment": "testing SingleRow Projection",
@@ -1962,7 +2004,8 @@
"main.unsharded_a",
"user.user"
]
- }
+ },
+ "skip_e2e": true
},
{
"comment": "Complex expression in a subquery used in NOT IN clause of an aggregate query",
@@ -2015,7 +2058,8 @@
"main.unsharded_a",
"user.user"
]
- }
+ },
+ "skip_e2e": true
},
{
"comment": "testing SingleRow Projection with arithmetics",
@@ -2218,12 +2262,14 @@
{
"comment": "sql_calc_found_rows in sub queries",
"query": "select * from music where user_id IN (select sql_calc_found_rows * from music limit 10)",
- "plan": "Incorrect usage/placement of 'SQL_CALC_FOUND_ROWS'"
+ "plan": "Incorrect usage/placement of 'SQL_CALC_FOUND_ROWS'",
+ "skip_e2e": true
},
{
"comment": "sql_calc_found_rows in derived table",
"query": "select sql_calc_found_rows * from (select sql_calc_found_rows * from music limit 10) t limit 1",
- "plan": "Incorrect usage/placement of 'SQL_CALC_FOUND_ROWS'"
+ "plan": "Incorrect usage/placement of 'SQL_CALC_FOUND_ROWS'",
+ "skip_e2e": true
},
{
"comment": "select from unsharded keyspace into dumpfile",
@@ -2245,7 +2291,8 @@
"TablesUsed": [
"main.unsharded"
]
- }
+ },
+ "skip_e2e": true
},
{
"comment": "select from unsharded keyspace into outfile",
@@ -2267,7 +2314,8 @@
"TablesUsed": [
"main.unsharded"
]
- }
+ },
+ "skip_e2e": true
},
{
"comment": "select from unsharded keyspace into outfile s3",
@@ -2289,7 +2337,8 @@
"TablesUsed": [
"main.unsharded"
]
- }
+ },
+ "skip_e2e": true
},
{
"comment": "left join with a dual table on left - merge-able",
@@ -2500,17 +2549,20 @@
{
"comment": "Union after into outfile is incorrect",
"query": "select id from user into outfile 'out_file_name' union all select id from music",
- "plan": "syntax error at position 55 near 'union'"
+ "plan": "syntax error at position 55 near 'union'",
+ "skip_e2e": true
},
{
"comment": "Into outfile s3 in derived table is incorrect",
"query": "select id from (select id from user into outfile s3 'inner_outfile') as t2",
- "plan": "syntax error at position 41 near 'into'"
+ "plan": "syntax error at position 41 near 'into'",
+ "skip_e2e": true
},
{
"comment": "Into outfile s3 in derived table with union incorrect",
"query": "select id from (select id from user into outfile s3 'inner_outfile' union select 1) as t2",
- "plan": "syntax error at position 41 near 'into'"
+ "plan": "syntax error at position 41 near 'into'",
+ "skip_e2e": true
},
{
"comment": "select (select u.id from user as u where u.id = 1), a.id from user as a where a.id = 1",
@@ -2579,7 +2631,8 @@
"user.user",
"user.user_extra"
]
- }
+ },
+ "skip_e2e": true
},
{
"comment": "((((select 1))))",
@@ -2624,7 +2677,8 @@
"main.dual",
"user.user"
]
- }
+ },
+ "skip_e2e": true
},
{
"comment": "subquery in select expression of derived table",
@@ -2694,7 +2748,8 @@
"user.user",
"user.user_extra"
]
- }
+ },
+ "skip_e2e": true
},
{
"comment": "ORDER BY subquery",
@@ -2764,7 +2819,8 @@
"user.user",
"user.user_extra"
]
- }
+ },
+ "skip_e2e": true
},
{
"comment": "plan test for a natural character set string",
@@ -2831,7 +2887,8 @@
"user.user",
"user.user_extra"
]
- }
+ },
+ "skip_e2e": true
},
{
"comment": "Straight Join ensures specific ordering of joins",
@@ -2876,7 +2933,8 @@
"user.user",
"user.user_extra"
]
- }
+ },
+ "skip_e2e": true
},
{
"comment": "Dual query should be handled on the vtgate even with a LIMIT",
@@ -2950,7 +3008,8 @@
"user.user",
"user.user_extra"
]
- }
+ },
+ "skip_e2e": true
},
{
"comment": "Straight Join preserved in MySQL query",
@@ -2973,7 +3032,8 @@
"user.user",
"user.user_extra"
]
- }
+ },
+ "skip_e2e": true
},
{
"comment": "correlated subquery in exists clause",
@@ -3031,7 +3091,8 @@
"user.user",
"user.user_extra"
]
- }
+ },
+ "skip_e2e": true
},
{
"comment": "correlated subquery in exists clause with an order by",
@@ -3090,7 +3151,8 @@
"user.user",
"user.user_extra"
]
- }
+ },
+ "skip_e2e": true
},
{
"comment": "correlated subquery having dependencies on two tables",
@@ -3163,7 +3225,8 @@
"user.user",
"user.user_extra"
]
- }
+ },
+ "skip_e2e": true
},
{
"comment": "correlated subquery using a column twice",
@@ -3220,7 +3283,8 @@
"user.user",
"user.user_extra"
]
- }
+ },
+ "skip_e2e": true
},
{
"comment": "correlated subquery that is dependent on one side of a join, fully mergeable",
@@ -3271,7 +3335,8 @@
"user.user",
"user.user_extra"
]
- }
+ },
+ "skip_e2e": true
},
{
"comment": "union as a derived table",
@@ -3360,7 +3425,8 @@
"user.user",
"user.user_extra"
]
- }
+ },
+ "skip_e2e": true
},
{
"comment": "mergeable derived table with order by and limit",
@@ -3382,7 +3448,8 @@
"TablesUsed": [
"main.unsharded"
]
- }
+ },
+ "skip_e2e": true
},
{
"comment": "mergeable derived table with group by and limit",
@@ -3404,7 +3471,8 @@
"TablesUsed": [
"main.unsharded"
]
- }
+ },
+ "skip_e2e": true
},
{
"comment": "select user.id, trim(leading 'x' from user.name) from user",
@@ -3426,7 +3494,8 @@
"TablesUsed": [
"user.user"
]
- }
+ },
+ "skip_e2e": true
},
{
"comment": "json utility functions",
@@ -3448,7 +3517,8 @@
"TablesUsed": [
"user.user"
]
- }
+ },
+ "skip_e2e": true
},
{
"comment": "dual query with exists clause",
@@ -3546,7 +3616,8 @@
"user.user",
"user.user_extra"
]
- }
+ },
+ "skip_e2e": true
},
{
"comment": "yeah, it does not make sense, but it's valid",
@@ -3639,7 +3710,8 @@
"TablesUsed": [
"user.user"
]
- }
+ },
+ "skip_e2e": true
},
{
"comment": "groupe by with non aggregated columns and table alias",
@@ -3661,7 +3733,8 @@
"TablesUsed": [
"user.user"
]
- }
+ },
+ "skip_e2e": true
},
{
"comment": "Functions that return JSON value attributes",
@@ -3866,7 +3939,8 @@
"TablesUsed": [
"user.user"
]
- }
+ },
+ "skip_e2e": true
},
{
"comment": "gtid functions",
@@ -3934,7 +4008,8 @@
"user.user_extra",
"user.user_metadata"
]
- }
+ },
+ "skip_e2e": true
},
{
"comment": "Join across multiple tables, with conditions on different vindexes, but mergeable through join predicates",
@@ -3962,7 +4037,8 @@
"user.music_extra",
"user.user"
]
- }
+ },
+ "skip_e2e": true
},
{
"comment": "SQL_CALC_FOUND_ROWS with vindex lookup",
@@ -4073,7 +4149,8 @@
"TablesUsed": [
"user.user"
]
- }
+ },
+ "skip_e2e": true
},
{
"comment": "`None` route being merged with another route via join predicate on Vindex columns",
@@ -4122,7 +4199,8 @@
"TablesUsed": [
"user.music"
]
- }
+ },
+ "skip_e2e": true
},
{
"comment": "Subquery with `IN` condition using columns with matching lookup vindexes",
@@ -4200,7 +4278,8 @@
"TablesUsed": [
"user.music"
]
- }
+ },
+ "skip_e2e": true
},
{
"comment": "Subquery with `IN` condition using columns with matching lookup vindexes",
@@ -4314,7 +4393,8 @@
"TablesUsed": [
"user.music"
]
- }
+ },
+ "skip_e2e": true
},
{
"comment": "Mergeable scatter subquery with `GROUP BY` on unique vindex column",
@@ -4336,7 +4416,8 @@
"TablesUsed": [
"user.music"
]
- }
+ },
+ "skip_e2e": true
},
{
"comment": "Unmergeable scatter subquery with `GROUP BY` on-non vindex column",
@@ -4375,26 +4456,52 @@
},
{
"InputName": "Outer",
- "OperatorType": "Route",
+ "OperatorType": "VindexLookup",
"Variant": "IN",
"Keyspace": {
"Name": "user",
"Sharded": true
},
- "FieldQuery": "select music.id from music where 1 != 1",
- "Query": "select music.id from music where :__sq_has_values and music.id in ::__vals",
- "Table": "music",
"Values": [
"::__sq1"
],
- "Vindex": "music_user_map"
+ "Vindex": "music_user_map",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1",
+ "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals",
+ "Table": "name_user_vdx",
+ "Values": [
+ "::name"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "ByDestination",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music where 1 != 1",
+ "Query": "select music.id from music where :__sq_has_values and music.id in ::__vals",
+ "Table": "music"
+ }
+ ]
}
]
},
"TablesUsed": [
"user.music"
]
- }
+ },
+ "skip_e2e": true
},
{
"comment": "Unmergeable scatter subquery with LIMIT",
@@ -4430,26 +4537,52 @@
},
{
"InputName": "Outer",
- "OperatorType": "Route",
+ "OperatorType": "VindexLookup",
"Variant": "IN",
"Keyspace": {
"Name": "user",
"Sharded": true
},
- "FieldQuery": "select music.id from music where 1 != 1",
- "Query": "select music.id from music where :__sq_has_values and music.id in ::__vals",
- "Table": "music",
"Values": [
"::__sq1"
],
- "Vindex": "music_user_map"
+ "Vindex": "music_user_map",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1",
+ "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals",
+ "Table": "name_user_vdx",
+ "Values": [
+ "::name"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "ByDestination",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music where 1 != 1",
+ "Query": "select music.id from music where :__sq_has_values and music.id in ::__vals",
+ "Table": "music"
+ }
+ ]
}
]
},
"TablesUsed": [
"user.music"
]
- }
+ },
+ "skip_e2e": true
},
{
"comment": "Mergeable subquery with `MAX` aggregate and grouped by unique vindex",
@@ -4483,26 +4616,52 @@
},
{
"InputName": "Outer",
- "OperatorType": "Route",
+ "OperatorType": "VindexLookup",
"Variant": "IN",
"Keyspace": {
"Name": "user",
"Sharded": true
},
- "FieldQuery": "select music.id from music where 1 != 1",
- "Query": "select music.id from music where :__sq_has_values and music.id in ::__vals",
- "Table": "music",
"Values": [
"::__sq1"
],
- "Vindex": "music_user_map"
+ "Vindex": "music_user_map",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1",
+ "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals",
+ "Table": "name_user_vdx",
+ "Values": [
+ "::name"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "ByDestination",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music where 1 != 1",
+ "Query": "select music.id from music where :__sq_has_values and music.id in ::__vals",
+ "Table": "music"
+ }
+ ]
}
]
},
"TablesUsed": [
"user.music"
]
- }
+ },
+ "skip_e2e": true
},
{
"comment": "Unmergeable subquery with `MAX` aggregate",
@@ -4543,19 +4702,44 @@
},
{
"InputName": "Outer",
- "OperatorType": "Route",
+ "OperatorType": "VindexLookup",
"Variant": "IN",
"Keyspace": {
"Name": "user",
"Sharded": true
},
- "FieldQuery": "select music.id from music where 1 != 1",
- "Query": "select music.id from music where :__sq_has_values and music.id in ::__vals",
- "Table": "music",
"Values": [
"::__sq1"
],
- "Vindex": "music_user_map"
+ "Vindex": "music_user_map",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1",
+ "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals",
+ "Table": "name_user_vdx",
+ "Values": [
+ "::name"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "ByDestination",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music where 1 != 1",
+ "Query": "select music.id from music where :__sq_has_values and music.id in ::__vals",
+ "Table": "music"
+ }
+ ]
}
]
},
@@ -4596,19 +4780,44 @@
},
{
"InputName": "Outer",
- "OperatorType": "Route",
+ "OperatorType": "VindexLookup",
"Variant": "IN",
"Keyspace": {
"Name": "user",
"Sharded": true
},
- "FieldQuery": "select music.id from music where 1 != 1",
- "Query": "select music.id from music where :__sq_has_values and music.id in ::__vals",
- "Table": "music",
"Values": [
"::__sq1"
],
- "Vindex": "music_user_map"
+ "Vindex": "music_user_map",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1",
+ "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals",
+ "Table": "name_user_vdx",
+ "Values": [
+ "::name"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "ByDestination",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music where 1 != 1",
+ "Query": "select music.id from music where :__sq_has_values and music.id in ::__vals",
+ "Table": "music"
+ }
+ ]
}
]
},
@@ -4649,26 +4858,52 @@
},
{
"InputName": "Outer",
- "OperatorType": "Route",
+ "OperatorType": "VindexLookup",
"Variant": "IN",
"Keyspace": {
"Name": "user",
"Sharded": true
},
- "FieldQuery": "select music.id from music where 1 != 1",
- "Query": "select music.id from music where :__sq_has_values and music.id in ::__vals",
- "Table": "music",
"Values": [
"::__sq1"
],
- "Vindex": "music_user_map"
+ "Vindex": "music_user_map",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1",
+ "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals",
+ "Table": "name_user_vdx",
+ "Values": [
+ "::name"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "ByDestination",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music where 1 != 1",
+ "Query": "select music.id from music where :__sq_has_values and music.id in ::__vals",
+ "Table": "music"
+ }
+ ]
}
]
},
"TablesUsed": [
"user.music"
]
- }
+ },
+ "skip_e2e": true
},
{
"comment": "Mergeable subquery with multiple levels of derived statements",
@@ -4760,26 +4995,52 @@
},
{
"InputName": "Outer",
- "OperatorType": "Route",
+ "OperatorType": "VindexLookup",
"Variant": "IN",
"Keyspace": {
"Name": "user",
"Sharded": true
},
- "FieldQuery": "select music.id from music where 1 != 1",
- "Query": "select music.id from music where :__sq_has_values and music.id in ::__vals",
- "Table": "music",
"Values": [
"::__sq1"
],
- "Vindex": "music_user_map"
+ "Vindex": "music_user_map",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1",
+ "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals",
+ "Table": "name_user_vdx",
+ "Values": [
+ "::name"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "ByDestination",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music where 1 != 1",
+ "Query": "select music.id from music where :__sq_has_values and music.id in ::__vals",
+ "Table": "music"
+ }
+ ]
}
]
},
"TablesUsed": [
"user.music"
]
- }
+ },
+ "skip_e2e": true
},
{
"comment": "Unmergeable subquery with multiple levels of derived statements",
@@ -4815,26 +5076,52 @@
},
{
"InputName": "Outer",
- "OperatorType": "Route",
+ "OperatorType": "VindexLookup",
"Variant": "IN",
"Keyspace": {
"Name": "user",
"Sharded": true
},
- "FieldQuery": "select music.id from music where 1 != 1",
- "Query": "select music.id from music where :__sq_has_values and music.id in ::__vals",
- "Table": "music",
"Values": [
"::__sq1"
],
- "Vindex": "music_user_map"
+ "Vindex": "music_user_map",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1",
+ "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals",
+ "Table": "name_user_vdx",
+ "Values": [
+ "::name"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "ByDestination",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music where 1 != 1",
+ "Query": "select music.id from music where :__sq_has_values and music.id in ::__vals",
+ "Table": "music"
+ }
+ ]
}
]
},
"TablesUsed": [
"user.music"
]
- }
+ },
+ "skip_e2e": true
},
{
"comment": "`None` subquery as top level predicate - outer query changes from `Scatter` to `None` on merge",
@@ -5033,7 +5320,8 @@
"user.user",
"user.user_extra"
]
- }
+ },
+ "skip_e2e": true
},
{
"comment": "select user.a, t.b from user join (select id, count(*) b, req from user_extra group by req, id) as t on user.id = t.id",
@@ -5097,7 +5385,8 @@
"user.user",
"user.user_extra"
]
- }
+ },
+ "skip_e2e": true
},
{
"comment": "SELECT music.id FROM (SELECT MAX(id) as maxt FROM music WHERE music.user_id = 5) other JOIN music ON other.maxt = music.id",
@@ -5220,7 +5509,8 @@
"main.dual",
"main.unsharded_a"
]
- }
+ },
+ "skip_e2e": true
},
{
"comment": "subquery having join table on clause, using column reference of outer select table",
@@ -5269,7 +5559,8 @@
"main.unsharded",
"main.unsharded_a"
]
- }
+ },
+ "skip_e2e": true
},
{
"comment": "ALL modifier on unsharded table works well",
@@ -5292,7 +5583,8 @@
"main.unsharded",
"main.unsharded_a"
]
- }
+ },
+ "skip_e2e": true
},
{
"comment": "allow last_insert_id with argument",
@@ -5337,7 +5629,8 @@
"user.music_extra",
"user.user"
]
- }
+ },
+ "skip_e2e": true
},
{
"comment": "Query with non-plannable lookup vindex",
@@ -5363,7 +5656,8 @@
"TablesUsed": [
"user.user_metadata"
]
- }
+ },
+ "skip_e2e": true
},
{
"comment": "join query with lookup and join on different vindex column",
@@ -5415,7 +5709,8 @@
"user.user",
"user.user_metadata"
]
- }
+ },
+ "skip_e2e": true
},
{
"comment": "pick email as vindex lookup",
@@ -5425,7 +5720,7 @@
"Original": "select * from customer where email = 'a@mail.com'",
"Instructions": {
"OperatorType": "VindexLookup",
- "Variant": "Equal",
+ "Variant": "EqualUnique",
"Keyspace": {
"Name": "user",
"Sharded": true
@@ -5510,7 +5805,8 @@
"TablesUsed": [
"user.customer"
]
- }
+ },
+ "skip_e2e": true
},
{
"comment": "email vindex is costly than phone vindex - but phone vindex is backfiling hence ignored",
@@ -5520,7 +5816,7 @@
"Original": "select * from customer where email = 'a@mail.com' and phone = 123456",
"Instructions": {
"OperatorType": "VindexLookup",
- "Variant": "Equal",
+ "Variant": "EqualUnique",
"Keyspace": {
"Name": "user",
"Sharded": true
@@ -5571,7 +5867,7 @@
"Original": "select * from customer where phone = 123456 and email = 'a@mail.com'",
"Instructions": {
"OperatorType": "VindexLookup",
- "Variant": "Equal",
+ "Variant": "EqualUnique",
"Keyspace": {
"Name": "user",
"Sharded": true
@@ -5634,7 +5930,8 @@
"TablesUsed": [
"user.samecolvin"
]
- }
+ },
+ "skip_e2e": true
},
{
"comment": "column with qualifier is correctly used",
@@ -5677,7 +5974,8 @@
"user.user",
"user.user_extra"
]
- }
+ },
+ "skip_e2e": true
},
{
"comment": "Derived tables going to a single shard still need to expand derived table columns",
@@ -5722,7 +6020,8 @@
"main.unsharded",
"user.user"
]
- }
+ },
+ "skip_e2e": true
},
{
"comment": "column name aliases in outer join queries",
@@ -5777,7 +6076,8 @@
"user.user",
"user.user_extra"
]
- }
+ },
+ "skip_e2e": true
},
{
"comment": "Over clause works for unsharded tables",
@@ -5799,7 +6099,8 @@
"TablesUsed": [
"main.unsharded_a"
]
- }
+ },
+ "skip_e2e": true
},
{
"comment": "join with derived table with alias and join condition - merge into route",
diff --git a/go/vt/vtgate/planbuilder/testdata/union_cases.json b/go/vt/vtgate/planbuilder/testdata/union_cases.json
index 7feabb0a698..2927c1c6093 100644
--- a/go/vt/vtgate/planbuilder/testdata/union_cases.json
+++ b/go/vt/vtgate/planbuilder/testdata/union_cases.json
@@ -447,34 +447,84 @@
"OperatorType": "Concatenate",
"Inputs": [
{
- "OperatorType": "Route",
+ "OperatorType": "VindexLookup",
"Variant": "EqualUnique",
"Keyspace": {
"Name": "user",
"Sharded": true
},
- "FieldQuery": "select 1 from music where 1 != 1",
- "Query": "select distinct 1 from music where id = 1",
- "Table": "music",
"Values": [
"1"
],
- "Vindex": "music_user_map"
+ "Vindex": "music_user_map",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1",
+ "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals",
+ "Table": "name_user_vdx",
+ "Values": [
+ "::name"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "ByDestination",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from music where 1 != 1",
+ "Query": "select distinct 1 from music where id = 1",
+ "Table": "music"
+ }
+ ]
},
{
- "OperatorType": "Route",
+ "OperatorType": "VindexLookup",
"Variant": "EqualUnique",
"Keyspace": {
"Name": "user",
"Sharded": true
},
- "FieldQuery": "select 1 from music where 1 != 1",
- "Query": "select distinct 1 from music where id = 2",
- "Table": "music",
"Values": [
"2"
],
- "Vindex": "music_user_map"
+ "Vindex": "music_user_map",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1",
+ "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals",
+ "Table": "name_user_vdx",
+ "Values": [
+ "::name"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "ByDestination",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from music where 1 != 1",
+ "Query": "select distinct 1 from music where id = 2",
+ "Table": "music"
+ }
+ ]
}
]
}
diff --git a/go/vt/vtgate/planbuilder/testdata/vschemas/schema.json b/go/vt/vtgate/planbuilder/testdata/vschemas/schema.json
index 4fe275f2398..a5de9d3697e 100644
--- a/go/vt/vtgate/planbuilder/testdata/vschemas/schema.json
+++ b/go/vt/vtgate/planbuilder/testdata/vschemas/schema.json
@@ -58,34 +58,52 @@
"sharded": true,
"vindexes": {
"user_index": {
- "type": "hash_test",
+ "type": "hash",
"owner": "user"
},
"kid_index": {
- "type": "hash_test",
+ "type": "hash",
"owner": "multicolvin"
},
+ "hash": {
+ "type": "hash"
+ },
"user_md5_index": {
"type": "unicode_loose_md5"
},
"music_user_map": {
- "type": "lookup_test",
- "owner": "music"
+ "type": "lookup_unique",
+ "owner": "music",
+ "params": {
+ "table": "name_user_vdx",
+ "from": "name",
+ "to": "keyspace_id"
+ }
},
"cola_map": {
- "type": "lookup_test",
- "owner": "multicolvin"
+ "type": "lookup_unique",
+ "owner": "multicolvin",
+ "params": {
+ "table": "cola_map",
+ "from": "cola",
+ "to": "keyspace_id"
+ }
},
"colb_colc_map": {
- "type": "lookup_test",
- "owner": "multicolvin"
+ "type": "lookup_unique",
+ "owner": "multicolvin",
+ "params": {
+ "table": "colb_colc_map",
+ "from": "colb,colc",
+ "to": "keyspace_id"
+ }
},
"cola_kid_map": {
- "type": "lookup_test",
+ "type": "lookup_unique",
"owner": "overlap_vindex"
},
"name_user_map": {
- "type": "name_lkp_test",
+ "type": "lookup",
"owner": "user",
"params": {
"table": "name_user_vdx",
@@ -94,42 +112,56 @@
}
},
"email_user_map": {
- "type": "lookup_test",
+ "type": "lookup_unique",
"owner": "user_metadata"
},
"address_user_map": {
- "type": "lookup_test",
+ "type": "lookup_unique",
"owner": "user_metadata"
},
"costly_map": {
- "type": "costly",
- "owner": "user"
+ "type": "lookup_cost",
+ "owner": "user",
+ "params": {
+ "table": "costly_map",
+ "from": "costly",
+ "to": "keyspace_id",
+ "cost": "100"
+ }
},
"hash_dup": {
- "type": "hash_test",
+ "type": "hash",
"owner": "user"
},
"vindex1": {
- "type": "hash_test",
+ "type": "hash",
"owner": "samecolvin"
},
"vindex2": {
- "type": "lookup_test",
+ "type": "lookup_unique",
"owner": "samecolvin"
},
"cfc": {
"type": "cfc"
},
"multicolIdx": {
- "type": "multiCol_test"
+ "type": "multicol",
+ "params": {
+ "column_count": "2"
+ }
},
"colc_map": {
- "type": "lookup_test",
+ "type": "lookup_unique",
"owner": "multicol_tbl"
},
"name_muticoltbl_map": {
- "type": "name_lkp_test",
- "owner": "multicol_tbl"
+ "type": "lookup",
+ "owner": "multicol_tbl",
+ "params": {
+ "table": "name_user_vdx",
+ "from": "name",
+ "to": "keyspace_id"
+ }
},
"non_planable_user_map": {
"type": "lookup_unicodeloosemd5_hash",
@@ -141,7 +173,7 @@
"owner": "user_metadata"
},
"lkp_shard_map": {
- "type": "name_lkp_test",
+ "type": "lookup_unique",
"owner": "mixed_tbl",
"params": {
"table": "lkp_shard_vdx",
@@ -153,18 +185,18 @@
"type": "xxhash"
},
"unq_lkp_bf_vdx": {
- "type": "unq_lkp_test",
+ "type": "lookup_unique",
"owner": "customer",
"params": {
"table": "unq_lkp_idx",
- "from": " ",
+ "from": "unq_key",
"to": "keyspace_id",
"cost": "100",
"write_only": "true"
}
},
"unq_lkp_vdx": {
- "type": "unq_lkp_test",
+ "type": "lookup_unique",
"owner": "customer",
"params": {
"table": "unq_lkp_idx",
@@ -174,11 +206,11 @@
}
},
"lkp_bf_vdx": {
- "type": "name_lkp_test",
+ "type": "lookup_unique",
"owner": "customer",
"params": {
"table": "lkp_shard_vdx",
- "from": " ",
+ "from": "unq_key",
"to": "keyspace_id",
"write_only": "true"
}
@@ -352,6 +384,22 @@
}
]
},
+ "cola_map": {
+ "column_vindexes": [
+ {
+ "column": "cola",
+ "name": "hash"
+ }
+ ]
+ },
+ "colb_colc_map": {
+ "column_vindexes": [
+ {
+ "column": "colb",
+ "name": "hash"
+ }
+ ]
+ },
"overlap_vindex": {
"column_vindexes": [
{
@@ -462,6 +510,14 @@
}
]
},
+ "costly_map": {
+ "column_vindexes": [
+ {
+ "column": "name",
+ "name": "user_md5_index"
+ }
+ ]
+ },
"mixed_tbl": {
"column_vindexes": [
{
@@ -641,7 +697,10 @@
"type": "hash_test"
},
"multicolIdx": {
- "type": "multiCol_test"
+ "type": "multicol",
+ "params": {
+ "column_count": "3"
+ }
}
},
"tables": {
diff --git a/go/vt/vtgate/planbuilder/vexplain.go b/go/vt/vtgate/planbuilder/vexplain.go
index f66af7bfc33..7aed1e48884 100644
--- a/go/vt/vtgate/planbuilder/vexplain.go
+++ b/go/vt/vtgate/planbuilder/vexplain.go
@@ -26,6 +26,7 @@ import (
vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc"
"vitess.io/vitess/go/vt/sqlparser"
"vitess.io/vitess/go/vt/vterrors"
+ "vitess.io/vitess/go/vt/vtgate/dynamicconfig"
"vitess.io/vitess/go/vt/vtgate/engine"
"vitess.io/vitess/go/vt/vtgate/planbuilder/operators"
"vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext"
@@ -37,15 +38,15 @@ func buildVExplainPlan(
vexplainStmt *sqlparser.VExplainStmt,
reservedVars *sqlparser.ReservedVars,
vschema plancontext.VSchema,
- enableOnlineDDL, enableDirectDDL bool,
+ cfg dynamicconfig.DDL,
) (*planResult, error) {
switch vexplainStmt.Type {
case sqlparser.QueriesVExplainType, sqlparser.AllVExplainType:
- return buildVExplainLoggingPlan(ctx, vexplainStmt, reservedVars, vschema, enableOnlineDDL, enableDirectDDL)
+ return buildVExplainLoggingPlan(ctx, vexplainStmt, reservedVars, vschema, cfg)
case sqlparser.PlanVExplainType:
- return buildVExplainVtgatePlan(ctx, vexplainStmt.Statement, reservedVars, vschema, enableOnlineDDL, enableDirectDDL)
+ return buildVExplainVtgatePlan(ctx, vexplainStmt.Statement, reservedVars, vschema, cfg)
case sqlparser.TraceVExplainType:
- return buildVExplainTracePlan(ctx, vexplainStmt.Statement, reservedVars, vschema, enableOnlineDDL, enableDirectDDL)
+ return buildVExplainTracePlan(ctx, vexplainStmt.Statement, reservedVars, vschema, cfg)
case sqlparser.KeysVExplainType:
return buildVExplainKeysPlan(vexplainStmt.Statement, vschema)
}
@@ -92,8 +93,8 @@ func explainTabPlan(explain *sqlparser.ExplainTab, vschema plancontext.VSchema)
}, singleTable(keyspace.Name, explain.Table.Name.String())), nil
}
-func buildVExplainVtgatePlan(ctx context.Context, explainStatement sqlparser.Statement, reservedVars *sqlparser.ReservedVars, vschema plancontext.VSchema, enableOnlineDDL, enableDirectDDL bool) (*planResult, error) {
- innerInstruction, err := createInstructionFor(ctx, sqlparser.String(explainStatement), explainStatement, reservedVars, vschema, enableOnlineDDL, enableDirectDDL)
+func buildVExplainVtgatePlan(ctx context.Context, explainStatement sqlparser.Statement, reservedVars *sqlparser.ReservedVars, vschema plancontext.VSchema, cfg dynamicconfig.DDL) (*planResult, error) {
+ innerInstruction, err := createInstructionFor(ctx, sqlparser.String(explainStatement), explainStatement, reservedVars, vschema, cfg)
if err != nil {
return nil, err
}
@@ -124,8 +125,8 @@ func buildVExplainKeysPlan(statement sqlparser.Statement, vschema plancontext.VS
return getJsonResultPlan(result, "ColumnUsage")
}
-func buildVExplainLoggingPlan(ctx context.Context, explain *sqlparser.VExplainStmt, reservedVars *sqlparser.ReservedVars, vschema plancontext.VSchema, enableOnlineDDL, enableDirectDDL bool) (*planResult, error) {
- input, err := createInstructionFor(ctx, sqlparser.String(explain.Statement), explain.Statement, reservedVars, vschema, enableOnlineDDL, enableDirectDDL)
+func buildVExplainLoggingPlan(ctx context.Context, explain *sqlparser.VExplainStmt, reservedVars *sqlparser.ReservedVars, vschema plancontext.VSchema, cfg dynamicconfig.DDL) (*planResult, error) {
+ input, err := createInstructionFor(ctx, sqlparser.String(explain.Statement), explain.Statement, reservedVars, vschema, cfg)
if err != nil {
return nil, err
}
@@ -188,8 +189,8 @@ func explainPlan(explain *sqlparser.ExplainStmt, reservedVars *sqlparser.Reserve
}, tables...), nil
}
-func buildVExplainTracePlan(ctx context.Context, explainStatement sqlparser.Statement, reservedVars *sqlparser.ReservedVars, vschema plancontext.VSchema, enableOnlineDDL, enableDirectDDL bool) (*planResult, error) {
- innerInstruction, err := createInstructionFor(ctx, sqlparser.String(explainStatement), explainStatement, reservedVars, vschema, enableOnlineDDL, enableDirectDDL)
+func buildVExplainTracePlan(ctx context.Context, explainStatement sqlparser.Statement, reservedVars *sqlparser.ReservedVars, vschema plancontext.VSchema, cfg dynamicconfig.DDL) (*planResult, error) {
+ innerInstruction, err := createInstructionFor(ctx, sqlparser.String(explainStatement), explainStatement, reservedVars, vschema, cfg)
if err != nil {
return nil, err
}
diff --git a/go/vt/vtgate/querylogz.go b/go/vt/vtgate/querylogz.go
index 7c72e950d4a..05d301f28be 100644
--- a/go/vt/vtgate/querylogz.go
+++ b/go/vt/vtgate/querylogz.go
@@ -20,15 +20,15 @@ import (
"net/http"
"strconv"
"strings"
- "text/template"
"time"
- "vitess.io/vitess/go/vt/vtgate/logstats"
+ "github.com/google/safehtml/template"
"vitess.io/vitess/go/acl"
"vitess.io/vitess/go/vt/log"
"vitess.io/vitess/go/vt/logz"
"vitess.io/vitess/go/vt/sqlparser"
+ "vitess.io/vitess/go/vt/vtgate/logstats"
)
var (
diff --git a/go/vt/vtgate/querylogz_test.go b/go/vt/vtgate/querylogz_test.go
index 3cecb983b3f..9236b2ac840 100644
--- a/go/vt/vtgate/querylogz_test.go
+++ b/go/vt/vtgate/querylogz_test.go
@@ -35,7 +35,7 @@ import (
func TestQuerylogzHandlerFormatting(t *testing.T) {
req, _ := http.NewRequest("GET", "/querylogz?timeout=10&limit=1", nil)
- logStats := logstats.NewLogStats(context.Background(), "Execute", "select name from test_table limit 1000", "suuid", nil)
+ logStats := logstats.NewLogStats(context.Background(), "Execute", "select name, 'inject ' from test_table limit 1000", "suuid", nil)
logStats.StmtType = "select"
logStats.RowsAffected = 1000
logStats.ShardQueries = 1
@@ -64,7 +64,7 @@ func TestQuerylogzHandlerFormatting(t *testing.T) {
`0.002 | `,
`0.003 | `,
`select | `,
- `select name from test_table limit 1000 | `,
+ regexp.QuoteMeta(`select name,​ 'inject <script>alert()​;</script>' from test_table limit 1000 | `),
`1 | `,
`1000 | `,
` | `,
@@ -94,7 +94,7 @@ func TestQuerylogzHandlerFormatting(t *testing.T) {
`0.002 | `,
`0.003 | `,
`select | `,
- `select name from test_table limit 1000 | `,
+ regexp.QuoteMeta(`select name,​ 'inject <script>alert()​;</script>' from test_table limit 1000 | `),
`1 | `,
`1000 | `,
` | `,
@@ -124,7 +124,7 @@ func TestQuerylogzHandlerFormatting(t *testing.T) {
`0.002 | `,
`0.003 | `,
`select | `,
- `select name from test_table limit 1000 | `,
+ regexp.QuoteMeta(`select name,​ 'inject <script>alert()​;</script>' from test_table limit 1000 | `),
`1 | `,
`1000 | `,
` | `,
diff --git a/go/vt/vtgate/safe_session_test.go b/go/vt/vtgate/safe_session_test.go
deleted file mode 100644
index ce681fe7fd3..00000000000
--- a/go/vt/vtgate/safe_session_test.go
+++ /dev/null
@@ -1,100 +0,0 @@
-/*
-Copyright 2020 The Vitess Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package vtgate
-
-import (
- "reflect"
- "testing"
- "time"
-
- "github.com/stretchr/testify/assert"
- "github.com/stretchr/testify/require"
-
- querypb "vitess.io/vitess/go/vt/proto/query"
- topodatapb "vitess.io/vitess/go/vt/proto/topodata"
- vtgatepb "vitess.io/vitess/go/vt/proto/vtgate"
-)
-
-func TestFailToMultiShardWhenSetToSingleDb(t *testing.T) {
- session := NewSafeSession(&vtgatepb.Session{
- InTransaction: true, TransactionMode: vtgatepb.TransactionMode_SINGLE,
- })
-
- sess0 := &vtgatepb.Session_ShardSession{
- Target: &querypb.Target{Keyspace: "keyspace", Shard: "0"},
- TabletAlias: &topodatapb.TabletAlias{Cell: "cell", Uid: 0},
- TransactionId: 1,
- }
- sess1 := &vtgatepb.Session_ShardSession{
- Target: &querypb.Target{Keyspace: "keyspace", Shard: "1"},
- TabletAlias: &topodatapb.TabletAlias{Cell: "cell", Uid: 1},
- TransactionId: 1,
- }
-
- err := session.AppendOrUpdate(sess0, vtgatepb.TransactionMode_SINGLE)
- require.NoError(t, err)
- err = session.AppendOrUpdate(sess1, vtgatepb.TransactionMode_SINGLE)
- require.Error(t, err)
-}
-
-func TestPrequeries(t *testing.T) {
- session := NewSafeSession(&vtgatepb.Session{
- SystemVariables: map[string]string{
- "s1": "'apa'",
- "s2": "42",
- },
- })
-
- want := []string{"set s1 = 'apa', s2 = 42"}
- preQueries := session.SetPreQueries()
-
- if !reflect.DeepEqual(want, preQueries) {
- t.Errorf("got %v but wanted %v", preQueries, want)
- }
-}
-
-func TestTimeZone(t *testing.T) {
- testCases := []struct {
- tz string
- want string
- }{
- {
- tz: "'Europe/Amsterdam'",
- want: "Europe/Amsterdam",
- },
- {
- tz: "'+02:00'",
- want: "UTC+02:00",
- },
- {
- tz: "foo",
- want: (*time.Location)(nil).String(),
- },
- }
-
- for _, tc := range testCases {
- t.Run(tc.tz, func(t *testing.T) {
- session := NewSafeSession(&vtgatepb.Session{
- SystemVariables: map[string]string{
- "time_zone": tc.tz,
- },
- })
-
- assert.Equal(t, tc.want, session.TimeZone().String())
- })
- }
-}
diff --git a/go/vt/vtgate/scatter_conn.go b/go/vt/vtgate/scatter_conn.go
index f7db598127e..6e2cf9ad8ba 100644
--- a/go/vt/vtgate/scatter_conn.go
+++ b/go/vt/vtgate/scatter_conn.go
@@ -24,26 +24,25 @@ import (
"sync/atomic"
"time"
- "vitess.io/vitess/go/mysql/sqlerror"
- "vitess.io/vitess/go/vt/sqlparser"
-
"google.golang.org/protobuf/proto"
+ "vitess.io/vitess/go/mysql/sqlerror"
"vitess.io/vitess/go/sqltypes"
"vitess.io/vitess/go/stats"
"vitess.io/vitess/go/vt/concurrency"
"vitess.io/vitess/go/vt/discovery"
"vitess.io/vitess/go/vt/log"
+ querypb "vitess.io/vitess/go/vt/proto/query"
+ topodatapb "vitess.io/vitess/go/vt/proto/topodata"
+ vtgatepb "vitess.io/vitess/go/vt/proto/vtgate"
+ vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc"
+ "vitess.io/vitess/go/vt/sqlparser"
"vitess.io/vitess/go/vt/srvtopo"
"vitess.io/vitess/go/vt/topo/topoproto"
"vitess.io/vitess/go/vt/vterrors"
"vitess.io/vitess/go/vt/vtgate/engine"
+ econtext "vitess.io/vitess/go/vt/vtgate/executorcontext"
"vitess.io/vitess/go/vt/vttablet/queryservice"
-
- querypb "vitess.io/vitess/go/vt/proto/query"
- topodatapb "vitess.io/vitess/go/vt/proto/topodata"
- vtgatepb "vitess.io/vitess/go/vt/proto/vtgate"
- vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc"
)
// ScatterConn is used for executing queries across
@@ -73,13 +72,10 @@ type shardActionFunc func(rs *srvtopo.ResolvedShard, i int) error
type shardActionTransactionFunc func(rs *srvtopo.ResolvedShard, i int, shardActionInfo *shardActionInfo) (*shardActionInfo, error)
type (
- resultsObserver interface {
- observe(*sqltypes.Result)
- }
nullResultsObserver struct{}
)
-func (nullResultsObserver) observe(*sqltypes.Result) {}
+func (nullResultsObserver) Observe(*sqltypes.Result) {}
// NewScatterConn creates a new ScatterConn.
func NewScatterConn(statsName string, txConn *TxConn, gw *TabletGateway) *ScatterConn {
@@ -108,7 +104,7 @@ func (stc *ScatterConn) startAction(name string, target *querypb.Target) (time.T
return startTime, statsKey
}
-func (stc *ScatterConn) endAction(startTime time.Time, allErrors *concurrency.AllErrorRecorder, statsKey []string, err *error, session *SafeSession) {
+func (stc *ScatterConn) endAction(startTime time.Time, allErrors *concurrency.AllErrorRecorder, statsKey []string, err *error, session *econtext.SafeSession) {
if *err != nil {
allErrors.RecordError(*err)
// Don't increment the error counter for duplicate
@@ -152,10 +148,10 @@ func (stc *ScatterConn) ExecuteMultiShard(
primitive engine.Primitive,
rss []*srvtopo.ResolvedShard,
queries []*querypb.BoundQuery,
- session *SafeSession,
+ session *econtext.SafeSession,
autocommit bool,
ignoreMaxMemoryRows bool,
- resultsObserver resultsObserver,
+ resultsObserver econtext.ResultsObserver,
) (qr *sqltypes.Result, errs []error) {
if len(rss) != len(queries) {
@@ -166,7 +162,7 @@ func (stc *ScatterConn) ExecuteMultiShard(
var mu sync.Mutex
qr = new(sqltypes.Result)
- if session.InLockSession() && session.TriggerLockHeartBeat() {
+ if session.InLockSession() && triggerLockHeartBeat(session) {
go stc.runLockQuery(ctx, session)
}
@@ -224,6 +220,7 @@ func (stc *ScatterConn) ExecuteMultiShard(
retryRequest(func() {
// we seem to have lost our connection. it was a reserved connection, let's try to recreate it
info.actionNeeded = reserve
+ info.ignoreOldSession = true
var state queryservice.ReservedState
state, innerqr, err = qs.ReserveExecute(ctx, rs.Target, session.SetPreQueries(), queries[i].Sql, queries[i].BindVariables, 0 /*transactionId*/, opts)
reservedID = state.ReservedID
@@ -239,6 +236,7 @@ func (stc *ScatterConn) ExecuteMultiShard(
retryRequest(func() {
// we seem to have lost our connection. it was a reserved connection, let's try to recreate it
info.actionNeeded = reserveBegin
+ info.ignoreOldSession = true
var state queryservice.ReservedTransactionState
state, innerqr, err = qs.ReserveBeginExecute(ctx, rs.Target, session.SetPreQueries(), session.SavePoints(), queries[i].Sql, queries[i].BindVariables, opts)
transactionID = state.TransactionID
@@ -260,10 +258,10 @@ func (stc *ScatterConn) ExecuteMultiShard(
default:
return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "[BUG] unexpected actionNeeded on query execution: %v", info.actionNeeded)
}
- session.logging.log(primitive, rs.Target, rs.Gateway, queries[i].Sql, info.actionNeeded == begin || info.actionNeeded == reserveBegin, queries[i].BindVariables)
+ session.Log(primitive, rs.Target, rs.Gateway, queries[i].Sql, info.actionNeeded == begin || info.actionNeeded == reserveBegin, queries[i].BindVariables)
// We need to new shard info irrespective of the error.
- newInfo := info.updateTransactionAndReservedID(transactionID, reservedID, alias)
+ newInfo := info.updateTransactionAndReservedID(transactionID, reservedID, alias, innerqr)
if err != nil {
return newInfo, err
}
@@ -271,7 +269,7 @@ func (stc *ScatterConn) ExecuteMultiShard(
defer mu.Unlock()
if innerqr != nil {
- resultsObserver.observe(innerqr)
+ resultsObserver.Observe(innerqr)
}
// Don't append more rows if row count is exceeded.
@@ -289,7 +287,13 @@ func (stc *ScatterConn) ExecuteMultiShard(
return qr, allErrors.GetErrors()
}
-func (stc *ScatterConn) runLockQuery(ctx context.Context, session *SafeSession) {
+func triggerLockHeartBeat(session *econtext.SafeSession) bool {
+ now := time.Now().Unix()
+ lastHeartbeat := session.GetLockHeartbeat()
+ return now-lastHeartbeat >= int64(lockHeartbeatTime.Seconds())
+}
+
+func (stc *ScatterConn) runLockQuery(ctx context.Context, session *econtext.SafeSession) {
rs := &srvtopo.ResolvedShard{Target: session.LockSession.Target, Gateway: stc.gateway}
query := &querypb.BoundQuery{Sql: "select 1", BindVariables: nil}
_, lockErr := stc.ExecuteLock(ctx, rs, query, session, sqlparser.IsUsedLock)
@@ -298,7 +302,7 @@ func (stc *ScatterConn) runLockQuery(ctx context.Context, session *SafeSession)
}
}
-func checkAndResetShardSession(info *shardActionInfo, err error, session *SafeSession, target *querypb.Target) reset {
+func checkAndResetShardSession(info *shardActionInfo, err error, session *econtext.SafeSession, target *querypb.Target) reset {
retry := none
if info.reservedID != 0 && info.transactionID == 0 {
if wasConnectionClosed(err) {
@@ -314,7 +318,7 @@ func checkAndResetShardSession(info *shardActionInfo, err error, session *SafeSe
return retry
}
-func getQueryService(ctx context.Context, rs *srvtopo.ResolvedShard, info *shardActionInfo, session *SafeSession, skipReset bool) (queryservice.QueryService, error) {
+func getQueryService(ctx context.Context, rs *srvtopo.ResolvedShard, info *shardActionInfo, session *econtext.SafeSession, skipReset bool) (queryservice.QueryService, error) {
if info.alias == nil {
return rs.Gateway, nil
}
@@ -365,18 +369,18 @@ func (stc *ScatterConn) StreamExecuteMulti(
query string,
rss []*srvtopo.ResolvedShard,
bindVars []map[string]*querypb.BindVariable,
- session *SafeSession,
+ session *econtext.SafeSession,
autocommit bool,
callback func(reply *sqltypes.Result) error,
- resultsObserver resultsObserver,
+ resultsObserver econtext.ResultsObserver,
) []error {
- if session.InLockSession() && session.TriggerLockHeartBeat() {
+ if session.InLockSession() && triggerLockHeartBeat(session) {
go stc.runLockQuery(ctx, session)
}
observedCallback := func(reply *sqltypes.Result) error {
if reply != nil {
- resultsObserver.observe(reply)
+ resultsObserver.Observe(reply)
}
return callback(reply)
}
@@ -469,10 +473,10 @@ func (stc *ScatterConn) StreamExecuteMulti(
default:
return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "[BUG] unexpected actionNeeded on query execution: %v", info.actionNeeded)
}
- session.logging.log(primitive, rs.Target, rs.Gateway, query, info.actionNeeded == begin || info.actionNeeded == reserveBegin, bindVars[i])
+ session.Log(primitive, rs.Target, rs.Gateway, query, info.actionNeeded == begin || info.actionNeeded == reserveBegin, bindVars[i])
- // We need to new shard info irrespective of the error.
- newInfo := info.updateTransactionAndReservedID(transactionID, reservedID, alias)
+ // We need the new shard info irrespective of the error.
+ newInfo := info.updateTransactionAndReservedID(transactionID, reservedID, alias, nil)
if err != nil {
return newInfo, err
}
@@ -604,7 +608,7 @@ func (stc *ScatterConn) multiGo(
startTime, statsKey := stc.startAction(name, rs.Target)
// Send a dummy session.
// TODO(sougou): plumb a real session through this call.
- defer stc.endAction(startTime, allErrors, statsKey, &err, NewSafeSession(nil))
+ defer stc.endAction(startTime, allErrors, statsKey, &err, econtext.NewSafeSession(nil))
err = action(rs, i)
}
@@ -646,7 +650,7 @@ func (stc *ScatterConn) multiGoTransaction(
ctx context.Context,
name string,
rss []*srvtopo.ResolvedShard,
- session *SafeSession,
+ session *econtext.SafeSession,
autocommit bool,
action shardActionTransactionFunc,
) (allErrors *concurrency.AllErrorRecorder) {
@@ -662,21 +666,24 @@ func (stc *ScatterConn) multiGoTransaction(
startTime, statsKey := stc.startAction(name, rs.Target)
defer stc.endAction(startTime, allErrors, statsKey, &err, session)
- shardActionInfo, err := actionInfo(ctx, rs.Target, session, autocommit, stc.txConn.mode)
+ info, shardSession, err := actionInfo(ctx, rs.Target, session, autocommit, stc.txConn.mode)
if err != nil {
return
}
- updated, err := action(rs, i, shardActionInfo)
- if updated == nil {
+ info, err = action(rs, i, info)
+ if info == nil {
return
}
- if updated.actionNeeded != nothing && (updated.transactionID != 0 || updated.reservedID != 0) {
- appendErr := session.AppendOrUpdate(&vtgatepb.Session_ShardSession{
- Target: rs.Target,
- TransactionId: updated.transactionID,
- ReservedId: updated.reservedID,
- TabletAlias: updated.alias,
- }, stc.txConn.mode)
+ if info.ignoreOldSession {
+ shardSession = nil
+ }
+ if shardSession != nil && info.rowsAffected {
+ // We might not always update or append in the session.
+ // We need to track if rows were affected in the transaction.
+ shardSession.RowsAffected = info.rowsAffected
+ }
+ if info.actionNeeded != nothing && (info.transactionID != 0 || info.reservedID != 0) {
+ appendErr := session.AppendOrUpdate(rs.Target, info, shardSession, stc.txConn.mode)
if appendErr != nil {
err = appendErr
}
@@ -727,7 +734,7 @@ func (stc *ScatterConn) multiGoTransaction(
// It returns an error recorder in which each shard error is recorded positionally,
// i.e. if rss[2] had an error, then the error recorder will store that error
// in the second position.
-func (stc *ScatterConn) ExecuteLock(ctx context.Context, rs *srvtopo.ResolvedShard, query *querypb.BoundQuery, session *SafeSession, lockFuncType sqlparser.LockingFuncType) (*sqltypes.Result, error) {
+func (stc *ScatterConn) ExecuteLock(ctx context.Context, rs *srvtopo.ResolvedShard, query *querypb.BoundQuery, session *econtext.SafeSession, lockFuncType sqlparser.LockingFuncType) (*sqltypes.Result, error) {
var (
qr *sqltypes.Result
@@ -830,25 +837,25 @@ func requireNewQS(err error, target *querypb.Target) bool {
}
// actionInfo looks at the current session, and returns information about what needs to be done for this tablet
-func actionInfo(ctx context.Context, target *querypb.Target, session *SafeSession, autocommit bool, txMode vtgatepb.TransactionMode) (*shardActionInfo, error) {
+func actionInfo(ctx context.Context, target *querypb.Target, session *econtext.SafeSession, autocommit bool, txMode vtgatepb.TransactionMode) (*shardActionInfo, *vtgatepb.Session_ShardSession, error) {
if !(session.InTransaction() || session.InReservedConn()) {
- return &shardActionInfo{}, nil
+ return &shardActionInfo{}, nil, nil
}
ignoreSession := ctx.Value(engine.IgnoreReserveTxn)
if ignoreSession != nil {
- return &shardActionInfo{}, nil
+ return &shardActionInfo{}, nil, nil
}
// No need to protect ourselves from the race condition between
// Find and AppendOrUpdate. The higher level functions ensure that no
// duplicate (target) tuples can execute
// this at the same time.
- transactionID, reservedID, alias, err := session.FindAndChangeSessionIfInSingleTxMode(target.Keyspace, target.Shard, target.TabletType, txMode)
+ shardSession, err := session.FindAndChangeSessionIfInSingleTxMode(target.Keyspace, target.Shard, target.TabletType, txMode)
if err != nil {
- return nil, err
+ return nil, nil, err
}
- shouldReserve := session.InReservedConn() && reservedID == 0
- shouldBegin := session.InTransaction() && transactionID == 0 && !autocommit
+ shouldReserve := session.InReservedConn() && (shardSession == nil || shardSession.ReservedId == 0)
+ shouldBegin := session.InTransaction() && (shardSession == nil || shardSession.TransactionId == 0) && !autocommit
var act = nothing
switch {
@@ -860,16 +867,20 @@ func actionInfo(ctx context.Context, target *querypb.Target, session *SafeSessio
act = begin
}
- return &shardActionInfo{
- actionNeeded: act,
- transactionID: transactionID,
- reservedID: reservedID,
- alias: alias,
- }, nil
+ info := &shardActionInfo{
+ actionNeeded: act,
+ }
+ if shardSession != nil {
+ info.transactionID = shardSession.TransactionId
+ info.reservedID = shardSession.ReservedId
+ info.alias = shardSession.TabletAlias
+ info.rowsAffected = shardSession.RowsAffected
+ }
+ return info, shardSession, nil
}
// lockInfo looks at the current session, and returns information about what needs to be done for this tablet
-func lockInfo(target *querypb.Target, session *SafeSession, lockFuncType sqlparser.LockingFuncType) (*shardActionInfo, error) {
+func lockInfo(target *querypb.Target, session *econtext.SafeSession, lockFuncType sqlparser.LockingFuncType) (*shardActionInfo, error) {
info := &shardActionInfo{actionNeeded: nothing}
if session.LockSession != nil {
if !proto.Equal(target, session.LockSession.Target) {
@@ -894,10 +905,35 @@ type shardActionInfo struct {
actionNeeded actionNeeded
reservedID, transactionID int64
alias *topodatapb.TabletAlias
+
+ // ignoreOldSession is used when there is a retry on the same shard due to connection loss for a reserved connection.
+ // The old reference should be ignored and new shard session should be added to the session.
+ ignoreOldSession bool
+ rowsAffected bool
+}
+
+func (sai *shardActionInfo) TransactionID() int64 {
+ return sai.transactionID
+}
+
+func (sai *shardActionInfo) ReservedID() int64 {
+ return sai.reservedID
+}
+
+func (sai *shardActionInfo) RowsAffected() bool {
+ return sai.rowsAffected
+}
+
+func (sai *shardActionInfo) Alias() *topodatapb.TabletAlias {
+ return sai.alias
}
-func (sai *shardActionInfo) updateTransactionAndReservedID(txID int64, rID int64, alias *topodatapb.TabletAlias) *shardActionInfo {
- if txID == sai.transactionID && rID == sai.reservedID {
+func (sai *shardActionInfo) updateTransactionAndReservedID(txID int64, rID int64, alias *topodatapb.TabletAlias, qr *sqltypes.Result) *shardActionInfo {
+ firstTimeRowsAffected := false
+ if txID != 0 && qr != nil && !sai.rowsAffected {
+ firstTimeRowsAffected = qr.RowsAffected > 0
+ }
+ if txID == sai.transactionID && rID == sai.reservedID && !firstTimeRowsAffected {
// As transaction id and reserved id have not changed, there is nothing to update in session shard sessions.
return nil
}
@@ -905,6 +941,7 @@ func (sai *shardActionInfo) updateTransactionAndReservedID(txID int64, rID int64
newInfo.reservedID = rID
newInfo.transactionID = txID
newInfo.alias = alias
+ newInfo.rowsAffected = firstTimeRowsAffected
return &newInfo
}
diff --git a/go/vt/vtgate/scatter_conn_test.go b/go/vt/vtgate/scatter_conn_test.go
index c5d4f350433..ab8680ca5e6 100644
--- a/go/vt/vtgate/scatter_conn_test.go
+++ b/go/vt/vtgate/scatter_conn_test.go
@@ -21,6 +21,7 @@ import (
"testing"
"vitess.io/vitess/go/vt/log"
+ econtext "vitess.io/vitess/go/vt/vtgate/executorcontext"
"vitess.io/vitess/go/mysql/sqlerror"
vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc"
@@ -100,7 +101,7 @@ func TestExecuteFailOnAutocommit(t *testing.T) {
},
Autocommit: false,
}
- _, errs := sc.ExecuteMultiShard(ctx, nil, rss, queries, NewSafeSession(session), true /*autocommit*/, false, nullResultsObserver{})
+ _, errs := sc.ExecuteMultiShard(ctx, nil, rss, queries, econtext.NewSafeSession(session), true /*autocommit*/, false, nullResultsObserver{})
err := vterrors.Aggregate(errs)
require.Error(t, err)
require.Contains(t, err.Error(), "in autocommit mode, transactionID should be zero but was: 123")
@@ -183,7 +184,7 @@ func TestExecutePanic(t *testing.T) {
require.Contains(t, logMessage, "(*ScatterConn).multiGoTransaction")
}()
- _, _ = sc.ExecuteMultiShard(ctx, nil, rss, queries, NewSafeSession(session), true /*autocommit*/, false, nullResultsObserver{})
+ _, _ = sc.ExecuteMultiShard(ctx, nil, rss, queries, econtext.NewSafeSession(session), true /*autocommit*/, false, nullResultsObserver{})
}
@@ -204,7 +205,7 @@ func TestReservedOnMultiReplica(t *testing.T) {
res := srvtopo.NewResolver(newSandboxForCells(ctx, []string{"aa"}), sc.gateway, "aa")
- session := NewSafeSession(&vtgatepb.Session{InTransaction: false, InReservedConn: true})
+ session := econtext.NewSafeSession(&vtgatepb.Session{InTransaction: false, InReservedConn: true})
destinations := []key.Destination{key.DestinationShard("0")}
for i := 0; i < 10; i++ {
executeOnShards(t, ctx, res, keyspace, sc, session, destinations)
@@ -351,7 +352,7 @@ func TestReservedBeginTableDriven(t *testing.T) {
res := srvtopo.NewResolver(newSandboxForCells(ctx, []string{"aa"}), sc.gateway, "aa")
t.Run(test.name, func(t *testing.T) {
- session := NewSafeSession(&vtgatepb.Session{})
+ session := econtext.NewSafeSession(&vtgatepb.Session{})
for _, action := range test.actions {
session.Session.InTransaction = action.transaction
session.Session.InReservedConn = action.reserved
@@ -384,7 +385,7 @@ func TestReservedConnFail(t *testing.T) {
_ = hc.AddTestTablet("aa", "1", 1, keyspace, "1", topodatapb.TabletType_REPLICA, true, 1, nil)
res := srvtopo.NewResolver(newSandboxForCells(ctx, []string{"aa"}), sc.gateway, "aa")
- session := NewSafeSession(&vtgatepb.Session{InTransaction: false, InReservedConn: true})
+ session := econtext.NewSafeSession(&vtgatepb.Session{InTransaction: false, InReservedConn: true})
destinations := []key.Destination{key.DestinationShard("0")}
executeOnShards(t, ctx, res, keyspace, sc, session, destinations)
diff --git a/go/vt/vtgate/tabletgateway_flaky_test.go b/go/vt/vtgate/tabletgateway_flaky_test.go
index d136542d176..124997bea9e 100644
--- a/go/vt/vtgate/tabletgateway_flaky_test.go
+++ b/go/vt/vtgate/tabletgateway_flaky_test.go
@@ -20,6 +20,8 @@ import (
"testing"
"time"
+ econtext "vitess.io/vitess/go/vt/vtgate/executorcontext"
+
"github.com/stretchr/testify/require"
"vitess.io/vitess/go/mysql/collations"
@@ -53,7 +55,7 @@ func TestGatewayBufferingWhenPrimarySwitchesServingState(t *testing.T) {
TabletType: tabletType,
}
- ts := &fakeTopoServer{}
+ ts := &econtext.FakeTopoServer{}
// create a new fake health check. We want to check the buffering code which uses Subscribe, so we must also pass a channel
hc := discovery.NewFakeHealthCheck(make(chan *discovery.TabletHealth))
// create a new tablet gateway
@@ -156,7 +158,7 @@ func TestGatewayBufferingWhileReparenting(t *testing.T) {
TabletType: tabletType,
}
- ts := &fakeTopoServer{}
+ ts := &econtext.FakeTopoServer{}
// create a new fake health check. We want to check the buffering code which uses Subscribe, so we must also pass a channel
hc := discovery.NewFakeHealthCheck(make(chan *discovery.TabletHealth))
// create a new tablet gateway
@@ -286,7 +288,7 @@ func TestInconsistentStateDetectedBuffering(t *testing.T) {
TabletType: tabletType,
}
- ts := &fakeTopoServer{}
+ ts := &econtext.FakeTopoServer{}
// create a new fake health check. We want to check the buffering code which uses Subscribe, so we must also pass a channel
hc := discovery.NewFakeHealthCheck(make(chan *discovery.TabletHealth))
// create a new tablet gateway
diff --git a/go/vt/vtgate/tabletgateway_test.go b/go/vt/vtgate/tabletgateway_test.go
index 2aafb78af99..b318cb84981 100644
--- a/go/vt/vtgate/tabletgateway_test.go
+++ b/go/vt/vtgate/tabletgateway_test.go
@@ -22,6 +22,8 @@ import (
"strings"
"testing"
+ econtext "vitess.io/vitess/go/vt/vtgate/executorcontext"
+
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@@ -109,7 +111,7 @@ func TestTabletGatewayShuffleTablets(t *testing.T) {
ctx := utils.LeakCheckContext(t)
hc := discovery.NewFakeHealthCheck(nil)
- ts := &fakeTopoServer{}
+ ts := &econtext.FakeTopoServer{}
tg := NewTabletGateway(ctx, hc, ts, "local")
defer tg.Close(ctx)
@@ -183,7 +185,7 @@ func TestTabletGatewayReplicaTransactionError(t *testing.T) {
TabletType: tabletType,
}
hc := discovery.NewFakeHealthCheck(nil)
- ts := &fakeTopoServer{}
+ ts := &econtext.FakeTopoServer{}
tg := NewTabletGateway(ctx, hc, ts, "cell")
defer tg.Close(ctx)
@@ -218,7 +220,7 @@ func testTabletGatewayGenericHelper(t *testing.T, ctx context.Context, f func(ct
TabletType: tabletType,
}
hc := discovery.NewFakeHealthCheck(nil)
- ts := &fakeTopoServer{}
+ ts := &econtext.FakeTopoServer{}
tg := NewTabletGateway(ctx, hc, ts, "cell")
defer tg.Close(ctx)
// no tablet
@@ -306,7 +308,7 @@ func testTabletGatewayTransact(t *testing.T, ctx context.Context, f func(ctx con
TabletType: tabletType,
}
hc := discovery.NewFakeHealthCheck(nil)
- ts := &fakeTopoServer{}
+ ts := &econtext.FakeTopoServer{}
tg := NewTabletGateway(ctx, hc, ts, "cell")
defer tg.Close(ctx)
@@ -348,7 +350,7 @@ func verifyShardErrors(t *testing.T, err error, wantErrors []string, wantCode vt
// TestWithRetry tests the functionality of withRetry function in different circumstances.
func TestWithRetry(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
- tg := NewTabletGateway(ctx, discovery.NewFakeHealthCheck(nil), &fakeTopoServer{}, "cell")
+ tg := NewTabletGateway(ctx, discovery.NewFakeHealthCheck(nil), &econtext.FakeTopoServer{}, "cell")
tg.kev = discovery.NewKeyspaceEventWatcher(ctx, tg.srvTopoServer, tg.hc, tg.localCell)
defer func() {
cancel()
diff --git a/go/vt/vtgate/tx_conn.go b/go/vt/vtgate/tx_conn.go
index 315484ea499..3ce138bc0e4 100644
--- a/go/vt/vtgate/tx_conn.go
+++ b/go/vt/vtgate/tx_conn.go
@@ -33,6 +33,7 @@ import (
vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc"
"vitess.io/vitess/go/vt/sqlparser"
"vitess.io/vitess/go/vt/vterrors"
+ econtext "vitess.io/vitess/go/vt/vtgate/executorcontext"
"vitess.io/vitess/go/vt/vttablet/queryservice"
)
@@ -80,7 +81,7 @@ var phaseMessage = map[commitPhase]string{
// Begin begins a new transaction. If one is already in progress, it commits it
// and starts a new one.
-func (txc *TxConn) Begin(ctx context.Context, session *SafeSession, txAccessModes []sqlparser.TxAccessMode) error {
+func (txc *TxConn) Begin(ctx context.Context, session *econtext.SafeSession, txAccessModes []sqlparser.TxAccessMode) error {
if session.InTransaction() {
if err := txc.Commit(ctx, session); err != nil {
return err
@@ -102,7 +103,7 @@ func (txc *TxConn) Begin(ctx context.Context, session *SafeSession, txAccessMode
// Commit commits the current transaction. The type of commit can be
// best effort or 2pc depending on the session setting.
-func (txc *TxConn) Commit(ctx context.Context, session *SafeSession) error {
+func (txc *TxConn) Commit(ctx context.Context, session *econtext.SafeSession) error {
defer session.ResetTx()
if !session.InTransaction() {
return nil
@@ -123,7 +124,7 @@ func (txc *TxConn) Commit(ctx context.Context, session *SafeSession) error {
return txc.commitNormal(ctx, session)
}
-func recordCommitTime(session *SafeSession, twopc bool, startTime time.Time) {
+func recordCommitTime(session *econtext.SafeSession, twopc bool, startTime time.Time) {
switch {
case len(session.ShardSessions) == 0:
// No-op
@@ -143,7 +144,7 @@ func (txc *TxConn) queryService(ctx context.Context, alias *topodatapb.TabletAli
return txc.tabletGateway.QueryServiceByAlias(ctx, alias, nil)
}
-func (txc *TxConn) commitShard(ctx context.Context, s *vtgatepb.Session_ShardSession, logging *executeLogger) error {
+func (txc *TxConn) commitShard(ctx context.Context, s *vtgatepb.Session_ShardSession, logging *econtext.ExecuteLogger) error {
if s.TransactionId == 0 {
return nil
}
@@ -159,19 +160,19 @@ func (txc *TxConn) commitShard(ctx context.Context, s *vtgatepb.Session_ShardSes
}
s.TransactionId = 0
s.ReservedId = reservedID
- logging.log(nil, s.Target, nil, "commit", false, nil)
+ logging.Log(nil, s.Target, nil, "commit", false, nil)
return nil
}
-func (txc *TxConn) commitNormal(ctx context.Context, session *SafeSession) error {
- if err := txc.runSessions(ctx, session.PreSessions, session.logging, txc.commitShard); err != nil {
+func (txc *TxConn) commitNormal(ctx context.Context, session *econtext.SafeSession) error {
+ if err := txc.runSessions(ctx, session.PreSessions, session.GetLogger(), txc.commitShard); err != nil {
_ = txc.Release(ctx, session)
return err
}
// Retain backward compatibility on commit order for the normal session.
for i, shardSession := range session.ShardSessions {
- if err := txc.commitShard(ctx, shardSession, session.logging); err != nil {
+ if err := txc.commitShard(ctx, shardSession, session.GetLogger()); err != nil {
if i > 0 {
nShards := i
elipsis := false
@@ -197,7 +198,7 @@ func (txc *TxConn) commitNormal(ctx context.Context, session *SafeSession) error
}
}
- if err := txc.runSessions(ctx, session.PostSessions, session.logging, txc.commitShard); err != nil {
+ if err := txc.runSessions(ctx, session.PostSessions, session.GetLogger(), txc.commitShard); err != nil {
// If last commit fails, there will be nothing to rollback.
session.RecordWarning(&querypb.QueryWarning{Message: fmt.Sprintf("post-operation transaction had an error: %v", err)})
// With reserved connection we should release them.
@@ -209,7 +210,7 @@ func (txc *TxConn) commitNormal(ctx context.Context, session *SafeSession) error
}
// commit2PC will not used the pinned tablets - to make sure we use the current source, we need to use the gateway's queryservice
-func (txc *TxConn) commit2PC(ctx context.Context, session *SafeSession) (err error) {
+func (txc *TxConn) commit2PC(ctx context.Context, session *econtext.SafeSession) (err error) {
// If the number of participants is one or less, then it's a normal commit.
if len(session.ShardSessions) <= 1 {
return txc.commitNormal(ctx, session)
@@ -249,7 +250,7 @@ func (txc *TxConn) commit2PC(ctx context.Context, session *SafeSession) (err err
}
txPhase = Commit2pcPrepare
- prepareAction := func(ctx context.Context, s *vtgatepb.Session_ShardSession, logging *executeLogger) error {
+ prepareAction := func(ctx context.Context, s *vtgatepb.Session_ShardSession, logging *econtext.ExecuteLogger) error {
if DebugTwoPc { // Test code to simulate a failure during RM prepare
if terr := checkTestFailure(ctx, "RMPrepare_-40_FailNow", s.Target); terr != nil {
return terr
@@ -257,7 +258,7 @@ func (txc *TxConn) commit2PC(ctx context.Context, session *SafeSession) (err err
}
return txc.tabletGateway.Prepare(ctx, s.Target, s.TransactionId, dtid)
}
- if err = txc.runSessions(ctx, rmShards, session.logging, prepareAction); err != nil {
+ if err = txc.runSessions(ctx, rmShards, session.GetLogger(), prepareAction); err != nil {
return err
}
@@ -280,7 +281,7 @@ func (txc *TxConn) commit2PC(ctx context.Context, session *SafeSession) (err err
}
txPhase = Commit2pcPrepareCommit
- prepareCommitAction := func(ctx context.Context, s *vtgatepb.Session_ShardSession, logging *executeLogger) error {
+ prepareCommitAction := func(ctx context.Context, s *vtgatepb.Session_ShardSession, logging *econtext.ExecuteLogger) error {
if DebugTwoPc { // Test code to simulate a failure during RM prepare
if terr := checkTestFailure(ctx, "RMCommit_-40_FailNow", s.Target); terr != nil {
return terr
@@ -288,7 +289,7 @@ func (txc *TxConn) commit2PC(ctx context.Context, session *SafeSession) (err err
}
return txc.tabletGateway.CommitPrepared(ctx, s.Target, dtid)
}
- if err = txc.runSessions(ctx, rmShards, session.logging, prepareCommitAction); err != nil {
+ if err = txc.runSessions(ctx, rmShards, session.GetLogger(), prepareCommitAction); err != nil {
return err
}
@@ -300,7 +301,7 @@ func (txc *TxConn) commit2PC(ctx context.Context, session *SafeSession) (err err
return nil
}
-func (txc *TxConn) checkValidCondition(session *SafeSession) error {
+func (txc *TxConn) checkValidCondition(session *econtext.SafeSession) error {
if len(session.PreSessions) != 0 || len(session.PostSessions) != 0 {
return vterrors.VT12001("atomic distributed transaction commit with consistent lookup vindex")
}
@@ -309,7 +310,7 @@ func (txc *TxConn) checkValidCondition(session *SafeSession) error {
func (txc *TxConn) errActionAndLogWarn(
ctx context.Context,
- session *SafeSession,
+ session *econtext.SafeSession,
txPhase commitPhase,
startCommitState querypb.StartCommitState,
dtid string,
@@ -323,12 +324,12 @@ func (txc *TxConn) errActionAndLogWarn(
rollbackErr = txc.Rollback(ctx, session)
case Commit2pcPrepare:
// Rollback the prepared and unprepared transactions.
- rollbackErr = txc.rollbackTx(ctx, dtid, mmShard, rmShards, session.logging)
+ rollbackErr = txc.rollbackTx(ctx, dtid, mmShard, rmShards, session.GetLogger())
case Commit2pcStartCommit:
// Failed to store the commit decision on MM.
// If the failure state is certain, then the only option is to rollback the prepared transactions on the RMs.
if startCommitState == querypb.StartCommitState_Fail {
- rollbackErr = txc.rollbackTx(ctx, dtid, mmShard, rmShards, session.logging)
+ rollbackErr = txc.rollbackTx(ctx, dtid, mmShard, rmShards, session.GetLogger())
}
fallthrough
case Commit2pcPrepareCommit:
@@ -362,7 +363,7 @@ func createWarningMessage(dtid string, txPhase commitPhase) string {
}
// Rollback rolls back the current transaction. There are no retries on this operation.
-func (txc *TxConn) Rollback(ctx context.Context, session *SafeSession) error {
+func (txc *TxConn) Rollback(ctx context.Context, session *econtext.SafeSession) error {
if !session.InTransaction() {
return nil
}
@@ -371,7 +372,7 @@ func (txc *TxConn) Rollback(ctx context.Context, session *SafeSession) error {
allsessions := append(session.PreSessions, session.ShardSessions...)
allsessions = append(allsessions, session.PostSessions...)
- err := txc.runSessions(ctx, allsessions, session.logging, func(ctx context.Context, s *vtgatepb.Session_ShardSession, logging *executeLogger) error {
+ err := txc.runSessions(ctx, allsessions, session.GetLogger(), func(ctx context.Context, s *vtgatepb.Session_ShardSession, logging *econtext.ExecuteLogger) error {
if s.TransactionId == 0 {
return nil
}
@@ -385,7 +386,7 @@ func (txc *TxConn) Rollback(ctx context.Context, session *SafeSession) error {
}
s.TransactionId = 0
s.ReservedId = reservedID
- logging.log(nil, s.Target, nil, "rollback", false, nil)
+ logging.Log(nil, s.Target, nil, "rollback", false, nil)
return nil
})
if err != nil {
@@ -398,7 +399,7 @@ func (txc *TxConn) Rollback(ctx context.Context, session *SafeSession) error {
}
// Release releases the reserved connection and/or rollbacks the transaction
-func (txc *TxConn) Release(ctx context.Context, session *SafeSession) error {
+func (txc *TxConn) Release(ctx context.Context, session *econtext.SafeSession) error {
if !session.InTransaction() && !session.InReservedConn() {
return nil
}
@@ -407,7 +408,7 @@ func (txc *TxConn) Release(ctx context.Context, session *SafeSession) error {
allsessions := append(session.PreSessions, session.ShardSessions...)
allsessions = append(allsessions, session.PostSessions...)
- return txc.runSessions(ctx, allsessions, session.logging, func(ctx context.Context, s *vtgatepb.Session_ShardSession, logging *executeLogger) error {
+ return txc.runSessions(ctx, allsessions, session.GetLogger(), func(ctx context.Context, s *vtgatepb.Session_ShardSession, logging *econtext.ExecuteLogger) error {
if s.ReservedId == 0 && s.TransactionId == 0 {
return nil
}
@@ -426,7 +427,7 @@ func (txc *TxConn) Release(ctx context.Context, session *SafeSession) error {
}
// ReleaseLock releases the reserved connection used for locking.
-func (txc *TxConn) ReleaseLock(ctx context.Context, session *SafeSession) error {
+func (txc *TxConn) ReleaseLock(ctx context.Context, session *econtext.SafeSession) error {
if !session.InLockSession() {
return nil
}
@@ -445,7 +446,7 @@ func (txc *TxConn) ReleaseLock(ctx context.Context, session *SafeSession) error
}
// ReleaseAll releases all the shard sessions and lock session.
-func (txc *TxConn) ReleaseAll(ctx context.Context, session *SafeSession) error {
+func (txc *TxConn) ReleaseAll(ctx context.Context, session *econtext.SafeSession) error {
if !session.InTransaction() && !session.InReservedConn() && !session.InLockSession() {
return nil
}
@@ -457,7 +458,7 @@ func (txc *TxConn) ReleaseAll(ctx context.Context, session *SafeSession) error {
allsessions = append(allsessions, session.LockSession)
}
- return txc.runSessions(ctx, allsessions, session.logging, func(ctx context.Context, s *vtgatepb.Session_ShardSession, loggging *executeLogger) error {
+ return txc.runSessions(ctx, allsessions, session.GetLogger(), func(ctx context.Context, s *vtgatepb.Session_ShardSession, logging *econtext.ExecuteLogger) error {
if s.ReservedId == 0 && s.TransactionId == 0 {
return nil
}
@@ -529,12 +530,12 @@ func (txc *TxConn) resolveTx(ctx context.Context, target *querypb.Target, transa
// rollbackTx rollbacks the specified distributed transaction.
// Rollbacks happens on the metadata manager and all participants irrespective of the failure.
-func (txc *TxConn) rollbackTx(ctx context.Context, dtid string, mmShard *vtgatepb.Session_ShardSession, participants []*vtgatepb.Session_ShardSession, logging *executeLogger) error {
+func (txc *TxConn) rollbackTx(ctx context.Context, dtid string, mmShard *vtgatepb.Session_ShardSession, participants []*vtgatepb.Session_ShardSession, logging *econtext.ExecuteLogger) error {
var errs []error
if mmErr := txc.rollbackMM(ctx, dtid, mmShard); mmErr != nil {
errs = append(errs, mmErr)
}
- if rmErr := txc.runSessions(ctx, participants, logging, func(ctx context.Context, session *vtgatepb.Session_ShardSession, logger *executeLogger) error {
+ if rmErr := txc.runSessions(ctx, participants, logging, func(ctx context.Context, session *vtgatepb.Session_ShardSession, logger *econtext.ExecuteLogger) error {
return txc.tabletGateway.RollbackPrepared(ctx, session.Target, dtid, session.TransactionId)
}); rmErr != nil {
errs = append(errs, rmErr)
@@ -575,7 +576,7 @@ func (txc *TxConn) resumeCommit(ctx context.Context, target *querypb.Target, tra
}
// runSessions executes the action for all shardSessions in parallel and returns a consolidated error.
-func (txc *TxConn) runSessions(ctx context.Context, shardSessions []*vtgatepb.Session_ShardSession, logging *executeLogger, action func(context.Context, *vtgatepb.Session_ShardSession, *executeLogger) error) error {
+func (txc *TxConn) runSessions(ctx context.Context, shardSessions []*vtgatepb.Session_ShardSession, logging *econtext.ExecuteLogger, action func(context.Context, *vtgatepb.Session_ShardSession, *econtext.ExecuteLogger) error) error {
// Fastpath.
if len(shardSessions) == 1 {
return action(ctx, shardSessions[0], logging)
diff --git a/go/vt/vtgate/tx_conn_test.go b/go/vt/vtgate/tx_conn_test.go
index 9d49626f6f1..333094569c8 100644
--- a/go/vt/vtgate/tx_conn_test.go
+++ b/go/vt/vtgate/tx_conn_test.go
@@ -26,6 +26,8 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
+ econtext "vitess.io/vitess/go/vt/vtgate/executorcontext"
+
"vitess.io/vitess/go/event/syslogger"
"vitess.io/vitess/go/mysql/sqlerror"
"vitess.io/vitess/go/test/utils"
@@ -51,7 +53,7 @@ func TestTxConnBegin(t *testing.T) {
session := &vtgatepb.Session{}
// begin
- safeSession := NewSafeSession(session)
+ safeSession := econtext.NewSafeSession(session)
err := sc.txConn.Begin(ctx, safeSession, nil)
require.NoError(t, err)
wantSession := vtgatepb.Session{InTransaction: true}
@@ -75,7 +77,7 @@ func TestTxConnCommitFailure(t *testing.T) {
// Sequence the executes to ensure commit order
- session := NewSafeSession(&vtgatepb.Session{InTransaction: true})
+ session := econtext.NewSafeSession(&vtgatepb.Session{InTransaction: true})
sc.ExecuteMultiShard(ctx, nil, rssm[0], queries, session, false, false, nullResultsObserver{})
wantSession := vtgatepb.Session{
InTransaction: true,
@@ -176,7 +178,7 @@ func TestTxConnCommitFailureAfterNonAtomicCommitMaxShards(t *testing.T) {
// Sequence the executes to ensure commit order
- session := NewSafeSession(&vtgatepb.Session{InTransaction: true})
+ session := econtext.NewSafeSession(&vtgatepb.Session{InTransaction: true})
wantSession := vtgatepb.Session{
InTransaction: true,
ShardSessions: []*vtgatepb.Session_ShardSession{},
@@ -230,7 +232,7 @@ func TestTxConnCommitFailureBeforeNonAtomicCommitMaxShards(t *testing.T) {
// Sequence the executes to ensure commit order
- session := NewSafeSession(&vtgatepb.Session{InTransaction: true})
+ session := econtext.NewSafeSession(&vtgatepb.Session{InTransaction: true})
wantSession := vtgatepb.Session{
InTransaction: true,
ShardSessions: []*vtgatepb.Session_ShardSession{},
@@ -282,7 +284,7 @@ func TestTxConnCommitSuccess(t *testing.T) {
sc.txConn.mode = vtgatepb.TransactionMode_MULTI
// Sequence the executes to ensure commit order
- session := NewSafeSession(&vtgatepb.Session{InTransaction: true})
+ session := econtext.NewSafeSession(&vtgatepb.Session{InTransaction: true})
sc.ExecuteMultiShard(ctx, nil, rss0, queries, session, false, false, nullResultsObserver{})
wantSession := vtgatepb.Session{
InTransaction: true,
@@ -335,7 +337,7 @@ func TestTxConnReservedCommitSuccess(t *testing.T) {
sc.txConn.mode = vtgatepb.TransactionMode_MULTI
// Sequence the executes to ensure commit order
- session := NewSafeSession(&vtgatepb.Session{InTransaction: true, InReservedConn: true})
+ session := econtext.NewSafeSession(&vtgatepb.Session{InTransaction: true, InReservedConn: true})
sc.ExecuteMultiShard(ctx, nil, rss0, queries, session, false, false, nullResultsObserver{})
wantSession := vtgatepb.Session{
InTransaction: true,
@@ -420,7 +422,7 @@ func TestTxConnReservedOn2ShardTxOn1ShardAndCommit(t *testing.T) {
sc.txConn.mode = vtgatepb.TransactionMode_MULTI
// Sequence the executes to ensure shard session order
- session := NewSafeSession(&vtgatepb.Session{InReservedConn: true})
+ session := econtext.NewSafeSession(&vtgatepb.Session{InReservedConn: true})
// this will create reserved connections against all tablets
_, errs := sc.ExecuteMultiShard(ctx, nil, rss1, queries, session, false, false, nullResultsObserver{})
@@ -515,7 +517,7 @@ func TestTxConnReservedOn2ShardTxOn1ShardAndRollback(t *testing.T) {
sc.txConn.mode = vtgatepb.TransactionMode_MULTI
// Sequence the executes to ensure shard session order
- session := NewSafeSession(&vtgatepb.Session{InReservedConn: true})
+ session := econtext.NewSafeSession(&vtgatepb.Session{InReservedConn: true})
// this will create reserved connections against all tablets
_, errs := sc.ExecuteMultiShard(ctx, nil, rss1, queries, session, false, false, nullResultsObserver{})
@@ -611,7 +613,7 @@ func TestTxConnCommitOrderFailure1(t *testing.T) {
queries := []*querypb.BoundQuery{{Sql: "query1"}}
// Sequence the executes to ensure commit order
- session := NewSafeSession(&vtgatepb.Session{InTransaction: true})
+ session := econtext.NewSafeSession(&vtgatepb.Session{InTransaction: true})
sc.ExecuteMultiShard(ctx, nil, rss0, queries, session, false, false, nullResultsObserver{})
session.SetCommitOrder(vtgatepb.CommitOrder_PRE)
@@ -646,7 +648,7 @@ func TestTxConnCommitOrderFailure2(t *testing.T) {
}}
// Sequence the executes to ensure commit order
- session := NewSafeSession(&vtgatepb.Session{InTransaction: true})
+ session := econtext.NewSafeSession(&vtgatepb.Session{InTransaction: true})
sc.ExecuteMultiShard(context.Background(), nil, rss1, queries, session, false, false, nullResultsObserver{})
session.SetCommitOrder(vtgatepb.CommitOrder_PRE)
@@ -680,7 +682,7 @@ func TestTxConnCommitOrderFailure3(t *testing.T) {
}}
// Sequence the executes to ensure commit order
- session := NewSafeSession(&vtgatepb.Session{InTransaction: true})
+ session := econtext.NewSafeSession(&vtgatepb.Session{InTransaction: true})
sc.ExecuteMultiShard(ctx, nil, rss0, queries, session, false, false, nullResultsObserver{})
session.SetCommitOrder(vtgatepb.CommitOrder_PRE)
@@ -722,7 +724,7 @@ func TestTxConnCommitOrderSuccess(t *testing.T) {
}}
// Sequence the executes to ensure commit order
- session := NewSafeSession(&vtgatepb.Session{InTransaction: true})
+ session := econtext.NewSafeSession(&vtgatepb.Session{InTransaction: true})
sc.ExecuteMultiShard(ctx, nil, rss0, queries, session, false, false, nullResultsObserver{})
wantSession := vtgatepb.Session{
InTransaction: true,
@@ -820,7 +822,7 @@ func TestTxConnReservedCommitOrderSuccess(t *testing.T) {
}}
// Sequence the executes to ensure commit order
- session := NewSafeSession(&vtgatepb.Session{InTransaction: true, InReservedConn: true})
+ session := econtext.NewSafeSession(&vtgatepb.Session{InTransaction: true, InReservedConn: true})
sc.ExecuteMultiShard(ctx, nil, rss0, queries, session, false, false, nullResultsObserver{})
wantSession := vtgatepb.Session{
InTransaction: true,
@@ -957,7 +959,7 @@ func TestTxConnCommit2PC(t *testing.T) {
sc, sbc0, sbc1, rss0, _, rss01 := newTestTxConnEnv(t, ctx, "TestTxConnCommit2PC")
- session := NewSafeSession(&vtgatepb.Session{InTransaction: true})
+ session := econtext.NewSafeSession(&vtgatepb.Session{InTransaction: true})
sc.ExecuteMultiShard(ctx, nil, rss0, queries, session, false, false, nullResultsObserver{})
sc.ExecuteMultiShard(ctx, nil, rss01, twoQueries, session, false, false, nullResultsObserver{})
session.TransactionMode = vtgatepb.TransactionMode_TWOPC
@@ -974,7 +976,7 @@ func TestTxConnCommit2PCOneParticipant(t *testing.T) {
ctx := utils.LeakCheckContext(t)
sc, sbc0, _, rss0, _, _ := newTestTxConnEnv(t, ctx, "TestTxConnCommit2PCOneParticipant")
- session := NewSafeSession(&vtgatepb.Session{InTransaction: true})
+ session := econtext.NewSafeSession(&vtgatepb.Session{InTransaction: true})
sc.ExecuteMultiShard(ctx, nil, rss0, queries, session, false, false, nullResultsObserver{})
session.TransactionMode = vtgatepb.TransactionMode_TWOPC
require.NoError(t,
@@ -987,7 +989,7 @@ func TestTxConnCommit2PCCreateTransactionFail(t *testing.T) {
sc, sbc0, sbc1, rss0, rss1, _ := newTestTxConnEnv(t, ctx, "TestTxConnCommit2PCCreateTransactionFail")
- session := NewSafeSession(&vtgatepb.Session{InTransaction: true})
+ session := econtext.NewSafeSession(&vtgatepb.Session{InTransaction: true})
sc.ExecuteMultiShard(ctx, nil, rss0, queries, session, false, false, nullResultsObserver{})
sc.ExecuteMultiShard(ctx, nil, rss1, queries, session, false, false, nullResultsObserver{})
@@ -1009,7 +1011,7 @@ func TestTxConnCommit2PCPrepareFail(t *testing.T) {
sc, sbc0, sbc1, rss0, _, rss01 := newTestTxConnEnv(t, ctx, "TestTxConnCommit2PCPrepareFail")
- session := NewSafeSession(&vtgatepb.Session{InTransaction: true})
+ session := econtext.NewSafeSession(&vtgatepb.Session{InTransaction: true})
sc.ExecuteMultiShard(ctx, nil, rss0, queries, session, false, false, nullResultsObserver{})
sc.ExecuteMultiShard(ctx, nil, rss01, twoQueries, session, false, false, nullResultsObserver{})
@@ -1035,7 +1037,7 @@ func TestTxConnCommit2PCStartCommitFail(t *testing.T) {
sc, sbc0, sbc1, rss0, _, rss01 := newTestTxConnEnv(t, ctx, "TestTxConnCommit2PCStartCommitFail")
- session := NewSafeSession(&vtgatepb.Session{InTransaction: true})
+ session := econtext.NewSafeSession(&vtgatepb.Session{InTransaction: true})
sc.ExecuteMultiShard(ctx, nil, rss0, queries, session, false, false, nullResultsObserver{})
sc.ExecuteMultiShard(ctx, nil, rss01, twoQueries, session, false, false, nullResultsObserver{})
@@ -1054,7 +1056,7 @@ func TestTxConnCommit2PCStartCommitFail(t *testing.T) {
sbc0.ResetCounter()
sbc1.ResetCounter()
- session = NewSafeSession(&vtgatepb.Session{InTransaction: true})
+ session = econtext.NewSafeSession(&vtgatepb.Session{InTransaction: true})
sc.ExecuteMultiShard(ctx, nil, rss0, queries, session, false, false, nullResultsObserver{})
sc.ExecuteMultiShard(ctx, nil, rss01, twoQueries, session, false, false, nullResultsObserver{})
@@ -1077,7 +1079,7 @@ func TestTxConnCommit2PCCommitPreparedFail(t *testing.T) {
sc, sbc0, sbc1, rss0, _, rss01 := newTestTxConnEnv(t, ctx, "TestTxConnCommit2PCCommitPreparedFail")
- session := NewSafeSession(&vtgatepb.Session{InTransaction: true})
+ session := econtext.NewSafeSession(&vtgatepb.Session{InTransaction: true})
sc.ExecuteMultiShard(ctx, nil, rss0, queries, session, false, false, nullResultsObserver{})
sc.ExecuteMultiShard(ctx, nil, rss01, twoQueries, session, false, false, nullResultsObserver{})
@@ -1097,7 +1099,7 @@ func TestTxConnCommit2PCConcludeTransactionFail(t *testing.T) {
sc, sbc0, sbc1, rss0, _, rss01 := newTestTxConnEnv(t, ctx, "TestTxConnCommit2PCConcludeTransactionFail")
- session := NewSafeSession(&vtgatepb.Session{InTransaction: true})
+ session := econtext.NewSafeSession(&vtgatepb.Session{InTransaction: true})
sc.ExecuteMultiShard(ctx, nil, rss0, queries, session, false, false, nullResultsObserver{})
sc.ExecuteMultiShard(ctx, nil, rss01, twoQueries, session, false, false, nullResultsObserver{})
@@ -1117,7 +1119,7 @@ func TestTxConnRollback(t *testing.T) {
sc, sbc0, sbc1, rss0, _, rss01 := newTestTxConnEnv(t, ctx, "TxConnRollback")
- session := NewSafeSession(&vtgatepb.Session{InTransaction: true})
+ session := econtext.NewSafeSession(&vtgatepb.Session{InTransaction: true})
sc.ExecuteMultiShard(ctx, nil, rss0, queries, session, false, false, nullResultsObserver{})
sc.ExecuteMultiShard(ctx, nil, rss01, twoQueries, session, false, false, nullResultsObserver{})
require.NoError(t,
@@ -1133,7 +1135,7 @@ func TestTxConnReservedRollback(t *testing.T) {
sc, sbc0, sbc1, rss0, _, rss01 := newTestTxConnEnv(t, ctx, "TxConnReservedRollback")
- session := NewSafeSession(&vtgatepb.Session{InTransaction: true, InReservedConn: true})
+ session := econtext.NewSafeSession(&vtgatepb.Session{InTransaction: true, InReservedConn: true})
sc.ExecuteMultiShard(ctx, nil, rss0, queries, session, false, false, nullResultsObserver{})
sc.ExecuteMultiShard(ctx, nil, rss01, twoQueries, session, false, false, nullResultsObserver{})
require.NoError(t,
@@ -1170,7 +1172,7 @@ func TestTxConnReservedRollbackFailure(t *testing.T) {
sc, sbc0, sbc1, rss0, rss1, rss01 := newTestTxConnEnv(t, ctx, "TxConnReservedRollback")
- session := NewSafeSession(&vtgatepb.Session{InTransaction: true, InReservedConn: true})
+ session := econtext.NewSafeSession(&vtgatepb.Session{InTransaction: true, InReservedConn: true})
sc.ExecuteMultiShard(ctx, nil, rss0, queries, session, false, false, nullResultsObserver{})
sc.ExecuteMultiShard(ctx, nil, rss01, twoQueries, session, false, false, nullResultsObserver{})
@@ -1449,7 +1451,7 @@ func TestTxConnMultiGoSessions(t *testing.T) {
Keyspace: "0",
},
}}
- err := txc.runSessions(ctx, input, nil, func(ctx context.Context, s *vtgatepb.Session_ShardSession, logger *executeLogger) error {
+ err := txc.runSessions(ctx, input, nil, func(ctx context.Context, s *vtgatepb.Session_ShardSession, logger *econtext.ExecuteLogger) error {
return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "err %s", s.Target.Keyspace)
})
want := "err 0"
@@ -1464,7 +1466,7 @@ func TestTxConnMultiGoSessions(t *testing.T) {
Keyspace: "1",
},
}}
- err = txc.runSessions(ctx, input, nil, func(ctx context.Context, s *vtgatepb.Session_ShardSession, logger *executeLogger) error {
+ err = txc.runSessions(ctx, input, nil, func(ctx context.Context, s *vtgatepb.Session_ShardSession, logger *econtext.ExecuteLogger) error {
return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "err %s", s.Target.Keyspace)
})
want = "err 0\nerr 1"
@@ -1472,7 +1474,7 @@ func TestTxConnMultiGoSessions(t *testing.T) {
wantCode := vtrpcpb.Code_INTERNAL
assert.Equal(t, wantCode, vterrors.Code(err), "error code")
- err = txc.runSessions(ctx, input, nil, func(ctx context.Context, s *vtgatepb.Session_ShardSession, logger *executeLogger) error {
+ err = txc.runSessions(ctx, input, nil, func(ctx context.Context, s *vtgatepb.Session_ShardSession, logger *econtext.ExecuteLogger) error {
return nil
})
require.NoError(t, err)
@@ -1515,7 +1517,7 @@ func TestTxConnAccessModeReset(t *testing.T) {
tcases := []struct {
name string
- f func(ctx context.Context, session *SafeSession) error
+ f func(ctx context.Context, session *econtext.SafeSession) error
}{{
name: "begin-commit",
f: sc.txConn.Commit,
@@ -1532,7 +1534,7 @@ func TestTxConnAccessModeReset(t *testing.T) {
for _, tcase := range tcases {
t.Run(tcase.name, func(t *testing.T) {
- safeSession := NewSafeSession(&vtgatepb.Session{
+ safeSession := econtext.NewSafeSession(&vtgatepb.Session{
Options: &querypb.ExecuteOptions{
TransactionAccessMode: []querypb.ExecuteOptions_TransactionAccessMode{querypb.ExecuteOptions_READ_ONLY},
},
diff --git a/go/vt/vtgate/vindexes/cached_size.go b/go/vt/vtgate/vindexes/cached_size.go
index a97411a6ac8..eeadb69b532 100644
--- a/go/vt/vtgate/vindexes/cached_size.go
+++ b/go/vt/vtgate/vindexes/cached_size.go
@@ -175,6 +175,18 @@ func (cached *Keyspace) CachedSize(alloc bool) int64 {
size += hack.RuntimeAllocSize(int64(len(cached.Name)))
return size
}
+func (cached *LookupCost) CachedSize(alloc bool) int64 {
+ if cached == nil {
+ return int64(0)
+ }
+ size := int64(0)
+ if alloc {
+ size += int64(16)
+ }
+ // field LookupNonUnique *vitess.io/vitess/go/vt/vtgate/vindexes.LookupNonUnique
+ size += cached.LookupNonUnique.CachedSize(true)
+ return size
+}
func (cached *LookupHash) CachedSize(alloc bool) int64 {
if cached == nil {
return int64(0)
diff --git a/go/vt/vtgate/vindexes/lookup_cost.go b/go/vt/vtgate/vindexes/lookup_cost.go
new file mode 100644
index 00000000000..6556032cea5
--- /dev/null
+++ b/go/vt/vtgate/vindexes/lookup_cost.go
@@ -0,0 +1,70 @@
+/*
+Copyright 2024 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package vindexes
+
+import (
+ "strconv"
+)
+
+var (
+ _ SingleColumn = (*LookupCost)(nil)
+ _ Lookup = (*LookupCost)(nil)
+ _ LookupPlanable = (*LookupCost)(nil)
+)
+
+func init() {
+ Register("lookup_cost", newLookupCost)
+}
+
+const defaultCost = 5
+
+// LookupCost defines a vindex that reports the cost provided by the user.
+// This vindex is intended for testing only.
+type LookupCost struct {
+ *LookupNonUnique
+ cost int
+}
+
+// Cost returns the cost of this vindex as provided.
+func (lc *LookupCost) Cost() int {
+ return lc.cost
+}
+
+func newLookupCost(name string, m map[string]string) (Vindex, error) {
+ lookup, err := newLookup(name, m)
+ if err != nil {
+ return nil, err
+ }
+ cost := getInt(m, "cost", defaultCost)
+ return &LookupCost{
+ LookupNonUnique: lookup.(*LookupNonUnique),
+ cost: cost,
+ }, nil
+
+}
+
+func getInt(m map[string]string, key string, defaultVal int) int {
+ val, ok := m[key]
+ if !ok {
+ return defaultVal
+ }
+ intVal, err := strconv.Atoi(val)
+ if err != nil {
+ return defaultVal
+ }
+ return intVal
+}
diff --git a/go/vt/vtgate/vindexes/vschema_test.go b/go/vt/vtgate/vindexes/vschema_test.go
index 25f8e135698..f9bcf43ddaa 100644
--- a/go/vt/vtgate/vindexes/vschema_test.go
+++ b/go/vt/vtgate/vindexes/vschema_test.go
@@ -21,6 +21,7 @@ import (
"encoding/json"
"errors"
"fmt"
+ "os"
"reflect"
"strings"
"testing"
@@ -3551,6 +3552,20 @@ func TestFindTableWithSequences(t *testing.T) {
}
}
+func TestGlobalTables(t *testing.T) {
+ input, err := os.ReadFile("../planbuilder/testdata/vschemas/schema.json")
+ require.NoError(t, err)
+
+ var vs vschemapb.SrvVSchema
+ err = json2.UnmarshalPB(input, &vs)
+ require.NoError(t, err)
+
+ got := BuildVSchema(&vs, sqlparser.NewTestParser())
+ tbl, err := got.findGlobalTable("user", false)
+ require.NoError(t, err)
+ assert.NotNil(t, tbl)
+}
+
func vindexNames(vindexes []*ColumnVindex) (result []string) {
for _, vindex := range vindexes {
result = append(result, vindex.Name)
diff --git a/go/vt/vtgate/viperconfig.go b/go/vt/vtgate/viperconfig.go
new file mode 100644
index 00000000000..ec77ff62d4f
--- /dev/null
+++ b/go/vt/vtgate/viperconfig.go
@@ -0,0 +1,16 @@
+package vtgate
+
+import "vitess.io/vitess/go/viperutil"
+
+type dynamicViperConfig struct {
+ onlineDDL viperutil.Value[bool]
+ directDDL viperutil.Value[bool]
+}
+
+func (d *dynamicViperConfig) OnlineEnabled() bool {
+ return d.onlineDDL.Get()
+}
+
+func (d *dynamicViperConfig) DirectEnabled() bool {
+ return d.directDDL.Get()
+}
diff --git a/go/vt/vtgate/vschema_manager.go b/go/vt/vtgate/vschema_manager.go
index 2b6761f4a8e..62ea2cd3455 100644
--- a/go/vt/vtgate/vschema_manager.go
+++ b/go/vt/vtgate/vschema_manager.go
@@ -33,8 +33,6 @@ import (
vschemapb "vitess.io/vitess/go/vt/proto/vschema"
)
-var _ VSchemaOperator = (*VSchemaManager)(nil)
-
// VSchemaManager is used to watch for updates to the vschema and to implement
// the DDL commands to add / remove vindexes
type VSchemaManager struct {
diff --git a/go/vt/vtgate/vschemaacl/vschemaacl.go b/go/vt/vtgate/vschemaacl/vschemaacl.go
index 5345d1437fc..08f6c2b0cd4 100644
--- a/go/vt/vtgate/vschemaacl/vschemaacl.go
+++ b/go/vt/vtgate/vschemaacl/vschemaacl.go
@@ -18,26 +18,67 @@ package vschemaacl
import (
"strings"
- "sync"
"github.com/spf13/pflag"
+ "github.com/spf13/viper"
- "vitess.io/vitess/go/vt/servenv"
-
+ "vitess.io/vitess/go/viperutil"
querypb "vitess.io/vitess/go/vt/proto/query"
+ "vitess.io/vitess/go/vt/servenv"
)
-var (
- // AuthorizedDDLUsers specifies the users that can perform ddl operations
- AuthorizedDDLUsers string
-
- // ddlAllowAll is true if the special value of "*" was specified
+type authorizedDDLUsers struct {
allowAll bool
+ acl map[string]struct{}
+ source string
+}
+
+func NewAuthorizedDDLUsers(users string) *authorizedDDLUsers {
+ acl := make(map[string]struct{})
+ allowAll := false
+
+ switch users {
+ case "":
+ case "%":
+ allowAll = true
+ default:
+ for _, user := range strings.Split(users, ",") {
+ user = strings.TrimSpace(user)
+ acl[user] = struct{}{}
+ }
+ }
+
+ return &authorizedDDLUsers{
+ allowAll: allowAll,
+ acl: acl,
+ source: users,
+ }
+}
- // ddlACL contains a set of allowed usernames
- acl map[string]struct{}
+func (a *authorizedDDLUsers) String() string {
+ return a.source
+}
- initMu sync.Mutex
+var (
+ // AuthorizedDDLUsers specifies the users that can perform ddl operations
+ AuthorizedDDLUsers = viperutil.Configure(
+ "vschema_ddl_authorized_users",
+ viperutil.Options[*authorizedDDLUsers]{
+ FlagName: "vschema_ddl_authorized_users",
+ Default: &authorizedDDLUsers{},
+ Dynamic: true,
+ GetFunc: func(v *viper.Viper) func(key string) *authorizedDDLUsers {
+ return func(key string) *authorizedDDLUsers {
+ newVal := v.GetString(key)
+ curVal, ok := v.Get(key).(*authorizedDDLUsers)
+ if ok && newVal == curVal.source {
+ return curVal
+ }
+ return NewAuthorizedDDLUsers(newVal)
+ }
+ },
+ },
+ )
)
// RegisterSchemaACLFlags installs log flags on the given FlagSet.
@@ -46,7 +87,8 @@ var (
// calls this function, or call this function directly before parsing
// command-line arguments.
func RegisterSchemaACLFlags(fs *pflag.FlagSet) {
- fs.StringVar(&AuthorizedDDLUsers, "vschema_ddl_authorized_users", AuthorizedDDLUsers, "List of users authorized to execute vschema ddl operations, or '%' to allow all users.")
+ fs.String("vschema_ddl_authorized_users", "", "List of users authorized to execute vschema ddl operations, or '%' to allow all users.")
+ viperutil.BindFlags(fs, AuthorizedDDLUsers)
}
func init() {
@@ -55,33 +97,14 @@ func init() {
}
}
-// Init parses the users option and sets allowAll / acl accordingly
-func Init() {
- initMu.Lock()
- defer initMu.Unlock()
- acl = make(map[string]struct{})
- allowAll = false
-
- if AuthorizedDDLUsers == "%" {
- allowAll = true
- return
- } else if AuthorizedDDLUsers == "" {
- return
- }
-
- for _, user := range strings.Split(AuthorizedDDLUsers, ",") {
- user = strings.TrimSpace(user)
- acl[user] = struct{}{}
- }
-}
-
// Authorized returns true if the given caller is allowed to execute vschema operations
func Authorized(caller *querypb.VTGateCallerID) bool {
- if allowAll {
+ users := AuthorizedDDLUsers.Get()
+ if users.allowAll {
return true
}
user := caller.GetUsername()
- _, ok := acl[user]
+ _, ok := users.acl[user]
return ok
}
diff --git a/go/vt/vtgate/vschemaacl/vschemaacl_test.go b/go/vt/vtgate/vschemaacl/vschemaacl_test.go
index faa2dbfc294..cfd1de705af 100644
--- a/go/vt/vtgate/vschemaacl/vschemaacl_test.go
+++ b/go/vt/vtgate/vschemaacl/vschemaacl_test.go
@@ -35,8 +35,7 @@ func TestVschemaAcl(t *testing.T) {
}
// Test wildcard
- AuthorizedDDLUsers = "%"
- Init()
+ AuthorizedDDLUsers.Set(NewAuthorizedDDLUsers("%"))
if !Authorized(&redUser) {
t.Errorf("user should be authorized")
@@ -46,8 +45,7 @@ func TestVschemaAcl(t *testing.T) {
}
// Test user list
- AuthorizedDDLUsers = "oneUser, twoUser, redUser, blueUser"
- Init()
+ AuthorizedDDLUsers.Set(NewAuthorizedDDLUsers("oneUser, twoUser, redUser, blueUser"))
if !Authorized(&redUser) {
t.Errorf("user should be authorized")
@@ -57,8 +55,7 @@ func TestVschemaAcl(t *testing.T) {
}
// Revert to baseline state for other tests
- AuthorizedDDLUsers = ""
- Init()
+ AuthorizedDDLUsers.Set(NewAuthorizedDDLUsers(""))
// By default no users are allowed in
if Authorized(&redUser) {
diff --git a/go/vt/vtgate/vtgate.go b/go/vt/vtgate/vtgate.go
index e9e7cd65011..8bab05479dd 100644
--- a/go/vt/vtgate/vtgate.go
+++ b/go/vt/vtgate/vtgate.go
@@ -34,6 +34,7 @@ import (
"vitess.io/vitess/go/sqltypes"
"vitess.io/vitess/go/stats"
"vitess.io/vitess/go/tb"
+ "vitess.io/vitess/go/viperutil"
"vitess.io/vitess/go/vt/discovery"
"vitess.io/vitess/go/vt/key"
"vitess.io/vitess/go/vt/log"
@@ -51,6 +52,7 @@ import (
"vitess.io/vitess/go/vt/topo/topoproto"
"vitess.io/vitess/go/vt/vtenv"
"vitess.io/vitess/go/vt/vterrors"
+ econtext "vitess.io/vitess/go/vt/vtgate/executorcontext"
"vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext"
vtschema "vitess.io/vitess/go/vt/vtgate/schema"
"vitess.io/vitess/go/vt/vtgate/txresolver"
@@ -93,8 +95,24 @@ var (
foreignKeyMode = "allow"
dbDDLPlugin = "fail"
defaultDDLStrategy = string(schema.DDLStrategyDirect)
- enableOnlineDDL = true
- enableDirectDDL = true
+
+ enableOnlineDDL = viperutil.Configure(
+ "enable_online_ddl",
+ viperutil.Options[bool]{
+ FlagName: "enable_online_ddl",
+ Default: true,
+ Dynamic: true,
+ },
+ )
+
+ enableDirectDDL = viperutil.Configure(
+ "enable_direct_ddl",
+ viperutil.Options[bool]{
+ FlagName: "enable_direct_ddl",
+ Default: true,
+ Dynamic: true,
+ },
+ )
// schema tracking flags
enableSchemaChangeSignal = true
@@ -141,8 +159,8 @@ func registerFlags(fs *pflag.FlagSet) {
fs.DurationVar(&lockHeartbeatTime, "lock_heartbeat_time", lockHeartbeatTime, "If there is lock function used. This will keep the lock connection active by using this heartbeat")
fs.BoolVar(&warnShardedOnly, "warn_sharded_only", warnShardedOnly, "If any features that are only available in unsharded mode are used, query execution warnings will be added to the session")
fs.StringVar(&foreignKeyMode, "foreign_key_mode", foreignKeyMode, "This is to provide how to handle foreign key constraint in create/alter table. Valid values are: allow, disallow")
- fs.BoolVar(&enableOnlineDDL, "enable_online_ddl", enableOnlineDDL, "Allow users to submit, review and control Online DDL")
- fs.BoolVar(&enableDirectDDL, "enable_direct_ddl", enableDirectDDL, "Allow users to submit direct DDL statements")
+ fs.Bool("enable_online_ddl", enableOnlineDDL.Default(), "Allow users to submit, review and control Online DDL")
+ fs.Bool("enable_direct_ddl", enableDirectDDL.Default(), "Allow users to submit direct DDL statements")
fs.BoolVar(&enableSchemaChangeSignal, "schema_change_signal", enableSchemaChangeSignal, "Enable the schema tracker; requires queryserver-config-schema-change-signal to be enabled on the underlying vttablets for this to work")
fs.IntVar(&queryTimeout, "query-timeout", queryTimeout, "Sets the default query timeout (in ms). Can be overridden by session variable (query_timeout) or comment directive (QUERY_TIMEOUT_MS)")
fs.StringVar(&queryLogToFile, "log_queries_to_file", queryLogToFile, "Enable query logging to the specified file")
@@ -154,6 +172,8 @@ func registerFlags(fs *pflag.FlagSet) {
fs.IntVar(&warmingReadsPercent, "warming-reads-percent", 0, "Percentage of reads on the primary to forward to replicas. Useful for keeping buffer pools warm")
fs.IntVar(&warmingReadsConcurrency, "warming-reads-concurrency", 500, "Number of concurrent warming reads allowed")
fs.DurationVar(&warmingReadsQueryTimeout, "warming-reads-query-timeout", 5*time.Second, "Timeout of warming read queries")
+
+ viperutil.BindFlags(fs, enableOnlineDDL, enableDirectDDL)
}
func init() {
@@ -469,7 +489,7 @@ func (vtg *VTGate) Execute(ctx context.Context, mysqlCtx vtgateservice.MySQLConn
if bvErr := sqltypes.ValidateBindVariables(bindVariables); bvErr != nil {
err = vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "%v", bvErr)
} else {
- safeSession := NewSafeSession(session)
+ safeSession := econtext.NewSafeSession(session)
qr, err = vtg.executor.Execute(ctx, mysqlCtx, "Execute", safeSession, sql, bindVariables)
safeSession.RemoveInternalSavepoint()
}
@@ -526,7 +546,7 @@ func (vtg *VTGate) StreamExecute(ctx context.Context, mysqlCtx vtgateservice.MyS
defer vtg.timings.Record(statsKey, time.Now())
- safeSession := NewSafeSession(session)
+ safeSession := econtext.NewSafeSession(session)
var err error
if bvErr := sqltypes.ValidateBindVariables(bindVariables); bvErr != nil {
err = vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "%v", bvErr)
@@ -560,7 +580,7 @@ func (vtg *VTGate) StreamExecute(ctx context.Context, mysqlCtx vtgateservice.MyS
// same effect as if a "rollback" statement was executed, but does not affect the query
// statistics.
func (vtg *VTGate) CloseSession(ctx context.Context, session *vtgatepb.Session) error {
- return vtg.executor.CloseSession(ctx, NewSafeSession(session))
+ return vtg.executor.CloseSession(ctx, econtext.NewSafeSession(session))
}
// Prepare supports non-streaming prepare statement query with multi shards
@@ -575,7 +595,7 @@ func (vtg *VTGate) Prepare(ctx context.Context, session *vtgatepb.Session, sql s
goto handleError
}
- fld, err = vtg.executor.Prepare(ctx, "Prepare", NewSafeSession(session), sql, bindVariables)
+ fld, err = vtg.executor.Prepare(ctx, "Prepare", econtext.NewSafeSession(session), sql, bindVariables)
if err == nil {
return session, fld, nil
}
diff --git a/go/vt/vtorc/config/config.go b/go/vt/vtorc/config/config.go
index 2d21e377cb6..cafff5acce8 100644
--- a/go/vt/vtorc/config/config.go
+++ b/go/vt/vtorc/config/config.go
@@ -17,14 +17,12 @@
package config
import (
- "encoding/json"
- "fmt"
- "os"
"time"
"github.com/spf13/pflag"
- "vitess.io/vitess/go/vt/log"
+ "vitess.io/vitess/go/viperutil"
+ "vitess.io/vitess/go/vt/servenv"
)
var configurationLoaded = make(chan bool)
@@ -42,200 +40,296 @@ const (
)
var (
- sqliteDataFile = "file::memory:?mode=memory&cache=shared"
- instancePollTime = 5 * time.Second
- snapshotTopologyInterval = 0 * time.Hour
- reasonableReplicationLag = 10 * time.Second
- auditFileLocation = ""
- auditToBackend = false
- auditToSyslog = false
- auditPurgeDuration = 7 * 24 * time.Hour // Equivalent of 7 days
- recoveryPeriodBlockDuration = 30 * time.Second
- preventCrossCellFailover = false
- waitReplicasTimeout = 30 * time.Second
- tolerableReplicationLag = 0 * time.Second
- topoInformationRefreshDuration = 15 * time.Second
- recoveryPollDuration = 1 * time.Second
- ersEnabled = true
- convertTabletsWithErrantGTIDs = false
+ instancePollTime = viperutil.Configure(
+ "instance-poll-time",
+ viperutil.Options[time.Duration]{
+ FlagName: "instance-poll-time",
+ Default: 5 * time.Second,
+ Dynamic: true,
+ },
+ )
+
+ preventCrossCellFailover = viperutil.Configure(
+ "prevent-cross-cell-failover",
+ viperutil.Options[bool]{
+ FlagName: "prevent-cross-cell-failover",
+ Default: false,
+ Dynamic: true,
+ },
+ )
+
+ sqliteDataFile = viperutil.Configure(
+ "sqlite-data-file",
+ viperutil.Options[string]{
+ FlagName: "sqlite-data-file",
+ Default: "file::memory:?mode=memory&cache=shared",
+ Dynamic: false,
+ },
+ )
+
+ snapshotTopologyInterval = viperutil.Configure(
+ "snapshot-topology-interval",
+ viperutil.Options[time.Duration]{
+ FlagName: "snapshot-topology-interval",
+ Default: 0 * time.Hour,
+ Dynamic: true,
+ },
+ )
+
+ reasonableReplicationLag = viperutil.Configure(
+ "reasonable-replication-lag",
+ viperutil.Options[time.Duration]{
+ FlagName: "reasonable-replication-lag",
+ Default: 10 * time.Second,
+ Dynamic: true,
+ },
+ )
+
+ auditFileLocation = viperutil.Configure(
+ "audit-file-location",
+ viperutil.Options[string]{
+ FlagName: "audit-file-location",
+ Default: "",
+ Dynamic: false,
+ },
+ )
+
+ auditToBackend = viperutil.Configure(
+ "audit-to-backend",
+ viperutil.Options[bool]{
+ FlagName: "audit-to-backend",
+ Default: false,
+ Dynamic: true,
+ },
+ )
+
+ auditToSyslog = viperutil.Configure(
+ "audit-to-syslog",
+ viperutil.Options[bool]{
+ FlagName: "audit-to-syslog",
+ Default: false,
+ Dynamic: true,
+ },
+ )
+
+ auditPurgeDuration = viperutil.Configure(
+ "audit-purge-duration",
+ viperutil.Options[time.Duration]{
+ FlagName: "audit-purge-duration",
+ Default: 7 * 24 * time.Hour,
+ Dynamic: true,
+ },
+ )
+
+ waitReplicasTimeout = viperutil.Configure(
+ "wait-replicas-timeout",
+ viperutil.Options[time.Duration]{
+ FlagName: "wait-replicas-timeout",
+ Default: 30 * time.Second,
+ Dynamic: true,
+ },
+ )
+
+ tolerableReplicationLag = viperutil.Configure(
+ "tolerable-replication-lag",
+ viperutil.Options[time.Duration]{
+ FlagName: "tolerable-replication-lag",
+ Default: 0 * time.Second,
+ Dynamic: true,
+ },
+ )
+
+ topoInformationRefreshDuration = viperutil.Configure(
+ "topo-information-refresh-duration",
+ viperutil.Options[time.Duration]{
+ FlagName: "topo-information-refresh-duration",
+ Default: 15 * time.Second,
+ Dynamic: true,
+ },
+ )
+
+ recoveryPollDuration = viperutil.Configure(
+ "recovery-poll-duration",
+ viperutil.Options[time.Duration]{
+ FlagName: "recovery-poll-duration",
+ Default: 1 * time.Second,
+ Dynamic: true,
+ },
+ )
+
+ ersEnabled = viperutil.Configure(
+ "allow-emergency-reparent",
+ viperutil.Options[bool]{
+ FlagName: "allow-emergency-reparent",
+ Default: true,
+ Dynamic: true,
+ },
+ )
+
+ convertTabletsWithErrantGTIDs = viperutil.Configure(
+ "change-tablets-with-errant-gtid-to-drained",
+ viperutil.Options[bool]{
+ FlagName: "change-tablets-with-errant-gtid-to-drained",
+ Default: false,
+ Dynamic: true,
+ },
+ )
)
-// RegisterFlags registers the flags required by VTOrc
-func RegisterFlags(fs *pflag.FlagSet) {
- fs.StringVar(&sqliteDataFile, "sqlite-data-file", sqliteDataFile, "SQLite Datafile to use as VTOrc's database")
- fs.DurationVar(&instancePollTime, "instance-poll-time", instancePollTime, "Timer duration on which VTOrc refreshes MySQL information")
- fs.DurationVar(&snapshotTopologyInterval, "snapshot-topology-interval", snapshotTopologyInterval, "Timer duration on which VTOrc takes a snapshot of the current MySQL information it has in the database. Should be in multiple of hours")
- fs.DurationVar(&reasonableReplicationLag, "reasonable-replication-lag", reasonableReplicationLag, "Maximum replication lag on replicas which is deemed to be acceptable")
- fs.StringVar(&auditFileLocation, "audit-file-location", auditFileLocation, "File location where the audit logs are to be stored")
- fs.BoolVar(&auditToBackend, "audit-to-backend", auditToBackend, "Whether to store the audit log in the VTOrc database")
- fs.BoolVar(&auditToSyslog, "audit-to-syslog", auditToSyslog, "Whether to store the audit log in the syslog")
- fs.DurationVar(&auditPurgeDuration, "audit-purge-duration", auditPurgeDuration, "Duration for which audit logs are held before being purged. Should be in multiples of days")
- fs.DurationVar(&recoveryPeriodBlockDuration, "recovery-period-block-duration", recoveryPeriodBlockDuration, "Duration for which a new recovery is blocked on an instance after running a recovery")
- fs.MarkDeprecated("recovery-period-block-duration", "As of v20 this is ignored and will be removed in a future release.")
- fs.BoolVar(&preventCrossCellFailover, "prevent-cross-cell-failover", preventCrossCellFailover, "Prevent VTOrc from promoting a primary in a different cell than the current primary in case of a failover")
- fs.DurationVar(&waitReplicasTimeout, "wait-replicas-timeout", waitReplicasTimeout, "Duration for which to wait for replica's to respond when issuing RPCs")
- fs.DurationVar(&tolerableReplicationLag, "tolerable-replication-lag", tolerableReplicationLag, "Amount of replication lag that is considered acceptable for a tablet to be eligible for promotion when Vitess makes the choice of a new primary in PRS")
- fs.DurationVar(&topoInformationRefreshDuration, "topo-information-refresh-duration", topoInformationRefreshDuration, "Timer duration on which VTOrc refreshes the keyspace and vttablet records from the topology server")
- fs.DurationVar(&recoveryPollDuration, "recovery-poll-duration", recoveryPollDuration, "Timer duration on which VTOrc polls its database to run a recovery")
- fs.BoolVar(&ersEnabled, "allow-emergency-reparent", ersEnabled, "Whether VTOrc should be allowed to run emergency reparent operation when it detects a dead primary")
- fs.BoolVar(&convertTabletsWithErrantGTIDs, "change-tablets-with-errant-gtid-to-drained", convertTabletsWithErrantGTIDs, "Whether VTOrc should be changing the type of tablets with errant GTIDs to DRAINED")
+func init() {
+ servenv.OnParseFor("vtorc", registerFlags)
}
-// Configuration makes for vtorc configuration input, which can be provided by user via JSON formatted file.
-// Some of the parameters have reasonable default values, and some (like database credentials) are
-// strictly expected from user.
-// TODO(sougou): change this to yaml parsing, and possible merge with tabletenv.
-type Configuration struct {
- SQLite3DataFile string // full path to sqlite3 datafile
- InstancePollSeconds uint // Number of seconds between instance reads
- SnapshotTopologiesIntervalHours uint // Interval in hour between snapshot-topologies invocation. Default: 0 (disabled)
- ReasonableReplicationLagSeconds int // Above this value is considered a problem
- AuditLogFile string // Name of log file for audit operations. Disabled when empty.
- AuditToSyslog bool // If true, audit messages are written to syslog
- AuditToBackendDB bool // If true, audit messages are written to the backend DB's `audit` table (default: true)
- AuditPurgeDays uint // Days after which audit entries are purged from the database
- RecoveryPeriodBlockSeconds int // (overrides `RecoveryPeriodBlockMinutes`) The time for which an instance's recovery is kept "active", so as to avoid concurrent recoveries on smae instance as well as flapping
- PreventCrossDataCenterPrimaryFailover bool // When true (default: false), cross-DC primary failover are not allowed, vtorc will do all it can to only fail over within same DC, or else not fail over at all.
- WaitReplicasTimeoutSeconds int // Timeout on amount of time to wait for the replicas in case of ERS. Should be a small value because we should fail-fast. Should not be larger than LockTimeout since that is the total time we use for an ERS.
- TolerableReplicationLagSeconds int // Amount of replication lag that is considered acceptable for a tablet to be eligible for promotion when Vitess makes the choice of a new primary in PRS.
- TopoInformationRefreshSeconds int // Timer duration on which VTOrc refreshes the keyspace and vttablet records from the topo-server.
- RecoveryPollSeconds int // Timer duration on which VTOrc recovery analysis runs
+// registerFlags registers the flags required by VTOrc
+func registerFlags(fs *pflag.FlagSet) {
+ fs.String("sqlite-data-file", sqliteDataFile.Default(), "SQLite Datafile to use as VTOrc's database")
+ fs.Duration("instance-poll-time", instancePollTime.Default(), "Timer duration on which VTOrc refreshes MySQL information")
+ fs.Duration("snapshot-topology-interval", snapshotTopologyInterval.Default(), "Timer duration on which VTOrc takes a snapshot of the current MySQL information it has in the database. Should be in multiples of hours")
+ fs.Duration("reasonable-replication-lag", reasonableReplicationLag.Default(), "Maximum replication lag on replicas which is deemed to be acceptable")
+ fs.String("audit-file-location", auditFileLocation.Default(), "File location where the audit logs are to be stored")
+ fs.Bool("audit-to-backend", auditToBackend.Default(), "Whether to store the audit log in the VTOrc database")
+ fs.Bool("audit-to-syslog", auditToSyslog.Default(), "Whether to store the audit log in the syslog")
+ fs.Duration("audit-purge-duration", auditPurgeDuration.Default(), "Duration for which audit logs are held before being purged. Should be in multiples of days")
+ fs.Bool("prevent-cross-cell-failover", preventCrossCellFailover.Default(), "Prevent VTOrc from promoting a primary in a different cell than the current primary in case of a failover")
+ fs.Duration("wait-replicas-timeout", waitReplicasTimeout.Default(), "Duration for which to wait for replicas to respond when issuing RPCs")
+ fs.Duration("tolerable-replication-lag", tolerableReplicationLag.Default(), "Amount of replication lag that is considered acceptable for a tablet to be eligible for promotion when Vitess makes the choice of a new primary in PRS")
+ fs.Duration("topo-information-refresh-duration", topoInformationRefreshDuration.Default(), "Timer duration on which VTOrc refreshes the keyspace and vttablet records from the topology server")
+ fs.Duration("recovery-poll-duration", recoveryPollDuration.Default(), "Timer duration on which VTOrc polls its database to run a recovery")
+ fs.Bool("allow-emergency-reparent", ersEnabled.Default(), "Whether VTOrc should be allowed to run emergency reparent operation when it detects a dead primary")
+ fs.Bool("change-tablets-with-errant-gtid-to-drained", convertTabletsWithErrantGTIDs.Default(), "Whether VTOrc should be changing the type of tablets with errant GTIDs to DRAINED")
+
+ viperutil.BindFlags(fs,
+ instancePollTime,
+ preventCrossCellFailover,
+ sqliteDataFile,
+ snapshotTopologyInterval,
+ reasonableReplicationLag,
+ auditFileLocation,
+ auditToBackend,
+ auditToSyslog,
+ auditPurgeDuration,
+ waitReplicasTimeout,
+ tolerableReplicationLag,
+ topoInformationRefreshDuration,
+ recoveryPollDuration,
+ ersEnabled,
+ convertTabletsWithErrantGTIDs,
+ )
}
-// ToJSONString will marshal this configuration as JSON
-func (config *Configuration) ToJSONString() string {
- b, _ := json.Marshal(config)
- return string(b)
+// GetInstancePollTime is a getter function.
+func GetInstancePollTime() time.Duration {
+ return instancePollTime.Get()
}
-// Config is *the* configuration instance, used globally to get configuration data
-var Config = newConfiguration()
-var readFileNames []string
-
-// UpdateConfigValuesFromFlags is used to update the config values from the flags defined.
-// This is done before we read any configuration files from the user. So the config files take precedence.
-func UpdateConfigValuesFromFlags() {
- Config.SQLite3DataFile = sqliteDataFile
- Config.InstancePollSeconds = uint(instancePollTime / time.Second)
- Config.InstancePollSeconds = uint(instancePollTime / time.Second)
- Config.SnapshotTopologiesIntervalHours = uint(snapshotTopologyInterval / time.Hour)
- Config.ReasonableReplicationLagSeconds = int(reasonableReplicationLag / time.Second)
- Config.AuditLogFile = auditFileLocation
- Config.AuditToBackendDB = auditToBackend
- Config.AuditToSyslog = auditToSyslog
- Config.AuditPurgeDays = uint(auditPurgeDuration / (time.Hour * 24))
- Config.RecoveryPeriodBlockSeconds = int(recoveryPeriodBlockDuration / time.Second)
- Config.PreventCrossDataCenterPrimaryFailover = preventCrossCellFailover
- Config.WaitReplicasTimeoutSeconds = int(waitReplicasTimeout / time.Second)
- Config.TolerableReplicationLagSeconds = int(tolerableReplicationLag / time.Second)
- Config.TopoInformationRefreshSeconds = int(topoInformationRefreshDuration / time.Second)
- Config.RecoveryPollSeconds = int(recoveryPollDuration / time.Second)
+// SetInstancePollTime is a setter function.
+func SetInstancePollTime(v time.Duration) {
+ instancePollTime.Set(v)
}
-// ERSEnabled reports whether VTOrc is allowed to run ERS or not.
-func ERSEnabled() bool {
- return ersEnabled
+// GetInstancePollSeconds gets the instance poll time, converted to seconds.
+func GetInstancePollSeconds() uint {
+ return uint(instancePollTime.Get() / time.Second)
}
-// SetERSEnabled sets the value for the ersEnabled variable. This should only be used from tests.
-func SetERSEnabled(val bool) {
- ersEnabled = val
+// GetPreventCrossCellFailover is a getter function.
+func GetPreventCrossCellFailover() bool {
+ return preventCrossCellFailover.Get()
}
-// ConvertTabletWithErrantGTIDs reports whether VTOrc is allowed to change the tablet type of tablets with errant GTIDs to DRAINED.
-func ConvertTabletWithErrantGTIDs() bool {
- return convertTabletsWithErrantGTIDs
+// GetSQLiteDataFile is a getter function.
+func GetSQLiteDataFile() string {
+ return sqliteDataFile.Get()
}
-// SetConvertTabletWithErrantGTIDs sets the value for the convertTabletWithErrantGTIDs variable. This should only be used from tests.
-func SetConvertTabletWithErrantGTIDs(val bool) {
- convertTabletsWithErrantGTIDs = val
+// GetReasonableReplicationLagSeconds gets the reasonable replication lag, converted to seconds.
+func GetReasonableReplicationLagSeconds() int64 {
+ return int64(reasonableReplicationLag.Get() / time.Second)
+}
+
+// GetSnapshotTopologyInterval is a getter function.
+func GetSnapshotTopologyInterval() time.Duration {
+ return snapshotTopologyInterval.Get()
}
-// LogConfigValues is used to log the config values.
-func LogConfigValues() {
- b, _ := json.MarshalIndent(Config, "", "\t")
- log.Infof("Running with Configuration - %v", string(b))
+// GetAuditFileLocation is a getter function.
+func GetAuditFileLocation() string {
+ return auditFileLocation.Get()
}
-func newConfiguration() *Configuration {
- return &Configuration{
- SQLite3DataFile: "file::memory:?mode=memory&cache=shared",
- InstancePollSeconds: 5,
- SnapshotTopologiesIntervalHours: 0,
- ReasonableReplicationLagSeconds: 10,
- AuditLogFile: "",
- AuditToSyslog: false,
- AuditToBackendDB: false,
- AuditPurgeDays: 7,
- RecoveryPeriodBlockSeconds: 30,
- PreventCrossDataCenterPrimaryFailover: false,
- WaitReplicasTimeoutSeconds: 30,
- TopoInformationRefreshSeconds: 15,
- RecoveryPollSeconds: 1,
- }
+// SetAuditFileLocation is a setter function.
+func SetAuditFileLocation(v string) {
+ auditFileLocation.Set(v)
}
-func (config *Configuration) postReadAdjustments() error {
- if config.SQLite3DataFile == "" {
- return fmt.Errorf("SQLite3DataFile must be set")
- }
+// GetAuditToSyslog is a getter function.
+func GetAuditToSyslog() bool {
+ return auditToSyslog.Get()
+}
+
+// SetAuditToSyslog is a setter function.
+func SetAuditToSyslog(v bool) {
+ auditToSyslog.Set(v)
+}
+
+// GetAuditToBackend is a getter function.
+func GetAuditToBackend() bool {
+ return auditToBackend.Get()
+}
+
+// SetAuditToBackend is a setter function.
+func SetAuditToBackend(v bool) {
+ auditToBackend.Set(v)
+}
- return nil
+// GetAuditPurgeDays gets the audit purge duration, converted to days.
+func GetAuditPurgeDays() int64 {
+ return int64(auditPurgeDuration.Get() / (24 * time.Hour))
}
-// read reads configuration from given file, or silently skips if the file does not exist.
-// If the file does exist, then it is expected to be in valid JSON format or the function bails out.
-func read(fileName string) (*Configuration, error) {
- if fileName == "" {
- return Config, fmt.Errorf("Empty file name")
- }
- file, err := os.Open(fileName)
- if err != nil {
- return Config, err
- }
- decoder := json.NewDecoder(file)
- err = decoder.Decode(Config)
- if err == nil {
- log.Infof("Read config: %s", fileName)
- } else {
- log.Fatal("Cannot read config file:", fileName, err)
- }
- if err := Config.postReadAdjustments(); err != nil {
- log.Fatal(err)
- }
- return Config, err
+// SetAuditPurgeDays sets the audit purge duration.
+func SetAuditPurgeDays(days int64) {
+ auditPurgeDuration.Set(time.Duration(days) * 24 * time.Hour)
}
-// Read reads configuration from zero, either, some or all given files, in order of input.
-// A file can override configuration provided in previous file.
-func Read(fileNames ...string) *Configuration {
- for _, fileName := range fileNames {
- _, _ = read(fileName)
- }
- readFileNames = fileNames
- return Config
+// GetWaitReplicasTimeout is a getter function.
+func GetWaitReplicasTimeout() time.Duration {
+ return waitReplicasTimeout.Get()
}
-// ForceRead reads configuration from given file name or bails out if it fails
-func ForceRead(fileName string) *Configuration {
- _, err := read(fileName)
- if err != nil {
- log.Fatal("Cannot read config file:", fileName, err)
- }
- readFileNames = []string{fileName}
- return Config
+// GetTolerableReplicationLag is a getter function.
+func GetTolerableReplicationLag() time.Duration {
+ return tolerableReplicationLag.Get()
}
-// Reload re-reads configuration from last used files
-func Reload(extraFileNames ...string) *Configuration {
- for _, fileName := range readFileNames {
- _, _ = read(fileName)
- }
- for _, fileName := range extraFileNames {
- _, _ = read(fileName)
- }
- return Config
+// GetTopoInformationRefreshDuration is a getter function.
+func GetTopoInformationRefreshDuration() time.Duration {
+ return topoInformationRefreshDuration.Get()
+}
+
+// GetRecoveryPollDuration is a getter function.
+func GetRecoveryPollDuration() time.Duration {
+ return recoveryPollDuration.Get()
+}
+
+// ERSEnabled reports whether VTOrc is allowed to run ERS or not.
+func ERSEnabled() bool {
+ return ersEnabled.Get()
+}
+
+// SetERSEnabled sets the value for the ersEnabled variable. This should only be used from tests.
+func SetERSEnabled(val bool) {
+ ersEnabled.Set(val)
+}
+
+// ConvertTabletWithErrantGTIDs reports whether VTOrc is allowed to change the tablet type of tablets with errant GTIDs to DRAINED.
+func ConvertTabletWithErrantGTIDs() bool {
+ return convertTabletsWithErrantGTIDs.Get()
+}
+
+// SetConvertTabletWithErrantGTIDs sets the value for the convertTabletWithErrantGTIDs variable. This should only be used from tests.
+func SetConvertTabletWithErrantGTIDs(val bool) {
+ convertTabletsWithErrantGTIDs.Set(val)
}
// MarkConfigurationLoaded is called once configuration has first been loaded.
diff --git a/go/vt/vtorc/config/config_test.go b/go/vt/vtorc/config/config_test.go
deleted file mode 100644
index 2009b476f1d..00000000000
--- a/go/vt/vtorc/config/config_test.go
+++ /dev/null
@@ -1,234 +0,0 @@
-/*
-Copyright 2022 The Vitess Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package config
-
-import (
- "testing"
- "time"
-
- "github.com/stretchr/testify/require"
-)
-
-func TestUpdateConfigValuesFromFlags(t *testing.T) {
- t.Run("defaults", func(t *testing.T) {
- // Restore the changes we make to the Config parameter
- defer func() {
- Config = newConfiguration()
- }()
- defaultConfig := newConfiguration()
- UpdateConfigValuesFromFlags()
- require.Equal(t, defaultConfig, Config)
- })
-
- t.Run("override auditPurgeDuration", func(t *testing.T) {
- oldAuditPurgeDuration := auditPurgeDuration
- auditPurgeDuration = 8 * time.Hour * 24
- auditPurgeDuration += time.Second + 4*time.Minute
- // Restore the changes we make
- defer func() {
- Config = newConfiguration()
- auditPurgeDuration = oldAuditPurgeDuration
- }()
-
- testConfig := newConfiguration()
- // auditPurgeDuration is supposed to be in multiples of days.
- // If it is not, then we round down to the nearest number of days.
- testConfig.AuditPurgeDays = 8
- UpdateConfigValuesFromFlags()
- require.Equal(t, testConfig, Config)
- })
-
- t.Run("override sqliteDataFile", func(t *testing.T) {
- oldSqliteDataFile := sqliteDataFile
- sqliteDataFile = "newVal"
- // Restore the changes we make
- defer func() {
- Config = newConfiguration()
- sqliteDataFile = oldSqliteDataFile
- }()
-
- testConfig := newConfiguration()
- testConfig.SQLite3DataFile = "newVal"
- UpdateConfigValuesFromFlags()
- require.Equal(t, testConfig, Config)
- })
-
- t.Run("override instancePollTime", func(t *testing.T) {
- oldInstancePollTime := instancePollTime
- instancePollTime = 7 * time.Second
- // Restore the changes we make
- defer func() {
- Config = newConfiguration()
- instancePollTime = oldInstancePollTime
- }()
-
- testConfig := newConfiguration()
- testConfig.InstancePollSeconds = 7
- UpdateConfigValuesFromFlags()
- require.Equal(t, testConfig, Config)
- })
-
- t.Run("override snapshotTopologyInterval", func(t *testing.T) {
- oldSnapshotTopologyInterval := snapshotTopologyInterval
- snapshotTopologyInterval = 1 * time.Hour
- // Restore the changes we make
- defer func() {
- Config = newConfiguration()
- snapshotTopologyInterval = oldSnapshotTopologyInterval
- }()
-
- testConfig := newConfiguration()
- testConfig.SnapshotTopologiesIntervalHours = 1
- UpdateConfigValuesFromFlags()
- require.Equal(t, testConfig, Config)
- })
-
- t.Run("override reasonableReplicationLag", func(t *testing.T) {
- oldReasonableReplicationLag := reasonableReplicationLag
- reasonableReplicationLag = 15 * time.Second
- // Restore the changes we make
- defer func() {
- Config = newConfiguration()
- reasonableReplicationLag = oldReasonableReplicationLag
- }()
-
- testConfig := newConfiguration()
- testConfig.ReasonableReplicationLagSeconds = 15
- UpdateConfigValuesFromFlags()
- require.Equal(t, testConfig, Config)
- })
-
- t.Run("override auditFileLocation", func(t *testing.T) {
- oldAuditFileLocation := auditFileLocation
- auditFileLocation = "newFile"
- // Restore the changes we make
- defer func() {
- Config = newConfiguration()
- auditFileLocation = oldAuditFileLocation
- }()
-
- testConfig := newConfiguration()
- testConfig.AuditLogFile = "newFile"
- UpdateConfigValuesFromFlags()
- require.Equal(t, testConfig, Config)
- })
-
- t.Run("override auditToBackend", func(t *testing.T) {
- oldAuditToBackend := auditToBackend
- auditToBackend = true
- // Restore the changes we make
- defer func() {
- Config = newConfiguration()
- auditToBackend = oldAuditToBackend
- }()
-
- testConfig := newConfiguration()
- testConfig.AuditToBackendDB = true
- UpdateConfigValuesFromFlags()
- require.Equal(t, testConfig, Config)
- })
-
- t.Run("override auditToSyslog", func(t *testing.T) {
- oldAuditToSyslog := auditToSyslog
- auditToSyslog = true
- // Restore the changes we make
- defer func() {
- Config = newConfiguration()
- auditToSyslog = oldAuditToSyslog
- }()
-
- testConfig := newConfiguration()
- testConfig.AuditToSyslog = true
- UpdateConfigValuesFromFlags()
- require.Equal(t, testConfig, Config)
- })
-
- t.Run("override recoveryPeriodBlockDuration", func(t *testing.T) {
- oldRecoveryPeriodBlockDuration := recoveryPeriodBlockDuration
- recoveryPeriodBlockDuration = 5 * time.Minute
- // Restore the changes we make
- defer func() {
- Config = newConfiguration()
- recoveryPeriodBlockDuration = oldRecoveryPeriodBlockDuration
- }()
-
- testConfig := newConfiguration()
- testConfig.RecoveryPeriodBlockSeconds = 300
- UpdateConfigValuesFromFlags()
- require.Equal(t, testConfig, Config)
- })
-
- t.Run("override preventCrossCellFailover", func(t *testing.T) {
- oldPreventCrossCellFailover := preventCrossCellFailover
- preventCrossCellFailover = true
- // Restore the changes we make
- defer func() {
- Config = newConfiguration()
- preventCrossCellFailover = oldPreventCrossCellFailover
- }()
-
- testConfig := newConfiguration()
- testConfig.PreventCrossDataCenterPrimaryFailover = true
- UpdateConfigValuesFromFlags()
- require.Equal(t, testConfig, Config)
- })
-
- t.Run("override waitReplicasTimeout", func(t *testing.T) {
- oldWaitReplicasTimeout := waitReplicasTimeout
- waitReplicasTimeout = 3*time.Minute + 4*time.Second
- // Restore the changes we make
- defer func() {
- Config = newConfiguration()
- waitReplicasTimeout = oldWaitReplicasTimeout
- }()
-
- testConfig := newConfiguration()
- testConfig.WaitReplicasTimeoutSeconds = 184
- UpdateConfigValuesFromFlags()
- require.Equal(t, testConfig, Config)
- })
-
- t.Run("override topoInformationRefreshDuration", func(t *testing.T) {
- oldTopoInformationRefreshDuration := topoInformationRefreshDuration
- topoInformationRefreshDuration = 1 * time.Second
- // Restore the changes we make
- defer func() {
- Config = newConfiguration()
- topoInformationRefreshDuration = oldTopoInformationRefreshDuration
- }()
-
- testConfig := newConfiguration()
- testConfig.TopoInformationRefreshSeconds = 1
- UpdateConfigValuesFromFlags()
- require.Equal(t, testConfig, Config)
- })
-
- t.Run("override recoveryPollDuration", func(t *testing.T) {
- oldRecoveryPollDuration := recoveryPollDuration
- recoveryPollDuration = 15 * time.Second
- // Restore the changes we make
- defer func() {
- Config = newConfiguration()
- recoveryPollDuration = oldRecoveryPollDuration
- }()
-
- testConfig := newConfiguration()
- testConfig.RecoveryPollSeconds = 15
- UpdateConfigValuesFromFlags()
- require.Equal(t, testConfig, Config)
- })
-}
diff --git a/go/vt/vtorc/db/db.go b/go/vt/vtorc/db/db.go
index 64143477645..870a3d15949 100644
--- a/go/vt/vtorc/db/db.go
+++ b/go/vt/vtorc/db/db.go
@@ -44,10 +44,12 @@ func (m *vtorcDB) QueryVTOrc(query string, argsArray []any, onRow func(sqlutils.
// OpenTopology returns the DB instance for the vtorc backed database
func OpenVTOrc() (db *sql.DB, err error) {
var fromCache bool
- db, fromCache, err = sqlutils.GetSQLiteDB(config.Config.SQLite3DataFile)
+ db, fromCache, err = sqlutils.GetSQLiteDB(config.GetSQLiteDataFile())
if err == nil && !fromCache {
- log.Infof("Connected to vtorc backend: sqlite on %v", config.Config.SQLite3DataFile)
- _ = initVTOrcDB(db)
+ log.Infof("Connected to vtorc backend: sqlite on %v", config.GetSQLiteDataFile())
+ if err := initVTOrcDB(db); err != nil {
+ log.Fatalf("Cannot initiate vtorc: %+v", err)
+ }
}
if db != nil {
db.SetMaxOpenConns(1)
@@ -58,13 +60,13 @@ func OpenVTOrc() (db *sql.DB, err error) {
// registerVTOrcDeployment updates the vtorc_db_deployments table upon successful deployment
func registerVTOrcDeployment(db *sql.DB) error {
- query := `
- replace into vtorc_db_deployments (
- deployed_version, deployed_timestamp
- ) values (
- ?, datetime('now')
- )
- `
+ query := `REPLACE INTO vtorc_db_deployments (
+ deployed_version,
+ deployed_timestamp
+ ) VALUES (
+ ?,
+ DATETIME('now')
+ )`
if _, err := execInternal(db, query, ""); err != nil {
log.Fatalf("Unable to write to vtorc_db_deployments: %+v", err)
}
@@ -76,27 +78,24 @@ func registerVTOrcDeployment(db *sql.DB) error {
func deployStatements(db *sql.DB, queries []string) error {
tx, err := db.Begin()
if err != nil {
- log.Fatal(err.Error())
return err
}
for _, query := range queries {
if _, err := tx.Exec(query); err != nil {
- log.Fatalf("Cannot initiate vtorc: %+v; query=%+v", err, query)
return err
}
}
- if err := tx.Commit(); err != nil {
- log.Fatal(err.Error())
- }
- return nil
+ return tx.Commit()
}
// ClearVTOrcDatabase is used to clear the VTOrc database. This function is meant to be used by tests to clear the
// database to get a clean slate without starting a new one.
func ClearVTOrcDatabase() {
- db, _, _ := sqlutils.GetSQLiteDB(config.Config.SQLite3DataFile)
+ db, _, _ := sqlutils.GetSQLiteDB(config.GetSQLiteDataFile())
if db != nil {
- _ = initVTOrcDB(db)
+ if err := initVTOrcDB(db); err != nil {
+ log.Fatalf("Cannot re-initiate vtorc: %+v", err)
+ }
}
}
@@ -105,20 +104,24 @@ func ClearVTOrcDatabase() {
func initVTOrcDB(db *sql.DB) error {
log.Info("Initializing vtorc")
log.Info("Migrating database schema")
- _ = deployStatements(db, vtorcBackend)
- _ = registerVTOrcDeployment(db)
-
- _, _ = ExecVTOrc(`PRAGMA journal_mode = WAL`)
- _, _ = ExecVTOrc(`PRAGMA synchronous = NORMAL`)
-
+ if err := deployStatements(db, vtorcBackend); err != nil {
+ return err
+ }
+ if err := registerVTOrcDeployment(db); err != nil {
+ return err
+ }
+ if _, err := ExecVTOrc(`PRAGMA journal_mode = WAL`); err != nil {
+ return err
+ }
+ if _, err := ExecVTOrc(`PRAGMA synchronous = NORMAL`); err != nil {
+ return err
+ }
return nil
}
// execInternal
func execInternal(db *sql.DB, query string, args ...any) (sql.Result, error) {
- var err error
- res, err := sqlutils.ExecNoPrepare(db, query, args...)
- return res, err
+ return sqlutils.ExecNoPrepare(db, query, args...)
}
// ExecVTOrc will execute given query on the vtorc backend database.
diff --git a/go/vt/vtorc/discovery/queue.go b/go/vt/vtorc/discovery/queue.go
index 95751c6ae25..4b18303959b 100644
--- a/go/vt/vtorc/discovery/queue.go
+++ b/go/vt/vtorc/discovery/queue.go
@@ -153,7 +153,7 @@ func (q *Queue) Consume() string {
// alarm if have been waiting for too long
timeOnQueue := time.Since(q.queuedKeys[key])
- if timeOnQueue > time.Duration(config.Config.InstancePollSeconds)*time.Second {
+ if timeOnQueue > config.GetInstancePollTime() {
log.Warningf("key %v spent %.4fs waiting on a discoveryQueue", key, timeOnQueue.Seconds())
}
diff --git a/go/vt/vtorc/inst/analysis.go b/go/vt/vtorc/inst/analysis.go
index 66d6c6dd9ce..3e9e81c5c9f 100644
--- a/go/vt/vtorc/inst/analysis.go
+++ b/go/vt/vtorc/inst/analysis.go
@@ -144,5 +144,5 @@ func (replicationAnalysis *ReplicationAnalysis) MarshalJSON() ([]byte, error) {
// ValidSecondsFromSeenToLastAttemptedCheck returns the maximum allowed elapsed time
// between last_attempted_check to last_checked before we consider the instance as invalid.
func ValidSecondsFromSeenToLastAttemptedCheck() uint {
- return config.Config.InstancePollSeconds + 1
+ return config.GetInstancePollSeconds()
}
diff --git a/go/vt/vtorc/inst/analysis_dao.go b/go/vt/vtorc/inst/analysis_dao.go
index 25d93a6864b..07830bf7dda 100644
--- a/go/vt/vtorc/inst/analysis_dao.go
+++ b/go/vt/vtorc/inst/analysis_dao.go
@@ -47,7 +47,7 @@ func init() {
func initializeAnalysisDaoPostConfiguration() {
config.WaitForConfigurationToBeLoaded()
- recentInstantAnalysis = cache.New(time.Duration(config.Config.RecoveryPollSeconds*2)*time.Second, time.Second)
+ recentInstantAnalysis = cache.New(config.GetRecoveryPollDuration()*2, time.Second)
}
type clusterAnalysis struct {
@@ -68,9 +68,8 @@ func GetReplicationAnalysis(keyspace string, shard string, hints *ReplicationAna
}
// TODO(sougou); deprecate ReduceReplicationAnalysisCount
- args := sqlutils.Args(config.Config.ReasonableReplicationLagSeconds, ValidSecondsFromSeenToLastAttemptedCheck(), config.Config.ReasonableReplicationLagSeconds, keyspace, shard)
- query := `
- SELECT
+ args := sqlutils.Args(config.GetReasonableReplicationLagSeconds(), ValidSecondsFromSeenToLastAttemptedCheck(), config.GetReasonableReplicationLagSeconds(), keyspace, shard)
+ query := `SELECT
vitess_tablet.info AS tablet_info,
vitess_tablet.tablet_type,
vitess_tablet.primary_timestamp,
@@ -91,13 +90,13 @@ func GetReplicationAnalysis(keyspace string, shard string, hints *ReplicationAna
IFNULL(
primary_instance.binary_log_file = database_instance_stale_binlog_coordinates.binary_log_file
AND primary_instance.binary_log_pos = database_instance_stale_binlog_coordinates.binary_log_pos
- AND database_instance_stale_binlog_coordinates.first_seen < datetime('now', printf('-%d second', ?)),
+ AND database_instance_stale_binlog_coordinates.first_seen < DATETIME('now', PRINTF('-%d SECOND', ?)),
0
)
) AS is_stale_binlog_coordinates,
MIN(
primary_instance.last_checked <= primary_instance.last_seen
- and primary_instance.last_attempted_check <= datetime(primary_instance.last_seen, printf('+%d second', ?))
+ and primary_instance.last_attempted_check <= DATETIME(primary_instance.last_seen, PRINTF('+%d SECOND', ?))
) = 1 AS is_last_check_valid,
/* To be considered a primary, traditional async replication must not be present/valid AND the host should either */
/* not be a replication group member OR be the primary of the replication group */
@@ -655,13 +654,13 @@ func auditInstanceAnalysisInChangelog(tabletAlias string, analysisCode AnalysisC
// Find if the lastAnalysisHasChanged or not while updating the row if it has.
lastAnalysisChanged := false
{
- sqlResult, err := db.ExecVTOrc(`
- update database_instance_last_analysis set
+ sqlResult, err := db.ExecVTOrc(`UPDATE database_instance_last_analysis
+ SET
analysis = ?,
- analysis_timestamp = datetime('now')
- where
+ analysis_timestamp = DATETIME('now')
+ WHERE
alias = ?
- and analysis != ?
+ AND analysis != ?
`,
string(analysisCode), tabletAlias, string(analysisCode),
)
@@ -682,13 +681,16 @@ func auditInstanceAnalysisInChangelog(tabletAlias string, analysisCode AnalysisC
firstInsertion := false
if !lastAnalysisChanged {
// The insert only returns more than 1 row changed if this is the first insertion.
- sqlResult, err := db.ExecVTOrc(`
- insert or ignore into database_instance_last_analysis (
- alias, analysis_timestamp, analysis
- ) values (
- ?, datetime('now'), ?
- )
- `,
+ sqlResult, err := db.ExecVTOrc(`INSERT OR IGNORE
+ INTO database_instance_last_analysis (
+ alias,
+ analysis_timestamp,
+ analysis
+ ) VALUES (
+ ?,
+ DATETIME('now'),
+ ?
+ )`,
tabletAlias, string(analysisCode),
)
if err != nil {
@@ -708,13 +710,16 @@ func auditInstanceAnalysisInChangelog(tabletAlias string, analysisCode AnalysisC
return nil
}
- _, err := db.ExecVTOrc(`
- insert into database_instance_analysis_changelog (
- alias, analysis_timestamp, analysis
- ) values (
- ?, datetime('now'), ?
- )
- `,
+ _, err := db.ExecVTOrc(`INSERT
+ INTO database_instance_analysis_changelog (
+ alias,
+ analysis_timestamp,
+ analysis
+ ) VALUES (
+ ?,
+ DATETIME('now'),
+ ?
+ )`,
tabletAlias, string(analysisCode),
)
if err == nil {
@@ -727,12 +732,11 @@ func auditInstanceAnalysisInChangelog(tabletAlias string, analysisCode AnalysisC
// ExpireInstanceAnalysisChangelog removes old-enough analysis entries from the changelog
func ExpireInstanceAnalysisChangelog() error {
- _, err := db.ExecVTOrc(`
- delete
- from database_instance_analysis_changelog
- where
- analysis_timestamp < datetime('now', printf('-%d hour', ?))
- `,
+ _, err := db.ExecVTOrc(`DELETE
+ FROM database_instance_analysis_changelog
+ WHERE
+ analysis_timestamp < DATETIME('now', PRINTF('-%d HOUR', ?))
+ `,
config.UnseenInstanceForgetHours,
)
if err != nil {
diff --git a/go/vt/vtorc/inst/audit_dao.go b/go/vt/vtorc/inst/audit_dao.go
index 642fb187509..7ae60fba927 100644
--- a/go/vt/vtorc/inst/audit_dao.go
+++ b/go/vt/vtorc/inst/audit_dao.go
@@ -38,10 +38,10 @@ func AuditOperation(auditType string, tabletAlias string, message string) error
}
auditWrittenToFile := false
- if config.Config.AuditLogFile != "" {
+ if config.GetAuditFileLocation() != "" {
auditWrittenToFile = true
go func() {
- f, err := os.OpenFile(config.Config.AuditLogFile, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0640)
+ f, err := os.OpenFile(config.GetAuditFileLocation(), os.O_RDWR|os.O_CREATE|os.O_APPEND, 0640)
if err != nil {
log.Error(err)
return
@@ -54,15 +54,23 @@ func AuditOperation(auditType string, tabletAlias string, message string) error
}
}()
}
- if config.Config.AuditToBackendDB {
- _, err := db.ExecVTOrc(`
- insert
- into audit (
- audit_timestamp, audit_type, alias, keyspace, shard, message
- ) VALUES (
- datetime('now'), ?, ?, ?, ?, ?
- )
- `,
+ if config.GetAuditToBackend() {
+ _, err := db.ExecVTOrc(`INSERT
+ INTO audit (
+ audit_timestamp,
+ audit_type,
+ alias,
+ keyspace,
+ shard,
+ message
+ ) VALUES (
+ DATETIME('now'),
+ ?,
+ ?,
+ ?,
+ ?,
+ ?
+ )`,
auditType,
tabletAlias,
keyspace,
diff --git a/go/vt/vtorc/inst/audit_dao_test.go b/go/vt/vtorc/inst/audit_dao_test.go
index 1d50de4c146..d22e9177dc3 100644
--- a/go/vt/vtorc/inst/audit_dao_test.go
+++ b/go/vt/vtorc/inst/audit_dao_test.go
@@ -35,13 +35,13 @@ import (
// This test also verifies that we are able to read the recent audits that are written to the databaes.
func TestAuditOperation(t *testing.T) {
// Restore original configurations
- originalAuditSysLog := config.Config.AuditToSyslog
- originalAuditLogFile := config.Config.AuditLogFile
- originalAuditBackend := config.Config.AuditToBackendDB
+ originalAuditSysLog := config.GetAuditToSyslog()
+ originalAuditLogFile := config.GetAuditFileLocation()
+ originalAuditBackend := config.GetAuditToBackend()
defer func() {
- config.Config.AuditToSyslog = originalAuditSysLog
- config.Config.AuditLogFile = originalAuditLogFile
- config.Config.AuditToBackendDB = originalAuditBackend
+ config.SetAuditToSyslog(originalAuditSysLog)
+ config.SetAuditFileLocation(originalAuditLogFile)
+ config.SetAuditToBackend(originalAuditBackend)
}()
orcDb, err := db.OpenVTOrc()
@@ -78,9 +78,9 @@ func TestAuditOperation(t *testing.T) {
message := "test-message"
t.Run("audit to backend", func(t *testing.T) {
- config.Config.AuditLogFile = ""
- config.Config.AuditToSyslog = false
- config.Config.AuditToBackendDB = true
+ config.SetAuditFileLocation("")
+ config.SetAuditToSyslog(false)
+ config.SetAuditToBackend(true)
// Auditing should succeed as expected
err = AuditOperation(auditType, tab100Alias, message)
@@ -106,13 +106,13 @@ func TestAuditOperation(t *testing.T) {
})
t.Run("audit to File", func(t *testing.T) {
- config.Config.AuditToBackendDB = false
- config.Config.AuditToSyslog = false
+ config.SetAuditToBackend(false)
+ config.SetAuditToSyslog(false)
file, err := os.CreateTemp("", "test-auditing-*")
require.NoError(t, err)
defer os.Remove(file.Name())
- config.Config.AuditLogFile = file.Name()
+ config.SetAuditFileLocation(file.Name())
err = AuditOperation(auditType, tab100Alias, message)
require.NoError(t, err)
diff --git a/go/vt/vtorc/inst/instance_dao.go b/go/vt/vtorc/inst/instance_dao.go
index bd4438dd05f..d1421dbc91d 100644
--- a/go/vt/vtorc/inst/instance_dao.go
+++ b/go/vt/vtorc/inst/instance_dao.go
@@ -80,7 +80,7 @@ func init() {
func initializeInstanceDao() {
config.WaitForConfigurationToBeLoaded()
- forgetAliases = cache.New(time.Duration(config.Config.InstancePollSeconds*3)*time.Second, time.Second)
+ forgetAliases = cache.New(config.GetInstancePollTime()*3, time.Second)
cacheInitializationCompleted.Store(true)
}
@@ -114,10 +114,15 @@ func ExecDBWriteFunc(f func() error) error {
func ExpireTableData(tableName string, timestampColumn string) error {
writeFunc := func() error {
- _, err := db.ExecVTOrc(
- fmt.Sprintf("delete from %s where %s < datetime('now', printf('-%%d DAY', ?))", tableName, timestampColumn),
- config.Config.AuditPurgeDays,
+ query := fmt.Sprintf(`DELETE
+ FROM %s
+ WHERE
+ %s < DATETIME('now', PRINTF('-%%d DAY', ?))
+ `,
+ tableName,
+ timestampColumn,
)
+ _, err := db.ExecVTOrc(query, config.GetAuditPurgeDays())
return err
}
return ExecDBWriteFunc(writeFunc)
@@ -357,35 +362,7 @@ Cleanup:
// Add replication group ancestry UUID as well. Otherwise, VTOrc thinks there are errant GTIDs in group
// members and its replicas, even though they are not.
instance.AncestryUUID = strings.Trim(instance.AncestryUUID, ",")
- if instance.ExecutedGtidSet != "" && instance.primaryExecutedGtidSet != "" {
- // Compare primary & replica GTID sets, but ignore the sets that present the primary's UUID.
- // This is because vtorc may pool primary and replica at an inconvenient timing,
- // such that the replica may _seems_ to have more entries than the primary, when in fact
- // it's just that the primary's probing is stale.
- redactedExecutedGtidSet, _ := NewOracleGtidSet(instance.ExecutedGtidSet)
- for _, uuid := range strings.Split(instance.AncestryUUID, ",") {
- if uuid != instance.ServerUUID {
- redactedExecutedGtidSet.RemoveUUID(uuid)
- }
- if instance.IsCoPrimary && uuid == instance.ServerUUID {
- // If this is a co-primary, then this server is likely to show its own generated GTIDs as errant,
- // because its co-primary has not applied them yet
- redactedExecutedGtidSet.RemoveUUID(uuid)
- }
- }
- // Avoid querying the database if there's no point:
- if !redactedExecutedGtidSet.IsEmpty() {
- redactedPrimaryExecutedGtidSet, _ := NewOracleGtidSet(instance.primaryExecutedGtidSet)
- redactedPrimaryExecutedGtidSet.RemoveUUID(instance.SourceUUID)
-
- instance.GtidErrant, err = replication.Subtract(redactedExecutedGtidSet.String(), redactedPrimaryExecutedGtidSet.String())
- if err == nil {
- var gtidCount int64
- gtidCount, err = replication.GTIDCount(instance.GtidErrant)
- currentErrantGTIDCount.Set(tabletAlias, gtidCount)
- }
- }
- }
+ err = detectErrantGTIDs(instance, tablet)
}
latency.Stop("instance")
@@ -412,6 +389,63 @@ Cleanup:
return nil, err
}
+// detectErrantGTIDs detects the errant GTIDs on an instance.
+func detectErrantGTIDs(instance *Instance, tablet *topodatapb.Tablet) (err error) {
+ // If the tablet is not replicating from anyone, then it could be the previous primary.
+ // We should check for errant GTIDs by finding the difference with the shard's current primary.
+ if instance.primaryExecutedGtidSet == "" && instance.SourceHost == "" {
+ var primaryInstance *Instance
+ primaryAlias, _, _ := ReadShardPrimaryInformation(tablet.Keyspace, tablet.Shard)
+ if primaryAlias != "" {
+ // Check if the current tablet is the primary.
+ // If it is, then we don't need to run errant gtid detection on it.
+ if primaryAlias == instance.InstanceAlias {
+ return nil
+ }
+ primaryInstance, _, _ = ReadInstance(primaryAlias)
+ }
+ // Only run errant GTID detection, if we are sure that the data read of the current primary
+ // is up-to-date enough to reflect that it has been promoted. This is needed to prevent
+ // flagging incorrect errant GTIDs. If we were to use old data, we could have some GTIDs
+ // accepted by the old primary (this tablet) that don't show in the new primary's set.
+ if primaryInstance != nil {
+ if primaryInstance.SourceHost == "" {
+ instance.primaryExecutedGtidSet = primaryInstance.ExecutedGtidSet
+ }
+ }
+ }
+ if instance.ExecutedGtidSet != "" && instance.primaryExecutedGtidSet != "" {
+ // Compare primary & replica GTID sets, but ignore the sets that present the primary's UUID.
+ // This is because vtorc may pool primary and replica at an inconvenient timing,
+	// such that the replica may _seem_ to have more entries than the primary, when in fact
+ // it's just that the primary's probing is stale.
+ redactedExecutedGtidSet, _ := NewOracleGtidSet(instance.ExecutedGtidSet)
+ for _, uuid := range strings.Split(instance.AncestryUUID, ",") {
+ if uuid != instance.ServerUUID {
+ redactedExecutedGtidSet.RemoveUUID(uuid)
+ }
+ if instance.IsCoPrimary && uuid == instance.ServerUUID {
+ // If this is a co-primary, then this server is likely to show its own generated GTIDs as errant,
+ // because its co-primary has not applied them yet
+ redactedExecutedGtidSet.RemoveUUID(uuid)
+ }
+ }
+ // Avoid querying the database if there's no point:
+ if !redactedExecutedGtidSet.IsEmpty() {
+ redactedPrimaryExecutedGtidSet, _ := NewOracleGtidSet(instance.primaryExecutedGtidSet)
+ redactedPrimaryExecutedGtidSet.RemoveUUID(instance.SourceUUID)
+
+ instance.GtidErrant, err = replication.Subtract(redactedExecutedGtidSet.String(), redactedPrimaryExecutedGtidSet.String())
+ if err == nil {
+ var gtidCount int64
+ gtidCount, err = replication.GTIDCount(instance.GtidErrant)
+ currentErrantGTIDCount.Set(instance.InstanceAlias, gtidCount)
+ }
+ }
+ }
+ return err
+}
+
// getKeyspaceShardName returns a single string having both the keyspace and shard
func getKeyspaceShardName(keyspace, shard string) string {
return fmt.Sprintf("%v:%v", keyspace, shard)
@@ -439,16 +473,16 @@ func ReadInstanceClusterAttributes(instance *Instance) (err error) {
var primaryExecutedGtidSet string
primaryDataFound := false
- query := `
- select
- replication_depth,
- source_host,
- source_port,
- ancestry_uuid,
- executed_gtid_set
- from database_instance
- where hostname=? and port=?
- `
+ query := `SELECT
+ replication_depth,
+ source_host,
+ source_port,
+ ancestry_uuid,
+ executed_gtid_set
+ FROM database_instance
+ WHERE
+ hostname = ?
+ AND port = ?`
primaryHostname := instance.SourceHost
primaryPort := instance.SourcePort
args := sqlutils.Args(primaryHostname, primaryPort)
@@ -544,8 +578,8 @@ func readInstanceRow(m sqlutils.RowMap) *Instance {
instance.ReplicationDepth = m.GetUint("replication_depth")
instance.IsCoPrimary = m.GetBool("is_co_primary")
instance.HasReplicationCredentials = m.GetBool("has_replication_credentials")
- instance.IsUpToDate = (m.GetUint("seconds_since_last_checked") <= config.Config.InstancePollSeconds)
- instance.IsRecentlyChecked = (m.GetUint("seconds_since_last_checked") <= config.Config.InstancePollSeconds*5)
+ instance.IsUpToDate = m.GetUint("seconds_since_last_checked") <= config.GetInstancePollSeconds()
+ instance.IsRecentlyChecked = m.GetUint("seconds_since_last_checked") <= config.GetInstancePollSeconds()*5
instance.LastSeenTimestamp = m.GetString("last_seen")
instance.IsLastCheckValid = m.GetBool("is_last_check_valid")
instance.SecondsSinceLastSeen = m.GetNullInt64("seconds_since_last_seen")
@@ -562,7 +596,7 @@ func readInstanceRow(m sqlutils.RowMap) *Instance {
instance.Problems = append(instance.Problems, "not_recently_checked")
} else if instance.ReplicationThreadsExist() && !instance.ReplicaRunning() {
instance.Problems = append(instance.Problems, "not_replicating")
- } else if instance.ReplicationLagSeconds.Valid && util.AbsInt64(instance.ReplicationLagSeconds.Int64-int64(instance.SQLDelay)) > int64(config.Config.ReasonableReplicationLagSeconds) {
+ } else if instance.ReplicationLagSeconds.Valid && util.AbsInt64(instance.ReplicationLagSeconds.Int64-int64(instance.SQLDelay)) > int64(config.GetReasonableReplicationLagSeconds()) {
instance.Problems = append(instance.Problems, "replication_lag")
}
if instance.GtidErrant != "" {
@@ -580,20 +614,22 @@ func readInstancesByCondition(condition string, args []any, sort string) ([](*In
if sort == "" {
sort = `alias`
}
- query := fmt.Sprintf(`
- select
- *,
- strftime('%%s', 'now') - strftime('%%s', last_checked) as seconds_since_last_checked,
- ifnull(last_checked <= last_seen, 0) as is_last_check_valid,
- strftime('%%s', 'now') - strftime('%%s', last_seen) as seconds_since_last_seen
- from
- vitess_tablet
- left join database_instance using (alias, hostname, port)
- where
- %s
- order by
- %s
- `, condition, sort)
+ query := fmt.Sprintf(`SELECT
+ *,
+ STRFTIME('%%s', 'now') - STRFTIME('%%s', last_checked) AS seconds_since_last_checked,
+ IFNULL(last_checked <= last_seen, 0) AS is_last_check_valid,
+ STRFTIME('%%s', 'now') - STRFTIME('%%s', last_seen) AS seconds_since_last_seen
+ FROM
+ vitess_tablet
+ LEFT JOIN database_instance USING (alias, hostname, port)
+ WHERE
+ %s
+ ORDER BY
+ %s
+ `,
+ condition,
+ sort,
+ )
err := db.QueryVTOrc(query, args, func(m sqlutils.RowMap) error {
instance := readInstanceRow(m)
@@ -614,9 +650,7 @@ func readInstancesByCondition(condition string, args []any, sort string) ([](*In
// ReadInstance reads an instance from the vtorc backend database
func ReadInstance(tabletAlias string) (*Instance, bool, error) {
- condition := `
- alias = ?
- `
+ condition := `alias = ?`
instances, err := readInstancesByCondition(condition, sqlutils.Args(tabletAlias), "")
// We know there will be at most one (alias is the PK).
// And we expect to find one.
@@ -633,30 +667,28 @@ func ReadInstance(tabletAlias string) (*Instance, bool, error) {
// ReadProblemInstances reads all instances with problems
func ReadProblemInstances(keyspace string, shard string) ([](*Instance), error) {
condition := `
- keyspace LIKE (CASE WHEN ? = '' THEN '%' ELSE ? END)
- and shard LIKE (CASE WHEN ? = '' THEN '%' ELSE ? END)
- and (
- (last_seen < last_checked)
- or (strftime('%%s', 'now') - strftime('%%s', last_checked) > ?)
- or (replication_sql_thread_state not in (-1 ,1))
- or (replication_io_thread_state not in (-1 ,1))
- or (abs(cast(replication_lag_seconds as integer) - cast(sql_delay as integer)) > ?)
- or (abs(cast(replica_lag_seconds as integer) - cast(sql_delay as integer)) > ?)
- or (gtid_errant != '')
- )
- `
-
- args := sqlutils.Args(keyspace, keyspace, shard, shard, config.Config.InstancePollSeconds*5, config.Config.ReasonableReplicationLagSeconds, config.Config.ReasonableReplicationLagSeconds)
+ keyspace LIKE (CASE WHEN ? = '' THEN '%' ELSE ? END)
+ AND shard LIKE (CASE WHEN ? = '' THEN '%' ELSE ? END)
+ AND (
+ (last_seen < last_checked)
+ OR (STRFTIME('%%s', 'now') - STRFTIME('%%s', last_checked) > ?)
+ OR (replication_sql_thread_state NOT IN (-1 ,1))
+ OR (replication_io_thread_state NOT IN (-1 ,1))
+ OR (ABS(CAST(replication_lag_seconds AS integer) - CAST(sql_delay AS integer)) > ?)
+ OR (ABS(CAST(replica_lag_seconds AS integer) - CAST(sql_delay AS integer)) > ?)
+ OR (gtid_errant != '')
+ )`
+
+ args := sqlutils.Args(keyspace, keyspace, shard, shard, config.GetInstancePollSeconds()*5, config.GetReasonableReplicationLagSeconds(), config.GetReasonableReplicationLagSeconds())
return readInstancesByCondition(condition, args, "")
}
// ReadInstancesWithErrantGTIds reads all instances with errant GTIDs
func ReadInstancesWithErrantGTIds(keyspace string, shard string) ([]*Instance, error) {
condition := `
- keyspace LIKE (CASE WHEN ? = '' THEN '%' ELSE ? END)
- and shard LIKE (CASE WHEN ? = '' THEN '%' ELSE ? END)
- and gtid_errant != ''
- `
+ keyspace LIKE (CASE WHEN ? = '' THEN '%' ELSE ? END)
+ AND shard LIKE (CASE WHEN ? = '' THEN '%' ELSE ? END)
+ AND gtid_errant != ''`
args := sqlutils.Args(keyspace, keyspace, shard, shard)
return readInstancesByCondition(condition, args, "")
@@ -664,15 +696,14 @@ func ReadInstancesWithErrantGTIds(keyspace string, shard string) ([]*Instance, e
// GetKeyspaceShardName gets the keyspace shard name for the given instance key
func GetKeyspaceShardName(tabletAlias string) (keyspace string, shard string, err error) {
- query := `
- select
- keyspace,
- shard
- from
- vitess_tablet
- where
- alias = ?
- `
+ query := `SELECT
+ keyspace,
+ shard
+ FROM
+ vitess_tablet
+ WHERE
+ alias = ?
+ `
err = db.QueryVTOrc(query, sqlutils.Args(tabletAlias), func(m sqlutils.RowMap) error {
keyspace = m.GetString("keyspace")
shard = m.GetString("shard")
@@ -695,28 +726,27 @@ func GetKeyspaceShardName(tabletAlias string) (keyspace string, shard string, er
// the instance.
func ReadOutdatedInstanceKeys() ([]string, error) {
var res []string
- query := `
- SELECT
- alias
- FROM
- database_instance
- WHERE
- CASE
- WHEN last_attempted_check <= last_checked
- THEN last_checked < datetime('now', printf('-%d second', ?))
- ELSE last_checked < datetime('now', printf('-%d second', ?))
- END
- UNION
- SELECT
- vitess_tablet.alias
- FROM
- vitess_tablet LEFT JOIN database_instance ON (
- vitess_tablet.alias = database_instance.alias
- )
- WHERE
- database_instance.alias IS NULL
- `
- args := sqlutils.Args(config.Config.InstancePollSeconds, 2*config.Config.InstancePollSeconds)
+ query := `SELECT
+ alias
+ FROM
+ database_instance
+ WHERE
+ CASE
+ WHEN last_attempted_check <= last_checked
+ THEN last_checked < DATETIME('now', PRINTF('-%d SECOND', ?))
+ ELSE last_checked < DATETIME('now', PRINTF('-%d SECOND', ?))
+ END
+ UNION
+ SELECT
+ vitess_tablet.alias
+ FROM
+ vitess_tablet LEFT JOIN database_instance ON (
+ vitess_tablet.alias = database_instance.alias
+ )
+ WHERE
+ database_instance.alias IS NULL
+ `
+ args := sqlutils.Args(config.GetInstancePollSeconds(), 2*config.GetInstancePollSeconds())
err := db.QueryVTOrc(query, args, func(m sqlutils.RowMap) error {
tabletAlias := m.GetString("alias")
@@ -758,12 +788,17 @@ func mkInsert(table string, columns []string, values []string, nrRows int, inser
}
col := strings.Join(columns, ", ")
- q.WriteString(fmt.Sprintf(`%s %s
- (%s)
- VALUES
- %s
- `,
- insertStr, table, col, val.String()))
+ query := fmt.Sprintf(`%s %s
+ (%s)
+ VALUES
+ %s
+ `,
+ insertStr,
+ table,
+ col,
+ val.String(),
+ )
+ q.WriteString(query)
return q.String(), nil
}
@@ -849,13 +884,13 @@ func mkInsertForInstances(instances []*Instance, instanceWasActuallyFound bool,
for i := range columns {
values[i] = "?"
}
- values[3] = "datetime('now')" // last_checked
- values[4] = "datetime('now')" // last_attempted_check
+ values[3] = "DATETIME('now')" // last_checked
+ values[4] = "DATETIME('now')" // last_attempted_check
values[5] = "1" // last_check_partial_success
if updateLastSeen {
columns = append(columns, "last_seen")
- values = append(values, "datetime('now')")
+ values = append(values, "DATETIME('now')")
}
var args []any
@@ -971,14 +1006,13 @@ func WriteInstance(instance *Instance, instanceWasActuallyFound bool, lastError
// for a given instance
func UpdateInstanceLastChecked(tabletAlias string, partialSuccess bool) error {
writeFunc := func() error {
- _, err := db.ExecVTOrc(`
- update
- database_instance
- set
- last_checked = datetime('now'),
- last_check_partial_success = ?
- where
- alias = ?`,
+ _, err := db.ExecVTOrc(`UPDATE database_instance
+ SET
+ last_checked = DATETIME('now'),
+ last_check_partial_success = ?
+ WHERE
+ alias = ?
+ `,
partialSuccess,
tabletAlias,
)
@@ -1000,13 +1034,12 @@ func UpdateInstanceLastChecked(tabletAlias string, partialSuccess bool) error {
// we have a "hanging" issue.
func UpdateInstanceLastAttemptedCheck(tabletAlias string) error {
writeFunc := func() error {
- _, err := db.ExecVTOrc(`
- update
- database_instance
- set
- last_attempted_check = datetime('now')
- where
- alias = ?`,
+ _, err := db.ExecVTOrc(`UPDATE database_instance
+ SET
+ last_attempted_check = DATETIME('now')
+ WHERE
+ alias = ?
+ `,
tabletAlias,
)
if err != nil {
@@ -1037,11 +1070,11 @@ func ForgetInstance(tabletAlias string) error {
currentErrantGTIDCount.Reset(tabletAlias)
// Delete from the 'vitess_tablet' table.
- _, err := db.ExecVTOrc(`
- delete
- from vitess_tablet
- where
- alias = ?`,
+ _, err := db.ExecVTOrc(`DELETE
+ FROM vitess_tablet
+ WHERE
+ alias = ?
+ `,
tabletAlias,
)
if err != nil {
@@ -1050,11 +1083,11 @@ func ForgetInstance(tabletAlias string) error {
}
// Also delete from the 'database_instance' table.
- sqlResult, err := db.ExecVTOrc(`
- delete
- from database_instance
- where
- alias = ?`,
+ sqlResult, err := db.ExecVTOrc(`DELETE
+ FROM database_instance
+ WHERE
+ alias = ?
+ `,
tabletAlias,
)
if err != nil {
@@ -1078,11 +1111,11 @@ func ForgetInstance(tabletAlias string) error {
// ForgetLongUnseenInstances will remove entries of all instances that have long since been last seen.
func ForgetLongUnseenInstances() error {
- sqlResult, err := db.ExecVTOrc(`
- delete
- from database_instance
- where
- last_seen < datetime('now', printf('-%d hour', ?))`,
+ sqlResult, err := db.ExecVTOrc(`DELETE
+ FROM database_instance
+ WHERE
+ last_seen < DATETIME('now', PRINTF('-%d HOUR', ?))
+ `,
config.UnseenInstanceForgetHours,
)
if err != nil {
@@ -1103,18 +1136,26 @@ func ForgetLongUnseenInstances() error {
// SnapshotTopologies records topology graph for all existing topologies
func SnapshotTopologies() error {
writeFunc := func() error {
- _, err := db.ExecVTOrc(`
- insert or ignore into
- database_instance_topology_history (snapshot_unix_timestamp,
- alias, hostname, port, source_host, source_port, keyspace, shard, version)
- select
- strftime('%s', 'now'),
- vitess_tablet.alias, vitess_tablet.hostname, vitess_tablet.port,
- database_instance.source_host, database_instance.source_port,
+ _, err := db.ExecVTOrc(`INSERT OR IGNORE
+ INTO database_instance_topology_history (
+ snapshot_unix_timestamp,
+ alias,
+ hostname,
+ port,
+ source_host,
+ source_port,
+ keyspace,
+ shard,
+ version
+ )
+ SELECT
+ STRFTIME('%s', 'now'),
+ vitess_tablet.alias, vitess_tablet.hostname, vitess_tablet.port,
+ database_instance.source_host, database_instance.source_port,
vitess_tablet.keyspace, vitess_tablet.shard, database_instance.version
- from
- vitess_tablet left join database_instance using (alias, hostname, port)
- `,
+ FROM
+ vitess_tablet LEFT JOIN database_instance USING (alias, hostname, port)
+ `,
)
if err != nil {
log.Error(err)
@@ -1127,15 +1168,17 @@ func SnapshotTopologies() error {
}
func ExpireStaleInstanceBinlogCoordinates() error {
- expireSeconds := config.Config.ReasonableReplicationLagSeconds * 2
+ expireSeconds := config.GetReasonableReplicationLagSeconds() * 2
if expireSeconds < config.StaleInstanceCoordinatesExpireSeconds {
expireSeconds = config.StaleInstanceCoordinatesExpireSeconds
}
writeFunc := func() error {
- _, err := db.ExecVTOrc(`
- delete from database_instance_stale_binlog_coordinates
- where first_seen < datetime('now', printf('-%d second', ?))
- `, expireSeconds,
+ _, err := db.ExecVTOrc(`DELETE
+ FROM database_instance_stale_binlog_coordinates
+ WHERE
+ first_seen < DATETIME('now', PRINTF('-%d SECOND', ?))
+ `,
+ expireSeconds,
)
if err != nil {
log.Error(err)
@@ -1157,7 +1200,7 @@ func GetDatabaseState() (string, error) {
ts := tableState{
TableName: tableName,
}
- err := db.QueryVTOrc("select * from "+tableName, nil, func(rowMap sqlutils.RowMap) error {
+ err := db.QueryVTOrc("SELECT * FROM "+tableName, nil, func(rowMap sqlutils.RowMap) error {
ts.Rows = append(ts.Rows, rowMap)
return nil
})
diff --git a/go/vt/vtorc/inst/instance_dao_test.go b/go/vt/vtorc/inst/instance_dao_test.go
index 2416c1abb90..cc3217442ed 100644
--- a/go/vt/vtorc/inst/instance_dao_test.go
+++ b/go/vt/vtorc/inst/instance_dao_test.go
@@ -14,6 +14,7 @@ import (
"vitess.io/vitess/go/vt/external/golib/sqlutils"
"vitess.io/vitess/go/vt/log"
topodatapb "vitess.io/vitess/go/vt/proto/topodata"
+ "vitess.io/vitess/go/vt/topo"
"vitess.io/vitess/go/vt/topo/topoproto"
"vitess.io/vitess/go/vt/vtorc/config"
"vitess.io/vitess/go/vt/vtorc/db"
@@ -65,7 +66,7 @@ func TestMkInsertSingle(t *testing.T) {
replica_sql_running, replica_io_running, replication_sql_thread_state, replication_io_thread_state, has_replication_filters, supports_oracle_gtid, oracle_gtid, source_uuid, ancestry_uuid, executed_gtid_set, gtid_mode, gtid_purged, gtid_errant, mariadb_gtid, pseudo_gtid,
source_log_file, read_source_log_pos, relay_source_log_file, exec_source_log_pos, relay_log_file, relay_log_pos, last_sql_error, last_io_error, replication_lag_seconds, replica_lag_seconds, sql_delay, data_center, region, physical_environment, replication_depth, is_co_primary, has_replication_credentials, allow_tls, semi_sync_enforced, semi_sync_primary_enabled, semi_sync_primary_timeout, semi_sync_primary_wait_for_replica_count, semi_sync_replica_enabled, semi_sync_primary_status, semi_sync_primary_clients, semi_sync_replica_status, last_discovery_latency, last_seen)
VALUES
- (?, ?, ?, datetime('now'), datetime('now'), 1, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, datetime('now'))
+ (?, ?, ?, DATETIME('now'), DATETIME('now'), 1, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, DATETIME('now'))
`
a1 := `zone1-i710, i710, 3306, 710, , 5.6.7, 5.6, MySQL, false, false, STATEMENT,
FULL, false, false, , 0, , 0, 0, 0,
@@ -88,9 +89,9 @@ func TestMkInsertThree(t *testing.T) {
replica_sql_running, replica_io_running, replication_sql_thread_state, replication_io_thread_state, has_replication_filters, supports_oracle_gtid, oracle_gtid, source_uuid, ancestry_uuid, executed_gtid_set, gtid_mode, gtid_purged, gtid_errant, mariadb_gtid, pseudo_gtid,
source_log_file, read_source_log_pos, relay_source_log_file, exec_source_log_pos, relay_log_file, relay_log_pos, last_sql_error, last_io_error, replication_lag_seconds, replica_lag_seconds, sql_delay, data_center, region, physical_environment, replication_depth, is_co_primary, has_replication_credentials, allow_tls, semi_sync_enforced, semi_sync_primary_enabled, semi_sync_primary_timeout, semi_sync_primary_wait_for_replica_count, semi_sync_replica_enabled, semi_sync_primary_status, semi_sync_primary_clients, semi_sync_replica_status, last_discovery_latency, last_seen)
VALUES
- (?, ?, ?, datetime('now'), datetime('now'), 1, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, datetime('now')),
- (?, ?, ?, datetime('now'), datetime('now'), 1, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, datetime('now')),
- (?, ?, ?, datetime('now'), datetime('now'), 1, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, datetime('now'))
+ (?, ?, ?, DATETIME('now'), DATETIME('now'), 1, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, DATETIME('now')),
+ (?, ?, ?, DATETIME('now'), DATETIME('now'), 1, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, DATETIME('now')),
+ (?, ?, ?, DATETIME('now'), DATETIME('now'), 1, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, DATETIME('now'))
`
a3 := `
zone1-i710, i710, 3306, 710, , 5.6.7, 5.6, MySQL, false, false, STATEMENT, FULL, false, false, , 0, , 0, 0, 0, false, false, 0, 0, false, false, false, , , , , , , false, false, , 0, mysql.000007, 10, , 0, , , {0 false}, {0 false}, 0, , , , 0, false, false, false, false, false, 0, 0, false, false, 0, false, 0,
@@ -241,11 +242,11 @@ func TestReadProblemInstances(t *testing.T) {
// We need to set InstancePollSeconds to a large value otherwise all the instances are reported as having problems since their last_checked is very old.
// Setting this value to a hundred years, we ensure that this test doesn't fail with this issue for the next hundred years.
- oldVal := config.Config.InstancePollSeconds
+ oldVal := config.GetInstancePollTime()
defer func() {
- config.Config.InstancePollSeconds = oldVal
+ config.SetInstancePollTime(oldVal)
}()
- config.Config.InstancePollSeconds = 60 * 60 * 24 * 365 * 100
+ config.SetInstancePollTime(60 * 60 * 24 * 365 * 100 * time.Second)
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
@@ -325,11 +326,11 @@ func TestReadInstancesWithErrantGTIds(t *testing.T) {
// We need to set InstancePollSeconds to a large value otherwise all the instances are reported as having problems since their last_checked is very old.
// Setting this value to a hundred years, we ensure that this test doesn't fail with this issue for the next hundred years.
- oldVal := config.Config.InstancePollSeconds
+ oldVal := config.GetInstancePollTime()
defer func() {
- config.Config.InstancePollSeconds = oldVal
+ config.SetInstancePollTime(oldVal)
}()
- config.Config.InstancePollSeconds = 60 * 60 * 24 * 365 * 100
+ config.SetInstancePollTime(60 * 60 * 24 * 365 * 100 * time.Second)
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
@@ -428,27 +429,27 @@ func TestReadOutdatedInstanceKeys(t *testing.T) {
}{
{
name: "No problems",
- sql: []string{"update database_instance set last_checked = datetime('now')"},
+ sql: []string{"update database_instance set last_checked = DATETIME('now')"},
instancesRequired: nil,
}, {
name: "One instance is outdated",
sql: []string{
- "update database_instance set last_checked = datetime('now')",
- "update database_instance set last_checked = datetime('now', '-1 hour') where alias = 'zone1-0000000100'",
+ "update database_instance set last_checked = DATETIME('now')",
+ "update database_instance set last_checked = DATETIME('now', '-1 hour') where alias = 'zone1-0000000100'",
},
instancesRequired: []string{"zone1-0000000100"},
}, {
name: "One instance doesn't have myql data",
sql: []string{
- "update database_instance set last_checked = datetime('now')",
+ "update database_instance set last_checked = DATETIME('now')",
`INSERT INTO vitess_tablet VALUES('zone1-0000000103','localhost',7706,'ks','0','zone1',2,'0001-01-01 00:00:00+00:00','');`,
},
instancesRequired: []string{"zone1-0000000103"},
}, {
name: "One instance doesn't have myql data and one is outdated",
sql: []string{
- "update database_instance set last_checked = datetime('now')",
- "update database_instance set last_checked = datetime('now', '-1 hour') where alias = 'zone1-0000000100'",
+ "update database_instance set last_checked = DATETIME('now')",
+ "update database_instance set last_checked = DATETIME('now', '-1 hour') where alias = 'zone1-0000000100'",
`INSERT INTO vitess_tablet VALUES('zone1-0000000103','localhost',7706,'ks','0','zone1',2,'0001-01-01 00:00:00+00:00','');`,
},
instancesRequired: []string{"zone1-0000000103", "zone1-0000000100"},
@@ -459,13 +460,13 @@ func TestReadOutdatedInstanceKeys(t *testing.T) {
waitForCacheInitialization()
// We are setting InstancePollSeconds to 59 minutes, just for the test.
- oldVal := config.Config.InstancePollSeconds
+ oldVal := config.GetInstancePollTime()
oldCache := forgetAliases
defer func() {
forgetAliases = oldCache
- config.Config.InstancePollSeconds = oldVal
+ config.SetInstancePollTime(oldVal)
}()
- config.Config.InstancePollSeconds = 60 * 25
+ config.SetInstancePollTime(60 * 25 * time.Second)
forgetAliases = cache.New(time.Minute, time.Minute)
for _, tt := range tests {
@@ -485,10 +486,10 @@ func TestReadOutdatedInstanceKeys(t *testing.T) {
errInDataCollection := db.QueryVTOrcRowsMap(`select alias,
last_checked,
last_attempted_check,
-ROUND((JULIANDAY(datetime('now')) - JULIANDAY(last_checked)) * 86400) AS difference,
+ROUND((JULIANDAY(DATETIME('now')) - JULIANDAY(last_checked)) * 86400) AS difference,
last_attempted_check <= last_checked as use1,
-last_checked < datetime('now', '-1500 second') as is_outdated1,
-last_checked < datetime('now', '-3000 second') as is_outdated2
+last_checked < DATETIME('now', '-1500 second') as is_outdated1,
+last_checked < DATETIME('now', '-3000 second') as is_outdated2
from database_instance`, func(rowMap sqlutils.RowMap) error {
log.Errorf("Row in database_instance - %+v", rowMap)
return nil
@@ -512,12 +513,12 @@ func TestUpdateInstanceLastChecked(t *testing.T) {
name: "Verify updated last checked",
tabletAlias: "zone1-0000000100",
partialSuccess: false,
- conditionToCheck: "last_checked >= datetime('now', '-30 second') and last_check_partial_success = false",
+ conditionToCheck: "last_checked >= DATETIME('now', '-30 second') and last_check_partial_success = false",
}, {
name: "Verify partial success",
tabletAlias: "zone1-0000000100",
partialSuccess: true,
- conditionToCheck: "last_checked >= datetime('now', '-30 second') and last_check_partial_success = true",
+ conditionToCheck: "last_checked >= DATETIME('now', '-30 second') and last_check_partial_success = true",
}, {
name: "Verify no error on unknown tablet",
tabletAlias: "unknown tablet",
@@ -563,7 +564,7 @@ func TestUpdateInstanceLastAttemptedCheck(t *testing.T) {
{
name: "Verify updated last checked",
tabletAlias: "zone1-0000000100",
- conditionToCheck: "last_attempted_check >= datetime('now', '-30 second')",
+ conditionToCheck: "last_attempted_check >= DATETIME('now', '-30 second')",
}, {
name: "Verify no error on unknown tablet",
tabletAlias: "unknown tablet",
@@ -718,10 +719,10 @@ func TestGetDatabaseState(t *testing.T) {
}
func TestExpireTableData(t *testing.T) {
- oldVal := config.Config.AuditPurgeDays
- config.Config.AuditPurgeDays = 10
+ oldVal := config.GetAuditPurgeDays()
+ config.SetAuditPurgeDays(10)
defer func() {
- config.Config.AuditPurgeDays = oldVal
+ config.SetAuditPurgeDays(oldVal)
}()
tests := []struct {
@@ -736,19 +737,19 @@ func TestExpireTableData(t *testing.T) {
tableName: "audit",
timestampColumn: "audit_timestamp",
expectedRowCount: 1,
- insertQuery: `insert into audit (audit_id, audit_timestamp, audit_type, alias, message, keyspace, shard) values
-(1, datetime('now', '-50 DAY'), 'a','a','a','a','a'),
-(2, datetime('now', '-5 DAY'), 'a','a','a','a','a')`,
+ insertQuery: `INSERT INTO audit (audit_id, audit_timestamp, audit_type, alias, message, keyspace, shard) VALUES
+(1, DATETIME('now', '-50 DAY'), 'a','a','a','a','a'),
+(2, DATETIME('now', '-5 DAY'), 'a','a','a','a','a')`,
},
{
name: "ExpireRecoveryDetectionHistory",
tableName: "recovery_detection",
timestampColumn: "detection_timestamp",
expectedRowCount: 2,
- insertQuery: `insert into recovery_detection (detection_id, detection_timestamp, alias, analysis, keyspace, shard) values
-(1, datetime('now', '-3 DAY'),'a','a','a','a'),
-(2, datetime('now', '-5 DAY'),'a','a','a','a'),
-(3, datetime('now', '-15 DAY'),'a','a','a','a')`,
+ insertQuery: `INSERT INTO recovery_detection (detection_id, detection_timestamp, alias, analysis, keyspace, shard) VALUES
+(1, DATETIME('now', '-3 DAY'),'a','a','a','a'),
+(2, DATETIME('now', '-5 DAY'),'a','a','a','a'),
+(3, DATETIME('now', '-15 DAY'),'a','a','a','a')`,
},
}
for _, tt := range tests {
@@ -773,3 +774,165 @@ func TestExpireTableData(t *testing.T) {
})
}
}
+
+func TestDetectErrantGTIDs(t *testing.T) {
+ tests := []struct {
+ name string
+ instance *Instance
+ primaryInstance *Instance
+ wantErr bool
+ wantErrantGTID string
+ }{
+ {
+ name: "No errant GTIDs",
+ instance: &Instance{
+ ExecutedGtidSet: "230ea8ea-81e3-11e4-972a-e25ec4bd140a:1-10539,8bc65c84-3fe4-11ed-a912-257f0fcdd6c9:1-34",
+ primaryExecutedGtidSet: "230ea8ea-81e3-11e4-972a-e25ec4bd140a:1-10591,8bc65c84-3fe4-11ed-a912-257f0fcdd6c9:1-34",
+ AncestryUUID: "316d193c-70e5-11e5-adb2-ecf4bb2262ff,230ea8ea-81e3-11e4-972a-e25ec4bd140a",
+ ServerUUID: "316d193c-70e5-11e5-adb2-ecf4bb2262ff",
+ SourceUUID: "230ea8ea-81e3-11e4-972a-e25ec4bd140a",
+ },
+ }, {
+ name: "Errant GTIDs on replica",
+ instance: &Instance{
+ ExecutedGtidSet: "230ea8ea-81e3-11e4-972a-e25ec4bd140a:1-10539,8bc65c84-3fe4-11ed-a912-257f0fcdd6c9:1-34,316d193c-70e5-11e5-adb2-ecf4bb2262ff:34",
+ primaryExecutedGtidSet: "230ea8ea-81e3-11e4-972a-e25ec4bd140a:1-10591,8bc65c84-3fe4-11ed-a912-257f0fcdd6c9:1-34",
+ AncestryUUID: "316d193c-70e5-11e5-adb2-ecf4bb2262ff,230ea8ea-81e3-11e4-972a-e25ec4bd140a",
+ ServerUUID: "316d193c-70e5-11e5-adb2-ecf4bb2262ff",
+ SourceUUID: "230ea8ea-81e3-11e4-972a-e25ec4bd140a",
+ },
+ wantErrantGTID: "316d193c-70e5-11e5-adb2-ecf4bb2262ff:34",
+ },
+ {
+ name: "No errant GTIDs on old primary",
+ instance: &Instance{
+ ExecutedGtidSet: "230ea8ea-81e3-11e4-972a-e25ec4bd140a:1-10539,8bc65c84-3fe4-11ed-a912-257f0fcdd6c9:1-34,316d193c-70e5-11e5-adb2-ecf4bb2262ff:1-341",
+ AncestryUUID: "316d193c-70e5-11e5-adb2-ecf4bb2262ff",
+ ServerUUID: "316d193c-70e5-11e5-adb2-ecf4bb2262ff",
+ },
+ primaryInstance: &Instance{
+ SourceHost: "",
+ ExecutedGtidSet: "230ea8ea-81e3-11e4-972a-e25ec4bd140a:1-10589,8bc65c84-3fe4-11ed-a912-257f0fcdd6c9:1-34,316d193c-70e5-11e5-adb2-ecf4bb2262ff:1-341",
+ },
+ },
+ {
+ name: "Errant GTIDs on old primary",
+ instance: &Instance{
+ ExecutedGtidSet: "230ea8ea-81e3-11e4-972a-e25ec4bd140a:1-10539,8bc65c84-3fe4-11ed-a912-257f0fcdd6c9:1-34,316d193c-70e5-11e5-adb2-ecf4bb2262ff:1-342",
+ AncestryUUID: "316d193c-70e5-11e5-adb2-ecf4bb2262ff",
+ ServerUUID: "316d193c-70e5-11e5-adb2-ecf4bb2262ff",
+ },
+ primaryInstance: &Instance{
+ SourceHost: "",
+ ExecutedGtidSet: "230ea8ea-81e3-11e4-972a-e25ec4bd140a:1-10589,8bc65c84-3fe4-11ed-a912-257f0fcdd6c9:1-34,316d193c-70e5-11e5-adb2-ecf4bb2262ff:1-341",
+ },
+ wantErrantGTID: "316d193c-70e5-11e5-adb2-ecf4bb2262ff:342",
+ }, {
+ name: "Old information for new primary",
+ instance: &Instance{
+ ExecutedGtidSet: "230ea8ea-81e3-11e4-972a-e25ec4bd140a:1-10539,8bc65c84-3fe4-11ed-a912-257f0fcdd6c9:1-34,316d193c-70e5-11e5-adb2-ecf4bb2262ff:1-342",
+ AncestryUUID: "316d193c-70e5-11e5-adb2-ecf4bb2262ff",
+ ServerUUID: "316d193c-70e5-11e5-adb2-ecf4bb2262ff",
+ },
+ primaryInstance: &Instance{
+ SourceHost: "localhost",
+ ExecutedGtidSet: "230ea8ea-81e3-11e4-972a-e25ec4bd140a:1-10539,8bc65c84-3fe4-11ed-a912-257f0fcdd6c9:1-34,316d193c-70e5-11e5-adb2-ecf4bb2262ff:1-311",
+ },
+ },
+ }
+
+ keyspaceName := "ks"
+ shardName := "0"
+ tablet := &topodatapb.Tablet{
+ Alias: &topodatapb.TabletAlias{
+ Cell: "zone-1",
+ Uid: 100,
+ },
+ Keyspace: keyspaceName,
+ Shard: shardName,
+ }
+ primaryTablet := &topodatapb.Tablet{
+ Alias: &topodatapb.TabletAlias{
+ Cell: "zone-1",
+ Uid: 101,
+ },
+ Keyspace: keyspaceName,
+ Shard: shardName,
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ // Clear the database after the test. The easiest way to do that is to run all the initialization commands again.
+ defer func() {
+ db.ClearVTOrcDatabase()
+ }()
+ db.ClearVTOrcDatabase()
+
+ // Save shard record for the primary tablet.
+ err := SaveShard(topo.NewShardInfo(keyspaceName, shardName, &topodatapb.Shard{
+ PrimaryAlias: primaryTablet.Alias,
+ }, nil))
+ require.NoError(t, err)
+
+ if tt.primaryInstance != nil {
+ tt.primaryInstance.InstanceAlias = topoproto.TabletAliasString(primaryTablet.Alias)
+ err = SaveTablet(primaryTablet)
+ require.NoError(t, err)
+ err = WriteInstance(tt.primaryInstance, true, nil)
+ require.NoError(t, err)
+ }
+
+ tt.instance.InstanceAlias = topoproto.TabletAliasString(tablet.Alias)
+ err = detectErrantGTIDs(tt.instance, tablet)
+ if tt.wantErr {
+ require.Error(t, err)
+ return
+ }
+ require.NoError(t, err)
+ require.EqualValues(t, tt.wantErrantGTID, tt.instance.GtidErrant)
+ })
+ }
+}
+
+// TestPrimaryErrantGTIDs tests that we don't run Errant GTID detection on the primary tablet itself!
+func TestPrimaryErrantGTIDs(t *testing.T) {
+ // Clear the database after the test. The easiest way to do that is to run all the initialization commands again.
+ defer func() {
+ db.ClearVTOrcDatabase()
+ }()
+ db.ClearVTOrcDatabase()
+ keyspaceName := "ks"
+ shardName := "0"
+ tablet := &topodatapb.Tablet{
+ Alias: &topodatapb.TabletAlias{
+ Cell: "zone-1",
+ Uid: 100,
+ },
+ Keyspace: keyspaceName,
+ Shard: shardName,
+ }
+ instance := &Instance{
+ SourceHost: "",
+ ExecutedGtidSet: "230ea8ea-81e3-11e4-972a-e25ec4bd140a:1-10589,8bc65c84-3fe4-11ed-a912-257f0fcdd6c9:1-34,316d193c-70e5-11e5-adb2-ecf4bb2262ff:1-341",
+ InstanceAlias: topoproto.TabletAliasString(tablet.Alias),
+ }
+
+ // Save shard record for the primary tablet.
+ err := SaveShard(topo.NewShardInfo(keyspaceName, shardName, &topodatapb.Shard{
+ PrimaryAlias: tablet.Alias,
+ }, nil))
+ require.NoError(t, err)
+
+ // Store the tablet record and the instance.
+ err = SaveTablet(tablet)
+ require.NoError(t, err)
+ err = WriteInstance(instance, true, nil)
+ require.NoError(t, err)
+
+ // After this if we read a new information for the record that updates its
+ // gtid set further, we shouldn't be detecting errant GTIDs on it since it is the primary!
+ // We shouldn't be comparing it with a previous version of itself!
+ instance.ExecutedGtidSet = "230ea8ea-81e3-11e4-972a-e25ec4bd140a:1-10589,8bc65c84-3fe4-11ed-a912-257f0fcdd6c9:1-34,316d193c-70e5-11e5-adb2-ecf4bb2262ff:1-351"
+ err = detectErrantGTIDs(instance, tablet)
+ require.NoError(t, err)
+ require.EqualValues(t, "", instance.GtidErrant)
+}
diff --git a/go/vt/vtorc/inst/tablet_dao.go b/go/vt/vtorc/inst/tablet_dao.go
index af304292a70..f48f2b97370 100644
--- a/go/vt/vtorc/inst/tablet_dao.go
+++ b/go/vt/vtorc/inst/tablet_dao.go
@@ -56,13 +56,13 @@ func fullStatus(tabletAlias string) (*replicationdatapb.FullStatus, error) {
// ReadTablet reads the vitess tablet record.
func ReadTablet(tabletAlias string) (*topodatapb.Tablet, error) {
- query := `
- select
- info
- from
- vitess_tablet
- where alias = ?
- `
+ query := `SELECT
+ info
+ FROM
+ vitess_tablet
+ WHERE
+ alias = ?
+ `
args := sqlutils.Args(tabletAlias)
tablet := &topodatapb.Tablet{}
opts := prototext.UnmarshalOptions{DiscardUnknown: true}
@@ -84,14 +84,28 @@ func SaveTablet(tablet *topodatapb.Tablet) error {
if err != nil {
return err
}
- _, err = db.ExecVTOrc(`
- replace
- into vitess_tablet (
- alias, hostname, port, cell, keyspace, shard, tablet_type, primary_timestamp, info
- ) values (
- ?, ?, ?, ?, ?, ?, ?, ?, ?
- )
- `,
+ _, err = db.ExecVTOrc(`REPLACE
+ INTO vitess_tablet (
+ alias,
+ hostname,
+ port,
+ cell,
+ keyspace,
+ shard,
+ tablet_type,
+ primary_timestamp,
+ info
+ ) VALUES (
+ ?,
+ ?,
+ ?,
+ ?,
+ ?,
+ ?,
+ ?,
+ ?,
+ ?
+ )`,
topoproto.TabletAliasString(tablet.Alias),
tablet.MysqlHostname,
int(tablet.MysqlPort),
diff --git a/go/vt/vtorc/logic/disable_recovery.go b/go/vt/vtorc/logic/disable_recovery.go
index 60650798876..c5446eeb9ff 100644
--- a/go/vt/vtorc/logic/disable_recovery.go
+++ b/go/vt/vtorc/logic/disable_recovery.go
@@ -40,14 +40,13 @@ import (
// IsRecoveryDisabled returns true if Recoveries are disabled globally
func IsRecoveryDisabled() (disabled bool, err error) {
- query := `
- SELECT
- COUNT(*) as mycount
- FROM
- global_recovery_disable
- WHERE
- disable_recovery=?
- `
+ query := `SELECT
+ COUNT(*) AS mycount
+ FROM
+ global_recovery_disable
+ WHERE
+ disable_recovery = ?
+ `
err = db.QueryVTOrc(query, sqlutils.Args(1), func(m sqlutils.RowMap) error {
mycount := m.GetInt("mycount")
disabled = (mycount > 0)
@@ -63,21 +62,19 @@ func IsRecoveryDisabled() (disabled bool, err error) {
// DisableRecovery ensures recoveries are disabled globally
func DisableRecovery() error {
- _, err := db.ExecVTOrc(`
- INSERT OR IGNORE INTO global_recovery_disable
- (disable_recovery)
- VALUES (1)
- `,
- )
+ _, err := db.ExecVTOrc(`INSERT OR IGNORE
+ INTO global_recovery_disable (
+ disable_recovery
+ ) VALUES (1)`)
return err
}
// EnableRecovery ensures recoveries are enabled globally
func EnableRecovery() error {
// The "WHERE" clause is just to avoid full-scan reports by monitoring tools
- _, err := db.ExecVTOrc(`
- DELETE FROM global_recovery_disable WHERE disable_recovery >= 0
- `,
- )
+ _, err := db.ExecVTOrc(`DELETE
+ FROM global_recovery_disable
+ WHERE
+ disable_recovery >= 0`)
return err
}
diff --git a/go/vt/vtorc/logic/keyspace_shard_discovery.go b/go/vt/vtorc/logic/keyspace_shard_discovery.go
index b1e93fe2a01..0dd17cb65fd 100644
--- a/go/vt/vtorc/logic/keyspace_shard_discovery.go
+++ b/go/vt/vtorc/logic/keyspace_shard_discovery.go
@@ -29,17 +29,16 @@ import (
)
// RefreshAllKeyspacesAndShards reloads the keyspace and shard information for the keyspaces that vtorc is concerned with.
-func RefreshAllKeyspacesAndShards() {
+func RefreshAllKeyspacesAndShards(ctx context.Context) error {
var keyspaces []string
if len(clustersToWatch) == 0 { // all known keyspaces
- ctx, cancel := context.WithTimeout(context.Background(), topo.RemoteOperationTimeout)
+ ctx, cancel := context.WithTimeout(ctx, topo.RemoteOperationTimeout)
defer cancel()
var err error
// Get all the keyspaces
keyspaces, err = ts.GetKeyspaces(ctx)
if err != nil {
- log.Error(err)
- return
+ return err
}
} else {
// Parse input and build list of keyspaces
@@ -55,14 +54,14 @@ func RefreshAllKeyspacesAndShards() {
}
if len(keyspaces) == 0 {
log.Errorf("Found no keyspaces for input: %+v", clustersToWatch)
- return
+ return nil
}
}
// Sort the list of keyspaces.
// The list can have duplicates because the input to clusters to watch may have multiple shards of the same keyspace
sort.Strings(keyspaces)
- refreshCtx, refreshCancel := context.WithTimeout(context.Background(), topo.RemoteOperationTimeout)
+ refreshCtx, refreshCancel := context.WithTimeout(ctx, topo.RemoteOperationTimeout)
defer refreshCancel()
var wg sync.WaitGroup
for idx, keyspace := range keyspaces {
@@ -83,6 +82,8 @@ func RefreshAllKeyspacesAndShards() {
}(keyspace)
}
wg.Wait()
+
+ return nil
}
// RefreshKeyspaceAndShard refreshes the keyspace record and shard record for the given keyspace and shard.
diff --git a/go/vt/vtorc/logic/keyspace_shard_discovery_test.go b/go/vt/vtorc/logic/keyspace_shard_discovery_test.go
index 097865db84a..5cbe139728b 100644
--- a/go/vt/vtorc/logic/keyspace_shard_discovery_test.go
+++ b/go/vt/vtorc/logic/keyspace_shard_discovery_test.go
@@ -92,7 +92,7 @@ func TestRefreshAllKeyspaces(t *testing.T) {
// Set clusters to watch to only watch ks1 and ks3
onlyKs1and3 := []string{"ks1/-80", "ks3/-80", "ks3/80-"}
clustersToWatch = onlyKs1and3
- RefreshAllKeyspacesAndShards()
+ require.NoError(t, RefreshAllKeyspacesAndShards(context.Background()))
// Verify that we only have ks1 and ks3 in vtorc's db.
verifyKeyspaceInfo(t, "ks1", keyspaceDurabilityNone, "")
@@ -107,7 +107,7 @@ func TestRefreshAllKeyspaces(t *testing.T) {
clustersToWatch = nil
// Change the durability policy of ks1
reparenttestutil.SetKeyspaceDurability(ctx, t, ts, "ks1", "semi_sync")
- RefreshAllKeyspacesAndShards()
+ require.NoError(t, RefreshAllKeyspacesAndShards(context.Background()))
// Verify that all the keyspaces are correctly reloaded
verifyKeyspaceInfo(t, "ks1", keyspaceDurabilitySemiSync, "")
diff --git a/go/vt/vtorc/logic/tablet_discovery.go b/go/vt/vtorc/logic/tablet_discovery.go
index e9bbcee35cb..990192a23f7 100644
--- a/go/vt/vtorc/logic/tablet_discovery.go
+++ b/go/vt/vtorc/logic/tablet_discovery.go
@@ -27,7 +27,6 @@ import (
"time"
"github.com/spf13/pflag"
-
"google.golang.org/protobuf/encoding/prototext"
"google.golang.org/protobuf/proto"
@@ -38,7 +37,6 @@ import (
"vitess.io/vitess/go/vt/vtorc/config"
"vitess.io/vitess/go/vt/vtorc/db"
"vitess.io/vitess/go/vt/vtorc/inst"
- "vitess.io/vitess/go/vt/vtorc/process"
"vitess.io/vitess/go/vt/vttablet/tmclient"
topodatapb "vitess.io/vitess/go/vt/proto/topodata"
@@ -66,39 +64,36 @@ func OpenTabletDiscovery() <-chan time.Time {
ts = topo.Open()
tmc = inst.InitializeTMC()
// Clear existing cache and perform a new refresh.
- if _, err := db.ExecVTOrc("delete from vitess_tablet"); err != nil {
+ if _, err := db.ExecVTOrc("DELETE FROM vitess_tablet"); err != nil {
log.Error(err)
}
- // We refresh all information from the topo once before we start the ticks to do it on a timer.
- populateAllInformation()
- return time.Tick(time.Second * time.Duration(config.Config.TopoInformationRefreshSeconds)) //nolint SA1015: using time.Tick leaks the underlying ticker
-}
-
-// populateAllInformation initializes all the information for VTOrc to function.
-func populateAllInformation() {
- refreshAllInformation()
- // We have completed one full discovery cycle. We should update the process health.
- process.FirstDiscoveryCycleComplete.Store(true)
+ // We refresh all information from the topo once before we start the ticks to do
+ // it on a timer.
+ ctx, cancel := context.WithTimeout(context.Background(), topo.RemoteOperationTimeout)
+ defer cancel()
+ if err := refreshAllInformation(ctx); err != nil {
+ log.Errorf("failed to initialize topo information: %+v", err)
+ }
+ return time.Tick(config.GetTopoInformationRefreshDuration()) //nolint SA1015: using time.Tick leaks the underlying ticker
}
// refreshAllTablets reloads the tablets from topo and discovers the ones which haven't been refreshed in a while
-func refreshAllTablets() {
- refreshTabletsUsing(func(tabletAlias string) {
+func refreshAllTablets(ctx context.Context) error {
+ return refreshTabletsUsing(ctx, func(tabletAlias string) {
DiscoverInstance(tabletAlias, false /* forceDiscovery */)
}, false /* forceRefresh */)
}
-func refreshTabletsUsing(loader func(tabletAlias string), forceRefresh bool) {
+func refreshTabletsUsing(ctx context.Context, loader func(tabletAlias string), forceRefresh bool) error {
if len(clustersToWatch) == 0 { // all known clusters
- ctx, cancel := context.WithTimeout(context.Background(), topo.RemoteOperationTimeout)
+ ctx, cancel := context.WithTimeout(ctx, topo.RemoteOperationTimeout)
defer cancel()
cells, err := ts.GetKnownCells(ctx)
if err != nil {
- log.Error(err)
- return
+ return err
}
- refreshCtx, refreshCancel := context.WithTimeout(context.Background(), topo.RemoteOperationTimeout)
+ refreshCtx, refreshCancel := context.WithTimeout(ctx, topo.RemoteOperationTimeout)
defer refreshCancel()
var wg sync.WaitGroup
for _, cell := range cells {
@@ -119,7 +114,7 @@ func refreshTabletsUsing(loader func(tabletAlias string), forceRefresh bool) {
keyspaceShards = append(keyspaceShards, &topo.KeyspaceShard{Keyspace: input[0], Shard: input[1]})
} else {
// Assume this is a keyspace and find all shards in keyspace
- ctx, cancel := context.WithTimeout(context.Background(), topo.RemoteOperationTimeout)
+ ctx, cancel := context.WithTimeout(ctx, topo.RemoteOperationTimeout)
defer cancel()
shards, err := ts.GetShardNames(ctx, ks)
if err != nil {
@@ -138,9 +133,9 @@ func refreshTabletsUsing(loader func(tabletAlias string), forceRefresh bool) {
}
if len(keyspaceShards) == 0 {
log.Errorf("Found no keyspaceShards for input: %+v", clustersToWatch)
- return
+ return nil
}
- refreshCtx, refreshCancel := context.WithTimeout(context.Background(), topo.RemoteOperationTimeout)
+ refreshCtx, refreshCancel := context.WithTimeout(ctx, topo.RemoteOperationTimeout)
defer refreshCancel()
var wg sync.WaitGroup
for _, ks := range keyspaceShards {
@@ -152,6 +147,7 @@ func refreshTabletsUsing(loader func(tabletAlias string), forceRefresh bool) {
}
wg.Wait()
}
+ return nil
}
func refreshTabletsInCell(ctx context.Context, cell string, loader func(tabletAlias string), forceRefresh bool) {
diff --git a/go/vt/vtorc/logic/tablet_discovery_test.go b/go/vt/vtorc/logic/tablet_discovery_test.go
index bc9eeba1fb7..f6a7af38382 100644
--- a/go/vt/vtorc/logic/tablet_discovery_test.go
+++ b/go/vt/vtorc/logic/tablet_discovery_test.go
@@ -37,7 +37,6 @@ import (
"vitess.io/vitess/go/vt/vtctl/grpcvtctldserver/testutil"
"vitess.io/vitess/go/vt/vtorc/db"
"vitess.io/vitess/go/vt/vtorc/inst"
- "vitess.io/vitess/go/vt/vtorc/process"
)
var (
@@ -369,25 +368,6 @@ func TestGetLockAction(t *testing.T) {
}
}
-// TestProcessHealth tests that the health of the process reflects that we have run the first discovery once correctly.
-func TestProcessHealth(t *testing.T) {
- require.False(t, process.FirstDiscoveryCycleComplete.Load())
- originalTs := ts
- defer func() {
- ts = originalTs
- process.FirstDiscoveryCycleComplete.Store(false)
- }()
- // Verify in the beginning, we have the first DiscoveredOnce field false.
- _, discoveredOnce := process.HealthTest()
- require.False(t, discoveredOnce)
- ts = memorytopo.NewServer(context.Background(), cell1)
- populateAllInformation()
- require.True(t, process.FirstDiscoveryCycleComplete.Load())
- // Verify after we populate all information, we have the first DiscoveredOnce field true.
- _, discoveredOnce = process.HealthTest()
- require.True(t, discoveredOnce)
-}
-
func TestSetReadOnly(t *testing.T) {
tests := []struct {
name string
diff --git a/go/vt/vtorc/logic/topology_recovery.go b/go/vt/vtorc/logic/topology_recovery.go
index aec137a45b4..f14eca624c9 100644
--- a/go/vt/vtorc/logic/topology_recovery.go
+++ b/go/vt/vtorc/logic/topology_recovery.go
@@ -21,7 +21,6 @@ import (
"encoding/json"
"fmt"
"math/rand/v2"
- "time"
"vitess.io/vitess/go/stats"
"vitess.io/vitess/go/vt/log"
@@ -235,8 +234,8 @@ func runEmergencyReparentOp(ctx context.Context, analysisEntry *inst.Replication
tablet.Shard,
reparentutil.EmergencyReparentOptions{
IgnoreReplicas: nil,
- WaitReplicasTimeout: time.Duration(config.Config.WaitReplicasTimeoutSeconds) * time.Second,
- PreventCrossCellPromotion: config.Config.PreventCrossDataCenterPrimaryFailover,
+ WaitReplicasTimeout: config.GetWaitReplicasTimeout(),
+ PreventCrossCellPromotion: config.GetPreventCrossCellFailover(),
WaitAllTablets: waitForAllTablets,
},
)
@@ -703,8 +702,8 @@ func electNewPrimary(ctx context.Context, analysisEntry *inst.ReplicationAnalysi
analyzedTablet.Keyspace,
analyzedTablet.Shard,
reparentutil.PlannedReparentOptions{
- WaitReplicasTimeout: time.Duration(config.Config.WaitReplicasTimeoutSeconds) * time.Second,
- TolerableReplLag: time.Duration(config.Config.TolerableReplicationLagSeconds) * time.Second,
+ WaitReplicasTimeout: config.GetWaitReplicasTimeout(),
+ TolerableReplLag: config.GetTolerableReplicationLag(),
},
)
diff --git a/go/vt/vtorc/logic/topology_recovery_dao.go b/go/vt/vtorc/logic/topology_recovery_dao.go
index 730e6b2a158..137251c4fc8 100644
--- a/go/vt/vtorc/logic/topology_recovery_dao.go
+++ b/go/vt/vtorc/logic/topology_recovery_dao.go
@@ -30,21 +30,20 @@ import (
// InsertRecoveryDetection inserts the recovery analysis that has been detected.
func InsertRecoveryDetection(analysisEntry *inst.ReplicationAnalysis) error {
- sqlResult, err := db.ExecVTOrc(`
- insert or ignore
- into recovery_detection (
- alias,
- analysis,
- keyspace,
- shard,
- detection_timestamp
- ) values (
- ?,
- ?,
- ?,
- ?,
- datetime('now')
- )`,
+ sqlResult, err := db.ExecVTOrc(`INSERT OR IGNORE
+ INTO recovery_detection (
+ alias,
+ analysis,
+ keyspace,
+ shard,
+ detection_timestamp
+ ) VALUES (
+ ?,
+ ?,
+ ?,
+ ?,
+ DATETIME('now')
+ )`,
analysisEntry.AnalyzedInstanceAlias,
string(analysisEntry.Analysis),
analysisEntry.ClusterDetails.Keyspace,
@@ -65,26 +64,24 @@ func InsertRecoveryDetection(analysisEntry *inst.ReplicationAnalysis) error {
func writeTopologyRecovery(topologyRecovery *TopologyRecovery) (*TopologyRecovery, error) {
analysisEntry := topologyRecovery.AnalysisEntry
- sqlResult, err := db.ExecVTOrc(`
- insert or ignore
- into topology_recovery (
- recovery_id,
- alias,
- start_recovery,
- analysis,
- keyspace,
- shard,
- detection_id
- ) values (
- ?,
- ?,
- datetime('now'),
- ?,
- ?,
- ?,
- ?
- )
- `,
+ sqlResult, err := db.ExecVTOrc(`INSERT OR IGNORE
+ INTO topology_recovery (
+ recovery_id,
+ alias,
+ start_recovery,
+ analysis,
+ keyspace,
+ shard,
+ detection_id
+ ) VALUES (
+ ?,
+ ?,
+ DATETIME('now'),
+ ?,
+ ?,
+ ?,
+ ?
+ )`,
sqlutils.NilIfZero(topologyRecovery.ID),
analysisEntry.AnalyzedInstanceAlias,
string(analysisEntry.Analysis),
@@ -138,15 +135,16 @@ func AttemptRecoveryRegistration(analysisEntry *inst.ReplicationAnalysis) (*Topo
// ResolveRecovery is called on completion of a recovery process and updates the recovery status.
// It does not clear the "active period" as this still takes place in order to avoid flapping.
func writeResolveRecovery(topologyRecovery *TopologyRecovery) error {
- _, err := db.ExecVTOrc(`
- update topology_recovery set
- is_successful = ?,
- successor_alias = ?,
- all_errors = ?,
- end_recovery = datetime('now')
- where
- recovery_id = ?
- `, topologyRecovery.IsSuccessful,
+ _, err := db.ExecVTOrc(`UPDATE topology_recovery
+ SET
+ is_successful = ?,
+ successor_alias = ?,
+ all_errors = ?,
+ end_recovery = DATETIME('now')
+ WHERE
+ recovery_id = ?
+ `,
+ topologyRecovery.IsSuccessful,
topologyRecovery.SuccessorAlias,
strings.Join(topologyRecovery.AllErrors, "\n"),
topologyRecovery.ID,
@@ -160,26 +158,27 @@ func writeResolveRecovery(topologyRecovery *TopologyRecovery) error {
// readRecoveries reads recovery entry/audit entries from topology_recovery
func readRecoveries(whereCondition string, limit string, args []any) ([]*TopologyRecovery, error) {
res := []*TopologyRecovery{}
- query := fmt.Sprintf(`
- select
- recovery_id,
- alias,
- start_recovery,
- IFNULL(end_recovery, '') AS end_recovery,
- is_successful,
- ifnull(successor_alias, '') as successor_alias,
- analysis,
- keyspace,
- shard,
- all_errors,
- detection_id
- from
+ query := fmt.Sprintf(`SELECT
+ recovery_id,
+ alias,
+ start_recovery,
+ IFNULL(end_recovery, '') AS end_recovery,
+ is_successful,
+ IFNULL(successor_alias, '') AS successor_alias,
+ analysis,
+ keyspace,
+ shard,
+ all_errors,
+ detection_id
+ FROM
topology_recovery
%s
- order by
- recovery_id desc
+ ORDER BY recovery_id DESC
%s
- `, whereCondition, limit)
+ `,
+ whereCondition,
+ limit,
+ )
err := db.QueryVTOrc(query, args, func(m sqlutils.RowMap) error {
topologyRecovery := *NewTopologyRecovery(inst.ReplicationAnalysis{})
topologyRecovery.ID = m.GetInt64("recovery_id")
@@ -211,11 +210,10 @@ func readRecoveries(whereCondition string, limit string, args []any) ([]*Topolog
// ReadActiveClusterRecoveries reads recoveries that are ongoing for the given cluster.
func ReadActiveClusterRecoveries(keyspace string, shard string) ([]*TopologyRecovery, error) {
- whereClause := `
- where
- end_recovery IS NULL
- and keyspace=?
- and shard=?`
+ whereClause := `WHERE
+ end_recovery IS NULL
+ AND keyspace = ?
+ AND shard = ?`
return readRecoveries(whereClause, ``, sqlutils.Args(keyspace, shard))
}
@@ -225,23 +223,30 @@ func ReadRecentRecoveries(page int) ([]*TopologyRecovery, error) {
whereClause := ""
var args []any
if len(whereConditions) > 0 {
- whereClause = fmt.Sprintf("where %s", strings.Join(whereConditions, " and "))
+ whereClause = fmt.Sprintf("WHERE %s", strings.Join(whereConditions, " AND "))
}
- limit := `
- limit ?
- offset ?`
+ limit := `LIMIT ? OFFSET ?`
args = append(args, config.AuditPageSize, page*config.AuditPageSize)
return readRecoveries(whereClause, limit, args)
}
// writeTopologyRecoveryStep writes down a single step in a recovery process
func writeTopologyRecoveryStep(topologyRecoveryStep *TopologyRecoveryStep) error {
- sqlResult, err := db.ExecVTOrc(`
- insert or ignore
- into topology_recovery_steps (
- recovery_step_id, recovery_id, audit_at, message
- ) values (?, ?, datetime('now'), ?)
- `, sqlutils.NilIfZero(topologyRecoveryStep.ID), topologyRecoveryStep.RecoveryID, topologyRecoveryStep.Message,
+ sqlResult, err := db.ExecVTOrc(`INSERT OR IGNORE
+ INTO topology_recovery_steps (
+ recovery_step_id,
+ recovery_id,
+ audit_at,
+ message
+ ) VALUES (
+ ?,
+ ?,
+ DATETIME('now'),
+ ?
+ )`,
+ sqlutils.NilIfZero(topologyRecoveryStep.ID),
+ topologyRecoveryStep.RecoveryID,
+ topologyRecoveryStep.Message,
)
if err != nil {
log.Error(err)
diff --git a/go/vt/vtorc/logic/topology_recovery_dao_test.go b/go/vt/vtorc/logic/topology_recovery_dao_test.go
index 20dfb7e91e2..6a1d7c4c48f 100644
--- a/go/vt/vtorc/logic/topology_recovery_dao_test.go
+++ b/go/vt/vtorc/logic/topology_recovery_dao_test.go
@@ -70,10 +70,10 @@ func TestTopologyRecovery(t *testing.T) {
}
func TestExpireTableData(t *testing.T) {
- oldVal := config.Config.AuditPurgeDays
- config.Config.AuditPurgeDays = 10
+ oldVal := config.GetAuditPurgeDays()
+ config.SetAuditPurgeDays(10)
defer func() {
- config.Config.AuditPurgeDays = oldVal
+ config.SetAuditPurgeDays(oldVal)
}()
tests := []struct {
diff --git a/go/vt/vtorc/logic/vtorc.go b/go/vt/vtorc/logic/vtorc.go
index 9a468d1508a..39326525ce2 100644
--- a/go/vt/vtorc/logic/vtorc.go
+++ b/go/vt/vtorc/logic/vtorc.go
@@ -17,15 +17,14 @@
package logic
import (
- "os"
- "os/signal"
+ "context"
"sync"
"sync/atomic"
- "syscall"
"time"
"github.com/patrickmn/go-cache"
"github.com/sjmudd/stopwatch"
+ "golang.org/x/sync/errgroup"
"vitess.io/vitess/go/stats"
"vitess.io/vitess/go/vt/log"
@@ -35,6 +34,7 @@ import (
"vitess.io/vitess/go/vt/vtorc/discovery"
"vitess.io/vitess/go/vt/vtorc/inst"
ometrics "vitess.io/vitess/go/vt/vtorc/metrics"
+ "vitess.io/vitess/go/vt/vtorc/process"
"vitess.io/vitess/go/vt/vtorc/util"
)
@@ -73,26 +73,6 @@ func init() {
})
}
-// used in several places
-func instancePollSecondsDuration() time.Duration {
- return time.Duration(config.Config.InstancePollSeconds) * time.Second
-}
-
-// acceptSighupSignal registers for SIGHUP signal from the OS to reload the configuration files.
-func acceptSighupSignal() {
- c := make(chan os.Signal, 1)
-
- signal.Notify(c, syscall.SIGHUP)
- go func() {
- for range c {
- log.Infof("Received SIGHUP. Reloading configuration")
- _ = inst.AuditOperation("reload-configuration", "", "Triggered via SIGHUP")
- config.Reload()
- discoveryMetrics.SetExpirePeriod(time.Duration(config.DiscoveryCollectionRetentionSeconds) * time.Second)
- }
- }()
-}
-
// closeVTOrc runs all the operations required to cleanly shutdown VTOrc
func closeVTOrc() {
log.Infof("Starting VTOrc shutdown")
@@ -161,7 +141,7 @@ func DiscoverInstance(tabletAlias string, forceDiscovery bool) {
defer func() {
latency.Stop("total")
discoveryTime := latency.Elapsed("total")
- if discoveryTime > instancePollSecondsDuration() {
+ if discoveryTime > config.GetInstancePollTime() {
instancePollSecondsExceededCounter.Add(1)
log.Warningf("discoverInstance exceeded InstancePollSeconds for %+v, took %.4fs", tabletAlias, discoveryTime.Seconds())
if metric != nil {
@@ -177,7 +157,7 @@ func DiscoverInstance(tabletAlias string, forceDiscovery bool) {
// Calculate the expiry period each time as InstancePollSeconds
// _may_ change during the run of the process (via SIGHUP) and
// it is not possible to change the cache's default expiry..
- if existsInCacheError := recentDiscoveryOperationKeys.Add(tabletAlias, true, instancePollSecondsDuration()); existsInCacheError != nil && !forceDiscovery {
+ if existsInCacheError := recentDiscoveryOperationKeys.Add(tabletAlias, true, config.GetInstancePollTime()); existsInCacheError != nil && !forceDiscovery {
// Just recently attempted
return
}
@@ -271,24 +251,23 @@ func onHealthTick() {
// nolint SA1015: using time.Tick leaks the underlying ticker
func ContinuousDiscovery() {
log.Infof("continuous discovery: setting up")
- recentDiscoveryOperationKeys = cache.New(instancePollSecondsDuration(), time.Second)
+ recentDiscoveryOperationKeys = cache.New(config.GetInstancePollTime(), time.Second)
go handleDiscoveryRequests()
healthTick := time.Tick(config.HealthPollSeconds * time.Second)
caretakingTick := time.Tick(time.Minute)
- recoveryTick := time.Tick(time.Duration(config.Config.RecoveryPollSeconds) * time.Second)
+ recoveryTick := time.Tick(config.GetRecoveryPollDuration())
tabletTopoTick := OpenTabletDiscovery()
var recoveryEntrance int64
var snapshotTopologiesTick <-chan time.Time
- if config.Config.SnapshotTopologiesIntervalHours > 0 {
- snapshotTopologiesTick = time.Tick(time.Duration(config.Config.SnapshotTopologiesIntervalHours) * time.Hour)
+ if config.GetSnapshotTopologyInterval() > 0 {
+ snapshotTopologiesTick = time.Tick(config.GetSnapshotTopologyInterval())
}
go func() {
_ = ometrics.InitMetrics()
}()
- go acceptSighupSignal()
// On termination of the server, we should close VTOrc cleanly
servenv.OnTermSync(closeVTOrc)
@@ -328,30 +307,34 @@ func ContinuousDiscovery() {
go inst.SnapshotTopologies()
}()
case <-tabletTopoTick:
- refreshAllInformation()
+ ctx, cancel := context.WithTimeout(context.Background(), config.GetTopoInformationRefreshDuration())
+ if err := refreshAllInformation(ctx); err != nil {
+ log.Errorf("failed to refresh topo information: %+v", err)
+ }
+ cancel()
}
}
}
// refreshAllInformation refreshes both shard and tablet information. This is meant to be run on tablet topo ticks.
-func refreshAllInformation() {
- // Create a wait group
- var wg sync.WaitGroup
+func refreshAllInformation(ctx context.Context) error {
+ // Create an errgroup
+ eg, ctx := errgroup.WithContext(ctx)
// Refresh all keyspace information.
- wg.Add(1)
- go func() {
- defer wg.Done()
- RefreshAllKeyspacesAndShards()
- }()
+ eg.Go(func() error {
+ return RefreshAllKeyspacesAndShards(ctx)
+ })
// Refresh all tablets.
- wg.Add(1)
- go func() {
- defer wg.Done()
- refreshAllTablets()
- }()
+ eg.Go(func() error {
+ return refreshAllTablets(ctx)
+ })
// Wait for both the refreshes to complete
- wg.Wait()
+ err := eg.Wait()
+ if err == nil {
+ process.FirstDiscoveryCycleComplete.Store(true)
+ }
+ return err
}
diff --git a/go/vt/vtorc/logic/vtorc_test.go b/go/vt/vtorc/logic/vtorc_test.go
index c8f2ac3bfdc..edd8141e8b7 100644
--- a/go/vt/vtorc/logic/vtorc_test.go
+++ b/go/vt/vtorc/logic/vtorc_test.go
@@ -1,11 +1,17 @@
package logic
import (
+ "context"
"sync/atomic"
"testing"
"time"
"github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
+ "vitess.io/vitess/go/vt/topo/memorytopo"
+ "vitess.io/vitess/go/vt/vtorc/db"
+ "vitess.io/vitess/go/vt/vtorc/process"
)
func TestWaitForLocksRelease(t *testing.T) {
@@ -54,3 +60,41 @@ func waitForLocksReleaseAndGetTimeWaitedFor() time.Duration {
waitForLocksRelease()
return time.Since(start)
}
+
+func TestRefreshAllInformation(t *testing.T) {
+ // Store the old flags and restore on test completion
+ oldTs := ts
+ defer func() {
+ ts = oldTs
+ }()
+
+ // Clear the database after the test. The easiest way to do that is to run all the initialization commands again.
+ defer func() {
+ db.ClearVTOrcDatabase()
+ }()
+
+ // Verify in the beginning, we have the first DiscoveredOnce field false.
+ _, discoveredOnce := process.HealthTest()
+ require.False(t, discoveredOnce)
+
+ // Create a memory topo-server and create the keyspace and shard records
+ ts = memorytopo.NewServer(context.Background(), cell1)
+ _, err := ts.GetOrCreateShard(context.Background(), keyspace, shard)
+ require.NoError(t, err)
+
+ // Test error
+ ctx, cancel := context.WithCancel(context.Background())
+ cancel() // cancel context to simulate timeout
+ require.Error(t, refreshAllInformation(ctx))
+ require.False(t, process.FirstDiscoveryCycleComplete.Load())
+ _, discoveredOnce = process.HealthTest()
+ require.False(t, discoveredOnce)
+
+ // Test success
+ ctx2, cancel2 := context.WithCancel(context.Background())
+ defer cancel2()
+ require.NoError(t, refreshAllInformation(ctx2))
+ require.True(t, process.FirstDiscoveryCycleComplete.Load())
+ _, discoveredOnce = process.HealthTest()
+ require.True(t, discoveredOnce)
+}
diff --git a/go/vt/vtorc/process/health.go b/go/vt/vtorc/process/health.go
index 87a11733f66..d448f03bb83 100644
--- a/go/vt/vtorc/process/health.go
+++ b/go/vt/vtorc/process/health.go
@@ -35,12 +35,17 @@ var ThisNodeHealth = &NodeHealth{}
// writeHealthToDatabase writes to the database and returns if it was successful.
func writeHealthToDatabase() bool {
- _, err := db.ExecVTOrc("delete from node_health")
+ _, err := db.ExecVTOrc("DELETE FROM node_health")
if err != nil {
log.Error(err)
return false
}
- sqlResult, err := db.ExecVTOrc(`insert into node_health (last_seen_active) values (datetime('now'))`)
+ sqlResult, err := db.ExecVTOrc(`INSERT
+ INTO node_health (
+ last_seen_active
+ ) VALUES (
+ DATETIME('now')
+ )`)
if err != nil {
log.Error(err)
return false
@@ -57,7 +62,7 @@ func writeHealthToDatabase() bool {
func HealthTest() (health *NodeHealth, discoveredOnce bool) {
ThisNodeHealth.LastReported = time.Now()
discoveredOnce = FirstDiscoveryCycleComplete.Load()
- ThisNodeHealth.Healthy = writeHealthToDatabase()
+ ThisNodeHealth.Healthy = discoveredOnce && writeHealthToDatabase()
return ThisNodeHealth, discoveredOnce
}
diff --git a/go/vt/vtorc/process/health_test.go b/go/vt/vtorc/process/health_test.go
new file mode 100644
index 00000000000..c198deda4e4
--- /dev/null
+++ b/go/vt/vtorc/process/health_test.go
@@ -0,0 +1,46 @@
+/*
+Copyright 2024 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package process
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/require"
+)
+
+func TestHealthTest(t *testing.T) {
+ defer func() {
+ FirstDiscoveryCycleComplete.Store(false)
+ ThisNodeHealth = &NodeHealth{}
+ }()
+
+ require.Zero(t, ThisNodeHealth.LastReported)
+ require.False(t, ThisNodeHealth.Healthy)
+
+ ThisNodeHealth = &NodeHealth{}
+ health, discoveredOnce := HealthTest()
+ require.False(t, health.Healthy)
+ require.False(t, discoveredOnce)
+ require.NotZero(t, ThisNodeHealth.LastReported)
+
+ ThisNodeHealth = &NodeHealth{}
+ FirstDiscoveryCycleComplete.Store(true)
+ health, discoveredOnce = HealthTest()
+ require.True(t, health.Healthy)
+ require.True(t, discoveredOnce)
+ require.NotZero(t, ThisNodeHealth.LastReported)
+}
diff --git a/go/vt/vtorc/server/api.go b/go/vt/vtorc/server/api.go
index 5e9a84c0a29..177f2c80333 100644
--- a/go/vt/vtorc/server/api.go
+++ b/go/vt/vtorc/server/api.go
@@ -25,6 +25,7 @@ import (
"time"
"vitess.io/vitess/go/acl"
+ "vitess.io/vitess/go/viperutil/debug"
"vitess.io/vitess/go/vt/servenv"
"vitess.io/vitess/go/vt/vtorc/collection"
"vitess.io/vitess/go/vt/vtorc/discovery"
@@ -46,6 +47,7 @@ const (
enableGlobalRecoveriesAPI = "/api/enable-global-recoveries"
replicationAnalysisAPI = "/api/replication-analysis"
databaseStateAPI = "/api/database-state"
+ configAPI = "/api/config"
healthAPI = "/debug/health"
AggregatedDiscoveryMetricsAPI = "/api/aggregated-discovery-metrics"
@@ -62,6 +64,7 @@ var (
enableGlobalRecoveriesAPI,
replicationAnalysisAPI,
databaseStateAPI,
+ configAPI,
healthAPI,
AggregatedDiscoveryMetricsAPI,
}
@@ -90,6 +93,8 @@ func (v *vtorcAPI) ServeHTTP(response http.ResponseWriter, request *http.Request
replicationAnalysisAPIHandler(response, request)
case databaseStateAPI:
databaseStateAPIHandler(response)
+ case configAPI:
+ configAPIHandler(response)
case AggregatedDiscoveryMetricsAPI:
AggregatedDiscoveryMetricsAPIHandler(response, request)
default:
@@ -106,7 +111,7 @@ func getACLPermissionLevelForAPI(apiEndpoint string) string {
return acl.MONITORING
case disableGlobalRecoveriesAPI, enableGlobalRecoveriesAPI:
return acl.ADMIN
- case replicationAnalysisAPI:
+ case replicationAnalysisAPI, configAPI:
return acl.MONITORING
case healthAPI, databaseStateAPI:
return acl.MONITORING
@@ -180,6 +185,17 @@ func databaseStateAPIHandler(response http.ResponseWriter) {
writePlainTextResponse(response, ds, http.StatusOK)
}
+// configAPIHandler is the handler for the configAPI endpoint
+func configAPIHandler(response http.ResponseWriter) {
+ settingsMap := debug.AllSettings()
+ jsonOut, err := json.MarshalIndent(settingsMap, "", "\t")
+ if err != nil {
+ http.Error(response, err.Error(), http.StatusInternalServerError)
+ return
+ }
+ writePlainTextResponse(response, string(jsonOut), http.StatusOK)
+}
+
// AggregatedDiscoveryMetricsAPIHandler is the handler for the discovery metrics endpoint
func AggregatedDiscoveryMetricsAPIHandler(response http.ResponseWriter, request *http.Request) {
// return metrics for last x seconds
diff --git a/go/vt/vtorc/server/api_test.go b/go/vt/vtorc/server/api_test.go
index c352d1e600f..ab6b9eed9af 100644
--- a/go/vt/vtorc/server/api_test.go
+++ b/go/vt/vtorc/server/api_test.go
@@ -31,6 +31,9 @@ func TestGetACLPermissionLevelForAPI(t *testing.T) {
}, {
apiEndpoint: healthAPI,
want: acl.MONITORING,
+ }, {
+ apiEndpoint: configAPI,
+ want: acl.MONITORING,
}, {
apiEndpoint: "gibberish",
want: acl.ADMIN,
diff --git a/go/vt/vttablet/common/flags.go b/go/vt/vttablet/common/flags.go
index 3c6141d62eb..75e8e58982f 100644
--- a/go/vt/vttablet/common/flags.go
+++ b/go/vt/vttablet/common/flags.go
@@ -33,8 +33,7 @@ const (
)
var (
- // Default flags: currently VReplicationExperimentalFlagVPlayerBatching is not enabled by default.
- vreplicationExperimentalFlags = VReplicationExperimentalFlagOptimizeInserts | VReplicationExperimentalFlagAllowNoBlobBinlogRowImage
+ vreplicationExperimentalFlags = VReplicationExperimentalFlagOptimizeInserts | VReplicationExperimentalFlagAllowNoBlobBinlogRowImage | VReplicationExperimentalFlagVPlayerBatching
vreplicationNetReadTimeout = 300
vreplicationNetWriteTimeout = 600
vreplicationCopyPhaseDuration = 1 * time.Hour
diff --git a/go/vt/vttablet/endtoend/config_test.go b/go/vt/vttablet/endtoend/config_test.go
index 4abf5b36c21..c3ad5f8a9db 100644
--- a/go/vt/vttablet/endtoend/config_test.go
+++ b/go/vt/vttablet/endtoend/config_test.go
@@ -36,7 +36,7 @@ import (
)
func TestPoolSize(t *testing.T) {
- revert := changeVar(t, "PoolSize", "1")
+ revert := changeVar(t, "ReadPoolSize", "1")
defer revert()
vstart := framework.DebugVars()
@@ -92,7 +92,7 @@ func TestTxPoolSize(t *testing.T) {
defer client2.Rollback()
verifyIntValue(t, framework.DebugVars(), "FoundRowsPoolAvailable", framework.FetchInt(vstart, "FoundRowsPoolAvailable")-1)
- revert := changeVar(t, "TxPoolSize", "1")
+ revert := changeVar(t, "TransactionPoolSize", "1")
defer revert()
vend := framework.DebugVars()
verifyIntValue(t, vend, "TransactionPoolAvailable", 0)
diff --git a/go/vt/vttablet/endtoend/connecttcp/main_test.go b/go/vt/vttablet/endtoend/connecttcp/main_test.go
index 9d52b1287a1..43be05893cc 100644
--- a/go/vt/vttablet/endtoend/connecttcp/main_test.go
+++ b/go/vt/vttablet/endtoend/connecttcp/main_test.go
@@ -22,6 +22,7 @@ import (
"fmt"
"os"
"testing"
+ "time"
"vitess.io/vitess/go/mysql"
vttestpb "vitess.io/vitess/go/vt/proto/vttest"
@@ -86,8 +87,7 @@ func TestMain(m *testing.M) {
defer cancel()
config := tabletenv.NewDefaultConfig()
- config.TwoPCEnable = true
- config.TwoPCAbandonAge = 1
+ config.TwoPCAbandonAge = 1 * time.Second
if err := framework.StartCustomServer(ctx, connParams, connAppDebugParams, cluster.DbName(), config); err != nil {
fmt.Fprintf(os.Stderr, "%v", err)
diff --git a/go/vt/vttablet/endtoend/framework/server.go b/go/vt/vttablet/endtoend/framework/server.go
index 0124bb992ba..3374aadb450 100644
--- a/go/vt/vttablet/endtoend/framework/server.go
+++ b/go/vt/vttablet/endtoend/framework/server.go
@@ -108,8 +108,7 @@ func StartCustomServer(ctx context.Context, connParams, connAppDebugParams mysql
func StartServer(ctx context.Context, connParams, connAppDebugParams mysql.ConnParams, dbName string) error {
config := tabletenv.NewDefaultConfig()
config.StrictTableACL = true
- config.TwoPCEnable = true
- config.TwoPCAbandonAge = 1
+ config.TwoPCAbandonAge = 1 * time.Second
config.HotRowProtection.Mode = tabletenv.Enable
config.TrackSchemaVersions = true
config.GracePeriods.Shutdown = 2 * time.Second
diff --git a/go/vt/vttablet/endtoend/twopc/main_test.go b/go/vt/vttablet/endtoend/twopc/main_test.go
index 090751503d4..3b68ce273e1 100644
--- a/go/vt/vttablet/endtoend/twopc/main_test.go
+++ b/go/vt/vttablet/endtoend/twopc/main_test.go
@@ -22,6 +22,7 @@ import (
"fmt"
"os"
"testing"
+ "time"
"vitess.io/vitess/go/mysql"
"vitess.io/vitess/go/vt/vttablet/endtoend/framework"
@@ -83,8 +84,7 @@ func TestMain(m *testing.M) {
defer cancel()
config := tabletenv.NewDefaultConfig()
- config.TwoPCEnable = true
- config.TwoPCAbandonAge = 1
+ config.TwoPCAbandonAge = 1 * time.Second
err := framework.StartCustomServer(ctx, connParams, connAppDebugParams, cluster.DbName(), config)
if err != nil {
fmt.Fprintf(os.Stderr, "%v", err)
diff --git a/go/vt/vttablet/grpctmserver/server.go b/go/vt/vttablet/grpctmserver/server.go
index 777f641b1fc..6f0fd2aa4dc 100644
--- a/go/vt/vttablet/grpctmserver/server.go
+++ b/go/vt/vttablet/grpctmserver/server.go
@@ -637,6 +637,7 @@ func (s *server) StopReplicationAndGetStatus(ctx context.Context, request *table
if err == nil {
response.Status = statusResponse.Status
}
+
return response, err
}
diff --git a/go/vt/vttablet/onlineddl/executor.go b/go/vt/vttablet/onlineddl/executor.go
index 555cadd53ea..f8b5cfd9b8d 100644
--- a/go/vt/vttablet/onlineddl/executor.go
+++ b/go/vt/vttablet/onlineddl/executor.go
@@ -94,13 +94,18 @@ var (
ptOSCBinaryPath = "/usr/bin/pt-online-schema-change"
migrationCheckInterval = 1 * time.Minute
retainOnlineDDLTables = 24 * time.Hour
- defaultCutOverThreshold = 10 * time.Second
maxConcurrentOnlineDDLs = 256
migrationNextCheckIntervals = []time.Duration{1 * time.Second, 5 * time.Second, 10 * time.Second, 20 * time.Second}
cutoverIntervals = []time.Duration{0, 1 * time.Minute, 5 * time.Minute, 10 * time.Minute, 30 * time.Minute}
)
+const (
+ defaultCutOverThreshold = 10 * time.Second
+ minCutOverThreshold = 5 * time.Second
+ maxCutOverThreshold = 30 * time.Second
+)
+
func init() {
servenv.OnParseFor("vtcombo", registerOnlineDDLFlags)
servenv.OnParseFor("vttablet", registerOnlineDDLFlags)
@@ -199,13 +204,19 @@ func newGCTableRetainTime() time.Time {
return time.Now().UTC().Add(retainOnlineDDLTables)
}
-// getMigrationCutOverThreshold returns the cut-over threshold for the given migration. The migration's
-// DDL Strategy may explicitly set the threshold; otherwise, we return the default cut-over threshold.
-func getMigrationCutOverThreshold(onlineDDL *schema.OnlineDDL) time.Duration {
- if threshold, _ := onlineDDL.StrategySetting().CutOverThreshold(); threshold != 0 {
- return threshold
+// safeMigrationCutOverThreshold receives a desired threshold, and returns a cut-over threshold that
+// is reasonable to use
+func safeMigrationCutOverThreshold(threshold time.Duration) (time.Duration, error) {
+ switch {
+ case threshold == 0:
+ return defaultCutOverThreshold, nil
+ case threshold < minCutOverThreshold:
+ return defaultCutOverThreshold, vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "cut-over min value is %v", minCutOverThreshold)
+ case threshold > maxCutOverThreshold:
+ return defaultCutOverThreshold, vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "cut-over max value is %v", maxCutOverThreshold)
+ default:
+ return threshold, nil
}
- return defaultCutOverThreshold
}
// NewExecutor creates a new gh-ost executor.
@@ -890,8 +901,6 @@ func (e *Executor) cutOverVReplMigration(ctx context.Context, s *VReplStream, sh
var sentryTableName string
- migrationCutOverThreshold := getMigrationCutOverThreshold(onlineDDL)
-
waitForPos := func(s *VReplStream, pos replication.Position, timeout time.Duration) error {
ctx, cancel := context.WithTimeout(ctx, timeout)
defer cancel()
@@ -951,8 +960,8 @@ func (e *Executor) cutOverVReplMigration(ctx context.Context, s *VReplStream, sh
}
defer preparationsConn.Recycle()
// Set large enough `@@lock_wait_timeout` so that it does not interfere with the cut-over operation.
- // The code will ensure everything that needs to be terminated by `migrationCutOverThreshold` will be terminated.
- preparationConnRestoreLockWaitTimeout, err := e.initConnectionLockWaitTimeout(ctx, preparationsConn.Conn, 3*migrationCutOverThreshold)
+ // The code will ensure everything that needs to be terminated by `onlineDDL.CutOverThreshold` will be terminated.
+ preparationConnRestoreLockWaitTimeout, err := e.initConnectionLockWaitTimeout(ctx, preparationsConn.Conn, 3*onlineDDL.CutOverThreshold)
if err != nil {
return vterrors.Wrap(err, "failed setting lock_wait_timeout on locking connection")
}
@@ -989,7 +998,7 @@ func (e *Executor) cutOverVReplMigration(ctx context.Context, s *VReplStream, sh
// impacts query serving so we wait for a multiple of the cutover threshold here, with
// that variable primarily serving to limit the max time we later spend waiting for
// a position again AFTER we've taken the locks and table access is blocked.
- if err := waitForPos(s, postSentryPos, migrationCutOverThreshold*3); err != nil {
+ if err := waitForPos(s, postSentryPos, onlineDDL.CutOverThreshold*3); err != nil {
return vterrors.Wrapf(err, "failed waiting for pos after sentry creation")
}
e.updateMigrationStage(ctx, onlineDDL.UUID, "post-sentry pos reached")
@@ -1001,8 +1010,8 @@ func (e *Executor) cutOverVReplMigration(ctx context.Context, s *VReplStream, sh
}
defer lockConn.Recycle()
// Set large enough `@@lock_wait_timeout` so that it does not interfere with the cut-over operation.
- // The code will ensure everything that needs to be terminated by `migrationCutOverThreshold` will be terminated.
- lockConnRestoreLockWaitTimeout, err := e.initConnectionLockWaitTimeout(ctx, lockConn.Conn, 5*migrationCutOverThreshold)
+ // The code will ensure everything that needs to be terminated by `onlineDDL.CutOverThreshold` will be terminated.
+ lockConnRestoreLockWaitTimeout, err := e.initConnectionLockWaitTimeout(ctx, lockConn.Conn, 5*onlineDDL.CutOverThreshold)
if err != nil {
return vterrors.Wrapf(err, "failed setting lock_wait_timeout on locking connection")
}
@@ -1016,8 +1025,8 @@ func (e *Executor) cutOverVReplMigration(ctx context.Context, s *VReplStream, sh
return vterrors.Wrapf(err, "failed getting rename connection")
}
// Set large enough `@@lock_wait_timeout` so that it does not interfere with the cut-over operation.
- // The code will ensure everything that needs to be terminated by `migrationCutOverThreshold` will be terminated.
- renameConnRestoreLockWaitTimeout, err := e.initConnectionLockWaitTimeout(ctx, renameConn.Conn, 5*migrationCutOverThreshold*4)
+ // The code will ensure everything that needs to be terminated by `onlineDDL.CutOverThreshold` will be terminated.
+ renameConnRestoreLockWaitTimeout, err := e.initConnectionLockWaitTimeout(ctx, renameConn.Conn, 5*onlineDDL.CutOverThreshold*4)
if err != nil {
return vterrors.Wrapf(err, "failed setting lock_wait_timeout on rename connection")
}
@@ -1052,7 +1061,7 @@ func (e *Executor) cutOverVReplMigration(ctx context.Context, s *VReplStream, sh
// This function waits until it finds the RENAME TABLE... query running in MySQL's PROCESSLIST, or until timeout
// The function assumes that one of the renamed tables is locked, thus causing the RENAME to block. If nothing
// is locked, then the RENAME will be near-instantaneous and it's unlikely that the function will find it.
- renameWaitCtx, cancel := context.WithTimeout(ctx, migrationCutOverThreshold)
+ renameWaitCtx, cancel := context.WithTimeout(ctx, onlineDDL.CutOverThreshold)
defer cancel()
for {
@@ -1081,7 +1090,7 @@ func (e *Executor) cutOverVReplMigration(ctx context.Context, s *VReplStream, sh
// Preparation is complete. We proceed to cut-over.
toggleBuffering := func(bufferQueries bool) error {
log.Infof("toggling buffering: %t in migration %v", bufferQueries, onlineDDL.UUID)
- timeout := migrationCutOverThreshold + qrBufferExtraTimeout
+ timeout := onlineDDL.CutOverThreshold + qrBufferExtraTimeout
e.toggleBufferTableFunc(bufferingCtx, onlineDDL.Table, timeout, bufferQueries)
if !bufferQueries {
@@ -1147,7 +1156,7 @@ func (e *Executor) cutOverVReplMigration(ctx context.Context, s *VReplStream, sh
// real production
e.updateMigrationStage(ctx, onlineDDL.UUID, "locking tables")
- lockCtx, cancel := context.WithTimeout(ctx, migrationCutOverThreshold)
+ lockCtx, cancel := context.WithTimeout(ctx, onlineDDL.CutOverThreshold)
defer cancel()
lockTableQuery := sqlparser.BuildParsedQuery(sqlLockTwoTablesWrite, sentryTableName, onlineDDL.Table)
if _, err := lockConn.Conn.Exec(lockCtx, lockTableQuery.Query, 1, false); err != nil {
@@ -1187,7 +1196,7 @@ func (e *Executor) cutOverVReplMigration(ctx context.Context, s *VReplStream, sh
}
e.updateMigrationStage(ctx, onlineDDL.UUID, "waiting for post-lock pos: %v", replication.EncodePosition(postWritesPos))
- if err := waitForPos(s, postWritesPos, migrationCutOverThreshold); err != nil {
+ if err := waitForPos(s, postWritesPos, onlineDDL.CutOverThreshold); err != nil {
e.updateMigrationStage(ctx, onlineDDL.UUID, "timeout while waiting for post-lock pos: %v", err)
return vterrors.Wrapf(err, "failed waiting for pos after locking")
}
@@ -1220,14 +1229,14 @@ func (e *Executor) cutOverVReplMigration(ctx context.Context, s *VReplStream, sh
{
dropTableQuery := sqlparser.BuildParsedQuery(sqlDropTable, sentryTableName)
- lockCtx, cancel := context.WithTimeout(ctx, migrationCutOverThreshold)
+ lockCtx, cancel := context.WithTimeout(ctx, onlineDDL.CutOverThreshold)
defer cancel()
if _, err := lockConn.Conn.Exec(lockCtx, dropTableQuery.Query, 1, false); err != nil {
return vterrors.Wrapf(err, "failed dropping sentry table")
}
}
{
- lockCtx, cancel := context.WithTimeout(ctx, migrationCutOverThreshold)
+ lockCtx, cancel := context.WithTimeout(ctx, onlineDDL.CutOverThreshold)
defer cancel()
e.updateMigrationStage(ctx, onlineDDL.UUID, "unlocking tables")
if _, err := lockConn.Conn.Exec(lockCtx, sqlUnlockTables, 1, false); err != nil {
@@ -1235,7 +1244,7 @@ func (e *Executor) cutOverVReplMigration(ctx context.Context, s *VReplStream, sh
}
}
{
- lockCtx, cancel := context.WithTimeout(ctx, migrationCutOverThreshold)
+ lockCtx, cancel := context.WithTimeout(ctx, onlineDDL.CutOverThreshold)
defer cancel()
e.updateMigrationStage(lockCtx, onlineDDL.UUID, "waiting for RENAME to complete")
if err := <-renameCompleteChan; err != nil {
@@ -2034,7 +2043,9 @@ func (e *Executor) readMigration(ctx context.Context, uuid string) (onlineDDL *s
WasReadyToComplete: row.AsInt64("was_ready_to_complete", 0),
TabletAlias: row["tablet"].ToString(),
MigrationContext: row["migration_context"].ToString(),
+ CutOverThreshold: time.Second * time.Duration(row.AsInt64("cutover_threshold_seconds", 0)),
}
+ onlineDDL.CutOverThreshold, _ = safeMigrationCutOverThreshold(onlineDDL.CutOverThreshold)
return onlineDDL, row, nil
}
@@ -3572,55 +3583,36 @@ func (e *Executor) isPreserveForeignKeySupported(ctx context.Context) (isSupport
// and is up to date with the binlogs.
func (e *Executor) isVReplMigrationReadyToCutOver(ctx context.Context, onlineDDL *schema.OnlineDDL, s *VReplStream) (isReady bool, err error) {
// Check all the cases where migration is still running:
- {
- // when ready to cut-over, pos must have some value
- if s.pos == "" {
- return false, nil
- }
+ // when ready to cut-over, pos must have some value
+ if s.pos == "" {
+ return false, nil
}
- {
- // Both time_updated and transaction_timestamp must be in close proximity to each
- // other and to the time now, otherwise that means we're lagging and it's not a good time
- // to cut-over
- durationDiff := func(t1, t2 time.Time) time.Duration {
- return t1.Sub(t2).Abs()
- }
- migrationCutOverThreshold := getMigrationCutOverThreshold(onlineDDL)
-
- timeNow := time.Now()
- timeUpdated := time.Unix(s.timeUpdated, 0)
- if durationDiff(timeNow, timeUpdated) > migrationCutOverThreshold {
- return false, nil
- }
- // Let's look at transaction timestamp. This gets written by any ongoing
- // writes on the server (whether on this table or any other table)
- transactionTimestamp := time.Unix(s.transactionTimestamp, 0)
- if durationDiff(timeNow, transactionTimestamp) > migrationCutOverThreshold {
- return false, nil
- }
+ // Both time_updated and transaction_timestamp must be in close proximity to each
+ // other and to the time now, otherwise that means we're lagging and it's not a good time
+ // to cut-over
+ if s.Lag() > onlineDDL.CutOverThreshold {
+ return false, nil
}
- {
- // copy_state must have no entries for this vreplication id: if entries are
- // present that means copy is still in progress
- query, err := sqlparser.ParseAndBind(sqlReadCountCopyState,
- sqltypes.Int32BindVariable(s.id),
- )
- if err != nil {
- return false, err
- }
- r, err := e.execQuery(ctx, query)
- if err != nil {
- return false, err
- }
- csRow := r.Named().Row()
- if csRow == nil {
- return false, err
- }
- count := csRow.AsInt64("cnt", 0)
- if count > 0 {
- // Still copying
- return false, nil
- }
+ // copy_state must have no entries for this vreplication id: if entries are
+ // present that means copy is still in progress
+ query, err := sqlparser.ParseAndBind(sqlReadCountCopyState,
+ sqltypes.Int32BindVariable(s.id),
+ )
+ if err != nil {
+ return false, err
+ }
+ r, err := e.execQuery(ctx, query)
+ if err != nil {
+ return false, err
+ }
+ csRow := r.Named().Row()
+ if csRow == nil {
+ return false, err
+ }
+ count := csRow.AsInt64("cnt", 0)
+ if count > 0 {
+ // Still copying
+ return false, nil
}
return true, nil
@@ -3767,6 +3759,7 @@ func (e *Executor) reviewRunningMigrations(ctx context.Context) (countRunnning i
}
_ = e.updateRowsCopied(ctx, uuid, s.rowsCopied)
_ = e.updateMigrationProgressByRowsCopied(ctx, uuid, s.rowsCopied)
+ _ = e.updateMigrationVreplicationLagSeconds(ctx, uuid, int64(s.Lag().Seconds()))
_ = e.updateMigrationETASecondsByProgress(ctx, uuid)
if s.timeThrottled != 0 {
// Avoid creating a 0000-00-00 00:00:00 timestamp
@@ -4525,6 +4518,18 @@ func (e *Executor) updateRowsCopied(ctx context.Context, uuid string, rowsCopied
return err
}
+func (e *Executor) updateMigrationVreplicationLagSeconds(ctx context.Context, uuid string, vreplicationLagSeconds int64) error {
+ query, err := sqlparser.ParseAndBind(sqlUpdateMigrationVreplicationLagSeconds,
+ sqltypes.Int64BindVariable(vreplicationLagSeconds),
+ sqltypes.StringBindVariable(uuid),
+ )
+ if err != nil {
+ return err
+ }
+ _, err = e.execQuery(ctx, query)
+ return err
+}
+
func (e *Executor) updateVitessLivenessIndicator(ctx context.Context, uuid string, livenessIndicator int64) error {
query, err := sqlparser.ParseAndBind(sqlUpdateMigrationVitessLivenessIndicator,
sqltypes.Int64BindVariable(livenessIndicator),
@@ -4747,6 +4752,42 @@ func (e *Executor) ForceCutOverPendingMigrations(ctx context.Context) (result *s
return result, nil
}
+// SetMigrationCutOverThreshold updates the cut-over threshold for a given migration.
+func (e *Executor) SetMigrationCutOverThreshold(ctx context.Context, uuid string, thresholdString string) (result *sqltypes.Result, err error) {
+	if atomic.LoadInt64(&e.isOpen) == 0 {
+		return nil, vterrors.New(vtrpcpb.Code_FAILED_PRECONDITION, schema.ErrOnlineDDLDisabled.Error())
+	}
+	if !schema.IsOnlineDDLUUID(uuid) {
+		return nil, vterrors.Errorf(vtrpcpb.Code_UNKNOWN, "Not a valid migration ID in CUTOVER_THRESHOLD: %s", uuid)
+	}
+	threshold, err := time.ParseDuration(thresholdString)
+	if err != nil {
+		return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "invalid cut-over threshold value: %s. Try '5s' to '30s'", thresholdString)
+	}
+
+	log.Infof("SetMigrationCutOverThreshold: request to set cut-over threshold to %v on migration %s", threshold, uuid)
+	e.migrationMutex.Lock()
+	defer e.migrationMutex.Unlock()
+
+	// Clamp/validate the requested threshold into the supported range.
+	threshold, err = safeMigrationCutOverThreshold(threshold)
+	if err != nil {
+		return nil, err
+	}
+	query, err := sqlparser.ParseAndBind(sqlUpdateCutOverThresholdSeconds,
+		sqltypes.Int64BindVariable(int64(threshold.Seconds())),
+		sqltypes.StringBindVariable(uuid),
+	)
+	if err != nil {
+		return nil, err
+	}
+	rs, err := e.execQuery(ctx, query)
+	if err != nil {
+		return nil, err
+	}
+	e.triggerNextCheckInterval()
+	log.Infof("SetMigrationCutOverThreshold: migration %s cut-over threshold was set to %v", uuid, threshold)
+	return rs, nil
+}
+
// CompleteMigration clears the postpone_completion flag for a given migration, assuming it was set in the first place
func (e *Executor) CompleteMigration(ctx context.Context, uuid string) (result *sqltypes.Result, err error) {
if atomic.LoadInt64(&e.isOpen) == 0 {
@@ -5032,7 +5073,14 @@ func (e *Executor) SubmitMigration(
// Explicit retention indicated by `--retain-artifact` DDL strategy flag for this migration. Override!
retainArtifactsSeconds = int64((retainArtifacts).Seconds())
}
-
+ cutoverThreshold, err := onlineDDL.StrategySetting().CutOverThreshold()
+ if err != nil {
+ return nil, vterrors.Wrapf(err, "parsing cut-over threshold in migration %v", onlineDDL.UUID)
+ }
+ cutoverThreshold, err = safeMigrationCutOverThreshold(cutoverThreshold)
+ if err != nil {
+ return nil, vterrors.Wrapf(err, "validating cut-over threshold in migration %v", onlineDDL.UUID)
+ }
_, allowConcurrentMigration := e.allowConcurrentMigration(onlineDDL)
submitQuery, err := sqlparser.ParseAndBind(sqlInsertMigration,
sqltypes.StringBindVariable(onlineDDL.UUID),
@@ -5048,6 +5096,7 @@ func (e *Executor) SubmitMigration(
sqltypes.StringBindVariable(string(schema.OnlineDDLStatusQueued)),
sqltypes.StringBindVariable(e.TabletAliasString()),
sqltypes.Int64BindVariable(retainArtifactsSeconds),
+ sqltypes.Int64BindVariable(int64(cutoverThreshold.Seconds())),
sqltypes.BoolBindVariable(onlineDDL.StrategySetting().IsPostponeLaunch()),
sqltypes.BoolBindVariable(onlineDDL.StrategySetting().IsPostponeCompletion()),
sqltypes.BoolBindVariable(allowConcurrentMigration),
diff --git a/go/vt/vttablet/onlineddl/executor_test.go b/go/vt/vttablet/onlineddl/executor_test.go
index 2533f3a4b48..105da7fc1e3 100644
--- a/go/vt/vttablet/onlineddl/executor_test.go
+++ b/go/vt/vttablet/onlineddl/executor_test.go
@@ -25,6 +25,7 @@ import (
"time"
"github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
)
func TestShouldCutOverAccordingToBackoff(t *testing.T) {
@@ -164,3 +165,59 @@ func TestShouldCutOverAccordingToBackoff(t *testing.T) {
})
}
}
+
+func TestSafeMigrationCutOverThreshold(t *testing.T) {
+ require.NotZero(t, defaultCutOverThreshold)
+ require.GreaterOrEqual(t, defaultCutOverThreshold, minCutOverThreshold)
+ require.LessOrEqual(t, defaultCutOverThreshold, maxCutOverThreshold)
+
+ tcases := []struct {
+ threshold time.Duration
+ expect time.Duration
+ isErr bool
+ }{
+ {
+ threshold: 0,
+ expect: defaultCutOverThreshold,
+ },
+ {
+ threshold: 2 * time.Second,
+ expect: defaultCutOverThreshold,
+ isErr: true,
+ },
+ {
+ threshold: 75 * time.Second,
+ expect: defaultCutOverThreshold,
+ isErr: true,
+ },
+ {
+ threshold: defaultCutOverThreshold,
+ expect: defaultCutOverThreshold,
+ },
+ {
+ threshold: 5 * time.Second,
+ expect: 5 * time.Second,
+ },
+ {
+ threshold: 15 * time.Second,
+ expect: 15 * time.Second,
+ },
+ {
+ threshold: 25 * time.Second,
+ expect: 25 * time.Second,
+ },
+ }
+ for _, tcase := range tcases {
+ t.Run(tcase.threshold.String(), func(t *testing.T) {
+ threshold, err := safeMigrationCutOverThreshold(tcase.threshold)
+ if tcase.isErr {
+ assert.Error(t, err)
+ require.Equal(t, tcase.expect, defaultCutOverThreshold)
+ // And keep testing, because we then also expect the threshold to be the default
+ } else {
+ assert.NoError(t, err)
+ }
+ assert.Equal(t, tcase.expect, threshold)
+ })
+ }
+}
diff --git a/go/vt/vttablet/onlineddl/schema.go b/go/vt/vttablet/onlineddl/schema.go
index 1b120dfa58c..943a3b1df07 100644
--- a/go/vt/vttablet/onlineddl/schema.go
+++ b/go/vt/vttablet/onlineddl/schema.go
@@ -32,13 +32,14 @@ const (
migration_status,
tablet,
retain_artifacts_seconds,
+ cutover_threshold_seconds,
postpone_launch,
postpone_completion,
allow_concurrent,
reverted_uuid,
is_view
) VALUES (
- %a, %a, %a, %a, %a, %a, %a, %a, %a, NOW(6), %a, %a, %a, %a, %a, %a, %a, %a, %a
+ %a, %a, %a, %a, %a, %a, %a, %a, %a, NOW(6), %a, %a, %a, %a, %a, %a, %a, %a, %a, %a
)`
sqlSelectQueuedMigrations = `SELECT
@@ -86,6 +87,11 @@ const (
WHERE
migration_uuid=%a
`
+ sqlUpdateMigrationVreplicationLagSeconds = `UPDATE _vt.schema_migrations
+ SET vreplication_lag_seconds=%a
+ WHERE
+ migration_uuid=%a
+ `
sqlUpdateMigrationIsView = `UPDATE _vt.schema_migrations
SET is_view=%a
WHERE
@@ -181,6 +187,11 @@ const (
WHERE
migration_uuid=%a
`
+ sqlUpdateCutOverThresholdSeconds = `UPDATE _vt.schema_migrations
+ SET cutover_threshold_seconds=%a
+ WHERE
+ migration_uuid=%a
+ `
sqlUpdateLaunchMigration = `UPDATE _vt.schema_migrations
SET postpone_launch=0
WHERE
@@ -429,6 +440,7 @@ const (
removed_unique_keys,
migration_context,
retain_artifacts_seconds,
+ cutover_threshold_seconds,
is_view,
ready_to_complete,
ready_to_complete_timestamp is not null as was_ready_to_complete,
diff --git a/go/vt/vttablet/onlineddl/vrepl.go b/go/vt/vttablet/onlineddl/vrepl.go
index 26eb614e95a..2761c27c801 100644
--- a/go/vt/vttablet/onlineddl/vrepl.go
+++ b/go/vt/vttablet/onlineddl/vrepl.go
@@ -30,6 +30,7 @@ import (
"net/url"
"strconv"
"strings"
+ "time"
"vitess.io/vitess/go/mysql/collations"
"vitess.io/vitess/go/mysql/collations/charset"
@@ -96,6 +97,19 @@ func (v *VReplStream) hasError() (isTerminal bool, vreplError error) {
return false, nil
}
+// Lag returns the vreplication lag, as determined by the higher of the transaction timestamp and the time updated.
+func (s *VReplStream) Lag() time.Duration {
+ durationDiff := func(t1, t2 time.Time) time.Duration {
+ return t1.Sub(t2).Abs()
+ }
+ timeNow := time.Now()
+ timeUpdated := time.Unix(s.timeUpdated, 0)
+ // Let's look at transaction timestamp. This gets written by any ongoing
+ // writes on the server (whether on this table or any other table)
+ transactionTimestamp := time.Unix(s.transactionTimestamp, 0)
+ return max(durationDiff(timeNow, timeUpdated), durationDiff(timeNow, transactionTimestamp))
+}
+
// VRepl is an online DDL helper for VReplication based migrations (ddl_strategy="online")
type VRepl struct {
workflow string
diff --git a/go/vt/vttablet/tabletmanager/rpc_agent.go b/go/vt/vttablet/tabletmanager/rpc_agent.go
index 203e1d006ab..445d74cb930 100644
--- a/go/vt/vttablet/tabletmanager/rpc_agent.go
+++ b/go/vt/vttablet/tabletmanager/rpc_agent.go
@@ -166,6 +166,8 @@ type RPCTM interface {
RestoreFromBackup(ctx context.Context, logger logutil.Logger, request *tabletmanagerdatapb.RestoreFromBackupRequest) error
+ IsBackupRunning() bool
+
// HandleRPCPanic is to be called in a defer statement in each
// RPC input point.
HandleRPCPanic(ctx context.Context, name string, args, reply any, verbose bool, err *error)
diff --git a/go/vt/vttablet/tabletmanager/rpc_backup.go b/go/vt/vttablet/tabletmanager/rpc_backup.go
index a66264d98af..22fe72716dd 100644
--- a/go/vt/vttablet/tabletmanager/rpc_backup.go
+++ b/go/vt/vttablet/tabletmanager/rpc_backup.go
@@ -205,6 +205,10 @@ func (tm *TabletManager) RestoreFromBackup(ctx context.Context, logger logutil.L
return err
}
+func (tm *TabletManager) IsBackupRunning() bool {
+ return tm._isBackupRunning
+}
+
func (tm *TabletManager) beginBackup(backupMode string) error {
tm.mutex.Lock()
defer tm.mutex.Unlock()
diff --git a/go/vt/vttablet/tabletmanager/rpc_replication.go b/go/vt/vttablet/tabletmanager/rpc_replication.go
index 90e4d835a79..f13efa66124 100644
--- a/go/vt/vttablet/tabletmanager/rpc_replication.go
+++ b/go/vt/vttablet/tabletmanager/rpc_replication.go
@@ -46,7 +46,11 @@ func (tm *TabletManager) ReplicationStatus(ctx context.Context) (*replicationdat
if err != nil {
return nil, err
}
- return replication.ReplicationStatusToProto(status), nil
+
+ protoStatus := replication.ReplicationStatusToProto(status)
+ protoStatus.BackupRunning = tm.IsBackupRunning()
+
+ return protoStatus, nil
}
// FullStatus returns the full status of MySQL including the replication information, semi-sync information, GTID information among others
@@ -893,6 +897,7 @@ func (tm *TabletManager) StopReplicationAndGetStatus(ctx context.Context, stopRe
return StopReplicationAndGetStatusResponse{}, vterrors.Wrap(err, "before status failed")
}
before := replication.ReplicationStatusToProto(rs)
+ before.BackupRunning = tm.IsBackupRunning()
if stopReplicationMode == replicationdatapb.StopReplicationMode_IOTHREADONLY {
if !rs.IOHealthy() {
@@ -939,6 +944,7 @@ func (tm *TabletManager) StopReplicationAndGetStatus(ctx context.Context, stopRe
}, vterrors.Wrap(err, "acquiring replication status failed")
}
after := replication.ReplicationStatusToProto(rsAfter)
+ after.BackupRunning = tm.IsBackupRunning()
rs.Position = rsAfter.Position
rs.RelayLogPosition = rsAfter.RelayLogPosition
diff --git a/go/vt/vttablet/tabletmanager/rpc_vreplication_test.go b/go/vt/vttablet/tabletmanager/rpc_vreplication_test.go
index 0a5bd9f26fd..3f8bc85ac7f 100644
--- a/go/vt/vttablet/tabletmanager/rpc_vreplication_test.go
+++ b/go/vt/vttablet/tabletmanager/rpc_vreplication_test.go
@@ -305,7 +305,6 @@ func TestCreateVReplicationWorkflow(t *testing.T) {
// results returned. Followed by ensuring that SwitchTraffic
// and ReverseTraffic also work as expected.
func TestMoveTablesUnsharded(t *testing.T) {
- t.Skip("Skipping test temporarily as it is flaky on CI, pending investigation")
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
sourceKs := "sourceks"
@@ -403,6 +402,9 @@ func TestMoveTablesUnsharded(t *testing.T) {
ftc.vrdbClient.AddInvariant(getCopyStateQuery, &sqltypes.Result{})
tenv.tmc.setVReplicationExecResults(ftc.tablet, getCopyState, &sqltypes.Result{})
ftc.vrdbClient.ExpectRequest(fmt.Sprintf(readAllWorkflows, tenv.dbName, ""), &sqltypes.Result{}, nil)
+ for _, table := range defaultSchema.TableDefinitions {
+ tenv.db.AddQuery(fmt.Sprintf(getNonEmptyTableQuery, table.Name), &sqltypes.Result{})
+ }
insert := fmt.Sprintf(`%s values ('%s', 'keyspace:"%s" shard:"%s" filter:{rules:{match:"t1" filter:"select * from t1"}}', '', 0, 0, '%s', 'primary,replica,rdonly', now(), 0, 'Stopped', '%s', %d, 0, 0, '{}')`,
insertVReplicationPrefix, wf, sourceKs, sourceShard, tenv.cells[0], tenv.dbName, vreplID)
ftc.vrdbClient.ExpectRequest(insert, &sqltypes.Result{InsertID: 1}, nil)
@@ -1780,7 +1782,7 @@ func addInvariants(dbClient *binlogplayer.MockDBClient, vreplID, sourceTabletUID
"0",
))
dbClient.AddInvariant(fmt.Sprintf(updatePickedSourceTablet, cell, sourceTabletUID, vreplID), &sqltypes.Result{})
-
+ dbClient.AddInvariant("update _vt.vreplication set state='Running', message='' where id=1", &sqltypes.Result{})
}
func addMaterializeSettingsTablesToSchema(ms *vtctldatapb.MaterializeSettings, tenv *testEnv, venv *vtenv.Environment) {
diff --git a/go/vt/vttablet/tabletmanager/vdiff/framework_test.go b/go/vt/vttablet/tabletmanager/vdiff/framework_test.go
index 33a0da8e23f..7d4cdb78c20 100644
--- a/go/vt/vttablet/tabletmanager/vdiff/framework_test.go
+++ b/go/vt/vttablet/tabletmanager/vdiff/framework_test.go
@@ -396,6 +396,10 @@ func (dbc *realDBClient) Close() {
dbc.conn.Close()
}
+func (dbc *realDBClient) IsClosed() bool {
+ return dbc.conn.IsClosed()
+}
+
func (dbc *realDBClient) ExecuteFetch(query string, maxrows int) (*sqltypes.Result, error) {
// Use Clone() because the contents of memory region referenced by
// string can change when clients (e.g. vcopier) use unsafe string methods.
diff --git a/go/vt/vttablet/tabletmanager/vreplication/external_connector_test.go b/go/vt/vttablet/tabletmanager/vreplication/external_connector_test.go
index e00a5578171..c671d2a086d 100644
--- a/go/vt/vttablet/tabletmanager/vreplication/external_connector_test.go
+++ b/go/vt/vttablet/tabletmanager/vreplication/external_connector_test.go
@@ -163,8 +163,7 @@ func TestExternalConnectorPlay(t *testing.T) {
expectDBClientAndVreplicationQueries(t, []string{
"begin",
- "insert into tab1(id,val) values (1,_binary'a')",
- "insert into tab1(id,val) values (2,_binary'b')",
+ "insert into tab1(id,val) values (1,_binary'a'), (2,_binary'b')",
"/update _vt.vreplication set pos=",
"commit",
}, pos)
diff --git a/go/vt/vttablet/tabletmanager/vreplication/framework_test.go b/go/vt/vttablet/tabletmanager/vreplication/framework_test.go
index 12d20e3a867..12a05a69dbc 100644
--- a/go/vt/vttablet/tabletmanager/vreplication/framework_test.go
+++ b/go/vt/vttablet/tabletmanager/vreplication/framework_test.go
@@ -141,7 +141,6 @@ func setup(ctx context.Context) (func(), int) {
resetBinlogClient()
vttablet.InitVReplicationConfigDefaults()
- vttablet.DefaultVReplicationConfig.ExperimentalFlags = 0
// Engines cannot be initialized in testenv because it introduces circular dependencies.
streamerEngine = vstreamer.NewEngine(env.TabletEnv, env.SrvTopo, env.SchemaEngine, nil, env.Cells[0])
@@ -479,6 +478,10 @@ func (dbc *realDBClient) Close() {
dbc.conn.Close()
}
+func (dbc *realDBClient) IsClosed() bool {
+ return dbc.conn.IsClosed()
+}
+
func (dbc *realDBClient) ExecuteFetch(query string, maxrows int) (*sqltypes.Result, error) {
// Use Clone() because the contents of memory region referenced by
// string can change when clients (e.g. vcopier) use unsafe string methods.
diff --git a/go/vt/vttablet/tabletmanager/vreplication/replicator_plan.go b/go/vt/vttablet/tabletmanager/vreplication/replicator_plan.go
index 6a416cb4414..62d6166b5ca 100644
--- a/go/vt/vttablet/tabletmanager/vreplication/replicator_plan.go
+++ b/go/vt/vttablet/tabletmanager/vreplication/replicator_plan.go
@@ -618,49 +618,40 @@ func valsEqual(v1, v2 sqltypes.Value) bool {
func (tp *TablePlan) appendFromRow(buf *bytes2.Buffer, row *querypb.Row) error {
bindLocations := tp.BulkInsertValues.BindLocations()
if len(tp.Fields) < len(bindLocations) {
- return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "wrong number of fields: got %d fields for %d bind locations ",
+ return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "wrong number of fields: got %d fields for %d bind locations",
len(tp.Fields), len(bindLocations))
}
- type colInfo struct {
- typ querypb.Type
- length int64
- offset int64
- field *querypb.Field
- }
- rowInfo := make([]*colInfo, 0)
-
- offset := int64(0)
- for i, field := range tp.Fields { // collect info required for fields to be bound
- length := row.Lengths[i]
- if !tp.FieldsToSkip[strings.ToLower(field.Name)] {
- rowInfo = append(rowInfo, &colInfo{
- typ: field.Type,
- length: length,
- offset: offset,
- field: field,
- })
- }
- if length > 0 {
- offset += row.Lengths[i]
+ // Bind field values to locations.
+ var (
+ offset int64
+ offsetQuery int
+ fieldsIndex int
+ field *querypb.Field
+ )
+ for i, loc := range bindLocations {
+ field = tp.Fields[fieldsIndex]
+ length := row.Lengths[fieldsIndex]
+ for tp.FieldsToSkip[strings.ToLower(field.Name)] {
+ if length > 0 {
+ offset += length
+ }
+ fieldsIndex++
+ field = tp.Fields[fieldsIndex]
+ length = row.Lengths[fieldsIndex]
}
- }
- // bind field values to locations
- var offsetQuery int
- for i, loc := range bindLocations {
- col := rowInfo[i]
buf.WriteString(tp.BulkInsertValues.Query[offsetQuery:loc.Offset])
- typ := col.typ
+ typ := field.Type
switch typ {
case querypb.Type_TUPLE:
return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "unexpected Type_TUPLE for value %d", i)
case querypb.Type_JSON:
- if col.length < 0 { // An SQL NULL and not an actual JSON value
+ if length < 0 { // An SQL NULL and not an actual JSON value
buf.WriteString(sqltypes.NullStr)
} else { // A JSON value (which may be a JSON null literal value)
- buf2 := row.Values[col.offset : col.offset+col.length]
+ buf2 := row.Values[offset : offset+length]
vv, err := vjson.MarshalSQLValue(buf2)
if err != nil {
return err
@@ -668,16 +659,16 @@ func (tp *TablePlan) appendFromRow(buf *bytes2.Buffer, row *querypb.Row) error {
buf.WriteString(vv.RawStr())
}
default:
- if col.length < 0 {
+ if length < 0 {
// -1 means a null variable; serialize it directly
buf.WriteString(sqltypes.NullStr)
} else {
- raw := row.Values[col.offset : col.offset+col.length]
+ raw := row.Values[offset : offset+length]
var vv sqltypes.Value
- if conversion, ok := tp.ConvertCharset[col.field.Name]; ok && col.length > 0 {
+ if conversion, ok := tp.ConvertCharset[field.Name]; ok && length > 0 {
// Non-null string value, for which we have a charset conversion instruction
- out, err := tp.convertStringCharset(raw, conversion, col.field.Name)
+ out, err := tp.convertStringCharset(raw, conversion, field.Name)
if err != nil {
return err
}
@@ -690,6 +681,10 @@ func (tp *TablePlan) appendFromRow(buf *bytes2.Buffer, row *querypb.Row) error {
}
}
offsetQuery = loc.Offset + loc.Length
+ if length > 0 {
+ offset += length
+ }
+ fieldsIndex++
}
buf.WriteString(tp.BulkInsertValues.Query[offsetQuery:])
return nil
diff --git a/go/vt/vttablet/tabletmanager/vreplication/replicator_plan_test.go b/go/vt/vttablet/tabletmanager/vreplication/replicator_plan_test.go
index 644b4585914..09ace916f11 100644
--- a/go/vt/vttablet/tabletmanager/vreplication/replicator_plan_test.go
+++ b/go/vt/vttablet/tabletmanager/vreplication/replicator_plan_test.go
@@ -21,17 +21,18 @@ import (
"strings"
"testing"
- vttablet "vitess.io/vitess/go/vt/vttablet/common"
-
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
+ "vitess.io/vitess/go/bytes2"
"vitess.io/vitess/go/mysql/collations"
"vitess.io/vitess/go/sqltypes"
"vitess.io/vitess/go/vt/binlog/binlogplayer"
"vitess.io/vitess/go/vt/sqlparser"
binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata"
+ querypb "vitess.io/vitess/go/vt/proto/query"
+ vttablet "vitess.io/vitess/go/vt/vttablet/common"
)
type TestReplicatorPlan struct {
@@ -829,3 +830,137 @@ func TestBuildPlayerPlanExclude(t *testing.T) {
wantPlan, _ := json.Marshal(want)
assert.Equal(t, string(gotPlan), string(wantPlan))
}
+
+func TestAppendFromRow(t *testing.T) {
+ testCases := []struct {
+ name string
+ tp *TablePlan
+ row *querypb.Row
+ want string
+ wantErr string
+ }{
+ {
+ name: "simple",
+ tp: &TablePlan{
+ BulkInsertValues: sqlparser.BuildParsedQuery("values (%a, %a, %a)",
+ ":c1", ":c2", ":c3",
+ ),
+ Fields: []*querypb.Field{
+ {Name: "c1", Type: querypb.Type_INT32},
+ {Name: "c2", Type: querypb.Type_INT32},
+ {Name: "c3", Type: querypb.Type_INT32},
+ },
+ },
+ row: sqltypes.RowToProto3(
+ []sqltypes.Value{
+ sqltypes.NewInt64(1),
+ sqltypes.NewInt64(2),
+ sqltypes.NewInt64(3),
+ },
+ ),
+ want: "values (1, 2, 3)",
+ },
+ {
+ name: "too few fields",
+ tp: &TablePlan{
+ BulkInsertValues: sqlparser.BuildParsedQuery("values (%a, %a, %a)",
+ ":c1", ":c2", ":c3",
+ ),
+ Fields: []*querypb.Field{
+ {Name: "c1", Type: querypb.Type_INT32},
+ {Name: "c2", Type: querypb.Type_INT32},
+ },
+ },
+ wantErr: "wrong number of fields: got 2 fields for 3 bind locations",
+ },
+ {
+ name: "skip half",
+ tp: &TablePlan{
+ BulkInsertValues: sqlparser.BuildParsedQuery("values (%a, %a, %a, %a)",
+ ":c1", ":c2", ":c4", ":c8",
+ ),
+ Fields: []*querypb.Field{
+ {Name: "c1", Type: querypb.Type_INT32},
+ {Name: "c2", Type: querypb.Type_INT32},
+ {Name: "c3", Type: querypb.Type_INT32},
+ {Name: "c4", Type: querypb.Type_INT32},
+ {Name: "c5", Type: querypb.Type_INT32},
+ {Name: "c6", Type: querypb.Type_INT32},
+ {Name: "c7", Type: querypb.Type_INT32},
+ {Name: "c8", Type: querypb.Type_INT32},
+ },
+ FieldsToSkip: map[string]bool{
+ "c3": true,
+ "c5": true,
+ "c6": true,
+ "c7": true,
+ },
+ },
+ row: sqltypes.RowToProto3(
+ []sqltypes.Value{
+ sqltypes.NewInt64(1),
+ sqltypes.NewInt64(2),
+ sqltypes.NewInt64(3),
+ sqltypes.NewInt64(4),
+ sqltypes.NewInt64(5),
+ sqltypes.NewInt64(6),
+ sqltypes.NewInt64(7),
+ sqltypes.NewInt64(8),
+ },
+ ),
+ want: "values (1, 2, 4, 8)",
+ },
+ {
+ name: "skip all but one",
+ tp: &TablePlan{
+ BulkInsertValues: sqlparser.BuildParsedQuery("values (%a)",
+ ":c4",
+ ),
+ Fields: []*querypb.Field{
+ {Name: "c1", Type: querypb.Type_INT32},
+ {Name: "c2", Type: querypb.Type_INT32},
+ {Name: "c3", Type: querypb.Type_INT32},
+ {Name: "c4", Type: querypb.Type_INT32},
+ {Name: "c5", Type: querypb.Type_INT32},
+ {Name: "c6", Type: querypb.Type_INT32},
+ {Name: "c7", Type: querypb.Type_INT32},
+ {Name: "c8", Type: querypb.Type_INT32},
+ },
+ FieldsToSkip: map[string]bool{
+ "c1": true,
+ "c2": true,
+ "c3": true,
+ "c5": true,
+ "c6": true,
+ "c7": true,
+ "c8": true,
+ },
+ },
+ row: sqltypes.RowToProto3(
+ []sqltypes.Value{
+ sqltypes.NewInt64(1),
+ sqltypes.NewInt64(2),
+ sqltypes.NewInt64(3),
+ sqltypes.NewInt64(4),
+ sqltypes.NewInt64(5),
+ sqltypes.NewInt64(6),
+ sqltypes.NewInt64(7),
+ sqltypes.NewInt64(8),
+ },
+ ),
+ want: "values (4)",
+ },
+ }
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ bb := &bytes2.Buffer{}
+ err := tc.tp.appendFromRow(bb, tc.row)
+ if tc.wantErr != "" {
+ require.EqualError(t, err, tc.wantErr)
+ } else {
+ require.NoError(t, err)
+ require.Equal(t, tc.want, bb.String())
+ }
+ })
+ }
+}
diff --git a/go/vt/vttablet/tabletmanager/vreplication/utils.go b/go/vt/vttablet/tabletmanager/vreplication/utils.go
index bb1c469cc93..67b52c56261 100644
--- a/go/vt/vttablet/tabletmanager/vreplication/utils.go
+++ b/go/vt/vttablet/tabletmanager/vreplication/utils.go
@@ -232,6 +232,24 @@ func isUnrecoverableError(err error) bool {
sqlerror.ERWrongValueCountOnRow:
log.Errorf("Got unrecoverable error: %v", sqlErr)
return true
+ case sqlerror.ERErrorDuringCommit:
+ switch sqlErr.HaErrorCode() {
+ case
+ 0, // Not really a HA error.
+ sqlerror.HaErrLockDeadlock,
+ sqlerror.HaErrLockTableFull,
+ sqlerror.HaErrLockWaitTimeout,
+ sqlerror.HaErrNotInLockPartitions,
+ sqlerror.HaErrQueryInterrupted,
+ sqlerror.HaErrRolledBack,
+ sqlerror.HaErrTooManyConcurrentTrxs,
+ sqlerror.HaErrUndoRecTooBig:
+ // These are recoverable errors.
+ return false
+ default:
+ log.Errorf("Got unrecoverable error: %v", sqlErr)
+ return true
+ }
}
return false
}
diff --git a/go/vt/vttablet/tabletmanager/vreplication/utils_test.go b/go/vt/vttablet/tabletmanager/vreplication/utils_test.go
index 69a57c34341..15093e299fc 100644
--- a/go/vt/vttablet/tabletmanager/vreplication/utils_test.go
+++ b/go/vt/vttablet/tabletmanager/vreplication/utils_test.go
@@ -152,6 +152,16 @@ func TestIsUnrecoverableError(t *testing.T) {
err: sqlerror.NewSQLError(sqlerror.ERDataOutOfRange, "data out of range", "test"),
expected: true,
},
+ {
+ name: "SQL error with HaErrDiskFullNowait error",
+ err: sqlerror.NewSQLError(sqlerror.ERErrorDuringCommit, "unknown", "ERROR HY000: Got error 204 - 'No more room in disk' during COMMIT"),
+ expected: true,
+ },
+ {
+ name: "SQL error with HaErrLockDeadlock error",
+ err: sqlerror.NewSQLError(sqlerror.ERErrorDuringCommit, "unknown", "ERROR HY000: Got error 149 - 'Lock deadlock; Retry transaction' during COMMIT"),
+ expected: false,
+ },
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
diff --git a/go/vt/vttablet/tabletmanager/vreplication/vcopier_test.go b/go/vt/vttablet/tabletmanager/vreplication/vcopier_test.go
index a95e0bf17c5..a7e4794ba76 100644
--- a/go/vt/vttablet/tabletmanager/vreplication/vcopier_test.go
+++ b/go/vt/vttablet/tabletmanager/vreplication/vcopier_test.go
@@ -684,6 +684,14 @@ func testPlayerCopyBigTable(t *testing.T) {
reset := vstreamer.AdjustPacketSize(1)
defer reset()
+ // The test is written to match the behavior w/o
+ // VReplicationExperimentalFlagOptimizeInserts enabled.
+ origExperimentalFlags := vttablet.DefaultVReplicationConfig.ExperimentalFlags
+ vttablet.DefaultVReplicationConfig.ExperimentalFlags = 0
+ defer func() {
+ vttablet.DefaultVReplicationConfig.ExperimentalFlags = origExperimentalFlags
+ }()
+
savedCopyPhaseDuration := vttablet.DefaultVReplicationConfig.CopyPhaseDuration
// copyPhaseDuration should be low enough to have time to send one row.
vttablet.DefaultVReplicationConfig.CopyPhaseDuration = 500 * time.Millisecond
@@ -814,6 +822,14 @@ func testPlayerCopyWildcardRule(t *testing.T) {
reset := vstreamer.AdjustPacketSize(1)
defer reset()
+ // The test is written to match the behavior w/o
+ // VReplicationExperimentalFlagOptimizeInserts enabled.
+ origExperimentalFlags := vttablet.DefaultVReplicationConfig.ExperimentalFlags
+ vttablet.DefaultVReplicationConfig.ExperimentalFlags = 0
+ defer func() {
+ vttablet.DefaultVReplicationConfig.ExperimentalFlags = origExperimentalFlags
+ }()
+
savedCopyPhaseDuration := vttablet.DefaultVReplicationConfig.CopyPhaseDuration
// copyPhaseDuration should be low enough to have time to send one row.
vttablet.DefaultVReplicationConfig.CopyPhaseDuration = 500 * time.Millisecond
diff --git a/go/vt/vttablet/tabletmanager/vreplication/vdbclient.go b/go/vt/vttablet/tabletmanager/vreplication/vdbclient.go
index b8339cdf874..63538be881d 100644
--- a/go/vt/vttablet/tabletmanager/vreplication/vdbclient.go
+++ b/go/vt/vttablet/tabletmanager/vreplication/vdbclient.go
@@ -30,6 +30,8 @@ import (
"vitess.io/vitess/go/vt/vterrors"
)
+const beginStmtLen = int64(len("begin;"))
+
// vdbClient is a wrapper on binlogplayer.DBClient.
// It allows us to retry a failed transactions on lock errors.
type vdbClient struct {
@@ -56,16 +58,19 @@ func (vc *vdbClient) Begin() error {
if vc.InTransaction {
return nil
}
- if err := vc.DBClient.Begin(); err != nil {
- return err
+ if vc.maxBatchSize > 0 {
+ // We are batching the contents of the transaction, which
+ // starts with the BEGIN and ends with the COMMIT, so we
+ // do not send a BEGIN down the wire ahead of time.
+ vc.queriesPos = int64(len(vc.queries))
+ vc.batchSize = beginStmtLen
+ } else {
+ // We're not batching so we start the transaction here
+ // by sending the BEGIN down the wire.
+ if err := vc.DBClient.Begin(); err != nil {
+ return err
+ }
}
-
- // If we're batching, we only batch the contents of the
- // transaction, which starts with the begin and ends with
- // the commit.
- vc.queriesPos = int64(len(vc.queries))
- vc.batchSize = 6 // begin and semicolon
-
vc.queries = append(vc.queries, "begin")
vc.InTransaction = true
vc.startTime = time.Now()
@@ -171,7 +176,7 @@ func (vc *vdbClient) Execute(query string) (*sqltypes.Result, error) {
func (vc *vdbClient) ExecuteWithRetry(ctx context.Context, query string) (*sqltypes.Result, error) {
qr, err := vc.Execute(query)
for err != nil {
- if sqlErr, ok := err.(*sqlerror.SQLError); ok && sqlErr.Number() == sqlerror.ERLockDeadlock || sqlErr.Number() == sqlerror.ERLockWaitTimeout {
+ if sqlErr, ok := err.(*sqlerror.SQLError); ok && (sqlErr.Number() == sqlerror.ERLockDeadlock || sqlErr.Number() == sqlerror.ERLockWaitTimeout) {
log.Infof("retryable error: %v, waiting for %v and retrying", sqlErr, dbLockRetryDelay)
if err := vc.Rollback(); err != nil {
return nil, err
diff --git a/go/vt/vttablet/tabletmanager/vreplication/vplayer.go b/go/vt/vttablet/tabletmanager/vreplication/vplayer.go
index db2f3f341ac..98e36119622 100644
--- a/go/vt/vttablet/tabletmanager/vreplication/vplayer.go
+++ b/go/vt/vttablet/tabletmanager/vreplication/vplayer.go
@@ -133,7 +133,8 @@ func newVPlayer(vr *vreplicator, settings binlogplayer.VRSettings, copyState map
return vr.dbClient.Commit()
}
batchMode := false
- if vr.workflowConfig.ExperimentalFlags&vttablet.VReplicationExperimentalFlagVPlayerBatching != 0 {
+ // We only do batching in the running/replicating phase.
+ if len(copyState) == 0 && vr.workflowConfig.ExperimentalFlags&vttablet.VReplicationExperimentalFlagVPlayerBatching != 0 {
batchMode = true
}
if batchMode {
diff --git a/go/vt/vttablet/tabletmanager/vreplication/vplayer_flaky_test.go b/go/vt/vttablet/tabletmanager/vreplication/vplayer_flaky_test.go
index 0cc568c1cf1..50d93e60e5a 100644
--- a/go/vt/vttablet/tabletmanager/vreplication/vplayer_flaky_test.go
+++ b/go/vt/vttablet/tabletmanager/vreplication/vplayer_flaky_test.go
@@ -628,7 +628,6 @@ func TestPlayerStatementModeWithFilterAndErrorHandling(t *testing.T) {
// It does not work when filter is enabled
output := qh.Expect(
- "begin",
"rollback",
fmt.Sprintf("/update _vt.vreplication set message='%s", expectedMsg),
)
@@ -975,8 +974,7 @@ func TestPlayerFilters(t *testing.T) {
input: "insert into src4 values (1,100,'aaa'),(2,200,'bbb'),(3,100,'ccc')",
output: qh.Expect(
"begin",
- "insert into dst4(id1,val) values (1,_binary'aaa')",
- "insert into dst4(id1,val) values (3,_binary'ccc')",
+ "insert into dst4(id1,val) values (1,_binary'aaa'), (3,_binary'ccc')",
"/update _vt.vreplication set pos=",
"commit",
),
@@ -987,8 +985,7 @@ func TestPlayerFilters(t *testing.T) {
input: "insert into src5 values (1,100,'abc'),(2,200,'xyz'),(3,100,'xyz'),(4,300,'abc'),(5,200,'xyz')",
output: qh.Expect(
"begin",
- "insert into dst5(id1,val) values (1,_binary'abc')",
- "insert into dst5(id1,val) values (4,_binary'abc')",
+ "insert into dst5(id1,val) values (1,_binary'abc'), (4,_binary'abc')",
"/update _vt.vreplication set pos=",
"commit",
),
@@ -1495,9 +1492,7 @@ func TestPlayerRowMove(t *testing.T) {
})
expectDBClientQueries(t, qh.Expect(
"begin",
- "insert into dst(val1,sval2,rcount) values (1,ifnull(1, 0),1) on duplicate key update sval2=sval2+ifnull(values(sval2), 0), rcount=rcount+1",
- "insert into dst(val1,sval2,rcount) values (2,ifnull(2, 0),1) on duplicate key update sval2=sval2+ifnull(values(sval2), 0), rcount=rcount+1",
- "insert into dst(val1,sval2,rcount) values (2,ifnull(3, 0),1) on duplicate key update sval2=sval2+ifnull(values(sval2), 0), rcount=rcount+1",
+ "insert into dst(val1,sval2,rcount) values (1,ifnull(1, 0),1), (2,ifnull(2, 0),1), (2,ifnull(3, 0),1) on duplicate key update sval2=sval2+ifnull(values(sval2), 0), rcount=rcount+1",
"/update _vt.vreplication set pos=",
"commit",
))
@@ -1505,7 +1500,7 @@ func TestPlayerRowMove(t *testing.T) {
{"1", "1", "1"},
{"2", "5", "2"},
})
- validateQueryCountStat(t, "replicate", 3)
+ validateQueryCountStat(t, "replicate", 1)
execStatements(t, []string{
"update src set val1=1, val2=4 where id=3",
@@ -1521,7 +1516,7 @@ func TestPlayerRowMove(t *testing.T) {
{"1", "5", "2"},
{"2", "2", "1"},
})
- validateQueryCountStat(t, "replicate", 5)
+ validateQueryCountStat(t, "replicate", 3)
}
func TestPlayerTypes(t *testing.T) {
@@ -2179,6 +2174,14 @@ func TestPlayerSplitTransaction(t *testing.T) {
func TestPlayerLockErrors(t *testing.T) {
defer deleteTablet(addTablet(100))
+ // The immediate retry behavior does not apply when doing
+ // VPlayer Batching.
+ origExperimentalFlags := vttablet.DefaultVReplicationConfig.ExperimentalFlags
+ vttablet.DefaultVReplicationConfig.ExperimentalFlags = 0
+ defer func() {
+ vttablet.DefaultVReplicationConfig.ExperimentalFlags = origExperimentalFlags
+ }()
+
execStatements(t, []string{
"create table t1(id int, val varchar(128), primary key(id))",
fmt.Sprintf("create table %s.t1(id int, val varchar(128), primary key(id))", vrepldb),
@@ -2258,6 +2261,14 @@ func TestPlayerLockErrors(t *testing.T) {
func TestPlayerCancelOnLock(t *testing.T) {
defer deleteTablet(addTablet(100))
+ // The immediate retry behavior does not apply when doing
+ // VPlayer Batching.
+ origExperimentalFlags := vttablet.DefaultVReplicationConfig.ExperimentalFlags
+ vttablet.DefaultVReplicationConfig.ExperimentalFlags = 0
+ defer func() {
+ vttablet.DefaultVReplicationConfig.ExperimentalFlags = origExperimentalFlags
+ }()
+
execStatements(t, []string{
"create table t1(id int, val varchar(128), primary key(id))",
fmt.Sprintf("create table %s.t1(id int, val varchar(128), primary key(id))", vrepldb),
diff --git a/go/vt/vttablet/tabletmanager/vreplication/vreplicator.go b/go/vt/vttablet/tabletmanager/vreplication/vreplicator.go
index 0c5c0b5b334..42701288a44 100644
--- a/go/vt/vttablet/tabletmanager/vreplication/vreplicator.go
+++ b/go/vt/vttablet/tabletmanager/vreplication/vreplicator.go
@@ -186,11 +186,18 @@ func newVReplicator(id int32, source *binlogdatapb.BinlogSource, sourceVStreamer
// code.
func (vr *vreplicator) Replicate(ctx context.Context) error {
err := vr.replicate(ctx)
- if err != nil {
- if err := vr.setMessage(err.Error()); err != nil {
- binlogplayer.LogError("Failed to set error state", err)
+ if err == nil {
+ return nil
+ }
+ if vr.dbClient.IsClosed() {
+ // Connection was possibly terminated by the server. We should renew it.
+ if cerr := vr.dbClient.Connect(); cerr != nil {
+ return vterrors.Wrapf(err, "failed to reconnect to the database: %v", cerr)
}
}
+ if err := vr.setMessage(err.Error()); err != nil {
+ binlogplayer.LogError("Failed to set error state", err)
+ }
return err
}
@@ -501,8 +508,14 @@ func (vr *vreplicator) setState(state binlogdatapb.VReplicationWorkflowState, me
}
vr.stats.State.Store(state.String())
query := fmt.Sprintf("update _vt.vreplication set state='%v', message=%v where id=%v", state, encodeString(binlogplayer.MessageTruncate(message)), vr.id)
- if _, err := vr.dbClient.ExecuteFetch(query, 1); err != nil {
- return fmt.Errorf("could not set state: %v: %v", query, err)
+ // If we're batching a transaction, then include the state update
+ // in the current transaction batch.
+ if vr.dbClient.InTransaction && vr.dbClient.maxBatchSize > 0 {
+ vr.dbClient.AddQueryToTrxBatch(query)
+ } else { // Otherwise, send it down the wire
+ if _, err := vr.dbClient.ExecuteFetch(query, 1); err != nil {
+ return fmt.Errorf("could not set state: %v: %v", query, err)
+ }
}
if state == vr.state {
return nil
diff --git a/go/vt/vttablet/tabletserver/debugenv.go b/go/vt/vttablet/tabletserver/debugenv.go
index 54cf09db7d6..6f1ea854ea9 100644
--- a/go/vt/vttablet/tabletserver/debugenv.go
+++ b/go/vt/vttablet/tabletserver/debugenv.go
@@ -23,9 +23,10 @@ import (
"html"
"net/http"
"strconv"
- "text/template"
"time"
+ "github.com/google/safehtml/template"
+
"vitess.io/vitess/go/acl"
"vitess.io/vitess/go/vt/log"
)
@@ -70,90 +71,131 @@ func debugEnvHandler(tsv *TabletServer, w http.ResponseWriter, r *http.Request)
return
}
+ switch r.Method {
+ case http.MethodPost:
+ handlePost(tsv, w, r)
+ case http.MethodGet:
+ handleGet(tsv, w, r)
+ default:
+ http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
+ }
+}
+
+func handlePost(tsv *TabletServer, w http.ResponseWriter, r *http.Request) {
+ varname := r.FormValue("varname")
+ value := r.FormValue("value")
+
var msg string
- if r.Method == "POST" {
- varname := r.FormValue("varname")
- value := r.FormValue("value")
- setIntVal := func(f func(int)) {
- ival, err := strconv.Atoi(value)
- if err != nil {
- msg = fmt.Sprintf("Failed setting value for %v: %v", varname, err)
- return
- }
- f(ival)
- msg = fmt.Sprintf("Setting %v to: %v", varname, value)
+ if varname == "" || value == "" {
+ http.Error(w, "Missing varname or value", http.StatusBadRequest)
+ return
+ }
+
+ setIntVal := func(f func(int)) error {
+ ival, err := strconv.Atoi(value)
+ if err != nil {
+ return fmt.Errorf("invalid int value for %v: %v", varname, err)
}
- setIntValCtx := func(f func(context.Context, int) error) {
- ival, err := strconv.Atoi(value)
- if err == nil {
- err = f(r.Context(), ival)
- if err == nil {
- msg = fmt.Sprintf("Setting %v to: %v", varname, value)
- return
- }
- }
- msg = fmt.Sprintf("Failed setting value for %v: %v", varname, err)
+ f(ival)
+ msg = fmt.Sprintf("Setting %v to: %v", varname, value)
+ return nil
+ }
+
+ setIntValCtx := func(f func(context.Context, int) error) error {
+ ival, err := strconv.Atoi(value)
+ if err == nil {
+ err = f(r.Context(), ival)
}
- setInt64Val := func(f func(int64)) {
- ival, err := strconv.ParseInt(value, 10, 64)
- if err != nil {
- msg = fmt.Sprintf("Failed setting value for %v: %v", varname, err)
- return
- }
- f(ival)
- msg = fmt.Sprintf("Setting %v to: %v", varname, value)
+ if err != nil {
+ return fmt.Errorf("failed setting value for %v: %v", varname, err)
}
- setDurationVal := func(f func(time.Duration)) {
- durationVal, err := time.ParseDuration(value)
- if err != nil {
- msg = fmt.Sprintf("Failed setting value for %v: %v", varname, err)
- return
- }
- f(durationVal)
- msg = fmt.Sprintf("Setting %v to: %v", varname, value)
+ msg = fmt.Sprintf("Setting %v to: %v", varname, value)
+ return nil
+ }
+
+ setInt64Val := func(f func(int64)) error {
+ ival, err := strconv.ParseInt(value, 10, 64)
+ if err != nil {
+ return fmt.Errorf("invalid int64 value for %v: %v", varname, err)
}
- setFloat64Val := func(f func(float64)) {
- fval, err := strconv.ParseFloat(value, 64)
- if err != nil {
- msg = fmt.Sprintf("Failed setting value for %v: %v", varname, err)
- return
- }
- f(fval)
- msg = fmt.Sprintf("Setting %v to: %v", varname, value)
+ f(ival)
+ msg = fmt.Sprintf("Setting %v to: %v", varname, value)
+ return nil
+ }
+
+ setDurationVal := func(f func(time.Duration)) error {
+ durationVal, err := time.ParseDuration(value)
+ if err != nil {
+ return fmt.Errorf("invalid duration value for %v: %v", varname, err)
}
- switch varname {
- case "PoolSize":
- setIntValCtx(tsv.SetPoolSize)
- case "StreamPoolSize":
- setIntValCtx(tsv.SetStreamPoolSize)
- case "TxPoolSize":
- setIntValCtx(tsv.SetTxPoolSize)
- case "MaxResultSize":
- setIntVal(tsv.SetMaxResultSize)
- case "WarnResultSize":
- setIntVal(tsv.SetWarnResultSize)
- case "RowStreamerMaxInnoDBTrxHistLen":
- setInt64Val(func(val int64) { tsv.Config().RowStreamer.MaxInnoDBTrxHistLen = val })
- case "RowStreamerMaxMySQLReplLagSecs":
- setInt64Val(func(val int64) { tsv.Config().RowStreamer.MaxMySQLReplLagSecs = val })
- case "UnhealthyThreshold":
- setDurationVal(func(d time.Duration) { tsv.Config().Healthcheck.UnhealthyThreshold = d })
- setDurationVal(tsv.hs.SetUnhealthyThreshold)
- setDurationVal(tsv.sm.SetUnhealthyThreshold)
- case "ThrottleMetricThreshold":
- setFloat64Val(tsv.SetThrottleMetricThreshold)
- case "Consolidator":
- tsv.SetConsolidatorMode(value)
- msg = fmt.Sprintf("Setting %v to: %v", varname, value)
+ f(durationVal)
+ msg = fmt.Sprintf("Setting %v to: %v", varname, value)
+ return nil
+ }
+
+ setFloat64Val := func(f func(float64)) error {
+ fval, err := strconv.ParseFloat(value, 64)
+ if err != nil {
+ return fmt.Errorf("invalid float64 value for %v: %v", varname, err)
}
+ f(fval)
+ msg = fmt.Sprintf("Setting %v to: %v", varname, value)
+ return nil
+ }
+
+ var err error
+ switch varname {
+ case "ReadPoolSize":
+ err = setIntValCtx(tsv.SetPoolSize)
+ case "StreamPoolSize":
+ err = setIntValCtx(tsv.SetStreamPoolSize)
+ case "TransactionPoolSize":
+ err = setIntValCtx(tsv.SetTxPoolSize)
+ case "MaxResultSize":
+ err = setIntVal(tsv.SetMaxResultSize)
+ case "WarnResultSize":
+ err = setIntVal(tsv.SetWarnResultSize)
+ case "RowStreamerMaxInnoDBTrxHistLen":
+ err = setInt64Val(func(val int64) { tsv.Config().RowStreamer.MaxInnoDBTrxHistLen = val })
+ case "RowStreamerMaxMySQLReplLagSecs":
+ err = setInt64Val(func(val int64) { tsv.Config().RowStreamer.MaxMySQLReplLagSecs = val })
+ case "UnhealthyThreshold":
+ err = setDurationVal(func(d time.Duration) { tsv.Config().Healthcheck.UnhealthyThreshold = d })
+ case "ThrottleMetricThreshold":
+ err = setFloat64Val(tsv.SetThrottleMetricThreshold)
+ case "Consolidator":
+ tsv.SetConsolidatorMode(value)
+ msg = fmt.Sprintf("Setting %v to: %v", varname, value)
+ }
+
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusBadRequest)
+ return
}
+ vars := getVars(tsv)
+ sendResponse(r, w, vars, msg)
+}
+
+func handleGet(tsv *TabletServer, w http.ResponseWriter, r *http.Request) {
+ vars := getVars(tsv)
+ sendResponse(r, w, vars, "")
+}
+
+func sendResponse(r *http.Request, w http.ResponseWriter, vars []envValue, msg string) {
+ format := r.FormValue("format")
+ if format == "json" {
+ respondWithJSON(w, vars, msg)
+ return
+ }
+ respondWithHTML(w, vars, msg)
+}
+
+func getVars(tsv *TabletServer) []envValue {
var vars []envValue
- vars = addVar(vars, "PoolSize", tsv.PoolSize)
+ vars = addVar(vars, "ReadPoolSize", tsv.PoolSize)
vars = addVar(vars, "StreamPoolSize", tsv.StreamPoolSize)
- vars = addVar(vars, "TxPoolSize", tsv.TxPoolSize)
- vars = addVar(vars, "QueryCacheCapacity", tsv.QueryPlanCacheCap) // QueryCacheCapacity is deprecated in v21, it is replaced by QueryEnginePlanCacheCapacity
- vars = addVar(vars, "QueryEnginePlanCacheCapacity", tsv.QueryPlanCacheCap)
+ vars = addVar(vars, "TransactionPoolSize", tsv.TxPoolSize)
vars = addVar(vars, "MaxResultSize", tsv.MaxResultSize)
vars = addVar(vars, "WarnResultSize", tsv.WarnResultSize)
vars = addVar(vars, "RowStreamerMaxInnoDBTrxHistLen", func() int64 { return tsv.Config().RowStreamer.MaxInnoDBTrxHistLen })
@@ -165,18 +207,22 @@ func debugEnvHandler(tsv *TabletServer, w http.ResponseWriter, r *http.Request)
Value: tsv.ConsolidatorMode(),
})
- format := r.FormValue("format")
- if format == "json" {
- mvars := make(map[string]string)
- for _, v := range vars {
- mvars[v.Name] = v.Value
- }
- w.Header().Set("Content-Type", "application/json")
- _ = json.NewEncoder(w).Encode(mvars)
- return
+ return vars
+}
+
+func respondWithJSON(w http.ResponseWriter, vars []envValue, msg string) {
+ mvars := make(map[string]string)
+ for _, v := range vars {
+ mvars[v.Name] = v.Value
}
+ if msg != "" {
+ mvars["ResponseMessage"] = msg
+ }
+ w.Header().Set("Content-Type", "application/json")
+ _ = json.NewEncoder(w).Encode(mvars)
+}
- // gridTable is reused from twopcz.go.
+func respondWithHTML(w http.ResponseWriter, vars []envValue, msg string) {
w.Write(gridTable)
w.Write([]byte("Internal Variables
\n"))
if msg != "" {
diff --git a/go/vt/vttablet/tabletserver/dt_executor_test.go b/go/vt/vttablet/tabletserver/dt_executor_test.go
index d5322f352f9..b21667392d6 100644
--- a/go/vt/vttablet/tabletserver/dt_executor_test.go
+++ b/go/vt/vttablet/tabletserver/dt_executor_test.go
@@ -705,8 +705,10 @@ func TestNoTwopc(t *testing.T) {
want := "2pc is not enabled"
for _, tc := range testcases {
- err := tc.fun()
- require.EqualError(t, err, want)
+ t.Run(tc.desc, func(t *testing.T) {
+ err := tc.fun()
+ require.EqualError(t, err, want)
+ })
}
}
diff --git a/go/vt/vttablet/tabletserver/health_streamer.go b/go/vt/vttablet/tabletserver/health_streamer.go
index f9f65d197b2..eaeba6315e3 100644
--- a/go/vt/vttablet/tabletserver/health_streamer.go
+++ b/go/vt/vttablet/tabletserver/health_streamer.go
@@ -293,8 +293,10 @@ func (hs *healthStreamer) SetUnhealthyThreshold(v time.Duration) {
// so it can read and write to the MySQL instance for schema-tracking.
func (hs *healthStreamer) MakePrimary(serving bool) {
hs.fieldsMu.Lock()
- defer hs.fieldsMu.Unlock()
hs.isServingPrimary = serving
+ // We let go of the lock here because we don't want to hold the lock when calling RegisterNotifier.
+ // If we keep holding the lock, a deadlock can occur.
+ hs.fieldsMu.Unlock()
// We register for notifications from the schema Engine only when schema tracking is enabled,
// and we are going to a serving primary state.
if serving && hs.signalWhenSchemaChange {
diff --git a/go/vt/vttablet/tabletserver/health_streamer_test.go b/go/vt/vttablet/tabletserver/health_streamer_test.go
index 3421141ff80..9561518eed6 100644
--- a/go/vt/vttablet/tabletserver/health_streamer_test.go
+++ b/go/vt/vttablet/tabletserver/health_streamer_test.go
@@ -592,13 +592,14 @@ func TestDeadlockBwCloseAndReload(t *testing.T) {
wg := sync.WaitGroup{}
wg.Add(2)
- // Try running Close and reload in parallel multiple times.
+ // Try running Close & MakePrimary and reload in parallel multiple times.
// This reproduces the deadlock quite readily.
go func() {
defer wg.Done()
for i := 0; i < 100; i++ {
hs.Close()
hs.Open()
+ hs.MakePrimary(true)
}
}()
diff --git a/go/vt/vttablet/tabletserver/query_executor.go b/go/vt/vttablet/tabletserver/query_executor.go
index abf296c0583..519b60b79d6 100644
--- a/go/vt/vttablet/tabletserver/query_executor.go
+++ b/go/vt/vttablet/tabletserver/query_executor.go
@@ -991,6 +991,8 @@ func (qre *QueryExecutor) execAlterMigration() (*sqltypes.Result, error) {
return qre.tsv.onlineDDLExecutor.ForceCutOverMigration(qre.ctx, alterMigration.UUID)
case sqlparser.ForceCutOverAllMigrationType:
return qre.tsv.onlineDDLExecutor.ForceCutOverPendingMigrations(qre.ctx)
+ case sqlparser.SetCutOverThresholdMigrationType:
+ return qre.tsv.onlineDDLExecutor.SetMigrationCutOverThreshold(qre.ctx, alterMigration.UUID, alterMigration.Threshold)
}
return nil, vterrors.New(vtrpcpb.Code_UNIMPLEMENTED, "ALTER VITESS_MIGRATION not implemented")
}
diff --git a/go/vt/vttablet/tabletserver/query_executor_test.go b/go/vt/vttablet/tabletserver/query_executor_test.go
index 78daad2e616..ade79ecaef5 100644
--- a/go/vt/vttablet/tabletserver/query_executor_test.go
+++ b/go/vt/vttablet/tabletserver/query_executor_test.go
@@ -1514,20 +1514,17 @@ func newTestTabletServer(ctx context.Context, flags executorFlags, db *fakesqldb
} else {
cfg.StrictTableACL = false
}
- if flags&noTwopc > 0 {
- cfg.TwoPCEnable = false
- } else {
- cfg.TwoPCEnable = true
- }
if flags&disableOnlineDDL > 0 {
cfg.EnableOnlineDDL = false
} else {
cfg.EnableOnlineDDL = true
}
- if flags&shortTwopcAge > 0 {
- cfg.TwoPCAbandonAge = 0.5
+ if flags&noTwopc > 0 {
+ cfg.TwoPCAbandonAge = 0
+ } else if flags&shortTwopcAge > 0 {
+ cfg.TwoPCAbandonAge = 500 * time.Millisecond
} else {
- cfg.TwoPCAbandonAge = 10
+ cfg.TwoPCAbandonAge = 10 * time.Second
}
if flags&smallResultSize > 0 {
cfg.Oltp.MaxRows = 2
diff --git a/go/vt/vttablet/tabletserver/querylogz.go b/go/vt/vttablet/tabletserver/querylogz.go
index 33341d1641b..09f375aa329 100644
--- a/go/vt/vttablet/tabletserver/querylogz.go
+++ b/go/vt/vttablet/tabletserver/querylogz.go
@@ -20,9 +20,10 @@ import (
"net/http"
"strconv"
"strings"
- "text/template"
"time"
+ "github.com/google/safehtml/template"
+
"vitess.io/vitess/go/acl"
"vitess.io/vitess/go/vt/log"
"vitess.io/vitess/go/vt/logz"
diff --git a/go/vt/vttablet/tabletserver/querylogz_test.go b/go/vt/vttablet/tabletserver/querylogz_test.go
index 25f03c762c7..ee26437f330 100644
--- a/go/vt/vttablet/tabletserver/querylogz_test.go
+++ b/go/vt/vttablet/tabletserver/querylogz_test.go
@@ -37,7 +37,7 @@ func TestQuerylogzHandler(t *testing.T) {
req, _ := http.NewRequest("GET", "/querylogz?timeout=10&limit=1", nil)
logStats := tabletenv.NewLogStats(context.Background(), "Execute")
logStats.PlanType = planbuilder.PlanSelect.String()
- logStats.OriginalSQL = "select name from test_table limit 1000"
+ logStats.OriginalSQL = "select name, 'inject ' from test_table limit 1000"
logStats.RowsAffected = 1000
logStats.NumberOfQueries = 1
logStats.StartTime, _ = time.Parse("Jan 2 15:04:05", "Nov 29 13:33:09")
@@ -64,7 +64,7 @@ func TestQuerylogzHandler(t *testing.T) {
`0.001 | `,
`1e-08 | `,
`Select | `,
- `select name from test_table limit 1000 | `,
+ regexp.QuoteMeta(`select name,​ 'inject <script>alert()​;</script>' from test_table limit 1000 | `),
`1 | `,
`none | `,
`1000 | `,
@@ -95,7 +95,7 @@ func TestQuerylogzHandler(t *testing.T) {
`0.001 | `,
`1e-08 | `,
`Select | `,
- `select name from test_table limit 1000 | `,
+ regexp.QuoteMeta(`select name,​ 'inject <script>alert()​;</script>' from test_table limit 1000 | `),
`1 | `,
`none | `,
`1000 | `,
@@ -126,7 +126,7 @@ func TestQuerylogzHandler(t *testing.T) {
`0.001 | `,
`1e-08 | `,
`Select | `,
- `select name from test_table limit 1000 | `,
+ regexp.QuoteMeta(`select name,​ 'inject <script>alert()​;</script>' from test_table limit 1000 | `),
`1 | `,
`none | `,
`1000 | `,
diff --git a/go/vt/vttablet/tabletserver/tabletenv/config.go b/go/vt/vttablet/tabletserver/tabletenv/config.go
index 158f40d5202..994999f2368 100644
--- a/go/vt/vttablet/tabletserver/tabletenv/config.go
+++ b/go/vt/vttablet/tabletserver/tabletenv/config.go
@@ -156,8 +156,12 @@ func registerTabletEnvFlags(fs *pflag.FlagSet) {
fs.BoolVar(¤tConfig.WatchReplication, "watch_replication_stream", false, "When enabled, vttablet will stream the MySQL replication stream from the local server, and use it to update schema when it sees a DDL.")
fs.BoolVar(¤tConfig.TrackSchemaVersions, "track_schema_versions", false, "When enabled, vttablet will store versions of schemas at each position that a DDL is applied and allow retrieval of the schema corresponding to a position")
fs.Int64Var(¤tConfig.SchemaVersionMaxAgeSeconds, "schema-version-max-age-seconds", 0, "max age of schema version records to kept in memory by the vreplication historian")
- fs.BoolVar(¤tConfig.TwoPCEnable, "twopc_enable", defaultConfig.TwoPCEnable, "if the flag is on, 2pc is enabled. Other 2pc flags must be supplied.")
- SecondsVar(fs, ¤tConfig.TwoPCAbandonAge, "twopc_abandon_age", defaultConfig.TwoPCAbandonAge, "time in seconds. Any unresolved transaction older than this time will be sent to the coordinator to be resolved.")
+
+ _ = fs.Bool("twopc_enable", true, "TwoPC is enabled")
+ _ = fs.MarkDeprecated("twopc_enable", "TwoPC is always enabled, the transaction abandon age can be configured")
+ flagutil.FloatDuration(fs, ¤tConfig.TwoPCAbandonAge, "twopc_abandon_age", defaultConfig.TwoPCAbandonAge,
+ "Any unresolved transaction older than this time will be sent to the coordinator to be resolved. NOTE: Providing time as seconds (float64) is deprecated. Use time.Duration format (e.g., '1s', '2m', '1h').")
+
// Tx throttler config
flagutil.DualFormatBoolVar(fs, ¤tConfig.EnableTxThrottler, "enable_tx_throttler", defaultConfig.EnableTxThrottler, "If true replication-lag-based throttling on transactions will be enabled.")
flagutil.DualFormatVar(fs, currentConfig.TxThrottlerConfig, "tx_throttler_config", "The configuration of the transaction throttler as a text-formatted throttlerdata.Configuration protocol buffer message.")
@@ -331,12 +335,11 @@ type TabletConfig struct {
ExternalConnections map[string]*dbconfigs.DBConfigs `json:"externalConnections,omitempty"`
- SanitizeLogMessages bool `json:"-"`
- StrictTableACL bool `json:"-"`
- EnableTableACLDryRun bool `json:"-"`
- TableACLExemptACL string `json:"-"`
- TwoPCEnable bool `json:"-"`
- TwoPCAbandonAge Seconds `json:"-"`
+ SanitizeLogMessages bool `json:"-"`
+ StrictTableACL bool `json:"-"`
+ EnableTableACLDryRun bool `json:"-"`
+ TableACLExemptACL string `json:"-"`
+ TwoPCAbandonAge time.Duration `json:"-"`
EnableTxThrottler bool `json:"-"`
TxThrottlerConfig *TxThrottlerConfigFlag `json:"-"`
@@ -1054,6 +1057,8 @@ var defaultConfig = TabletConfig{
},
EnablePerWorkloadTableMetrics: false,
+
+ TwoPCAbandonAge: 15 * time.Minute,
}
// defaultTxThrottlerConfig returns the default TxThrottlerConfigFlag object based on
diff --git a/go/vt/vttablet/tabletserver/tabletenv/seconds.go b/go/vt/vttablet/tabletserver/tabletenv/seconds.go
deleted file mode 100644
index ae11121f2de..00000000000
--- a/go/vt/vttablet/tabletserver/tabletenv/seconds.go
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
-Copyright 2020 The Vitess Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package tabletenv
-
-import (
- "time"
-
- "github.com/spf13/pflag"
-)
-
-// Seconds provides convenience functions for extracting
-// duration from float64 seconds values.
-type Seconds float64
-
-// SecondsVar is like a flag.Float64Var, but it works for Seconds.
-func SecondsVar(fs *pflag.FlagSet, p *Seconds, name string, value Seconds, usage string) {
- fs.Float64Var((*float64)(p), name, float64(value), usage)
-}
-
-// Get converts Seconds to time.Duration
-func (s Seconds) Get() time.Duration {
- return time.Duration(s * Seconds(1*time.Second))
-}
-
-// Set sets the value from time.Duration
-func (s *Seconds) Set(d time.Duration) {
- *s = Seconds(d) / Seconds(1*time.Second)
-}
diff --git a/go/vt/vttablet/tabletserver/tabletenv/seconds_test.go b/go/vt/vttablet/tabletserver/tabletenv/seconds_test.go
deleted file mode 100644
index dc09a3f419f..00000000000
--- a/go/vt/vttablet/tabletserver/tabletenv/seconds_test.go
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
-Copyright 2020 The Vitess Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package tabletenv
-
-import (
- "testing"
- "time"
-
- "github.com/stretchr/testify/assert"
- "github.com/stretchr/testify/require"
-
- "vitess.io/vitess/go/yaml2"
-)
-
-func TestSecondsYaml(t *testing.T) {
- type testSecond struct {
- Value Seconds `json:"value"`
- }
-
- ts := testSecond{
- Value: 1,
- }
- gotBytes, err := yaml2.Marshal(&ts)
- require.NoError(t, err)
- wantBytes := "value: 1\n"
- assert.Equal(t, wantBytes, string(gotBytes))
-
- var gotts testSecond
- err = yaml2.Unmarshal([]byte(wantBytes), &gotts)
- require.NoError(t, err)
- assert.Equal(t, ts, gotts)
-}
-
-func TestSecondsGetSet(t *testing.T) {
- var val Seconds
- val.Set(2 * time.Second)
- assert.Equal(t, Seconds(2), val)
- assert.Equal(t, 2*time.Second, val.Get())
-}
diff --git a/go/vt/vttablet/tabletserver/throttle/base/metric_cache.go b/go/vt/vttablet/tabletserver/throttle/base/metric_cache.go
index 8695cb83229..faad65ca79e 100644
--- a/go/vt/vttablet/tabletserver/throttle/base/metric_cache.go
+++ b/go/vt/vttablet/tabletserver/throttle/base/metric_cache.go
@@ -49,6 +49,7 @@ import (
"github.com/patrickmn/go-cache"
"vitess.io/vitess/go/stats"
+ "vitess.io/vitess/go/vt/vttablet/tmclient"
)
// MetricsQueryType indicates the type of metrics query on MySQL backend. See following.
@@ -142,13 +143,13 @@ func (metric *ThrottleMetric) WithError(err error) *ThrottleMetric {
// ReadThrottleMetrics returns a metric for the given probe. Either by explicit query
// or via SHOW REPLICA STATUS
-func ReadThrottleMetrics(ctx context.Context, probe *Probe, metricsFunc func(context.Context) ThrottleMetrics) ThrottleMetrics {
+func ReadThrottleMetrics(ctx context.Context, probe *Probe, tmClient tmclient.TabletManagerClient, metricsFunc func(context.Context, tmclient.TabletManagerClient) ThrottleMetrics) ThrottleMetrics {
if metrics := getCachedThrottleMetrics(probe); metrics != nil {
return metrics
}
started := time.Now()
- throttleMetrics := metricsFunc(ctx)
+ throttleMetrics := metricsFunc(ctx, tmClient)
go func(metrics ThrottleMetrics, started time.Time) {
stats.GetOrNewGauge("ThrottlerProbesLatency", "probes latency").Set(time.Since(started).Nanoseconds())
diff --git a/go/vt/vttablet/tabletserver/throttle/base/metric_name.go b/go/vt/vttablet/tabletserver/throttle/base/metric_name.go
index 98e1288fb23..43bd2d17a8c 100644
--- a/go/vt/vttablet/tabletserver/throttle/base/metric_name.go
+++ b/go/vt/vttablet/tabletserver/throttle/base/metric_name.go
@@ -60,11 +60,14 @@ func (names MetricNames) Unique() MetricNames {
}
const (
- DefaultMetricName MetricName = "default"
- LagMetricName MetricName = "lag"
- ThreadsRunningMetricName MetricName = "threads_running"
- CustomMetricName MetricName = "custom"
- LoadAvgMetricName MetricName = "loadavg"
+ DefaultMetricName MetricName = "default"
+ LagMetricName MetricName = "lag"
+ ThreadsRunningMetricName MetricName = "threads_running"
+ CustomMetricName MetricName = "custom"
+ LoadAvgMetricName MetricName = "loadavg"
+ HistoryListLengthMetricName MetricName = "history_list_length"
+ MysqldLoadAvgMetricName MetricName = "mysqld-loadavg"
+ MysqldDatadirUsedRatioMetricName MetricName = "mysqld-datadir-used-ratio"
)
func (metric MetricName) DefaultScope() Scope {
diff --git a/go/vt/vttablet/tabletserver/throttle/base/metric_name_test.go b/go/vt/vttablet/tabletserver/throttle/base/metric_name_test.go
index 9867ca18db3..c2e2b44b36f 100644
--- a/go/vt/vttablet/tabletserver/throttle/base/metric_name_test.go
+++ b/go/vt/vttablet/tabletserver/throttle/base/metric_name_test.go
@@ -21,6 +21,9 @@ import (
"testing"
"github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
+ "vitess.io/vitess/go/textutil"
)
func TestAggregateName(t *testing.T) {
@@ -238,4 +241,27 @@ func TestKnownMetricNames(t *testing.T) {
assert.Contains(t, KnownMetricNames, LoadAvgMetricName)
assert.Contains(t, KnownMetricNames, CustomMetricName)
assert.Contains(t, KnownMetricNames, DefaultMetricName)
+ assert.Contains(t, KnownMetricNames, HistoryListLengthMetricName)
+ assert.Contains(t, KnownMetricNames, MysqldLoadAvgMetricName)
+ assert.Contains(t, KnownMetricNames, MysqldDatadirUsedRatioMetricName)
+}
+
+func TestKnownMetricNamesPascalCase(t *testing.T) {
+ expectCases := map[MetricName]string{
+ LagMetricName: "Lag",
+ ThreadsRunningMetricName: "ThreadsRunning",
+ LoadAvgMetricName: "Loadavg",
+ HistoryListLengthMetricName: "HistoryListLength",
+ CustomMetricName: "Custom",
+ DefaultMetricName: "Default",
+ MysqldLoadAvgMetricName: "MysqldLoadavg",
+ MysqldDatadirUsedRatioMetricName: "MysqldDatadirUsedRatio",
+ }
+ for _, metricName := range KnownMetricNames {
+ t.Run(metricName.String(), func(t *testing.T) {
+ expect, ok := expectCases[metricName]
+ require.True(t, ok)
+ assert.Equal(t, expect, textutil.PascalCase(metricName.String()))
+ })
+ }
}
diff --git a/go/vt/vttablet/tabletserver/throttle/base/self_metric.go b/go/vt/vttablet/tabletserver/throttle/base/self_metric.go
index 220dfa6bf60..88fbe2bdd13 100644
--- a/go/vt/vttablet/tabletserver/throttle/base/self_metric.go
+++ b/go/vt/vttablet/tabletserver/throttle/base/self_metric.go
@@ -21,15 +21,24 @@ import (
"fmt"
"strconv"
+ "vitess.io/vitess/go/vt/topo"
"vitess.io/vitess/go/vt/vttablet/tabletserver/connpool"
+ "vitess.io/vitess/go/vt/vttablet/tmclient"
)
+type SelfMetricReadParams struct {
+ Throttler metricsPublisher
+ Conn *connpool.Conn
+ TmClient tmclient.TabletManagerClient
+ TabletInfo *topo.TabletInfo
+}
+
type SelfMetric interface {
Name() MetricName
DefaultScope() Scope
DefaultThreshold() float64
RequiresConn() bool
- Read(ctx context.Context, throttler ThrottlerMetricsPublisher, conn *connpool.Conn) *ThrottleMetric
+ Read(ctx context.Context, params *SelfMetricReadParams) *ThrottleMetric
}
var (
diff --git a/go/vt/vttablet/tabletserver/throttle/base/self_metric_custom_query.go b/go/vt/vttablet/tabletserver/throttle/base/self_metric_custom_query.go
index 585e63ea285..88f789e5dcd 100644
--- a/go/vt/vttablet/tabletserver/throttle/base/self_metric_custom_query.go
+++ b/go/vt/vttablet/tabletserver/throttle/base/self_metric_custom_query.go
@@ -18,8 +18,6 @@ package base
import (
"context"
-
- "vitess.io/vitess/go/vt/vttablet/tabletserver/connpool"
)
var _ SelfMetric = registerSelfMetric(&CustomQuerySelfMetric{})
@@ -43,6 +41,6 @@ func (m *CustomQuerySelfMetric) RequiresConn() bool {
return true
}
-func (m *CustomQuerySelfMetric) Read(ctx context.Context, throttler ThrottlerMetricsPublisher, conn *connpool.Conn) *ThrottleMetric {
- return ReadSelfMySQLThrottleMetric(ctx, conn, throttler.GetCustomMetricsQuery())
+func (m *CustomQuerySelfMetric) Read(ctx context.Context, params *SelfMetricReadParams) *ThrottleMetric {
+ return ReadSelfMySQLThrottleMetric(ctx, params.Conn, params.Throttler.GetCustomMetricsQuery())
}
diff --git a/go/vt/vttablet/tabletserver/throttle/base/self_metric_default.go b/go/vt/vttablet/tabletserver/throttle/base/self_metric_default.go
index 8bce295da7c..97309fa6ea9 100644
--- a/go/vt/vttablet/tabletserver/throttle/base/self_metric_default.go
+++ b/go/vt/vttablet/tabletserver/throttle/base/self_metric_default.go
@@ -19,8 +19,6 @@ package base
import (
"context"
"fmt"
-
- "vitess.io/vitess/go/vt/vttablet/tabletserver/connpool"
)
var _ SelfMetric = registerSelfMetric(&DefaultSelfMetric{})
@@ -44,7 +42,7 @@ func (m *DefaultSelfMetric) RequiresConn() bool {
return false
}
-func (m *DefaultSelfMetric) Read(ctx context.Context, throttler ThrottlerMetricsPublisher, conn *connpool.Conn) *ThrottleMetric {
+func (m *DefaultSelfMetric) Read(ctx context.Context, params *SelfMetricReadParams) *ThrottleMetric {
return &ThrottleMetric{
Err: fmt.Errorf("unexpected direct call to DefaultSelfMetric.Read"),
}
diff --git a/go/vt/vttablet/tabletserver/throttle/base/self_metric_innodb_history_list_length.go b/go/vt/vttablet/tabletserver/throttle/base/self_metric_innodb_history_list_length.go
new file mode 100644
index 00000000000..2696b1750ea
--- /dev/null
+++ b/go/vt/vttablet/tabletserver/throttle/base/self_metric_innodb_history_list_length.go
@@ -0,0 +1,68 @@
+/*
+Copyright 2024 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package base
+
+import (
+ "context"
+ "math"
+ "sync/atomic"
+ "time"
+)
+
+var (
+ historyListLengthQuery = "select count as history_len from information_schema.INNODB_METRICS where name = 'trx_rseg_history_len'"
+
+ cachedHistoryListLengthMetric atomic.Pointer[ThrottleMetric]
+ historyListLengthCacheDuration = 5 * time.Second
+ historyListLengthDefaultThreshold = math.Pow10(9)
+)
+
+var _ SelfMetric = registerSelfMetric(&HistoryListLengthSelfMetric{})
+
+type HistoryListLengthSelfMetric struct {
+}
+
+func (m *HistoryListLengthSelfMetric) Name() MetricName {
+ return HistoryListLengthMetricName
+}
+
+func (m *HistoryListLengthSelfMetric) DefaultScope() Scope {
+ return SelfScope
+}
+
+func (m *HistoryListLengthSelfMetric) DefaultThreshold() float64 {
+ return historyListLengthDefaultThreshold
+}
+
+func (m *HistoryListLengthSelfMetric) RequiresConn() bool {
+ return true
+}
+
+func (m *HistoryListLengthSelfMetric) Read(ctx context.Context, params *SelfMetricReadParams) *ThrottleMetric {
+ // This function will be called sequentially, and therefore does not need strong mutex protection. Still, we use atomics
+ // to ensure correctness in case an external goroutine tries to read the metric concurrently.
+ metric := cachedHistoryListLengthMetric.Load()
+ if metric != nil {
+ return metric
+ }
+ metric = ReadSelfMySQLThrottleMetric(ctx, params.Conn, historyListLengthQuery)
+ cachedHistoryListLengthMetric.Store(metric)
+ time.AfterFunc(historyListLengthCacheDuration, func() {
+ cachedHistoryListLengthMetric.Store(nil)
+ })
+ return metric
+}
diff --git a/go/vt/vttablet/tabletserver/throttle/base/self_metric_lag.go b/go/vt/vttablet/tabletserver/throttle/base/self_metric_lag.go
index dc25ee5622a..3d0e4beebe1 100644
--- a/go/vt/vttablet/tabletserver/throttle/base/self_metric_lag.go
+++ b/go/vt/vttablet/tabletserver/throttle/base/self_metric_lag.go
@@ -23,7 +23,6 @@ import (
"vitess.io/vitess/go/constants/sidecar"
"vitess.io/vitess/go/vt/sqlparser"
- "vitess.io/vitess/go/vt/vttablet/tabletserver/connpool"
)
var (
@@ -65,6 +64,6 @@ func (m *LagSelfMetric) RequiresConn() bool {
return true
}
-func (m *LagSelfMetric) Read(ctx context.Context, throttler ThrottlerMetricsPublisher, conn *connpool.Conn) *ThrottleMetric {
- return ReadSelfMySQLThrottleMetric(ctx, conn, m.GetQuery())
+func (m *LagSelfMetric) Read(ctx context.Context, params *SelfMetricReadParams) *ThrottleMetric {
+ return ReadSelfMySQLThrottleMetric(ctx, params.Conn, m.GetQuery())
}
diff --git a/go/vt/vttablet/tabletserver/throttle/base/self_metric_loadavg.go b/go/vt/vttablet/tabletserver/throttle/base/self_metric_loadavg.go
index 40a2878421a..2d880169020 100644
--- a/go/vt/vttablet/tabletserver/throttle/base/self_metric_loadavg.go
+++ b/go/vt/vttablet/tabletserver/throttle/base/self_metric_loadavg.go
@@ -18,20 +18,16 @@ package base
import (
"context"
- "fmt"
- "os"
"runtime"
- "strconv"
- "strings"
+ "sync/atomic"
+ "time"
- "vitess.io/vitess/go/vt/vttablet/tabletserver/connpool"
+ "vitess.io/vitess/go/osutil"
)
var (
- loadavgOnlyAvailableOnLinuxMetric = &ThrottleMetric{
- Scope: SelfScope,
- Err: fmt.Errorf("loadavg metric is only available on Linux"),
- }
+ cachedLoadAvgMetric atomic.Pointer[ThrottleMetric]
+ loadAvgCacheDuration = 1 * time.Second
)
var _ SelfMetric = registerSelfMetric(&LoadAvgSelfMetric{})
@@ -55,27 +51,26 @@ func (m *LoadAvgSelfMetric) RequiresConn() bool {
return false
}
-func (m *LoadAvgSelfMetric) Read(ctx context.Context, throttler ThrottlerMetricsPublisher, conn *connpool.Conn) *ThrottleMetric {
- if runtime.GOOS != "linux" {
- return loadavgOnlyAvailableOnLinuxMetric
+func (m *LoadAvgSelfMetric) Read(ctx context.Context, params *SelfMetricReadParams) *ThrottleMetric {
+ // This function will be called sequentially, and therefore does not need strong mutex protection. Still, we use atomics
+ // to ensure correctness in case an external goroutine tries to read the metric concurrently.
+ metric := cachedLoadAvgMetric.Load()
+ if metric != nil {
+ return metric
}
- metric := &ThrottleMetric{
+ metric = &ThrottleMetric{
Scope: SelfScope,
}
- {
- content, err := os.ReadFile("/proc/loadavg")
- if err != nil {
- return metric.WithError(err)
- }
- fields := strings.Fields(string(content))
- if len(fields) == 0 {
- return metric.WithError(fmt.Errorf("unexpected /proc/loadavg content"))
- }
- loadAvg, err := strconv.ParseFloat(fields[0], 64)
- if err != nil {
- return metric.WithError(err)
- }
- metric.Value = loadAvg / float64(runtime.NumCPU())
+ val, err := osutil.LoadAvg()
+ if err != nil {
+ return metric.WithError(err)
}
+ metric.Value = val / float64(runtime.NumCPU())
+
+ cachedLoadAvgMetric.Store(metric)
+ time.AfterFunc(loadAvgCacheDuration, func() {
+ cachedLoadAvgMetric.Store(nil)
+ })
+
return metric
}
diff --git a/go/vt/vttablet/tabletserver/throttle/base/self_metric_mysqld.go b/go/vt/vttablet/tabletserver/throttle/base/self_metric_mysqld.go
new file mode 100644
index 00000000000..321837d86b4
--- /dev/null
+++ b/go/vt/vttablet/tabletserver/throttle/base/self_metric_mysqld.go
@@ -0,0 +1,156 @@
+/*
+Copyright 2024 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package base
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "sync/atomic"
+ "time"
+
+ "vitess.io/vitess/go/timer"
+
+ tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata"
+)
+
+var (
+ mysqlHostMetricsRpcTimeout = 5 * time.Second
+ mysqlHostMetricsRateLimit = 10 * time.Second
+ mysqlHostMetricsRateLimiter atomic.Pointer[timer.RateLimiter]
+ lastMySQLHostMetricsResponse atomic.Pointer[tabletmanagerdatapb.MysqlHostMetricsResponse]
+)
+
+// getMysqlMetricsRateLimiter returns a rate limiter that is active until the given context is cancelled.
+// This function will be called sequentially, but nonetheless it offers _some_ concurrent safety. Namely,
+// that a created rate limiter is guaranteed to be cleaned up once the given context is cancelled.
+func getMysqlMetricsRateLimiter(ctx context.Context, rateLimit time.Duration) *timer.RateLimiter {
+ rateLimiter := mysqlHostMetricsRateLimiter.Load()
+ if rateLimiter == nil {
+ rateLimiter = timer.NewRateLimiter(rateLimit)
+ go func() {
+ defer mysqlHostMetricsRateLimiter.Store(nil)
+ defer rateLimiter.Stop()
+ <-ctx.Done()
+ }()
+ mysqlHostMetricsRateLimiter.Store(rateLimiter)
+ }
+ return rateLimiter
+}
+
+// readMysqlHostMetrics reads MySQL host metrics sporadically from the tablet manager (which in turn reads
+// them from the mysql daemon). The metrics are then cached, whether successful or not.
+// The idea is that it is very wasteful to read these metrics for every single query. E.g. right now the throttler
+// can issue 4 reads per second, and going through two RPCs to get, for example, the disk space usage each time is wasteful. Even the load
+// average on the MySQL server is not that susceptible to change.
+func readMysqlHostMetrics(ctx context.Context, params *SelfMetricReadParams) error {
+ if params.TmClient == nil {
+ return fmt.Errorf("tmClient is nil")
+ }
+ if params.TabletInfo == nil {
+ return fmt.Errorf("tabletInfo is nil")
+ }
+ rateLimiter := getMysqlMetricsRateLimiter(ctx, mysqlHostMetricsRateLimit)
+ err := rateLimiter.Do(func() error {
+ ctx, cancel := context.WithTimeout(ctx, mysqlHostMetricsRpcTimeout)
+ defer cancel()
+
+ resp, err := params.TmClient.MysqlHostMetrics(ctx, params.TabletInfo.Tablet, &tabletmanagerdatapb.MysqlHostMetricsRequest{})
+ if err != nil {
+ return err
+ }
+ lastMySQLHostMetricsResponse.Store(resp)
+ return nil
+ })
+ return err
+}
+
+// getMysqlHostMetric gets a metric from the last read MySQL host metrics. The metric will either be directly read from
+// tablet manager (which then reads it from the mysql daemon), or from the cache.
+func getMysqlHostMetric(ctx context.Context, params *SelfMetricReadParams, mysqlHostMetricName string) *ThrottleMetric {
+ metric := &ThrottleMetric{
+ Scope: SelfScope,
+ }
+ if err := readMysqlHostMetrics(ctx, params); err != nil {
+ return metric.WithError(err)
+ }
+ resp := lastMySQLHostMetricsResponse.Load()
+ if resp == nil {
+ return metric.WithError(ErrNoResultYet)
+ }
+ mysqlMetric := resp.HostMetrics.Metrics[mysqlHostMetricName]
+ if mysqlMetric == nil {
+ return metric.WithError(ErrNoSuchMetric)
+ }
+ metric.Value = mysqlMetric.Value
+ if mysqlMetric.Error != nil {
+ metric.Err = errors.New(mysqlMetric.Error.Message)
+ }
+ return metric
+}
+
+var _ SelfMetric = registerSelfMetric(&MysqldLoadAvgSelfMetric{})
+var _ SelfMetric = registerSelfMetric(&MysqldDatadirUsedRatioSelfMetric{})
+
+// MysqldLoadAvgSelfMetric stands for the load average per cpu, on the MySQL host.
+type MysqldLoadAvgSelfMetric struct {
+}
+
+func (m *MysqldLoadAvgSelfMetric) Name() MetricName {
+ return MysqldLoadAvgMetricName
+}
+
+func (m *MysqldLoadAvgSelfMetric) DefaultScope() Scope {
+ return SelfScope
+}
+
+func (m *MysqldLoadAvgSelfMetric) DefaultThreshold() float64 {
+ return 1.0
+}
+
+func (m *MysqldLoadAvgSelfMetric) RequiresConn() bool {
+ return false
+}
+
+func (m *MysqldLoadAvgSelfMetric) Read(ctx context.Context, params *SelfMetricReadParams) *ThrottleMetric {
+ return getMysqlHostMetric(ctx, params, "loadavg")
+}
+
+// MysqldDatadirUsedRatioSelfMetric stands for the disk space usage of the mount where MySQL's datadir is located.
+// Range: 0.0 (empty) - 1.0 (full)
+type MysqldDatadirUsedRatioSelfMetric struct {
+}
+
+func (m *MysqldDatadirUsedRatioSelfMetric) Name() MetricName {
+ return MysqldDatadirUsedRatioMetricName
+}
+
+func (m *MysqldDatadirUsedRatioSelfMetric) DefaultScope() Scope {
+ return SelfScope
+}
+
+func (m *MysqldDatadirUsedRatioSelfMetric) DefaultThreshold() float64 {
+ return 0.98
+}
+
+func (m *MysqldDatadirUsedRatioSelfMetric) RequiresConn() bool {
+ return false
+}
+
+func (m *MysqldDatadirUsedRatioSelfMetric) Read(ctx context.Context, params *SelfMetricReadParams) *ThrottleMetric {
+ return getMysqlHostMetric(ctx, params, "datadir-used-ratio")
+}
diff --git a/go/vt/vttablet/tabletserver/throttle/base/self_metric_mysqld_test.go b/go/vt/vttablet/tabletserver/throttle/base/self_metric_mysqld_test.go
new file mode 100644
index 00000000000..39d3f3f5ec2
--- /dev/null
+++ b/go/vt/vttablet/tabletserver/throttle/base/self_metric_mysqld_test.go
@@ -0,0 +1,72 @@
+/*
+Copyright 2024 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package base
+
+import (
+ "context"
+ "fmt"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestGetMysqlMetricsRateLimiter(t *testing.T) {
+ rateLimit := 10 * time.Millisecond
+ for i := range 3 {
+ testName := fmt.Sprintf("iteration %d", i)
+ t.Run(testName, func(t *testing.T) {
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+ {
+ rateLimiter := mysqlHostMetricsRateLimiter.Load()
+ assert.Nil(t, rateLimiter)
+ }
+ rateLimiter := getMysqlMetricsRateLimiter(ctx, rateLimit)
+ assert.NotNil(t, rateLimiter)
+ for range 5 {
+ r := getMysqlMetricsRateLimiter(ctx, rateLimit)
+ // Returning the same rate limiter
+ assert.Equal(t, rateLimiter, r)
+ }
+ val := 0
+ incr := func() error {
+ val++
+ return nil
+ }
+ for range 10 {
+ rateLimiter.Do(incr)
+ time.Sleep(2 * rateLimit)
+ }
+ assert.EqualValues(t, 10, val)
+ cancel()
+ // There can be a race condition where the rate limiter still emits one final tick after the context is cancelled.
+ // So we wait enough time to ensure that tick is "wasted".
+ time.Sleep(2 * rateLimit)
+ // Now that the rate limiter was stopped (we invoked `cancel()`), its `Do()` should not invoke the function anymore.
+ for range 7 {
+ rateLimiter.Do(incr)
+ time.Sleep(time.Millisecond)
+ }
+ assert.EqualValues(t, 10, val) // Same "10" value as before.
+ {
+ rateLimiter := mysqlHostMetricsRateLimiter.Load()
+ assert.Nil(t, rateLimiter)
+ }
+ })
+ }
+}
diff --git a/go/vt/vttablet/tabletserver/throttle/base/self_metric_threads_running.go b/go/vt/vttablet/tabletserver/throttle/base/self_metric_threads_running.go
index 08f7d408d1c..cb59547a768 100644
--- a/go/vt/vttablet/tabletserver/throttle/base/self_metric_threads_running.go
+++ b/go/vt/vttablet/tabletserver/throttle/base/self_metric_threads_running.go
@@ -18,8 +18,6 @@ package base
import (
"context"
-
- "vitess.io/vitess/go/vt/vttablet/tabletserver/connpool"
)
var (
@@ -47,6 +45,6 @@ func (m *ThreadsRunningSelfMetric) RequiresConn() bool {
return true
}
-func (m *ThreadsRunningSelfMetric) Read(ctx context.Context, throttler ThrottlerMetricsPublisher, conn *connpool.Conn) *ThrottleMetric {
- return ReadSelfMySQLThrottleMetric(ctx, conn, threadsRunningMetricQuery)
+func (m *ThreadsRunningSelfMetric) Read(ctx context.Context, params *SelfMetricReadParams) *ThrottleMetric {
+ return ReadSelfMySQLThrottleMetric(ctx, params.Conn, threadsRunningMetricQuery)
}
diff --git a/go/vt/vttablet/tabletserver/throttle/base/throttler_metrics_publisher.go b/go/vt/vttablet/tabletserver/throttle/base/throttler_metrics_publisher.go
index 1d2d4d0652c..10020af27e6 100644
--- a/go/vt/vttablet/tabletserver/throttle/base/throttler_metrics_publisher.go
+++ b/go/vt/vttablet/tabletserver/throttle/base/throttler_metrics_publisher.go
@@ -16,8 +16,8 @@ limitations under the License.
package base
-// ThrottlerMetricsPublisher is implemented by throttler.Throttler and is used by SelfMetric
+// metricsPublisher is implemented by throttler.Throttler and is used by SelfMetric
// implementations to query the throttler.
-type ThrottlerMetricsPublisher interface {
+type metricsPublisher interface {
GetCustomMetricsQuery() string
}
diff --git a/go/vt/vttablet/tabletserver/throttle/check.go b/go/vt/vttablet/tabletserver/throttle/check.go
index ccdfcb2ce23..d7f43d85e9d 100644
--- a/go/vt/vttablet/tabletserver/throttle/check.go
+++ b/go/vt/vttablet/tabletserver/throttle/check.go
@@ -188,9 +188,9 @@ func (check *ThrottlerCheck) Check(ctx context.Context, appName string, scope ba
// Out of abundance of caution, we will protect against such a scenario.
return
}
- stats.GetOrNewCounter(fmt.Sprintf("ThrottlerCheck%s%sTotal", textutil.SingleWordCamel(metricScope.String()), textutil.SingleWordCamel(metricName.String())), "").Add(1)
+ stats.GetOrNewCounter(fmt.Sprintf("ThrottlerCheck%s%sTotal", textutil.PascalCase(metricScope.String()), textutil.PascalCase(metricName.String())), "").Add(1)
if !metricCheckResult.IsOK() {
- stats.GetOrNewCounter(fmt.Sprintf("ThrottlerCheck%s%sError", textutil.SingleWordCamel(metricScope.String()), textutil.SingleWordCamel(metricName.String())), "").Add(1)
+ stats.GetOrNewCounter(fmt.Sprintf("ThrottlerCheck%s%sError", textutil.PascalCase(metricScope.String()), textutil.PascalCase(metricName.String())), "").Add(1)
}
}(metricCheckResult)
}
@@ -249,7 +249,7 @@ func (check *ThrottlerCheck) localCheck(ctx context.Context, aggregatedMetricNam
check.throttler.markMetricHealthy(aggregatedMetricName)
}
if timeSinceHealthy, found := check.throttler.timeSinceMetricHealthy(aggregatedMetricName); found {
- go stats.GetOrNewGauge(fmt.Sprintf("ThrottlerCheck%sSecondsSinceHealthy", textutil.SingleWordCamel(scope.String())), fmt.Sprintf("seconds since last healthy check for %v", scope)).Set(int64(timeSinceHealthy.Seconds()))
+ go stats.GetOrNewGauge(fmt.Sprintf("ThrottlerCheck%sSecondsSinceHealthy", textutil.PascalCase(scope.String())), fmt.Sprintf("seconds since last healthy check for %v", scope)).Set(int64(timeSinceHealthy.Seconds()))
}
return checkResult
@@ -261,7 +261,7 @@ func (check *ThrottlerCheck) reportAggregated(aggregatedMetricName string, metri
return
}
if value, err := metricResult.Get(); err == nil {
- stats.GetOrNewGaugeFloat64(fmt.Sprintf("ThrottlerAggregated%s%s", textutil.SingleWordCamel(scope.String()), textutil.SingleWordCamel(metricName.String())), fmt.Sprintf("aggregated value for %v", scope)).Set(value)
+ stats.GetOrNewGaugeFloat64(fmt.Sprintf("ThrottlerAggregated%s%s", textutil.PascalCase(scope.String()), textutil.PascalCase(metricName.String())), fmt.Sprintf("aggregated value for %v", scope)).Set(value)
}
}
diff --git a/go/vt/vttablet/tabletserver/throttle/throttler.go b/go/vt/vttablet/tabletserver/throttle/throttler.go
index af7f59abb7e..839ba9d43b8 100644
--- a/go/vt/vttablet/tabletserver/throttle/throttler.go
+++ b/go/vt/vttablet/tabletserver/throttle/throttler.go
@@ -95,7 +95,6 @@ const (
DefaultThrottleRatio = 1.0
defaultReplicationLagQuery = "select unix_timestamp(now(6))-max(ts/1000000000) as replication_lag from %s.heartbeat"
- threadsRunningQuery = "show global status like 'threads_running'"
inventoryPrefix = "inventory/"
throttlerConfigPrefix = "config/"
@@ -137,6 +136,7 @@ type Throttler struct {
keyspace string
shard string
tabletAlias *topodatapb.TabletAlias
+ tabletInfo atomic.Pointer[topo.TabletInfo]
check *ThrottlerCheck
isEnabled atomic.Bool
@@ -190,7 +190,7 @@ type Throttler struct {
cancelEnableContext context.CancelFunc
throttledAppsMutex sync.Mutex
- readSelfThrottleMetrics func(context.Context) base.ThrottleMetrics // overwritten by unit test
+ readSelfThrottleMetrics func(context.Context, tmclient.TabletManagerClient) base.ThrottleMetrics // overwritten by unit test
}
// ThrottlerStatus published some status values from the throttler
@@ -262,8 +262,8 @@ func NewThrottler(env tabletenv.Env, srvTopoServer srvtopo.Server, ts *topo.Serv
}
throttler.StoreMetricsThreshold(base.RegisteredSelfMetrics[base.LagMetricName].DefaultThreshold())
- throttler.readSelfThrottleMetrics = func(ctx context.Context) base.ThrottleMetrics {
- return throttler.readSelfThrottleMetricsInternal(ctx)
+ throttler.readSelfThrottleMetrics = func(ctx context.Context, tmClient tmclient.TabletManagerClient) base.ThrottleMetrics {
+ return throttler.readSelfThrottleMetricsInternal(ctx, tmClient)
}
return throttler
}
@@ -338,6 +338,15 @@ func (throttler *Throttler) initConfig() {
// readThrottlerConfig proactively reads the throttler's config from SrvKeyspace in local topo
func (throttler *Throttler) readThrottlerConfig(ctx context.Context) (*topodatapb.ThrottlerConfig, error) {
+ // since we're reading from topo, let's seize this opportunity to read tablet info as well
+ if throttler.tabletInfo.Load() == nil {
+ if ti, err := throttler.ts.GetTablet(ctx, throttler.tabletAlias); err == nil {
+ throttler.tabletInfo.Store(ti)
+ } else {
+ log.Errorf("Throttler: error reading tablet info: %v", err)
+ }
+ }
+
srvks, err := throttler.ts.GetSrvKeyspace(ctx, throttler.tabletAlias.Cell, throttler.keyspace)
if err != nil {
return nil, err
@@ -804,7 +813,7 @@ func (throttler *Throttler) Operate(ctx context.Context, wg *sync.WaitGroup) {
if throttler.IsOpen() {
// frequent
// Always collect self metrics:
- throttler.collectSelfMetrics(ctx)
+ throttler.collectSelfMetrics(ctx, tmClient)
if !throttler.isDormant() {
throttler.collectShardMetrics(ctx, tmClient)
}
@@ -869,7 +878,7 @@ func (throttler *Throttler) Operate(ctx context.Context, wg *sync.WaitGroup) {
}()
}
-func (throttler *Throttler) generateTabletProbeFunction(scope base.Scope, tmClient tmclient.TabletManagerClient, probe *base.Probe) (probeFunc func(context.Context) base.ThrottleMetrics) {
+func (throttler *Throttler) generateTabletProbeFunction(scope base.Scope, probe *base.Probe) (probeFunc func(context.Context, tmclient.TabletManagerClient) base.ThrottleMetrics) {
metricsWithError := func(err error) base.ThrottleMetrics {
metrics := base.ThrottleMetrics{}
for _, metricName := range base.KnownMetricNames {
@@ -882,7 +891,7 @@ func (throttler *Throttler) generateTabletProbeFunction(scope base.Scope, tmClie
}
return metrics
}
- return func(ctx context.Context) base.ThrottleMetrics {
+ return func(ctx context.Context, tmClient tmclient.TabletManagerClient) base.ThrottleMetrics {
// Some reasonable timeout, to ensure we release connections even if they're hanging (otherwise grpc-go keeps polling those connections forever)
ctx, cancel := context.WithTimeout(ctx, 4*activeCollectInterval)
defer cancel()
@@ -940,7 +949,7 @@ func (throttler *Throttler) generateTabletProbeFunction(scope base.Scope, tmClie
// readSelfThrottleMetricsInternal rreads all registsred self metrics on this tablet (or backend MySQL server).
// This is the actual place where metrics are read, to be later aggregated and/or propagated to other tablets.
-func (throttler *Throttler) readSelfThrottleMetricsInternal(ctx context.Context) base.ThrottleMetrics {
+func (throttler *Throttler) readSelfThrottleMetricsInternal(ctx context.Context, tmClient tmclient.TabletManagerClient) base.ThrottleMetrics {
result := make(base.ThrottleMetrics, len(base.RegisteredSelfMetrics))
writeMetric := func(metric *base.ThrottleMetric) {
select {
@@ -950,15 +959,20 @@ func (throttler *Throttler) readSelfThrottleMetricsInternal(ctx context.Context)
}
}
readMetric := func(selfMetric base.SelfMetric) *base.ThrottleMetric {
- if !selfMetric.RequiresConn() {
- return selfMetric.Read(ctx, throttler, nil)
+ params := &base.SelfMetricReadParams{
+ Throttler: throttler,
+ TmClient: tmClient,
+ TabletInfo: throttler.tabletInfo.Load(),
}
- conn, err := throttler.pool.Get(ctx, nil)
- if err != nil {
- return &base.ThrottleMetric{Err: err}
+ if selfMetric.RequiresConn() {
+ conn, err := throttler.pool.Get(ctx, nil)
+ if err != nil {
+ return &base.ThrottleMetric{Err: err}
+ }
+ defer conn.Recycle()
+ params.Conn = conn.Conn
}
- defer conn.Recycle()
- return selfMetric.Read(ctx, throttler, conn.Conn)
+ return selfMetric.Read(ctx, params)
}
for metricName, selfMetric := range base.RegisteredSelfMetrics {
if metricName == base.DefaultMetricName {
@@ -975,7 +989,7 @@ func (throttler *Throttler) readSelfThrottleMetricsInternal(ctx context.Context)
return result
}
-func (throttler *Throttler) collectSelfMetrics(ctx context.Context) {
+func (throttler *Throttler) collectSelfMetrics(ctx context.Context, tmClient tmclient.TabletManagerClient) {
probe := throttler.inventory.ClustersProbes[throttler.tabletAliasString()]
if probe == nil {
// probe not created yet
@@ -990,7 +1004,7 @@ func (throttler *Throttler) collectSelfMetrics(ctx context.Context) {
defer atomic.StoreInt64(&probe.QueryInProgress, 0)
// Throttler is probing its own tablet's metrics:
- _ = base.ReadThrottleMetrics(ctx, probe, throttler.readSelfThrottleMetrics)
+ _ = base.ReadThrottleMetrics(ctx, probe, tmClient, throttler.readSelfThrottleMetrics)
}()
}
@@ -1011,9 +1025,9 @@ func (throttler *Throttler) collectShardMetrics(ctx context.Context, tmClient tm
defer atomic.StoreInt64(&probe.QueryInProgress, 0)
// Throttler probing other tablets:
- throttleMetricFunc := throttler.generateTabletProbeFunction(base.ShardScope, tmClient, probe)
+ throttleMetricFunc := throttler.generateTabletProbeFunction(base.ShardScope, probe)
- throttleMetrics := base.ReadThrottleMetrics(ctx, probe, throttleMetricFunc)
+ throttleMetrics := base.ReadThrottleMetrics(ctx, probe, tmClient, throttleMetricFunc)
for _, metric := range throttleMetrics {
select {
case <-ctx.Done():
diff --git a/go/vt/vttablet/tabletserver/throttle/throttler_test.go b/go/vt/vttablet/tabletserver/throttle/throttler_test.go
index fd7921899da..352e641fa35 100644
--- a/go/vt/vttablet/tabletserver/throttle/throttler_test.go
+++ b/go/vt/vttablet/tabletserver/throttle/throttler_test.go
@@ -71,6 +71,24 @@ var (
Value: 2.718,
Err: nil,
},
+ base.HistoryListLengthMetricName: &base.ThrottleMetric{
+ Scope: base.SelfScope,
+ Alias: "",
+ Value: 5,
+ Err: nil,
+ },
+ base.MysqldLoadAvgMetricName: &base.ThrottleMetric{
+ Scope: base.SelfScope,
+ Alias: "",
+ Value: 0.3311,
+ Err: nil,
+ },
+ base.MysqldDatadirUsedRatioMetricName: &base.ThrottleMetric{
+ Scope: base.SelfScope,
+ Alias: "",
+ Value: 0.85,
+ Err: nil,
+ },
}
replicaMetrics = map[string]*MetricResult{
base.LagMetricName.String(): {
@@ -93,6 +111,21 @@ var (
ResponseCode: tabletmanagerdatapb.CheckThrottlerResponseCode_OK,
Value: 5.1,
},
+ base.HistoryListLengthMetricName.String(): {
+ StatusCode: http.StatusOK,
+ ResponseCode: tabletmanagerdatapb.CheckThrottlerResponseCode_OK,
+ Value: 6,
+ },
+ base.MysqldLoadAvgMetricName.String(): {
+ StatusCode: http.StatusOK,
+ ResponseCode: tabletmanagerdatapb.CheckThrottlerResponseCode_OK,
+ Value: 0.2211,
+ },
+ base.MysqldDatadirUsedRatioMetricName.String(): {
+ StatusCode: http.StatusOK,
+ ResponseCode: tabletmanagerdatapb.CheckThrottlerResponseCode_OK,
+ Value: 0.87,
+ },
}
nonPrimaryTabletType atomic.Int32
)
@@ -283,7 +316,7 @@ func newTestThrottler() *Throttler {
throttler.recentCheckDormantDiff = int64(throttler.dormantPeriod / recentCheckRateLimiterInterval)
throttler.recentCheckDiff = int64(3 * time.Second / recentCheckRateLimiterInterval)
- throttler.readSelfThrottleMetrics = func(ctx context.Context) base.ThrottleMetrics {
+ throttler.readSelfThrottleMetrics = func(ctx context.Context, tmClient tmclient.TabletManagerClient) base.ThrottleMetrics {
for _, metric := range selfMetrics {
go func() {
select {
@@ -1827,10 +1860,13 @@ func TestChecks(t *testing.T) {
assert.Equal(t, testAppName.String(), checkResult.AppName)
assert.Equal(t, len(base.KnownMetricNames), len(checkResult.Metrics))
- assert.EqualValues(t, 0.3, checkResult.Metrics[base.LagMetricName.String()].Value) // self lag value, because flags.Scope is set
- assert.EqualValues(t, 26, checkResult.Metrics[base.ThreadsRunningMetricName.String()].Value) // self value, because flags.Scope is set
- assert.EqualValues(t, 17, checkResult.Metrics[base.CustomMetricName.String()].Value) // self value, because flags.Scope is set
- assert.EqualValues(t, 2.718, checkResult.Metrics[base.LoadAvgMetricName.String()].Value) // self value, because flags.Scope is set
+ assert.EqualValues(t, 0.3, checkResult.Metrics[base.LagMetricName.String()].Value) // self lag value, because flags.Scope is set
+ assert.EqualValues(t, 26, checkResult.Metrics[base.ThreadsRunningMetricName.String()].Value) // self value, because flags.Scope is set
+ assert.EqualValues(t, 17, checkResult.Metrics[base.CustomMetricName.String()].Value) // self value, because flags.Scope is set
+ assert.EqualValues(t, 2.718, checkResult.Metrics[base.LoadAvgMetricName.String()].Value) // self value, because flags.Scope is set
+ assert.EqualValues(t, 5, checkResult.Metrics[base.HistoryListLengthMetricName.String()].Value) // self value, because flags.Scope is set
+ assert.EqualValues(t, 0.3311, checkResult.Metrics[base.MysqldLoadAvgMetricName.String()].Value) // self value, because flags.Scope is set
+ assert.EqualValues(t, 0.85, checkResult.Metrics[base.MysqldDatadirUsedRatioMetricName.String()].Value) // self value, because flags.Scope is set
for _, metric := range checkResult.Metrics {
assert.EqualValues(t, base.SelfScope.String(), metric.Scope)
}
@@ -1886,10 +1922,13 @@ func TestChecks(t *testing.T) {
assert.Equal(t, testAppName.String(), checkResult.AppName)
assert.Equal(t, len(base.KnownMetricNames), len(checkResult.Metrics))
- assert.EqualValues(t, 0.9, checkResult.Metrics[base.LagMetricName.String()].Value) // shard lag value, because flags.Scope is set
- assert.EqualValues(t, 26, checkResult.Metrics[base.ThreadsRunningMetricName.String()].Value) // shard value, because flags.Scope is set
- assert.EqualValues(t, 17, checkResult.Metrics[base.CustomMetricName.String()].Value) // shard value, because flags.Scope is set
- assert.EqualValues(t, 5.1, checkResult.Metrics[base.LoadAvgMetricName.String()].Value) // shard value, because flags.Scope is set
+ assert.EqualValues(t, 0.9, checkResult.Metrics[base.LagMetricName.String()].Value) // shard lag value, because flags.Scope is set
+ assert.EqualValues(t, 26, checkResult.Metrics[base.ThreadsRunningMetricName.String()].Value) // shard value, because flags.Scope is set
+ assert.EqualValues(t, 17, checkResult.Metrics[base.CustomMetricName.String()].Value) // shard value, because flags.Scope is set
+ assert.EqualValues(t, 5.1, checkResult.Metrics[base.LoadAvgMetricName.String()].Value) // shard value, because flags.Scope is set
+ assert.EqualValues(t, 6, checkResult.Metrics[base.HistoryListLengthMetricName.String()].Value) // shard value, because flags.Scope is set
+ assert.EqualValues(t, 0.3311, checkResult.Metrics[base.MysqldLoadAvgMetricName.String()].Value) // shard value, because flags.Scope is set
+ assert.EqualValues(t, 0.87, checkResult.Metrics[base.MysqldDatadirUsedRatioMetricName.String()].Value) // shard value, because flags.Scope is set
for _, metric := range checkResult.Metrics {
assert.EqualValues(t, base.ShardScope.String(), metric.Scope)
}
@@ -1918,14 +1957,19 @@ func TestChecks(t *testing.T) {
assert.ErrorIs(t, checkResult.Error, base.ErrThresholdExceeded)
assert.Equal(t, len(base.KnownMetricNames), len(checkResult.Metrics))
- assert.EqualValues(t, 0.9, checkResult.Metrics[base.LagMetricName.String()].Value) // shard lag value, because "shard" is the default scope for lag
- assert.EqualValues(t, 26, checkResult.Metrics[base.ThreadsRunningMetricName.String()].Value) // self value, because "self" is the default scope for threads_running
- assert.EqualValues(t, 17, checkResult.Metrics[base.CustomMetricName.String()].Value) // self value, because "self" is the default scope for custom
- assert.EqualValues(t, 2.718, checkResult.Metrics[base.LoadAvgMetricName.String()].Value) // self value, because "self" is the default scope for loadavg
+ assert.EqualValues(t, 0.9, checkResult.Metrics[base.LagMetricName.String()].Value) // shard lag value, because "shard" is the default scope for lag
+ assert.EqualValues(t, 26, checkResult.Metrics[base.ThreadsRunningMetricName.String()].Value) // self value, because "self" is the default scope for threads_running
+ assert.EqualValues(t, 17, checkResult.Metrics[base.CustomMetricName.String()].Value) // self value, because "self" is the default scope for custom
+ assert.EqualValues(t, 2.718, checkResult.Metrics[base.LoadAvgMetricName.String()].Value) // self value, because "self" is the default scope for loadavg
+ assert.EqualValues(t, 5, checkResult.Metrics[base.HistoryListLengthMetricName.String()].Value) // self value, because "self" is the default scope for history_list_length
+ assert.EqualValues(t, 0.3311, checkResult.Metrics[base.MysqldLoadAvgMetricName.String()].Value) // self value, because "self" is the default scope for mysqld-loadavg
+ assert.EqualValues(t, 0.85, checkResult.Metrics[base.MysqldDatadirUsedRatioMetricName.String()].Value) // self value, because "self" is the default scope for mysqld-datadir-used-ratio
assert.EqualValues(t, base.ShardScope.String(), checkResult.Metrics[base.LagMetricName.String()].Scope)
assert.EqualValues(t, base.SelfScope.String(), checkResult.Metrics[base.ThreadsRunningMetricName.String()].Scope)
assert.EqualValues(t, base.SelfScope.String(), checkResult.Metrics[base.CustomMetricName.String()].Scope)
assert.EqualValues(t, base.SelfScope.String(), checkResult.Metrics[base.LoadAvgMetricName.String()].Scope)
+ assert.EqualValues(t, base.SelfScope.String(), checkResult.Metrics[base.MysqldLoadAvgMetricName.String()].Scope)
+ assert.EqualValues(t, base.SelfScope.String(), checkResult.Metrics[base.MysqldDatadirUsedRatioMetricName.String()].Scope)
})
})
t.Run("checks, defined scope masks explicit scope metrics", func(t *testing.T) {
@@ -1939,6 +1983,9 @@ func TestChecks(t *testing.T) {
base.MetricName("self/threads_running"),
base.MetricName("custom"),
base.MetricName("shard/loadavg"),
+ base.MetricName("shard/mysqld-loadavg"),
+ base.MetricName("self/history_list_length"),
+ base.MetricName("self/mysqld-datadir-used-ratio"),
base.MetricName("default"),
}
checkResult := throttler.Check(ctx, testAppName.String(), metricNames, flags)
@@ -1950,10 +1997,13 @@ func TestChecks(t *testing.T) {
assert.ErrorIs(t, checkResult.Error, base.ErrThresholdExceeded)
assert.Equal(t, len(metricNames), len(checkResult.Metrics))
- assert.EqualValues(t, 0.9, checkResult.Metrics[base.LagMetricName.String()].Value) // shard lag value, even though scope name is in metric name
- assert.EqualValues(t, 26, checkResult.Metrics[base.ThreadsRunningMetricName.String()].Value) // shard value, even though scope name is in metric name
- assert.EqualValues(t, 17, checkResult.Metrics[base.CustomMetricName.String()].Value) // shard value because flags.Scope is set
- assert.EqualValues(t, 5.1, checkResult.Metrics[base.LoadAvgMetricName.String()].Value) // shard value, not because scope name is in metric name but because flags.Scope is set
+ assert.EqualValues(t, 0.9, checkResult.Metrics[base.LagMetricName.String()].Value) // shard lag value, even though scope name is in metric name
+ assert.EqualValues(t, 26, checkResult.Metrics[base.ThreadsRunningMetricName.String()].Value) // shard value, even though scope name is in metric name
+ assert.EqualValues(t, 17, checkResult.Metrics[base.CustomMetricName.String()].Value) // shard value because flags.Scope is set
+ assert.EqualValues(t, 5.1, checkResult.Metrics[base.LoadAvgMetricName.String()].Value) // shard value, not because scope name is in metric name but because flags.Scope is set
+ assert.EqualValues(t, 6, checkResult.Metrics[base.HistoryListLengthMetricName.String()].Value) // shard value, even though scope name is in metric name
+ assert.EqualValues(t, 0.3311, checkResult.Metrics[base.MysqldLoadAvgMetricName.String()].Value) // shard value, not because scope name is in metric name but because flags.Scope is set
+ assert.EqualValues(t, 0.87, checkResult.Metrics[base.MysqldDatadirUsedRatioMetricName.String()].Value) // shard value, even though scope name is in metric name
for _, metric := range checkResult.Metrics {
assert.EqualValues(t, base.ShardScope.String(), metric.Scope)
}
@@ -2222,8 +2272,13 @@ func TestReplica(t *testing.T) {
base.DefaultMetricName:
assert.Error(t, metricResult.Error, "metricName=%v, value=%v, threshold=%v", metricName, metricResult.Value, metricResult.Threshold)
assert.ErrorIs(t, metricResult.Error, base.ErrThresholdExceeded)
- case base.ThreadsRunningMetricName:
+ case base.ThreadsRunningMetricName,
+ base.HistoryListLengthMetricName,
+ base.MysqldLoadAvgMetricName,
+ base.MysqldDatadirUsedRatioMetricName:
assert.NoError(t, metricResult.Error, "metricName=%v, value=%v, threshold=%v", metricName, metricResult.Value, metricResult.Threshold)
+ default:
+ assert.Fail(t, "unexpected metric", "name=%v", metricName)
}
}
})
diff --git a/go/vt/vttablet/tabletserver/tx_engine.go b/go/vt/vttablet/tabletserver/tx_engine.go
index b6e0e69b86d..d0ca4ec498c 100644
--- a/go/vt/vttablet/tabletserver/tx_engine.go
+++ b/go/vt/vttablet/tabletserver/tx_engine.go
@@ -116,20 +116,19 @@ func NewTxEngine(env tabletenv.Env, dxNotifier func()) *TxEngine {
te.txPool = NewTxPool(env, limiter)
// We initially allow twoPC (handles vttablet restarts).
// We will disallow them for a few reasons -
- // 1. when a new tablet is promoted if semi-sync is turned off.
+ // 1. When a new tablet is promoted if semi-sync is turned off.
// 2. TabletControls have been set by a Resharding workflow.
te.twopcAllowed = make([]bool, TwoPCAllowed_Len)
for idx := range te.twopcAllowed {
te.twopcAllowed[idx] = true
}
- te.twopcEnabled = config.TwoPCEnable
- if te.twopcEnabled {
- if config.TwoPCAbandonAge <= 0 {
- log.Error("2PC abandon age not specified: Disabling 2PC")
- te.twopcEnabled = false
- }
+ te.twopcEnabled = true
+ if config.TwoPCAbandonAge <= 0 {
+ log.Error("2PC abandon age not specified: Disabling 2PC")
+ te.twopcEnabled = false
}
- te.abandonAge = config.TwoPCAbandonAge.Get()
+
+ te.abandonAge = config.TwoPCAbandonAge
te.ticks = timer.NewTimer(te.abandonAge / 2)
// Set the prepared pool capacity to something lower than
diff --git a/go/vt/vttablet/tabletserver/tx_engine_test.go b/go/vt/vttablet/tabletserver/tx_engine_test.go
index f4dd0596691..cba7bf86e8f 100644
--- a/go/vt/vttablet/tabletserver/tx_engine_test.go
+++ b/go/vt/vttablet/tabletserver/tx_engine_test.go
@@ -613,8 +613,7 @@ func TestCheckReceivedError(t *testing.T) {
cfg := tabletenv.NewDefaultConfig()
cfg.DB = newDBConfigs(db)
env := tabletenv.NewEnv(vtenv.NewTestEnv(), cfg, "TabletServerTest")
- env.Config().TwoPCEnable = true
- env.Config().TwoPCAbandonAge = 5
+ env.Config().TwoPCAbandonAge = 5 * time.Second
te := NewTxEngine(env, nil)
te.AcceptReadWrite()
@@ -791,8 +790,7 @@ func TestPrepareTx(t *testing.T) {
db.AddRejectedQuery("retryable query", sqlerror.NewSQLError(sqlerror.CRConnectionError, "", "Retryable error"))
cfg := tabletenv.NewDefaultConfig()
cfg.DB = newDBConfigs(db)
- cfg.TwoPCEnable = true
- cfg.TwoPCAbandonAge = 200
+ cfg.TwoPCAbandonAge = 200 * time.Second
te := NewTxEngine(tabletenv.NewEnv(vtenv.NewTestEnv(), cfg, "TabletServerTest"), nil)
te.AcceptReadWrite()
db.ResetQueryLog()
diff --git a/go/vt/vttablet/tabletserver/tx_pool.go b/go/vt/vttablet/tabletserver/tx_pool.go
index ca8a0ea34b2..302a3d41050 100644
--- a/go/vt/vttablet/tabletserver/tx_pool.go
+++ b/go/vt/vttablet/tabletserver/tx_pool.go
@@ -40,9 +40,8 @@ import (
)
const (
- txLogInterval = 1 * time.Minute
- beginWithCSRO = "start transaction with consistent snapshot, read only"
- trackGtidQuery = "set session session_track_gtids = START_GTID"
+ txLogInterval = 1 * time.Minute
+ beginWithCSRO = "start transaction with consistent snapshot, read only"
)
var txIsolations = map[querypb.ExecuteOptions_TransactionIsolation]string{
@@ -394,16 +393,6 @@ func createStartTxStmt(options *querypb.ExecuteOptions, readOnly bool) (string,
}
func handleConsistentSnapshotCase(ctx context.Context, conn *StatefulConnection) (beginSQL string, sessionStateChanges string, err error) {
- _, err = conn.execWithRetry(ctx, trackGtidQuery, 1, false)
- // We allow this to fail since this is a custom MySQL extension, but we return
- // then if this query was executed or not.
- //
- // Callers also can know because the sessionStateChanges will be empty for a snapshot
- // transaction and get GTID information in another (less efficient) way.
- if err == nil {
- beginSQL = trackGtidQuery + "; "
- }
-
isolationLevel := txIsolations[querypb.ExecuteOptions_CONSISTENT_SNAPSHOT_READ_ONLY]
execSQL, err := setIsolationLevel(ctx, conn, isolationLevel)
diff --git a/go/vt/vttablet/tabletserver/tx_pool_test.go b/go/vt/vttablet/tabletserver/tx_pool_test.go
index c03cac92878..22810d4c422 100644
--- a/go/vt/vttablet/tabletserver/tx_pool_test.go
+++ b/go/vt/vttablet/tabletserver/tx_pool_test.go
@@ -701,11 +701,11 @@ func TestTxPoolBeginStatements(t *testing.T) {
expBeginSQL: "set transaction isolation level serializable; start transaction read only",
}, {
txIsolationLevel: querypb.ExecuteOptions_CONSISTENT_SNAPSHOT_READ_ONLY,
- expBeginSQL: "set session session_track_gtids = START_GTID; set transaction isolation level repeatable read; start transaction with consistent snapshot, read only",
+ expBeginSQL: "set transaction isolation level repeatable read; start transaction with consistent snapshot, read only",
}, {
txIsolationLevel: querypb.ExecuteOptions_CONSISTENT_SNAPSHOT_READ_ONLY,
readOnly: true,
- expBeginSQL: "set session session_track_gtids = START_GTID; set transaction isolation level repeatable read; start transaction with consistent snapshot, read only",
+ expBeginSQL: "set transaction isolation level repeatable read; start transaction with consistent snapshot, read only",
}, {
txIsolationLevel: querypb.ExecuteOptions_AUTOCOMMIT,
expBeginSQL: "",
diff --git a/go/vt/vttablet/tabletserver/vstreamer/planbuilder.go b/go/vt/vttablet/tabletserver/vstreamer/planbuilder.go
index 9bbc98ca2bd..e5115afe6d3 100644
--- a/go/vt/vttablet/tabletserver/vstreamer/planbuilder.go
+++ b/go/vt/vttablet/tabletserver/vstreamer/planbuilder.go
@@ -89,6 +89,8 @@ const (
NotEqual
// IsNotNull is used to filter a column if it is NULL
IsNotNull
+ // In is used to filter a comparable column if it equals any of the values from a specific tuple
+ In
)
// Filter contains opcodes for filtering.
@@ -97,6 +99,9 @@ type Filter struct {
ColNum int
Value sqltypes.Value
+ // Values will be used to store tuple/list values.
+ Values []sqltypes.Value
+
// Parameters for VindexMatch.
// Vindex, VindexColumns and KeyRange, if set, will be used
// to filter the row.
@@ -166,6 +171,8 @@ func getOpcode(comparison *sqlparser.ComparisonExpr) (Opcode, error) {
opcode = GreaterThanEqual
case sqlparser.NotEqualOp:
opcode = NotEqual
+ case sqlparser.InOp:
+ opcode = In
default:
return -1, fmt.Errorf("comparison operator %s not supported", comparison.Operator.ToString())
}
@@ -238,6 +245,24 @@ func (plan *Plan) filter(values, result []sqltypes.Value, charsets []collations.
if values[filter.ColNum].IsNull() {
return false, nil
}
+ case In:
+ if filter.Values == nil {
+ return false, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "unexpected empty filter values when performing IN operator")
+ }
+ found := false
+ for _, filterValue := range filter.Values {
+ match, err := compare(Equal, values[filter.ColNum], filterValue, plan.env.CollationEnv(), charsets[filter.ColNum])
+ if err != nil {
+ return false, err
+ }
+ if match {
+ found = true
+ break
+ }
+ }
+ if !found {
+ return false, nil
+ }
default:
match, err := compare(filter.Opcode, values[filter.ColNum], filter.Value, plan.env.CollationEnv(), charsets[filter.ColNum])
if err != nil {
@@ -514,6 +539,27 @@ func (plan *Plan) getColumnFuncExpr(columnName string) *sqlparser.FuncExpr {
return nil
}
+func (plan *Plan) appendTupleFilter(values sqlparser.ValTuple, opcode Opcode, colnum int) error {
+ pv, err := evalengine.Translate(values, &evalengine.Config{
+ Collation: plan.env.CollationEnv().DefaultConnectionCharset(),
+ Environment: plan.env,
+ })
+ if err != nil {
+ return err
+ }
+ env := evalengine.EmptyExpressionEnv(plan.env)
+ resolved, err := env.Evaluate(pv)
+ if err != nil {
+ return err
+ }
+ plan.Filters = append(plan.Filters, Filter{
+ Opcode: opcode,
+ ColNum: colnum,
+ Values: resolved.TupleValues(),
+ })
+ return nil
+}
+
func (plan *Plan) analyzeWhere(vschema *localVSchema, where *sqlparser.Where) error {
if where == nil {
return nil
@@ -537,6 +583,20 @@ func (plan *Plan) analyzeWhere(vschema *localVSchema, where *sqlparser.Where) er
if err != nil {
return err
}
+ // The Right Expr is typically expected to be a Literal value,
+ // except for the IN operator, where a Tuple value is expected.
+ // Handle the IN operator case first.
+ if opcode == In {
+ values, ok := expr.Right.(sqlparser.ValTuple)
+ if !ok {
+ return fmt.Errorf("unexpected: %v", sqlparser.String(expr))
+ }
+ err := plan.appendTupleFilter(values, opcode, colnum)
+ if err != nil {
+ return err
+ }
+ continue
+ }
val, ok := expr.Right.(*sqlparser.Literal)
if !ok {
return fmt.Errorf("unexpected: %v", sqlparser.String(expr))
diff --git a/go/vt/vttablet/tabletserver/vstreamer/planbuilder_test.go b/go/vt/vttablet/tabletserver/vstreamer/planbuilder_test.go
index ba345b2a00b..aba74368802 100644
--- a/go/vt/vttablet/tabletserver/vstreamer/planbuilder_test.go
+++ b/go/vt/vttablet/tabletserver/vstreamer/planbuilder_test.go
@@ -710,9 +710,15 @@ func TestPlanBuilderFilterComparison(t *testing.T) {
outFilters: []Filter{{Opcode: LessThan, ColNum: 0, Value: sqltypes.NewInt64(2)},
{Opcode: LessThanEqual, ColNum: 1, Value: sqltypes.NewVarChar("xyz")},
},
+ }, {
+ name: "in-operator",
+ inFilter: "select * from t1 where id in (1, 2)",
+ outFilters: []Filter{
+ {Opcode: In, ColNum: 0, Values: []sqltypes.Value{sqltypes.NewInt64(1), sqltypes.NewInt64(2)}},
+ },
}, {
name: "vindex-and-operators",
- inFilter: "select * from t1 where in_keyrange(id, 'hash', '-80') and id = 2 and val <> 'xyz'",
+ inFilter: "select * from t1 where in_keyrange(id, 'hash', '-80') and id = 2 and val <> 'xyz' and id in (100, 30)",
outFilters: []Filter{
{
Opcode: VindexMatch,
@@ -727,6 +733,7 @@ func TestPlanBuilderFilterComparison(t *testing.T) {
},
{Opcode: Equal, ColNum: 0, Value: sqltypes.NewInt64(2)},
{Opcode: NotEqual, ColNum: 1, Value: sqltypes.NewVarChar("xyz")},
+ {Opcode: In, ColNum: 0, Values: []sqltypes.Value{sqltypes.NewInt64(100), sqltypes.NewInt64(30)}},
},
}}
diff --git a/go/vt/vttablet/tabletserver/vstreamer/snapshot_conn.go b/go/vt/vttablet/tabletserver/vstreamer/snapshot_conn.go
index ec326cc4159..ee141ce9859 100644
--- a/go/vt/vttablet/tabletserver/vstreamer/snapshot_conn.go
+++ b/go/vt/vttablet/tabletserver/vstreamer/snapshot_conn.go
@@ -132,29 +132,6 @@ func (conn *snapshotConn) startSnapshot(ctx context.Context, table string) (gtid
return replication.EncodePosition(mpos), nil
}
-// startSnapshotWithConsistentGTID performs the snapshotting without locking tables. This assumes
-// session_track_gtids = START_GTID, which is a contribution to MySQL and is not in vanilla MySQL at the
-// time of this writing.
-func (conn *snapshotConn) startSnapshotWithConsistentGTID(ctx context.Context) (gtid string, err error) {
- if _, err := conn.ExecuteFetch("set transaction isolation level repeatable read", 1, false); err != nil {
- return "", err
- }
- result, err := conn.ExecuteFetch("start transaction with consistent snapshot, read only", 1, false)
- if err != nil {
- return "", err
- }
- // The "session_track_gtids = START_GTID" patch is only applicable to MySQL56 GTID, which is
- // why we hardcode the position as mysql.Mysql56FlavorID
- mpos, err := replication.ParsePosition(replication.Mysql56FlavorID, result.SessionStateChanges)
- if err != nil {
- return "", err
- }
- if _, err := conn.ExecuteFetch("set @@session.time_zone = '+00:00'", 1, false); err != nil {
- return "", err
- }
- return replication.EncodePosition(mpos), nil
-}
-
// Close rolls back any open transactions and closes the connection.
func (conn *snapshotConn) Close() {
_, _ = conn.ExecuteFetch("rollback", 1, false)
diff --git a/go/vt/vttablet/tabletserver/vstreamer/vstreamer.go b/go/vt/vttablet/tabletserver/vstreamer/vstreamer.go
index ea7f75cdc38..59db723ff2b 100644
--- a/go/vt/vttablet/tabletserver/vstreamer/vstreamer.go
+++ b/go/vt/vttablet/tabletserver/vstreamer/vstreamer.go
@@ -375,7 +375,7 @@ func (vs *vstreamer) parseEvents(ctx context.Context, events <-chan mysql.Binlog
}
return fmt.Errorf("unexpected server EOF")
}
- vevents, err := vs.parseEvent(ev)
+ vevents, err := vs.parseEvent(ev, bufferAndTransmit)
if err != nil {
vs.vse.errorCounts.Add("ParseEvent", 1)
return err
@@ -416,7 +416,11 @@ func (vs *vstreamer) parseEvents(ctx context.Context, events <-chan mysql.Binlog
}
// parseEvent parses an event from the binlog and converts it to a list of VEvents.
-func (vs *vstreamer) parseEvent(ev mysql.BinlogEvent) ([]*binlogdatapb.VEvent, error) {
+// The bufferAndTransmit function must be passed if the event is a TransactionPayloadEvent
+// as for larger payloads (> ZstdInMemoryDecompressorMaxSize) the internal events need
+// to be streamed directly here in order to avoid holding the entire payload's contents,
+// which can be 10s or even 100s of GiBs, all in memory.
+func (vs *vstreamer) parseEvent(ev mysql.BinlogEvent, bufferAndTransmit func(vevent *binlogdatapb.VEvent) error) ([]*binlogdatapb.VEvent, error) {
if !ev.IsValid() {
return nil, fmt.Errorf("can't parse binlog event: invalid data: %#v", ev)
}
@@ -672,11 +676,31 @@ func (vs *vstreamer) parseEvent(ev mysql.BinlogEvent) ([]*binlogdatapb.VEvent, e
}
return nil, err
}
- tpvevents, err := vs.parseEvent(tpevent)
+ tpvevents, err := vs.parseEvent(tpevent, nil) // Parse the internal event
if err != nil {
return nil, vterrors.Wrap(err, "failed to parse transaction payload's internal event")
}
- vevents = append(vevents, tpvevents...)
+ if tp.StreamingContents {
+ // Transmit each internal event individually to avoid buffering
+ // the large transaction's entire payload of events in memory, as
+ // the uncompressed size can be 10s or even 100s of GiBs in size.
+ if bufferAndTransmit == nil {
+ return nil, vterrors.New(vtrpcpb.Code_INTERNAL, "[bug] cannot stream compressed transaction payload's internal events as no bufferAndTransmit function was provided")
+ }
+ for _, tpvevent := range tpvevents {
+ tpvevent.Timestamp = int64(ev.Timestamp())
+ tpvevent.CurrentTime = time.Now().UnixNano()
+ if err := bufferAndTransmit(tpvevent); err != nil {
+ if err == io.EOF {
+ return nil, nil
+ }
+ vs.vse.errorCounts.Add("TransactionPayloadBufferAndTransmit", 1)
+ return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "error sending compressed transaction payload's internal event: %v", err)
+ }
+ }
+ } else { // Process the payload's internal events all at once
+ vevents = append(vevents, tpvevents...)
+ }
}
vs.vse.vstreamerCompressedTransactionsDecoded.Add(1)
}
diff --git a/go/vt/vttablet/tabletserver/vstreamer/vstreamer_test.go b/go/vt/vttablet/tabletserver/vstreamer/vstreamer_test.go
index 846d62202e7..5282b5f372d 100644
--- a/go/vt/vttablet/tabletserver/vstreamer/vstreamer_test.go
+++ b/go/vt/vttablet/tabletserver/vstreamer/vstreamer_test.go
@@ -1966,7 +1966,7 @@ func TestFilteredMultipleWhere(t *testing.T) {
filter: &binlogdatapb.Filter{
Rules: []*binlogdatapb.Rule{{
Match: "t1",
- Filter: "select id1, val from t1 where in_keyrange('-80') and id2 = 200 and id3 = 1000 and val = 'newton'",
+ Filter: "select id1, val from t1 where in_keyrange('-80') and id2 = 200 and id3 = 1000 and val = 'newton' and id1 in (1, 2, 129)",
}},
},
customFieldEvents: true,
@@ -1988,9 +1988,7 @@ func TestFilteredMultipleWhere(t *testing.T) {
{spec: &TestRowEventSpec{table: "t1", changes: []TestRowChange{{after: []string{"2", "newton"}}}}},
}},
{"insert into t1 values (3, 100, 2000, 'kepler')", noEvents},
- {"insert into t1 values (128, 200, 1000, 'newton')", []TestRowEvent{
- {spec: &TestRowEventSpec{table: "t1", changes: []TestRowChange{{after: []string{"128", "newton"}}}}},
- }},
+ {"insert into t1 values (128, 200, 1000, 'newton')", noEvents},
{"insert into t1 values (5, 200, 2000, 'kepler')", noEvents},
{"insert into t1 values (129, 200, 1000, 'kepler')", noEvents},
{"commit", nil},
@@ -2080,3 +2078,33 @@ func TestGeneratedInvisiblePrimaryKey(t *testing.T) {
}}
ts.Run()
}
+
+func TestFilteredInOperator(t *testing.T) {
+ ts := &TestSpec{
+ t: t,
+ ddls: []string{
+ "create table t1(id1 int, id2 int, val varbinary(128), primary key(id1))",
+ },
+ options: &TestSpecOptions{
+ filter: &binlogdatapb.Filter{
+ Rules: []*binlogdatapb.Rule{{
+ Match: "t1",
+ Filter: "select id1, val from t1 where val in ('eee', 'bbb', 'ddd') and id1 in (4, 5)",
+ }},
+ },
+ },
+ }
+ defer ts.Close()
+ ts.Init()
+ ts.fieldEvents["t1"].cols[1].skip = true
+ ts.tests = [][]*TestQuery{{
+ {"begin", nil},
+ {"insert into t1 values (1, 100, 'aaa')", noEvents},
+ {"insert into t1 values (2, 200, 'bbb')", noEvents},
+ {"insert into t1 values (3, 100, 'ccc')", noEvents},
+ {"insert into t1 values (4, 200, 'ddd')", nil},
+ {"insert into t1 values (5, 200, 'eee')", nil},
+ {"commit", nil},
+ }}
+ ts.Run()
+}
diff --git a/go/vt/vttablet/tmrpctest/test_tm_rpc.go b/go/vt/vttablet/tmrpctest/test_tm_rpc.go
index 88ba3506ba5..a106b43bf2c 100644
--- a/go/vt/vttablet/tmrpctest/test_tm_rpc.go
+++ b/go/vt/vttablet/tmrpctest/test_tm_rpc.go
@@ -1428,6 +1428,10 @@ func (fra *fakeRPCTM) Backup(ctx context.Context, logger logutil.Logger, request
return nil
}
+func (fra *fakeRPCTM) IsBackupRunning() bool {
+ return false
+}
+
func tmRPCTestBackup(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) {
req := &tabletmanagerdatapb.BackupRequest{Concurrency: testBackupConcurrency, AllowPrimary: testBackupAllowPrimary}
stream, err := client.Backup(ctx, tablet, req)
diff --git a/go/vt/vttest/local_cluster.go b/go/vt/vttest/local_cluster.go
index 576a78bb761..fa7e40feb12 100644
--- a/go/vt/vttest/local_cluster.go
+++ b/go/vt/vttest/local_cluster.go
@@ -660,6 +660,7 @@ func (db *LocalCluster) JSONConfig() any {
config := map[string]any{
"bind_address": db.vt.BindAddress,
"port": db.vt.Port,
+ "grpc_bind_address": db.vt.BindAddressGprc,
"socket": db.mysql.UnixSocket(),
"vtcombo_mysql_port": db.Env.PortForProtocol("vtcombo_mysql_port", ""),
"mysql": db.Env.PortForProtocol("mysql", ""),
diff --git a/go/vt/vttest/vtprocess.go b/go/vt/vttest/vtprocess.go
index 3f34994bb75..6371811a60e 100644
--- a/go/vt/vttest/vtprocess.go
+++ b/go/vt/vttest/vtprocess.go
@@ -44,16 +44,17 @@ type HealthChecker func(addr string) bool
// It can be spawned manually or through one of the available
// helper methods.
type VtProcess struct {
- Name string
- Directory string
- LogDirectory string
- Binary string
- ExtraArgs []string
- Env []string
- BindAddress string
- Port int
- PortGrpc int
- HealthCheck HealthChecker
+ Name string
+ Directory string
+ LogDirectory string
+ Binary string
+ ExtraArgs []string
+ Env []string
+ BindAddress string
+ BindAddressGprc string
+ Port int
+ PortGrpc int
+ HealthCheck HealthChecker
proc *exec.Cmd
exit chan error
@@ -139,6 +140,11 @@ func (vtp *VtProcess) WaitStart() (err error) {
vtp.proc.Args = append(vtp.proc.Args, fmt.Sprintf("%d", vtp.PortGrpc))
}
+ if vtp.BindAddressGprc != "" {
+ vtp.proc.Args = append(vtp.proc.Args, "--grpc_bind_address")
+ vtp.proc.Args = append(vtp.proc.Args, vtp.BindAddressGprc)
+ }
+
vtp.proc.Args = append(vtp.proc.Args, vtp.ExtraArgs...)
vtp.proc.Env = append(vtp.proc.Env, os.Environ()...)
vtp.proc.Env = append(vtp.proc.Env, vtp.Env...)
@@ -199,16 +205,22 @@ func VtcomboProcess(environment Environment, args *Config, mysql MySQLManager) (
if args.VtComboBindAddress != "" {
vtcomboBindAddress = args.VtComboBindAddress
}
+ grpcBindAddress := "127.0.0.1"
+ if servenv.GRPCBindAddress() != "" {
+ grpcBindAddress = servenv.GRPCBindAddress()
+ }
+
vt := &VtProcess{
- Name: "vtcombo",
- Directory: environment.Directory(),
- LogDirectory: environment.LogDirectory(),
- Binary: environment.BinaryPath("vtcombo"),
- BindAddress: vtcomboBindAddress,
- Port: environment.PortForProtocol("vtcombo", ""),
- PortGrpc: environment.PortForProtocol("vtcombo", "grpc"),
- HealthCheck: environment.ProcessHealthCheck("vtcombo"),
- Env: environment.EnvVars(),
+ Name: "vtcombo",
+ Directory: environment.Directory(),
+ LogDirectory: environment.LogDirectory(),
+ Binary: environment.BinaryPath("vtcombo"),
+ BindAddress: vtcomboBindAddress,
+ BindAddressGprc: grpcBindAddress,
+ Port: environment.PortForProtocol("vtcombo", ""),
+ PortGrpc: environment.PortForProtocol("vtcombo", "grpc"),
+ HealthCheck: environment.ProcessHealthCheck("vtcombo"),
+ Env: environment.EnvVars(),
}
user, pass := mysql.Auth()
diff --git a/go/vt/wrangler/fake_dbclient_test.go b/go/vt/wrangler/fake_dbclient_test.go
index 14ef0913383..02ee79210d7 100644
--- a/go/vt/wrangler/fake_dbclient_test.go
+++ b/go/vt/wrangler/fake_dbclient_test.go
@@ -153,6 +153,10 @@ func (dc *fakeDBClient) Rollback() error {
func (dc *fakeDBClient) Close() {
}
+func (dc *fakeDBClient) IsClosed() bool {
+ return false
+}
+
// ExecuteFetch is part of the DBClient interface
func (dc *fakeDBClient) ExecuteFetch(query string, maxrows int) (*sqltypes.Result, error) {
dc.mu.Lock()
diff --git a/go/vt/wrangler/materializer.go b/go/vt/wrangler/materializer.go
index 7e24945cde7..bd7ae553130 100644
--- a/go/vt/wrangler/materializer.go
+++ b/go/vt/wrangler/materializer.go
@@ -551,11 +551,8 @@ func (wr *Wrangler) prepareCreateLookup(ctx context.Context, keyspace string, sp
if len(vindexFromCols) != 1 {
return nil, nil, nil, fmt.Errorf("unique vindex 'from' should have only one column: %v", vindex)
}
- } else {
- if len(vindexFromCols) < 2 {
- return nil, nil, nil, fmt.Errorf("non-unique vindex 'from' should have more than one column: %v", vindex)
- }
}
+
vindexToCol = vindex.Params["to"]
// Make the vindex write_only. If one exists already in the vschema,
// it will need to match this vindex exactly, including the write_only setting.
diff --git a/go/vt/wrangler/materializer_test.go b/go/vt/wrangler/materializer_test.go
index 1728ba6efc2..1871d778c6b 100644
--- a/go/vt/wrangler/materializer_test.go
+++ b/go/vt/wrangler/materializer_test.go
@@ -1599,7 +1599,7 @@ func TestCreateLookupVindexFailures(t *testing.T) {
},
err: "unique vindex 'from' should have only one column",
}, {
- description: "non-unique lookup should have more than one column",
+ description: "non-unique lookup can have only one column",
input: &vschemapb.Keyspace{
Vindexes: map[string]*vschemapb.Vindex{
"v": {
@@ -1612,7 +1612,7 @@ func TestCreateLookupVindexFailures(t *testing.T) {
},
},
},
- err: "non-unique vindex 'from' should have more than one column",
+ err: "",
}, {
description: "vindex not found",
input: &vschemapb.Keyspace{
diff --git a/go/vt/wrangler/testlib/backup_test.go b/go/vt/wrangler/testlib/backup_test.go
index cb61c4bab99..b540fc9f8f0 100644
--- a/go/vt/wrangler/testlib/backup_test.go
+++ b/go/vt/wrangler/testlib/backup_test.go
@@ -28,7 +28,6 @@ import (
"github.com/stretchr/testify/require"
"vitess.io/vitess/go/mysql"
- "vitess.io/vitess/go/mysql/capabilities"
"vitess.io/vitess/go/mysql/fakesqldb"
"vitess.io/vitess/go/mysql/replication"
"vitess.io/vitess/go/sqltypes"
@@ -36,6 +35,7 @@ import (
"vitess.io/vitess/go/vt/logutil"
"vitess.io/vitess/go/vt/mysqlctl"
"vitess.io/vitess/go/vt/mysqlctl/backupstorage"
+ "vitess.io/vitess/go/vt/mysqlctl/blackbox"
"vitess.io/vitess/go/vt/mysqlctl/filebackupstorage"
"vitess.io/vitess/go/vt/topo/memorytopo"
"vitess.io/vitess/go/vt/topo/topoproto"
@@ -132,7 +132,7 @@ func testBackupRestore(t *testing.T, cDetails *compressionDetails) error {
require.NoError(t, os.MkdirAll(s, os.ModePerm))
}
- needIt, err := needInnoDBRedoLogSubdir()
+ needIt, err := blackbox.NeedInnoDBRedoLogSubdir()
require.NoError(t, err)
if needIt {
newPath := path.Join(sourceInnodbLogDir, mysql.DynamicRedoLogSubdir)
@@ -371,7 +371,7 @@ func TestBackupRestoreLagged(t *testing.T) {
}
require.NoError(t, os.WriteFile(path.Join(sourceInnodbDataDir, "innodb_data_1"), []byte("innodb data 1 contents"), os.ModePerm))
- needIt, err := needInnoDBRedoLogSubdir()
+ needIt, err := blackbox.NeedInnoDBRedoLogSubdir()
require.NoError(t, err)
if needIt {
newPath := path.Join(sourceInnodbLogDir, mysql.DynamicRedoLogSubdir)
@@ -591,7 +591,7 @@ func TestRestoreUnreachablePrimary(t *testing.T) {
}
require.NoError(t, os.WriteFile(path.Join(sourceInnodbDataDir, "innodb_data_1"), []byte("innodb data 1 contents"), os.ModePerm))
- needIt, err := needInnoDBRedoLogSubdir()
+ needIt, err := blackbox.NeedInnoDBRedoLogSubdir()
require.NoError(t, err)
if needIt {
newPath := path.Join(sourceInnodbLogDir, mysql.DynamicRedoLogSubdir)
@@ -767,7 +767,7 @@ func TestDisableActiveReparents(t *testing.T) {
}
require.NoError(t, os.WriteFile(path.Join(sourceInnodbDataDir, "innodb_data_1"), []byte("innodb data 1 contents"), os.ModePerm))
- needIt, err := needInnoDBRedoLogSubdir()
+ needIt, err := blackbox.NeedInnoDBRedoLogSubdir()
require.NoError(t, err)
if needIt {
newPath := path.Join(sourceInnodbLogDir, mysql.DynamicRedoLogSubdir)
@@ -877,25 +877,3 @@ func TestDisableActiveReparents(t *testing.T) {
assert.False(t, destTablet.FakeMysqlDaemon.Replicating)
assert.True(t, destTablet.FakeMysqlDaemon.Running)
}
-
-// needInnoDBRedoLogSubdir indicates whether we need to create a redo log subdirectory.
-// Starting with MySQL 8.0.30, the InnoDB redo logs are stored in a subdirectory of the
-// (/. by default) called "#innodb_redo". See:
-//
-// https://dev.mysql.com/doc/refman/8.0/en/innodb-redo-log.html#innodb-modifying-redo-log-capacity
-func needInnoDBRedoLogSubdir() (needIt bool, err error) {
- mysqldVersionStr, err := mysqlctl.GetVersionString()
- if err != nil {
- return needIt, err
- }
- _, sv, err := mysqlctl.ParseVersionString(mysqldVersionStr)
- if err != nil {
- return needIt, err
- }
- versionStr := fmt.Sprintf("%d.%d.%d", sv.Major, sv.Minor, sv.Patch)
- capableOf := mysql.ServerVersionCapableOf(versionStr)
- if capableOf == nil {
- return needIt, fmt.Errorf("cannot determine database flavor details for version %s", versionStr)
- }
- return capableOf(capabilities.DynamicRedoLogCapacityFlavorCapability)
-}
diff --git a/proto/binlogdata.proto b/proto/binlogdata.proto
index 595760dcd52..e1df792776b 100644
--- a/proto/binlogdata.proto
+++ b/proto/binlogdata.proto
@@ -353,6 +353,10 @@ message FieldEvent {
repeated query.Field fields = 2;
string keyspace = 3;
string shard = 4;
+
+ // Field numbers in the gap between shard (4) and enum_set_string_values
+ // (25) are NOT reserved and can be used.
+
// Are ENUM and SET field values already mapped to strings in the ROW
// events? This allows us to transition VTGate VStream consumers from
// the pre v20 behavior of having to do this mapping themselves to the
@@ -362,6 +366,9 @@ message FieldEvent {
// vstreams managed by the vstreamManager.
bool enum_set_string_values = 25;
bool is_internal_table = 26; // set for sidecardb tables
+
+ // Add new members in the field number gap between shard (4) and
+ // enum_set_string_values (25).
}
// ShardGtid contains the GTID position for one shard.
diff --git a/proto/replicationdata.proto b/proto/replicationdata.proto
index 7107332233b..e788fc64bc3 100644
--- a/proto/replicationdata.proto
+++ b/proto/replicationdata.proto
@@ -50,6 +50,7 @@ message Status {
bool has_replication_filters = 22;
bool ssl_allowed = 23;
bool replication_lag_unknown = 24;
+ bool backup_running = 25;
}
// Configuration holds replication configuration information gathered from performance_schema and global variables.
diff --git a/proto/vtadmin.proto b/proto/vtadmin.proto
index 78f086ec345..963d1fa5779 100644
--- a/proto/vtadmin.proto
+++ b/proto/vtadmin.proto
@@ -388,7 +388,11 @@ message WorkflowSwitchTrafficRequest {
message ApplySchemaRequest {
string cluster_id = 1;
- vtctldata.ApplySchemaRequest request = 2;
+ // Request.Sql will be overridden by this Sql field.
+ string sql = 2;
+ // Request.CallerId will be overridden by this CallerId field.
+ string caller_id = 3;
+ vtctldata.ApplySchemaRequest request = 4;
}
message CancelSchemaMigrationRequest {
diff --git a/proto/vtgate.proto b/proto/vtgate.proto
index 5b080178218..aadf211f0a2 100644
--- a/proto/vtgate.proto
+++ b/proto/vtgate.proto
@@ -76,6 +76,8 @@ message Session {
// reserved connection if a dedicated connection is needed
int64 reserved_id = 4;
bool vindex_only = 5;
+ // rows_affected tracks if any query has modified the rows.
+ bool rows_affected = 6;
}
// shard_sessions keep track of per-shard transaction info.
repeated ShardSession shard_sessions = 2;
diff --git a/test.go b/test.go
index 14f51a06e9d..95d62af892f 100755
--- a/test.go
+++ b/test.go
@@ -77,7 +77,7 @@ For example:
// Flags
var (
flavor = flag.String("flavor", "mysql80", "comma-separated bootstrap flavor(s) to run against (when using Docker mode). Available flavors: all,"+flavors)
- bootstrapVersion = flag.String("bootstrap-version", "38", "the version identifier to use for the docker images")
+ bootstrapVersion = flag.String("bootstrap-version", "39", "the version identifier to use for the docker images")
runCount = flag.Int("runs", 1, "run each test this many times")
retryMax = flag.Int("retry", 3, "max number of retries, to detect flaky tests")
logPass = flag.Bool("log-pass", false, "log test output even if it passes")
diff --git a/test/ci_workflow_gen.go b/test/ci_workflow_gen.go
index 7956c491408..b24db1154fb 100644
--- a/test/ci_workflow_gen.go
+++ b/test/ci_workflow_gen.go
@@ -103,6 +103,7 @@ var (
"vtgate_vindex_heavy",
"vtgate_vschema",
"vtgate_queries",
+ "vtgate_plantests",
"vtgate_schema_tracker",
"vtgate_foreignkey_stress",
"vtorc",
@@ -157,6 +158,9 @@ var (
"vreplication_migrate",
"vreplication_vtctldclient_vdiff2_movetables_tz",
}
+ clusterRequiringMinio = []string{
+ "21",
+ }
)
type unitTest struct {
@@ -174,6 +178,7 @@ type clusterTest struct {
EnableBinlogTransactionCompression bool
PartialKeyspace bool
Cores16 bool
+ NeedsMinio bool
}
type vitessTesterTest struct {
@@ -286,6 +291,13 @@ func generateClusterWorkflows(list []string, tpl string) {
break
}
}
+ minioClusters := canonnizeList(clusterRequiringMinio)
+ for _, minioCluster := range minioClusters {
+ if minioCluster == cluster {
+ test.NeedsMinio = true
+ break
+ }
+ }
if mysqlVersion == mysql57 {
test.Platform = string(mysql57)
}
diff --git a/test/config.json b/test/config.json
index c911232ce74..da0026f0125 100644
--- a/test/config.json
+++ b/test/config.json
@@ -136,6 +136,15 @@
"RetryMax": 1,
"Tags": []
},
+ "backup_s3": {
+ "File": "unused.go",
+ "Args": ["vitess.io/vitess/go/test/endtoend/backup/s3", "-timeout", "30m"],
+ "Command": [],
+ "Manual": false,
+ "Shard": "21",
+ "RetryMax": 1,
+ "Tags": []
+ },
"backup_only": {
"File": "unused.go",
"Args": ["vitess.io/vitess/go/test/endtoend/backup/vtbackup", "-timeout", "20m"],
@@ -340,17 +349,6 @@
"RetryMax": 1,
"Tags": []
},
- "pitr": {
- "File": "unused.go",
- "Args": ["vitess.io/vitess/go/test/endtoend/recovery/pitr"],
- "Command": [],
- "Manual": false,
- "Shard": "10",
- "RetryMax": 1,
- "Tags": [
- "site_test"
- ]
- },
"recovery": {
"File": "unused.go",
"Args": ["vitess.io/vitess/go/test/endtoend/recovery/unshardedrecovery"],
@@ -887,6 +885,15 @@
"RetryMax": 1,
"Tags": []
},
+ "vtgate_plantests": {
+ "File": "unused.go",
+ "Args": ["vitess.io/vitess/go/test/endtoend/vtgate/plan_tests"],
+ "Command": [],
+ "Manual": false,
+ "Shard": "vtgate_plantests",
+ "RetryMax": 1,
+ "Tags": []
+ },
"vtgate_unsharded": {
"File": "unused.go",
"Args": ["vitess.io/vitess/go/test/endtoend/vtgate/unsharded"],
@@ -1238,6 +1245,17 @@
"RetryMax": 1,
"Tags": []
},
+ "vtop_example": {
+ "File": "",
+ "Args": [],
+ "Command": [
+ "test/vtop_example.sh"
+ ],
+ "Manual": false,
+ "Shard": "",
+ "RetryMax": 1,
+ "Tags": []
+ },
"vtorc_primary_failure": {
"File": "unused.go",
"Args": ["vitess.io/vitess/go/test/endtoend/vtorc/primaryfailure"],
@@ -1373,6 +1391,15 @@
"RetryMax": 1,
"Tags": []
},
+ "lookup_index": {
+ "File": "unused.go",
+ "Args": ["vitess.io/vitess/go/test/endtoend/vreplication", "-run", "TestLookupIndex"],
+ "Command": [],
+ "Manual": false,
+ "Shard": "vreplication_vtctldclient_vdiff2_movetables_tz",
+ "RetryMax": 1,
+ "Tags": []
+ },
"vtadmin": {
"File": "unused.go",
"Args": ["vitess.io/vitess/go/test/endtoend/vtadmin"],
diff --git a/test/templates/cluster_endtoend_test.tpl b/test/templates/cluster_endtoend_test.tpl
index 01f4555e303..8d0a2f650b5 100644
--- a/test/templates/cluster_endtoend_test.tpl
+++ b/test/templates/cluster_endtoend_test.tpl
@@ -14,7 +14,7 @@ env:
jobs:
build:
name: Run endtoend tests on {{.Name}}
- runs-on: {{if .Cores16}}gh-hosted-runners-16cores-1{{else}}ubuntu-latest{{end}}
+ runs-on: {{if .Cores16}}gh-hosted-runners-16cores-1-24.04{{else}}ubuntu-24.04{{end}}
steps:
- name: Skip CI
@@ -124,10 +124,19 @@ jobs:
# Get key to latest MySQL repo
sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C
# Setup MySQL 8.0
- wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.32-1_all.deb
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.33-1_all.deb
echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get -qq update
+
+ # We have to install this old version of libaio1 in case we end up testing with MySQL 5.7. See also:
+ # https://bugs.launchpad.net/ubuntu/+source/libaio/+bug/2067501
+ curl -L -O http://mirrors.kernel.org/ubuntu/pool/main/liba/libaio/libaio1_0.3.112-13build1_amd64.deb
+ sudo dpkg -i libaio1_0.3.112-13build1_amd64.deb
+ # libtinfo5 is also needed for older MySQL 5.7 builds.
+ curl -L -O http://mirrors.kernel.org/ubuntu/pool/universe/n/ncurses/libtinfo5_6.3-2ubuntu0.1_amd64.deb
+ sudo dpkg -i libtinfo5_6.3-2ubuntu0.1_amd64.deb
+
# Install everything else we need, and configure
sudo apt-get -qq install -y mysql-server mysql-shell mysql-client make unzip g++ etcd-client etcd-server curl git wget eatmydata xz-utils libncurses6
@@ -148,6 +157,15 @@ jobs:
{{end}}
+ {{if .NeedsMinio }}
+ - name: Install Minio
+ if: steps.skip-workflow.outputs.skip-workflow == 'false'
+ run: |
+ wget https://dl.min.io/server/minio/release/linux-amd64/minio
+ chmod +x minio
+ mv minio /usr/local/bin
+ {{end}}
+
{{if .MakeTools}}
- name: Installing zookeeper and consul
diff --git a/test/templates/cluster_endtoend_test_docker.tpl b/test/templates/cluster_endtoend_test_docker.tpl
index f68e4223c75..f7e8aa2c1d8 100644
--- a/test/templates/cluster_endtoend_test_docker.tpl
+++ b/test/templates/cluster_endtoend_test_docker.tpl
@@ -6,7 +6,7 @@ permissions: read-all
jobs:
build:
name: Run endtoend tests on {{.Name}}
- runs-on: {{if .Cores16}}gh-hosted-runners-16cores-1{{else}}ubuntu-latest{{end}}
+ runs-on: {{if .Cores16}}gh-hosted-runners-16cores-1-24.04{{else}}ubuntu-24.04{{end}}
steps:
- name: Skip CI
diff --git a/test/templates/cluster_endtoend_test_mysql57.tpl b/test/templates/cluster_endtoend_test_mysql57.tpl
index b51ffc9110e..f4152c939b0 100644
--- a/test/templates/cluster_endtoend_test_mysql57.tpl
+++ b/test/templates/cluster_endtoend_test_mysql57.tpl
@@ -19,7 +19,7 @@ env:
jobs:
build:
name: Run endtoend tests on {{.Name}}
- runs-on: {{if .Cores16}}gh-hosted-runners-16cores-1{{else}}ubuntu-latest{{end}}
+ runs-on: {{if .Cores16}}gh-hosted-runners-16cores-1-24.04{{else}}ubuntu-24.04{{end}}
steps:
- name: Skip CI
@@ -126,13 +126,17 @@ jobs:
# Get key to latest MySQL repo
sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C
- wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.32-1_all.deb
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.33-1_all.deb
# Bionic packages are still compatible for Jammy since there's no MySQL 5.7
# packages for Jammy.
echo mysql-apt-config mysql-apt-config/repo-codename select bionic | sudo debconf-set-selections
echo mysql-apt-config mysql-apt-config/select-server select mysql-5.7 | sudo debconf-set-selections
sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
+ # We have to install this old version of libaio1. See also:
+ # https://bugs.launchpad.net/ubuntu/+source/libaio/+bug/2067501
+ curl -L -O http://mirrors.kernel.org/ubuntu/pool/main/liba/libaio/libaio1_0.3.112-13build1_amd64.deb
+ sudo dpkg -i libaio1_0.3.112-13build1_amd64.deb
sudo DEBIAN_FRONTEND="noninteractive" apt-get install -y mysql-client=5.7* mysql-community-server=5.7* mysql-server=5.7* libncurses6
sudo apt-get install -y make unzip g++ etcd-client etcd-server curl git wget eatmydata
diff --git a/test/templates/cluster_vitess_tester.tpl b/test/templates/cluster_vitess_tester.tpl
index f0b5838d8e8..b8d77754ba6 100644
--- a/test/templates/cluster_vitess_tester.tpl
+++ b/test/templates/cluster_vitess_tester.tpl
@@ -14,7 +14,7 @@ env:
jobs:
build:
name: Run endtoend tests on {{.Name}}
- runs-on: ubuntu-latest
+ runs-on: ubuntu-24.04
steps:
- name: Skip CI
@@ -93,7 +93,7 @@ jobs:
# Get key to latest MySQL repo
sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C
# Setup MySQL 8.0
- wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.32-1_all.deb
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.33-1_all.deb
echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get -qq update
diff --git a/test/templates/dockerfile.tpl b/test/templates/dockerfile.tpl
index 82388850947..af4376d3ca9 100644
--- a/test/templates/dockerfile.tpl
+++ b/test/templates/dockerfile.tpl
@@ -1,4 +1,4 @@
-ARG bootstrap_version=38
+ARG bootstrap_version=39
ARG image="vitess/bootstrap:${bootstrap_version}-{{.Platform}}"
FROM "${image}"
diff --git a/test/templates/unit_test.tpl b/test/templates/unit_test.tpl
index c47b7a1d796..3704aebac4e 100644
--- a/test/templates/unit_test.tpl
+++ b/test/templates/unit_test.tpl
@@ -14,7 +14,7 @@ env:
jobs:
test:
name: {{.Name}}
- runs-on: ubuntu-latest
+ runs-on: ubuntu-24.04
steps:
- name: Skip CI
@@ -87,20 +87,20 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true'
run: |
export DEBIAN_FRONTEND="noninteractive"
- sudo apt-get -qq update
+ sudo apt-get update
# Uninstall any previously installed MySQL first
sudo systemctl stop apparmor
- sudo DEBIAN_FRONTEND="noninteractive" apt-get -qq remove -y --purge mysql-server mysql-client mysql-common
- sudo apt-get -qq -y autoremove
- sudo apt-get -qq -y autoclean
+ sudo DEBIAN_FRONTEND="noninteractive" apt-get remove -y --purge mysql-server mysql-client mysql-common
+ sudo apt-get -y autoremove
+ sudo apt-get -y autoclean
sudo deluser mysql
sudo rm -rf /var/lib/mysql
sudo rm -rf /etc/mysql
# Get key to latest MySQL repo
sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C
- wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.32-1_all.deb
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.33-1_all.deb
{{if (eq .Platform "mysql57")}}
# Bionic packages are still compatible for Jammy since there's no MySQL 5.7
@@ -108,25 +108,32 @@ jobs:
echo mysql-apt-config mysql-apt-config/repo-codename select bionic | sudo debconf-set-selections
echo mysql-apt-config mysql-apt-config/select-server select mysql-5.7 | sudo debconf-set-selections
sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
- sudo apt-get -qq update
- sudo DEBIAN_FRONTEND="noninteractive" apt-get -qq install -y mysql-client=5.7* mysql-community-server=5.7* mysql-server=5.7* libncurses6
+ sudo apt-get update
+ # We have to install this old version of libaio1. See also:
+ # https://bugs.launchpad.net/ubuntu/+source/libaio/+bug/2067501
+ curl -L -O http://mirrors.kernel.org/ubuntu/pool/main/liba/libaio/libaio1_0.3.112-13build1_amd64.deb
+ sudo dpkg -i libaio1_0.3.112-13build1_amd64.deb
+ # libtinfo5 is also needed for older MySQL 5.7 builds.
+ curl -L -O http://mirrors.kernel.org/ubuntu/pool/universe/n/ncurses/libtinfo5_6.3-2ubuntu0.1_amd64.deb
+ sudo dpkg -i libtinfo5_6.3-2ubuntu0.1_amd64.deb
+ sudo DEBIAN_FRONTEND="noninteractive" apt-get install -y mysql-client=5.7* mysql-community-server=5.7* mysql-server=5.7* libncurses6
{{end}}
{{if (eq .Platform "mysql80")}}
echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
- sudo apt-get -qq update
- sudo DEBIAN_FRONTEND="noninteractive" apt-get -qq install -y mysql-server mysql-client
+ sudo apt-get update
+ sudo DEBIAN_FRONTEND="noninteractive" apt-get install -y mysql-server mysql-client
{{end}}
{{if (eq .Platform "mysql84")}}
echo mysql-apt-config mysql-apt-config/select-server select mysql-8.4-lts | sudo debconf-set-selections
sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
- sudo apt-get -qq update
- sudo DEBIAN_FRONTEND="noninteractive" apt-get -qq install -y mysql-server mysql-client
+ sudo apt-get update
+ sudo DEBIAN_FRONTEND="noninteractive" apt-get install -y mysql-server mysql-client
{{end}}
- sudo apt-get -qq install -y make unzip g++ curl git wget ant openjdk-11-jdk eatmydata
+ sudo apt-get install -y make unzip g++ curl git wget ant openjdk-11-jdk eatmydata
sudo service mysql stop
sudo bash -c "echo '/usr/sbin/mysqld { }' > /etc/apparmor.d/usr.sbin.mysqld" # https://bugs.launchpad.net/ubuntu/+source/mariadb-10.1/+bug/1806263
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
diff --git a/test/vtop_example.sh b/test/vtop_example.sh
index 5ff90a2be7e..c537c0f844c 100755
--- a/test/vtop_example.sh
+++ b/test/vtop_example.sh
@@ -482,11 +482,12 @@ EOF
waitForKeyspaceToBeServing customer 80- 1
}
+kind delete cluster --name kind || true
# Build the docker image for vitess/lite using the local code
docker build -f docker/lite/Dockerfile -t vitess/lite:pr .
# Build the docker image for vitess/vtadmin using the local code
-docker build -f docker/binaries/vtadmin/Dockerfile --build-arg VT_BASE_VER=pr -t vitess/vtadmin:pr .
+docker build -f docker/binaries/vtadmin/Dockerfile --build-arg VT_BASE_VER=pr -t vitess/vtadmin:pr ./docker/binaries/vtadmin
# Print the docker images available
docker image ls
diff --git a/tools/get_kubectl_kind.sh b/tools/get_kubectl_kind.sh
index 57df414fdd8..169b120aaa0 100755
--- a/tools/get_kubectl_kind.sh
+++ b/tools/get_kubectl_kind.sh
@@ -12,7 +12,7 @@ source build.env
mkdir -p "$VTROOT/bin"
cd "$VTROOT/bin"
-KUBE_VERSION="${KUBE_VERSION:-v1.21.1}"
+KUBE_VERSION="${KUBE_VERSION:-v1.31.0}"
KUBERNETES_RELEASE_URL="${KUBERNETES_RELEASE_URL:-https://dl.k8s.io}"
# Download kubectl if needed.
@@ -28,7 +28,7 @@ ln -sf "kubectl-${KUBE_VERSION}" kubectl
if ! command -v kind &> /dev/null
then
echo "Downloading kind..."
- curl -L https://kind.sigs.k8s.io/dl/v0.12.0/kind-linux-amd64 > "kind"
+ curl -L https://kind.sigs.k8s.io/dl/v0.22.0/kind-linux-amd64 > "kind"
chmod +x "kind"
echo "Installed kind"
else
diff --git a/tools/map-shard-for-value/Makefile b/tools/map-shard-for-value/Makefile
new file mode 100644
index 00000000000..61bc88ac0ed
--- /dev/null
+++ b/tools/map-shard-for-value/Makefile
@@ -0,0 +1,22 @@
+# Copyright 2024 The Vitess Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+build:
+	go build map-shard-for-value.go
+
+test:
+	printf '1\n-1\n99\n' | go run map-shard-for-value.go --total_shards=4 --vindex=xxhash
+
+clean:
+	rm -f map-shard-for-value
diff --git a/tools/map-shard-for-value/map-shard-for-value.go b/tools/map-shard-for-value/map-shard-for-value.go
new file mode 100755
index 00000000000..18a092d1371
--- /dev/null
+++ b/tools/map-shard-for-value/map-shard-for-value.go
@@ -0,0 +1,207 @@
+/*
+Copyright 2024 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package main
+
+import (
+ "bufio"
+ "context"
+ "encoding/hex"
+ "fmt"
+ "log"
+ "os"
+ "strconv"
+ "strings"
+
+ flag "github.com/spf13/pflag"
+
+ "vitess.io/vitess/go/vt/topo"
+
+ "vitess.io/vitess/go/sqltypes"
+ "vitess.io/vitess/go/vt/key"
+ "vitess.io/vitess/go/vt/proto/topodata"
+ "vitess.io/vitess/go/vt/vtgate/vindexes"
+)
+
+/*
+ * This tool reads a list of values from stdin and prints the
+ * corresponding keyspace ID and shard for each value. It uses the given vindex
+ * and shard ranges to determine the shard. The vindex is expected to be a
+ * single-column vindex. The shard ranges are specified as a comma-separated
+ * list of key ranges, example "-80,80-".
+ * If you have uniformly distributed shards, you can specify the total number
+ * of shards using the -total_shards flag, and the tool will generate the shard ranges
+ * using the same logic as the Vitess operator does (using the key.GenerateShardRanges() function).
+ *
+ * Example usage:
+ * printf '1\n2\n3\n' | go run map-shard-for-value.go --vindex=hash --shards="-80,80-"
+ *
+ * Currently tested only for integer values and hash/xxhash vindexes.
+ */
+
+func mapShard(allShards []*topodata.ShardReference, ksid key.DestinationKeyspaceID) (string, error) { // resolves ksid to the name of the shard whose key range contains it
+	foundShard := ""
+	addShard := func(shard string) error { // callback handed to Resolve; records the matching shard name
+		foundShard = shard
+		return nil
+	}
+	if err := ksid.Resolve(allShards, addShard); err != nil {
+		return "", fmt.Errorf("failed to resolve keyspace ID: %v:: %s", ksid.String(), err)
+	}
+
+	if foundShard == "" { // Resolve returned no error but the callback was never invoked
+		return "", fmt.Errorf("no shard found for keyspace ID: %v", ksid)
+	}
+	return foundShard, nil
+}
+
+func selectShard(vindex vindexes.Vindex, value sqltypes.Value, allShards []*topodata.ShardReference) (string, key.DestinationKeyspaceID, error) { // maps value -> keyspace ID -> shard name
+	ctx := context.Background()
+
+	destinations, err := vindexes.Map(ctx, vindex, nil, [][]sqltypes.Value{{value}}) // single row, single column
+	if err != nil {
+		return "", nil, fmt.Errorf("failed to map value to keyspace ID: %w", err)
+	}
+
+	if len(destinations) != 1 { // one input value must yield exactly one destination
+		return "", nil, fmt.Errorf("unexpected number of destinations: %d", len(destinations))
+	}
+
+	ksid, ok := destinations[0].(key.DestinationKeyspaceID) // only single-keyspace-ID vindexes are supported here
+	if !ok {
+		return "", nil, fmt.Errorf("unexpected destination type: %T", destinations[0])
+	}
+
+	foundShard, err := mapShard(allShards, ksid)
+	if err != nil {
+		return "", nil, fmt.Errorf("failed to map shard, original value %v, keyspace id %s: %w", value, ksid, err)
+	}
+	return foundShard, ksid, nil
+}
+
+func getValue(valueStr, valueType string) (sqltypes.Value, error) { // converts the raw input string into a sqltypes.Value of the requested type
+	var value sqltypes.Value
+
+	switch valueType {
+	case "int":
+		valueInt, err := strconv.ParseInt(valueStr, 10, 64)
+		if err != nil {
+			return value, fmt.Errorf("failed to parse int value: %w", err)
+		}
+		value = sqltypes.NewInt64(valueInt)
+	case "uint":
+		valueUint, err := strconv.ParseUint(valueStr, 10, 64)
+		if err != nil {
+			return value, fmt.Errorf("failed to parse uint value: %w", err)
+		}
+		value = sqltypes.NewUint64(valueUint)
+	case "string":
+		value = sqltypes.NewVarChar(valueStr) // strings are passed through verbatim
+	default:
+		return value, fmt.Errorf("unsupported value type: %s", valueType) // only int/uint/string are accepted (see --value_type flag)
+	}
+
+	return value, nil
+}
+
+func getShardMap(shardsCSV *string) []*topodata.ShardReference { // parses the comma-separated shard list (e.g. "-80,80-") into shard references
+	var allShards []*topodata.ShardReference
+
+	for _, shard := range strings.Split(*shardsCSV, ",") {
+		_, keyRange, err := topo.ValidateShardName(shard) // validates the range syntax and yields its KeyRange
+		if err != nil {
+			log.Fatalf("invalid shard range: %s", shard) // fatal: a bad range makes all subsequent mapping meaningless
+		}
+		allShards = append(allShards, &topodata.ShardReference{
+			Name:     shard,
+			KeyRange: keyRange,
+		})
+	}
+	return allShards
+}
+
+type output struct { // one result row of the tool's CSV output
+	Value      string // the input value exactly as read from stdin
+	KeyspaceID string // hex-encoded keyspace ID computed by the vindex
+	Shard      string // name of the shard whose key range contains KeyspaceID
+}
+
+func processValues(scanner *bufio.Scanner, shardsCSV *string, vindexName string, valueType string) ([]output, error) { // maps every stdin line to its keyspace ID and shard
+	allShards := getShardMap(shardsCSV)
+
+	vindex, err := vindexes.CreateVindex(vindexName, vindexName, nil) // vindex name doubles as the instance name; no params
+	if err != nil {
+		return nil, fmt.Errorf("failed to create vindex: %v", err)
+	}
+	var outputs []output
+	for scanner.Scan() {
+		valueStr := scanner.Text()
+		if valueStr == "" { // skip blank lines
+			continue
+		}
+		value, err := getValue(valueStr, valueType)
+		if err != nil { // unparsable input is a hard error, unlike mapping failures below
+			return nil, fmt.Errorf("failed to get value for: %v, value_type %s:: %v", valueStr, valueType, err)
+		}
+		shard, ksid, err := selectShard(vindex, value, allShards)
+		if err != nil {
+			// ignore errors so that we can go ahead with the computation for other values
+			continue
+		}
+		outputs = append(outputs, output{Value: valueStr, KeyspaceID: hex.EncodeToString(ksid), Shard: shard})
+	}
+	return outputs, nil
+}
+
+func printOutput(outputs []output) { // writes the results to stdout as CSV with a header row
+	fmt.Println("value,keyspaceID,shard")
+	for _, output := range outputs {
+		fmt.Printf("%s,%s,%s\n", output.Value, output.KeyspaceID, output.Shard)
+	}
+}
+
+func main() { // reads values from stdin, prints value,keyspaceID,shard CSV to stdout; logs go to stderr
+	// Explicitly configuring the logger since it was flaky in displaying logs locally without this.
+	log.SetOutput(os.Stderr)
+	log.SetFlags(log.LstdFlags)
+	log.SetPrefix("LOG: ")
+
+	vindexName := flag.String("vindex", "xxhash", "name of the vindex")
+	shardsCSV := flag.String("shards", "", "comma-separated list of shard ranges")
+	totalShards := flag.Int("total_shards", 0, "total number of uniformly distributed shards")
+	valueType := flag.String("value_type", "int", "type of the value (int, uint, or string)")
+	flag.Parse()
+
+	if *totalShards > 0 { // --shards and --total_shards are mutually exclusive
+		if *shardsCSV != "" {
+			log.Fatalf("cannot specify both total_shards and shards")
+		}
+		shardArr, err := key.GenerateShardRanges(*totalShards) // same range generation the Vitess operator uses
+		if err != nil {
+			log.Fatalf("failed to generate shard ranges: %v", err)
+		}
+		*shardsCSV = strings.Join(shardArr, ",")
+	}
+	if *shardsCSV == "" { // one of the two flags must have been provided
+		log.Fatal("shards or total_shards must be specified")
+	}
+	scanner := bufio.NewScanner(os.Stdin)
+	outputs, err := processValues(scanner, shardsCSV, *vindexName, *valueType)
+	if err != nil {
+		log.Fatalf("failed to process values: %v", err)
+	}
+	printOutput(outputs)
+}
diff --git a/tools/map-shard-for-value/map-shard-for-value.md b/tools/map-shard-for-value/map-shard-for-value.md
new file mode 100644
index 00000000000..17daf7f5fe5
--- /dev/null
+++ b/tools/map-shard-for-value/map-shard-for-value.md
@@ -0,0 +1,47 @@
+## Map Shard for Value Tool
+
+### Overview
+
+The `map-shard-for-value` tool maps a given value to a specific shard. This tool helps in determining
+which shard a particular value belongs to, based on the vindex algorithm and shard ranges.
+
+### Features
+
+
+- Allows specifying the vindex type (e.g., `hash`, `xxhash`).
+- Allows specifying the shard list or (for uniformly distributed shard ranges) the total number of shards to generate.
+- Designed as a _filter_: Reads input values from `stdin` and outputs the corresponding shard information, so it can be
+ used to map values from a file or another program.
+
+### Usage
+
+```sh
+make build
+```
+
+```sh
+printf '1\n-1\n99\n' | ./map-shard-for-value --total_shards=4 --vindex=xxhash
+value,keyspaceID,shard
+1,d46405367612b4b7,c0-
+-1,d8e2a6a7c8c7623d,c0-
+99,200533312244abca,-40
+
+printf '1\n-1\n99\n' | ./map-shard-for-value --vindex=hash --shards="-80,80-"
+value,keyspaceID,shard
+1,166b40b44aba4bd6,-80
+-1,355550b2150e2451,-80
+99,2c40ad56f4593c47,-80
+```
+
+#### Flags
+
+- `--vindex`: Specifies the name of the vindex to use (e.g., `hash`, `xxhash`) (default `xxhash`)
+
+One (and only one) of these is required:
+
+- `--shards`: Comma-separated list of shard ranges
+- `--total_shards`: Total number of shards, only if shards are uniformly distributed
+
+Optional:
+- `--value_type`: Type of the value to map, one of int, uint, string (default `int`)
+
diff --git a/tools/map-shard-for-value/map-shard-for-value_test.go b/tools/map-shard-for-value/map-shard-for-value_test.go
new file mode 100644
index 00000000000..ca014818bb9
--- /dev/null
+++ b/tools/map-shard-for-value/map-shard-for-value_test.go
@@ -0,0 +1,90 @@
+/*
+Copyright 2024 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package main
+
+import (
+ "bufio"
+ "fmt"
+ "strings"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+)
+
+func TestProcess(t *testing.T) { // table-driven test of processValues across vindex types and shard counts
+	type testCase struct {
+		name       string   // subtest name
+		shardsCSV  string   // shard ranges handed to processValues
+		vindexType string   // vindex to create (hash, xxhash)
+		values     []int    // input values, one per stdin line
+		valueType  string   // type label passed through to getValue
+		expected   []output // expected rows; keyspace IDs match the examples in map-shard-for-value.md
+	}
+	testCases := []testCase{
+		{
+			name:       "hash,2 shards",
+			shardsCSV:  "-80,80-",
+			vindexType: "hash",
+			values:     []int{1, 99},
+			valueType:  "int",
+			expected: []output{
+				{
+					Value:      "1",
+					KeyspaceID: "166b40b44aba4bd6",
+					Shard:      "-80",
+				},
+				{
+					Value:      "99",
+					KeyspaceID: "2c40ad56f4593c47",
+					Shard:      "-80",
+				},
+			},
+		},
+		{
+			name:       "xxhash,4 shards",
+			shardsCSV:  "-40,40-80,80-c0,c0-",
+			vindexType: "xxhash",
+			values:     []int{1, 99},
+			valueType:  "int",
+			expected: []output{
+				{
+					Value:      "1",
+					KeyspaceID: "d46405367612b4b7",
+					Shard:      "c0-",
+				},
+				{
+					Value:      "99",
+					KeyspaceID: "200533312244abca",
+					Shard:      "-40",
+				},
+			},
+		},
+	}
+	for _, tc := range testCases {
+		t.Run(tc.name, func(t *testing.T) {
+			var input strings.Builder // build the newline-separated stdin payload
+			for _, num := range tc.values {
+				fmt.Fprintf(&input, "%d\n", num)
+			}
+			reader := strings.NewReader(input.String())
+			scanner := bufio.NewScanner(reader)
+			got, err := processValues(scanner, &tc.shardsCSV, tc.vindexType, tc.valueType)
+			require.NoError(t, err)
+			require.EqualValues(t, tc.expected, got)
+		})
+	}
+}
diff --git a/web/vtadmin/package-lock.json b/web/vtadmin/package-lock.json
index 6004278321a..8ad7c67a5b4 100644
--- a/web/vtadmin/package-lock.json
+++ b/web/vtadmin/package-lock.json
@@ -10863,15 +10863,16 @@
}
},
"node_modules/nanoid": {
- "version": "3.3.7",
- "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.7.tgz",
- "integrity": "sha512-eSRppjcPIatRIMC1U6UngP8XFcz8MQWGQdt1MTBQ7NaAmvXDfvNxbvWV3x2y6CdEUciCSsDHDQZbhYaB8QEo2g==",
+ "version": "3.3.8",
+ "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.8.tgz",
+ "integrity": "sha512-WNLf5Sd8oZxOm+TzppcYk8gVOgP+l58xNy58D0nbUnOxOWRWvlcCV4kUF7ltmI6PsrLl/BgKEyS4mqsGChFN0w==",
"funding": [
{
"type": "github",
"url": "https://github.com/sponsors/ai"
}
],
+ "license": "MIT",
"bin": {
"nanoid": "bin/nanoid.cjs"
},
diff --git a/web/vtadmin/src/api/http.ts b/web/vtadmin/src/api/http.ts
index 3f75330d240..674df961ef0 100644
--- a/web/vtadmin/src/api/http.ts
+++ b/web/vtadmin/src/api/http.ts
@@ -1068,3 +1068,41 @@ export const showVDiff = async ({ clusterID, request }: ShowVDiffParams) => {
return vtadmin.VDiffShowResponse.create(result);
};
+
+export const fetchSchemaMigrations = async (request: vtadmin.IGetSchemaMigrationsRequest) => {
+ const { result } = await vtfetch(`/api/migrations/`, {
+ body: JSON.stringify(request),
+ method: 'post',
+ });
+
+ const err = vtadmin.GetSchemaMigrationsResponse.verify(result);
+ if (err) throw Error(err);
+
+ return vtadmin.GetSchemaMigrationsResponse.create(result);
+};
+
+export interface ApplySchemaParams {
+ clusterID: string;
+ keyspace: string;
+ callerID: string;
+ sql: string;
+ request: vtctldata.IApplySchemaRequest;
+}
+
+export const applySchema = async ({ clusterID, keyspace, callerID, sql, request }: ApplySchemaParams) => {
+ const body = {
+ sql,
+ caller_id: callerID,
+ request,
+ };
+
+ const { result } = await vtfetch(`/api/migration/${clusterID}/${keyspace}`, {
+ body: JSON.stringify(body),
+ method: 'post',
+ });
+
+ const err = vtctldata.ApplySchemaResponse.verify(result);
+ if (err) throw Error(err);
+
+ return vtctldata.ApplySchemaResponse.create(result);
+};
diff --git a/web/vtadmin/src/components/App.tsx b/web/vtadmin/src/components/App.tsx
index 79bb0d5c354..fd0f772ae19 100644
--- a/web/vtadmin/src/components/App.tsx
+++ b/web/vtadmin/src/components/App.tsx
@@ -46,6 +46,8 @@ import { Transaction } from './routes/transaction/Transaction';
import { CreateReshard } from './routes/createWorkflow/CreateReshard';
import { CreateMaterialize } from './routes/createWorkflow/CreateMaterialize';
import { TopologyTree } from './routes/topologyTree/TopologyTree';
+import { SchemaMigrations } from './routes/SchemaMigrations';
+import { CreateSchemaMigration } from './routes/createSchemaMigration/CreateSchemaMigration';
export const App = () => {
return (
@@ -141,6 +143,16 @@ export const App = () => {
+
+
+
+
+ {!isReadOnlyMode() && (
+
+
+
+ )}
+
diff --git a/web/vtadmin/src/components/NavRail.tsx b/web/vtadmin/src/components/NavRail.tsx
index 9f9e1bf1681..b30cd165684 100644
--- a/web/vtadmin/src/components/NavRail.tsx
+++ b/web/vtadmin/src/components/NavRail.tsx
@@ -65,6 +65,9 @@ export const NavRail = () => {
+ -
+
+
-
diff --git a/web/vtadmin/src/components/routes/createWorkflow/ErrorDialog.tsx b/web/vtadmin/src/components/dialog/ErrorDialog.tsx
similarity index 94%
rename from web/vtadmin/src/components/routes/createWorkflow/ErrorDialog.tsx
rename to web/vtadmin/src/components/dialog/ErrorDialog.tsx
index 25ac5dedb0b..087876e4cd2 100644
--- a/web/vtadmin/src/components/routes/createWorkflow/ErrorDialog.tsx
+++ b/web/vtadmin/src/components/dialog/ErrorDialog.tsx
@@ -14,8 +14,8 @@
* limitations under the License.
*/
import React from 'react';
-import Dialog from '../../dialog/Dialog';
-import { Icon, Icons } from '../../Icon';
+import Dialog from './Dialog';
+import { Icon, Icons } from '../Icon';
export interface ErrorDialogProps {
errorTitle?: string;
diff --git a/web/vtadmin/src/components/routes/SchemaMigrations.tsx b/web/vtadmin/src/components/routes/SchemaMigrations.tsx
new file mode 100644
index 00000000000..1761d26de49
--- /dev/null
+++ b/web/vtadmin/src/components/routes/SchemaMigrations.tsx
@@ -0,0 +1,195 @@
+/**
+ * Copyright 2024 The Vitess Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+import { useEffect, useState } from 'react';
+import { useKeyspaces, useSchemaMigrations } from '../../hooks/api';
+import { DataCell } from '../dataTable/DataCell';
+import { DataTable } from '../dataTable/DataTable';
+import { ContentContainer } from '../layout/ContentContainer';
+import { WorkspaceHeader } from '../layout/WorkspaceHeader';
+import { WorkspaceTitle } from '../layout/WorkspaceTitle';
+import { QueryLoadingPlaceholder } from '../placeholders/QueryLoadingPlaceholder';
+import { useDocumentTitle } from '../../hooks/useDocumentTitle';
+import { vtadmin } from '../../proto/vtadmin';
+import { Select } from '../inputs/Select';
+import { ShardLink } from '../links/ShardLink';
+import { formatDateTime } from '../../util/time';
+import { ReadOnlyGate } from '../ReadOnlyGate';
+import { formatSchemaMigrationStatus } from '../../util/schemaMigrations';
+import { Link } from 'react-router-dom';
+import { TabletLink } from '../links/TabletLink';
+import { formatAlias } from '../../util/tablets';
+import { useURLQuery } from '../../hooks/useURLQuery';
+
+const COLUMNS = ['UUID', 'Status', 'DDL Action', 'Timestamps', 'Stage', 'Progress'];
+
+export const SchemaMigrations = () => {
+ useDocumentTitle('Schema Migrations');
+
+ const { query, replaceQuery } = useURLQuery();
+ const urlKeyspace = query['keyspace'];
+ const urlCluster = query['cluster'];
+
+ const keyspacesQuery = useKeyspaces();
+ const { data: keyspaces = [], ...ksQuery } = keyspacesQuery;
+
+ const [selectedKeyspace, setSelectedKeypsace] = useState();
+
+ const request: vtadmin.IGetSchemaMigrationsRequest = {
+ cluster_requests: [
+ {
+ cluster_id: selectedKeyspace && selectedKeyspace.cluster?.id,
+ request: {
+ keyspace: selectedKeyspace && selectedKeyspace.keyspace?.name,
+ },
+ },
+ ],
+ };
+
+ const schemaMigrationsQuery = useSchemaMigrations(request, {
+ enabled: !!selectedKeyspace,
+ });
+
+ const schemaMigrations = schemaMigrationsQuery.data ? schemaMigrationsQuery.data.schema_migrations : [];
+
+ const handleKeyspaceChange = (ks: vtadmin.Keyspace | null | undefined) => {
+ setSelectedKeypsace(ks);
+
+ if (ks) {
+ replaceQuery({ keyspace: ks.keyspace?.name, cluster: ks.cluster?.id });
+ } else {
+ replaceQuery({ keyspace: undefined, cluster: undefined });
+ }
+ };
+
+ useEffect(() => {
+ if (urlKeyspace && urlCluster) {
+ const keyspace = keyspaces.find(
+ (ks) => ks.cluster?.id === String(urlCluster) && ks.keyspace?.name === String(urlKeyspace)
+ );
+
+ if (keyspace) {
+ setSelectedKeypsace(keyspace);
+ } else if (!ksQuery.isLoading) {
+ replaceQuery({ keyspace: undefined, cluster: undefined });
+ }
+ } else {
+ setSelectedKeypsace(undefined);
+ }
+ }, [urlKeyspace, urlCluster, keyspaces, ksQuery.isLoading, replaceQuery]);
+
+ const renderRows = (rows: vtadmin.ISchemaMigration[]) => {
+ return rows.map((row) => {
+ const migrationInfo = row.schema_migration;
+
+ if (!migrationInfo) return <>>;
+
+ return (
+
+
+ {migrationInfo.uuid}
+
+ Tablet{' '}
+
+ {formatAlias(migrationInfo.tablet)}
+
+
+
+ Shard{' '}
+
+ {`${migrationInfo.keyspace}/${migrationInfo.shard}`}
+
+
+
+
+ {formatSchemaMigrationStatus(migrationInfo)}
+
+ {migrationInfo.ddl_action ? migrationInfo.ddl_action : '-'}
+
+ {migrationInfo.added_at && (
+
+ Added
+ {formatDateTime(migrationInfo.added_at?.seconds)}
+
+ )}
+ {migrationInfo.requested_at && (
+
+ Requested
+ {formatDateTime(migrationInfo.requested_at?.seconds)}
+
+ )}
+ {migrationInfo.started_at && (
+
+ Started
+ {formatDateTime(migrationInfo.started_at?.seconds)}
+
+ )}
+ {migrationInfo.completed_at && (
+
+ Completed
+ {formatDateTime(migrationInfo.completed_at?.seconds)}
+
+ )}
+
+ {migrationInfo.stage ? migrationInfo.stage : '-'}
+ {migrationInfo.progress ? `${migrationInfo.progress}%` : '-'}
+
+ );
+ });
+ };
+
+ return (
+
+
+
+
Schema Migrations
+
+
+
+ Create Schema Migration Request
+
+
+
+
+
+
+
+
+
+
+
+
+
+ );
+};
diff --git a/web/vtadmin/src/components/routes/createSchemaMigration/CreateSchemaMigration.module.scss b/web/vtadmin/src/components/routes/createSchemaMigration/CreateSchemaMigration.module.scss
new file mode 100644
index 00000000000..51f5fdca04e
--- /dev/null
+++ b/web/vtadmin/src/components/routes/createSchemaMigration/CreateSchemaMigration.module.scss
@@ -0,0 +1,30 @@
+/**
+ * Copyright 2024 The Vitess Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+.sqlInput {
+ border: solid 2px var(--colorDisabled);
+ border-radius: 6px;
+ display: block;
+ font-family: var(--fontFamilyMonospace);
+ line-height: var(--lineHeightBody);
+ padding: 0.8rem;
+ resize: vertical;
+ width: 100%;
+}
+
+.sqlInput:focus {
+ border-color: var(--colorPrimary);
+ outline: none;
+}
diff --git a/web/vtadmin/src/components/routes/createSchemaMigration/CreateSchemaMigration.tsx b/web/vtadmin/src/components/routes/createSchemaMigration/CreateSchemaMigration.tsx
new file mode 100644
index 00000000000..0f7326d2ae1
--- /dev/null
+++ b/web/vtadmin/src/components/routes/createSchemaMigration/CreateSchemaMigration.tsx
@@ -0,0 +1,270 @@
+/**
+ * Copyright 2024 The Vitess Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+import { useEffect, useState } from 'react';
+import { Link, useHistory } from 'react-router-dom';
+
+import style from './CreateSchemaMigration.module.scss';
+import { useApplySchema, useClusters, useKeyspaces } from '../../../hooks/api';
+import { useDocumentTitle } from '../../../hooks/useDocumentTitle';
+import { Label } from '../../inputs/Label';
+import { Select } from '../../inputs/Select';
+import { ContentContainer } from '../../layout/ContentContainer';
+import { NavCrumbs } from '../../layout/NavCrumbs';
+import { WorkspaceHeader } from '../../layout/WorkspaceHeader';
+import { WorkspaceTitle } from '../../layout/WorkspaceTitle';
+import { TextInput } from '../../TextInput';
+import { success } from '../../Snackbar';
+import { FormError } from '../../forms/FormError';
+import { vtadmin } from '../../../proto/vtadmin';
+import ErrorDialog from '../../dialog/ErrorDialog';
+
+interface FormData {
+ clusterID: string;
+ keyspace: string;
+ ddlStrategy: string;
+ sql: string;
+ batchSize: number;
+ callerID: string;
+ migrationContext: string;
+ uuidList: string;
+}
+
+const DEFAULT_FORM_DATA: FormData = {
+ clusterID: '',
+ keyspace: '',
+ // Default DDL Strategy set to "vitess".
+ ddlStrategy: 'vitess',
+ sql: '',
+ batchSize: 0,
+ callerID: '',
+ migrationContext: '',
+ uuidList: '',
+};
+
+const DDL_STRATEGY_HELP_TEXT = `Online DDL strategy, compatible with @@ddl_strategy session variable (default "vitess")`;
+
+const MIGRATION_CONTEXT_HELP_TEXT =
+ 'For Online DDL, optionally supply a custom unique string used as context for the migration(s) in this command. By default a unique context is auto-generated by Vitess.';
+
+const CALLER_ID_HELP_TEXT =
+ 'Effective caller ID used for the operation and should map to an ACL name which grants this identity the necessary permissions to perform the operation (this is only necessary when strict table ACLs are used)';
+
+export const CreateSchemaMigration = () => {
+ useDocumentTitle('Create Schema Migration Request');
+
+ const history = useHistory();
+
+ const [formData, setFormData] = useState(DEFAULT_FORM_DATA);
+
+ const [clusterKeyspaces, setClusterKeyspaces] = useState([]);
+
+ const [errorDialogOpen, setErrorDialogOpen] = useState(false);
+
+ const { data: clusters = [], ...clustersQuery } = useClusters();
+
+ const { data: keyspaces = [], ...keyspacesQuery } = useKeyspaces();
+
+ const mutation = useApplySchema(
+ {
+ clusterID: formData.clusterID,
+ keyspace: formData.keyspace,
+ callerID: formData.callerID,
+ sql: formData.sql,
+ request: {
+ ddl_strategy: formData.ddlStrategy,
+ batch_size: formData.batchSize,
+ migration_context: formData.migrationContext,
+ uuid_list: (formData.uuidList && formData.uuidList.split(',').map((uuid) => uuid.trim())) || undefined,
+ },
+ },
+ {
+ onSuccess: () => {
+ success(`Successfully created schema migration request.`, { autoClose: 1600 });
+
+ history.push({
+ pathname: `/migrations`,
+ search: `?keyspace=${formData.keyspace}&cluster=${formData.clusterID}`,
+ });
+ },
+ onError: () => {
+ setErrorDialogOpen(true);
+ },
+ }
+ );
+
+ let selectedCluster = null;
+ if (!!formData.clusterID) {
+ selectedCluster = clusters.find((c) => c.id === formData.clusterID);
+ }
+
+ let selectedKeyspace = null;
+ if (!!formData.keyspace) {
+ selectedKeyspace = keyspaces.find((ks) => ks.keyspace?.name === formData.keyspace);
+ }
+
+ const isValid = !!selectedCluster && !!formData.keyspace && !!formData.sql && !!formData.ddlStrategy;
+
+ const isDisabled = !isValid || mutation.isLoading;
+
+ const onSubmit: React.FormEventHandler = (e) => {
+ e.preventDefault();
+ mutation.mutate();
+ };
+
+ useEffect(() => {
+ // Clear out the selected keyspaces if selected cluster is changed.
+ setFormData((prevFormData) => ({ ...prevFormData, keyspace: '' }));
+ setClusterKeyspaces(keyspaces.filter((ks) => ks.cluster?.id === formData.clusterID));
+ }, [formData.clusterID, keyspaces]);
+
+ useEffect(() => {
+ if (clusters.length === 1) {
+ setFormData((prevFormData) => ({ ...prevFormData, clusterID: clusters[0].id }));
+ }
+ }, [clusters]);
+
+ return (
+
+
+
+ Schema Migrations
+
+
+ Create Schema Migration Request
+
+
+
+
+
+ {mutation.isError && !mutation.isLoading && (
+ {
+ setErrorDialogOpen(false);
+ }}
+ />
+ )}
+
+
+ );
+};
diff --git a/web/vtadmin/src/components/routes/createWorkflow/CreateMaterialize.tsx b/web/vtadmin/src/components/routes/createWorkflow/CreateMaterialize.tsx
index c5d688a1fb7..81447cd0e6d 100644
--- a/web/vtadmin/src/components/routes/createWorkflow/CreateMaterialize.tsx
+++ b/web/vtadmin/src/components/routes/createWorkflow/CreateMaterialize.tsx
@@ -31,7 +31,7 @@ import Toggle from '../../toggle/Toggle';
import { tabletmanagerdata, vtadmin, vtctldata } from '../../../proto/vtadmin';
import { MultiSelect } from '../../inputs/MultiSelect';
import { TABLET_TYPES } from '../../../util/tablets';
-import ErrorDialog from './ErrorDialog';
+import ErrorDialog from '../../dialog/ErrorDialog';
interface FormData {
clusterID: string;
diff --git a/web/vtadmin/src/components/routes/createWorkflow/CreateMoveTables.tsx b/web/vtadmin/src/components/routes/createWorkflow/CreateMoveTables.tsx
index bca84cda4fa..1852d85b848 100644
--- a/web/vtadmin/src/components/routes/createWorkflow/CreateMoveTables.tsx
+++ b/web/vtadmin/src/components/routes/createWorkflow/CreateMoveTables.tsx
@@ -31,7 +31,7 @@ import Toggle from '../../toggle/Toggle';
import { vtadmin } from '../../../proto/vtadmin';
import { MultiSelect } from '../../inputs/MultiSelect';
import { TABLET_TYPES } from '../../../util/tablets';
-import ErrorDialog from './ErrorDialog';
+import ErrorDialog from '../../dialog/ErrorDialog';
interface FormData {
clusterID: string;
diff --git a/web/vtadmin/src/components/routes/createWorkflow/CreateReshard.tsx b/web/vtadmin/src/components/routes/createWorkflow/CreateReshard.tsx
index 4977c59e46b..05a33825174 100644
--- a/web/vtadmin/src/components/routes/createWorkflow/CreateReshard.tsx
+++ b/web/vtadmin/src/components/routes/createWorkflow/CreateReshard.tsx
@@ -31,7 +31,7 @@ import Toggle from '../../toggle/Toggle';
import { tabletmanagerdata, vtadmin } from '../../../proto/vtadmin';
import { MultiSelect } from '../../inputs/MultiSelect';
import { TABLET_TYPES } from '../../../util/tablets';
-import ErrorDialog from './ErrorDialog';
+import ErrorDialog from '../../dialog/ErrorDialog';
interface FormData {
clusterID: string;
diff --git a/web/vtadmin/src/hooks/api.ts b/web/vtadmin/src/hooks/api.ts
index 435f3855ea9..c2e1209f128 100644
--- a/web/vtadmin/src/hooks/api.ts
+++ b/web/vtadmin/src/hooks/api.ts
@@ -95,6 +95,8 @@ import {
showVDiff,
ShowVDiffParams,
createMaterialize,
+ fetchSchemaMigrations,
+ applySchema,
} from '../api/http';
import { vtadmin as pb, vtctldata } from '../proto/vtadmin';
import { formatAlias } from '../util/tablets';
@@ -796,3 +798,25 @@ export const useShowVDiff = (
) => {
return useQuery(['vdiff_show', params], () => showVDiff(params), { ...options });
};
+
+/**
+ * useSchemaMigrations is a query hook that fetches schema migrations.
+ */
+export const useSchemaMigrations = (
+ request: pb.IGetSchemaMigrationsRequest,
+ options?: UseQueryOptions | undefined
+) => {
+ return useQuery(['migrations', request], () => fetchSchemaMigrations(request), { ...options });
+};
+
+/**
+ * useApplySchema is a mutation query hook that creates ApplySchema request.
+ */
+export const useApplySchema = (
+ params: Parameters[0],
+ options: UseMutationOptions>, Error>
+) => {
+ return useMutation>, Error>(() => {
+ return applySchema(params);
+ }, options);
+};
diff --git a/web/vtadmin/src/proto/vtadmin.d.ts b/web/vtadmin/src/proto/vtadmin.d.ts
index 9e11f5f37d9..7b921fc034a 100644
--- a/web/vtadmin/src/proto/vtadmin.d.ts
+++ b/web/vtadmin/src/proto/vtadmin.d.ts
@@ -3720,6 +3720,12 @@ export namespace vtadmin {
/** ApplySchemaRequest cluster_id */
cluster_id?: (string|null);
+ /** ApplySchemaRequest sql */
+ sql?: (string|null);
+
+ /** ApplySchemaRequest caller_id */
+ caller_id?: (string|null);
+
/** ApplySchemaRequest request */
request?: (vtctldata.IApplySchemaRequest|null);
}
@@ -3736,6 +3742,12 @@ export namespace vtadmin {
/** ApplySchemaRequest cluster_id. */
public cluster_id: string;
+ /** ApplySchemaRequest sql. */
+ public sql: string;
+
+ /** ApplySchemaRequest caller_id. */
+ public caller_id: string;
+
/** ApplySchemaRequest request. */
public request?: (vtctldata.IApplySchemaRequest|null);
@@ -47923,6 +47935,9 @@ export namespace replicationdata {
/** Status replication_lag_unknown */
replication_lag_unknown?: (boolean|null);
+
+ /** Status backup_running */
+ backup_running?: (boolean|null);
}
/** Represents a Status. */
@@ -48000,6 +48015,9 @@ export namespace replicationdata {
/** Status replication_lag_unknown. */
public replication_lag_unknown: boolean;
+ /** Status backup_running. */
+ public backup_running: boolean;
+
/**
* Creates a new Status instance using the specified properties.
* @param [properties] Properties to set
diff --git a/web/vtadmin/src/proto/vtadmin.js b/web/vtadmin/src/proto/vtadmin.js
index d0ce20ccfc2..e5bcf766b40 100644
--- a/web/vtadmin/src/proto/vtadmin.js
+++ b/web/vtadmin/src/proto/vtadmin.js
@@ -7838,6 +7838,8 @@ export const vtadmin = $root.vtadmin = (() => {
* @memberof vtadmin
* @interface IApplySchemaRequest
* @property {string|null} [cluster_id] ApplySchemaRequest cluster_id
+ * @property {string|null} [sql] ApplySchemaRequest sql
+ * @property {string|null} [caller_id] ApplySchemaRequest caller_id
* @property {vtctldata.IApplySchemaRequest|null} [request] ApplySchemaRequest request
*/
@@ -7864,6 +7866,22 @@ export const vtadmin = $root.vtadmin = (() => {
*/
ApplySchemaRequest.prototype.cluster_id = "";
+ /**
+ * ApplySchemaRequest sql.
+ * @member {string} sql
+ * @memberof vtadmin.ApplySchemaRequest
+ * @instance
+ */
+ ApplySchemaRequest.prototype.sql = "";
+
+ /**
+ * ApplySchemaRequest caller_id.
+ * @member {string} caller_id
+ * @memberof vtadmin.ApplySchemaRequest
+ * @instance
+ */
+ ApplySchemaRequest.prototype.caller_id = "";
+
/**
* ApplySchemaRequest request.
* @member {vtctldata.IApplySchemaRequest|null|undefined} request
@@ -7898,8 +7916,12 @@ export const vtadmin = $root.vtadmin = (() => {
writer = $Writer.create();
if (message.cluster_id != null && Object.hasOwnProperty.call(message, "cluster_id"))
writer.uint32(/* id 1, wireType 2 =*/10).string(message.cluster_id);
+ if (message.sql != null && Object.hasOwnProperty.call(message, "sql"))
+ writer.uint32(/* id 2, wireType 2 =*/18).string(message.sql);
+ if (message.caller_id != null && Object.hasOwnProperty.call(message, "caller_id"))
+ writer.uint32(/* id 3, wireType 2 =*/26).string(message.caller_id);
if (message.request != null && Object.hasOwnProperty.call(message, "request"))
- $root.vtctldata.ApplySchemaRequest.encode(message.request, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim();
+ $root.vtctldata.ApplySchemaRequest.encode(message.request, writer.uint32(/* id 4, wireType 2 =*/34).fork()).ldelim();
return writer;
};
@@ -7939,6 +7961,14 @@ export const vtadmin = $root.vtadmin = (() => {
break;
}
case 2: {
+ message.sql = reader.string();
+ break;
+ }
+ case 3: {
+ message.caller_id = reader.string();
+ break;
+ }
+ case 4: {
message.request = $root.vtctldata.ApplySchemaRequest.decode(reader, reader.uint32());
break;
}
@@ -7980,6 +8010,12 @@ export const vtadmin = $root.vtadmin = (() => {
if (message.cluster_id != null && message.hasOwnProperty("cluster_id"))
if (!$util.isString(message.cluster_id))
return "cluster_id: string expected";
+ if (message.sql != null && message.hasOwnProperty("sql"))
+ if (!$util.isString(message.sql))
+ return "sql: string expected";
+ if (message.caller_id != null && message.hasOwnProperty("caller_id"))
+ if (!$util.isString(message.caller_id))
+ return "caller_id: string expected";
if (message.request != null && message.hasOwnProperty("request")) {
let error = $root.vtctldata.ApplySchemaRequest.verify(message.request);
if (error)
@@ -8002,6 +8038,10 @@ export const vtadmin = $root.vtadmin = (() => {
let message = new $root.vtadmin.ApplySchemaRequest();
if (object.cluster_id != null)
message.cluster_id = String(object.cluster_id);
+ if (object.sql != null)
+ message.sql = String(object.sql);
+ if (object.caller_id != null)
+ message.caller_id = String(object.caller_id);
if (object.request != null) {
if (typeof object.request !== "object")
throw TypeError(".vtadmin.ApplySchemaRequest.request: object expected");
@@ -8025,10 +8065,16 @@ export const vtadmin = $root.vtadmin = (() => {
let object = {};
if (options.defaults) {
object.cluster_id = "";
+ object.sql = "";
+ object.caller_id = "";
object.request = null;
}
if (message.cluster_id != null && message.hasOwnProperty("cluster_id"))
object.cluster_id = message.cluster_id;
+ if (message.sql != null && message.hasOwnProperty("sql"))
+ object.sql = message.sql;
+ if (message.caller_id != null && message.hasOwnProperty("caller_id"))
+ object.caller_id = message.caller_id;
if (message.request != null && message.hasOwnProperty("request"))
object.request = $root.vtctldata.ApplySchemaRequest.toObject(message.request, options);
return object;
@@ -116245,6 +116291,7 @@ export const replicationdata = $root.replicationdata = (() => {
* @property {boolean|null} [has_replication_filters] Status has_replication_filters
* @property {boolean|null} [ssl_allowed] Status ssl_allowed
* @property {boolean|null} [replication_lag_unknown] Status replication_lag_unknown
+ * @property {boolean|null} [backup_running] Status backup_running
*/
/**
@@ -116438,6 +116485,14 @@ export const replicationdata = $root.replicationdata = (() => {
*/
Status.prototype.replication_lag_unknown = false;
+ /**
+ * Status backup_running.
+ * @member {boolean} backup_running
+ * @memberof replicationdata.Status
+ * @instance
+ */
+ Status.prototype.backup_running = false;
+
/**
* Creates a new Status instance using the specified properties.
* @function create
@@ -116506,6 +116561,8 @@ export const replicationdata = $root.replicationdata = (() => {
writer.uint32(/* id 23, wireType 0 =*/184).bool(message.ssl_allowed);
if (message.replication_lag_unknown != null && Object.hasOwnProperty.call(message, "replication_lag_unknown"))
writer.uint32(/* id 24, wireType 0 =*/192).bool(message.replication_lag_unknown);
+ if (message.backup_running != null && Object.hasOwnProperty.call(message, "backup_running"))
+ writer.uint32(/* id 25, wireType 0 =*/200).bool(message.backup_running);
return writer;
};
@@ -116628,6 +116685,10 @@ export const replicationdata = $root.replicationdata = (() => {
message.replication_lag_unknown = reader.bool();
break;
}
+ case 25: {
+ message.backup_running = reader.bool();
+ break;
+ }
default:
reader.skipType(tag & 7);
break;
@@ -116729,6 +116790,9 @@ export const replicationdata = $root.replicationdata = (() => {
if (message.replication_lag_unknown != null && message.hasOwnProperty("replication_lag_unknown"))
if (typeof message.replication_lag_unknown !== "boolean")
return "replication_lag_unknown: boolean expected";
+ if (message.backup_running != null && message.hasOwnProperty("backup_running"))
+ if (typeof message.backup_running !== "boolean")
+ return "backup_running: boolean expected";
return null;
};
@@ -116788,6 +116852,8 @@ export const replicationdata = $root.replicationdata = (() => {
message.ssl_allowed = Boolean(object.ssl_allowed);
if (object.replication_lag_unknown != null)
message.replication_lag_unknown = Boolean(object.replication_lag_unknown);
+ if (object.backup_running != null)
+ message.backup_running = Boolean(object.backup_running);
return message;
};
@@ -116827,6 +116893,7 @@ export const replicationdata = $root.replicationdata = (() => {
object.has_replication_filters = false;
object.ssl_allowed = false;
object.replication_lag_unknown = false;
+ object.backup_running = false;
}
if (message.position != null && message.hasOwnProperty("position"))
object.position = message.position;
@@ -116872,6 +116939,8 @@ export const replicationdata = $root.replicationdata = (() => {
object.ssl_allowed = message.ssl_allowed;
if (message.replication_lag_unknown != null && message.hasOwnProperty("replication_lag_unknown"))
object.replication_lag_unknown = message.replication_lag_unknown;
+ if (message.backup_running != null && message.hasOwnProperty("backup_running"))
+ object.backup_running = message.backup_running;
return object;
};
diff --git a/web/vtadmin/src/util/schemaMigrations.ts b/web/vtadmin/src/util/schemaMigrations.ts
new file mode 100644
index 00000000000..c405c4dbecf
--- /dev/null
+++ b/web/vtadmin/src/util/schemaMigrations.ts
@@ -0,0 +1,31 @@
+/**
+ * Copyright 2024 The Vitess Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+import { invertBy } from 'lodash-es';
+import { vtctldata } from '../proto/vtadmin';
+
+/**
+ * SCHEMA_MIGRATION_STATUS maps numeric schema migration status back to human readable strings.
+ */
+export const SCHEMA_MIGRATION_STATUS = Object.entries(invertBy(vtctldata.SchemaMigration.Status)).reduce(
+ (acc, [k, vs]) => {
+ acc[k] = vs[0];
+ return acc;
+ },
+ {} as { [k: string]: string }
+);
+
+export const formatSchemaMigrationStatus = (schemaMigration: vtctldata.ISchemaMigration) =>
+ schemaMigration.status && SCHEMA_MIGRATION_STATUS[schemaMigration.status];