From 0fd27cb533911cebc425ebb0fd90ea56aaee97a8 Mon Sep 17 00:00:00 2001 From: Jeremy Cole Date: Wed, 11 Jan 2023 16:09:21 -0800 Subject: [PATCH 01/12] Add new placement Vindex strategy Signed-off-by: Jeremy Cole Sprinkle addPadding everywhere when comparing KeyRange Start/End values Implement support for PartialVindex usage Signed-off-by: Jeremy Cole --- go/vt/key/key.go | 22 +- go/vt/vtgate/vindexes/placement.go | 224 +++++++++++++++ go/vt/vtgate/vindexes/placement_test.go | 354 ++++++++++++++++++++++++ 3 files changed, 589 insertions(+), 11 deletions(-) create mode 100644 go/vt/vtgate/vindexes/placement.go create mode 100644 go/vt/vtgate/vindexes/placement_test.go diff --git a/go/vt/key/key.go b/go/vt/key/key.go index fc603554ecf..6617be18818 100644 --- a/go/vt/key/key.go +++ b/go/vt/key/key.go @@ -113,10 +113,10 @@ func KeyRangeAdd(first, second *topodatapb.KeyRange) (*topodatapb.KeyRange, bool if first == nil || second == nil { return nil, false } - if len(first.End) != 0 && bytes.Equal(first.End, second.Start) { + if len(first.End) != 0 && bytes.Equal(addPadding(first.End), addPadding(second.Start)) { return &topodatapb.KeyRange{Start: first.Start, End: second.End}, true } - if len(second.End) != 0 && bytes.Equal(second.End, first.Start) { + if len(second.End) != 0 && bytes.Equal(addPadding(second.End), addPadding(first.Start)) { return &topodatapb.KeyRange{Start: second.Start, End: first.End}, true } return nil, false @@ -127,8 +127,8 @@ func KeyRangeContains(kr *topodatapb.KeyRange, id []byte) bool { if kr == nil { return true } - return bytes.Compare(kr.Start, id) <= 0 && - (len(kr.End) == 0 || bytes.Compare(id, kr.End) < 0) + return bytes.Compare(addPadding(kr.Start), addPadding(id)) <= 0 && + (len(kr.End) == 0 || bytes.Compare(addPadding(id), addPadding(kr.End)) < 0) } // ParseKeyRangeParts parses a start and end hex values and build a proto KeyRange @@ -202,7 +202,7 @@ func KeyRangeStartSmaller(left, right *topodatapb.KeyRange) bool { if right == nil { return false } - return bytes.Compare(left.Start, right.Start) < 0 + return bytes.Compare(addPadding(left.Start), addPadding(right.Start)) < 0 } // KeyRangeStartEqual returns true if both key ranges have the same start @@ -250,8 +250,8 @@ func KeyRangesIntersect(first, second *topodatapb.KeyRange) bool { if first == nil || second == nil { return true } - return (len(first.End) == 0 || bytes.Compare(second.Start, first.End) < 0) && - (len(second.End) == 0 || bytes.Compare(first.Start, second.End) < 0) + return (len(first.End) == 0 || bytes.Compare(addPadding(second.Start), addPadding(first.End)) < 0) && + (len(second.End) == 0 || bytes.Compare(addPadding(first.Start), addPadding(second.End)) < 0) } // KeyRangesOverlap returns the overlap between two KeyRanges. 
@@ -270,14 +270,14 @@ func KeyRangesOverlap(first, second *topodatapb.KeyRange) (*topodatapb.KeyRange, // start with (a,b) result := proto.Clone(first).(*topodatapb.KeyRange) // if c > a, then use c - if bytes.Compare(second.Start, first.Start) > 0 { + if bytes.Compare(addPadding(second.Start), addPadding(first.Start)) > 0 { result.Start = second.Start } // if b is maxed out, or // (d is not maxed out and d < b) // ^ valid test as neither b nor d are max // then use d - if len(first.End) == 0 || (len(second.End) != 0 && bytes.Compare(second.End, first.End) < 0) { + if len(first.End) == 0 || (len(second.End) != 0 && bytes.Compare(addPadding(second.End), addPadding(first.End)) < 0) { result.End = second.End } return result, nil @@ -297,10 +297,10 @@ func KeyRangeIncludes(big, small *topodatapb.KeyRange) bool { return len(big.Start) == 0 && len(big.End) == 0 } // Now we check small.Start >= big.Start, and small.End <= big.End - if len(big.Start) != 0 && bytes.Compare(small.Start, big.Start) < 0 { + if len(big.Start) != 0 && bytes.Compare(addPadding(small.Start), addPadding(big.Start)) < 0 { return false } - if len(big.End) != 0 && (len(small.End) == 0 || bytes.Compare(small.End, big.End) > 0) { + if len(big.End) != 0 && (len(small.End) == 0 || bytes.Compare(addPadding(small.End), addPadding(big.End)) > 0) { return false } return true diff --git a/go/vt/vtgate/vindexes/placement.go b/go/vt/vtgate/vindexes/placement.go new file mode 100644 index 00000000000..4382bc95c83 --- /dev/null +++ b/go/vt/vtgate/vindexes/placement.go @@ -0,0 +1,224 @@ +/* +Copyright 2022 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* + +A Vindex which uses a mapping lookup table `placement_map` to set the first `placement_prefix_bytes` of the Keyspace ID +and another Vindex type `placement_sub_vindex_type` (which must support Hashing) as a sub-Vindex to set the rest. +This is suitable for regional sharding (like region_json or region_experimental) but does not require a mapping file, +and can support non-integer types for the sharding column. All parameters are prefixed with `placement_` so as to avoid +conflict, because the `params` map is passed to the sub-Vindex as well. + +*/ + +package vindexes + +import ( + "bytes" + "context" + "encoding/binary" + "fmt" + "strconv" + "strings" + + vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/vterrors" + + "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/vt/key" +) + +var ( + _ MultiColumn = (*Placement)(nil) + + PlacementRequiredParams = []string{ + "placement_map", + "placement_prefix_bytes", + "placement_sub_vindex_type", + } +) + +func init() { + Register("placement", NewPlacement) +} + +type PlacementMap map[string]uint64 + +type Placement struct { + name string + placementMap PlacementMap + subVindex Vindex + subVindexType string + subVindexName string + prefixBytes int +} + +// Parse a string containing a list of delimited string:integer key-value pairs, e.g. "foo:1,bar:2". 
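+// An illustrative sketch of the expected behavior (values here are examples, not from the test suite):
+// values are parsed with strconv.ParseUint(value, 0, 64), so plain decimal and 0x-prefixed hex both work:
+//
+//	parsePlacementMap("foo:1,bar:0x02") // PlacementMap{"foo": 1, "bar": 2}, nil
+//	parsePlacementMap("foo:")           // nil, error: unexpected empty value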
+func parsePlacementMap(s string) (*PlacementMap, error) {
+	placementMap := make(PlacementMap)
+	for _, entry := range strings.Split(s, ",") {
+		if entry == "" {
+			continue
+		}
+
+		kv := strings.Split(entry, ":")
+		if len(kv) != 2 {
+			return nil, fmt.Errorf("entry: %v; expected key:value", entry)
+		}
+		if kv[0] == "" {
+			return nil, fmt.Errorf("entry: %v; unexpected empty key", entry)
+		}
+		if kv[1] == "" {
+			return nil, fmt.Errorf("entry: %v; unexpected empty value", entry)
+		}
+
+		value, err := strconv.ParseUint(kv[1], 0, 64)
+		if err != nil {
+			return nil, fmt.Errorf("entry: %v; %v", entry, err)
+		}
+		placementMap[kv[0]] = value
+	}
+	return &placementMap, nil
+}
+
+func NewPlacement(name string, params map[string]string) (Vindex, error) {
+	var missingParams []string
+	for _, param := range PlacementRequiredParams {
+		if params[param] == "" {
+			missingParams = append(missingParams, param)
+		}
+	}
+
+	if len(missingParams) > 0 {
+		return nil, fmt.Errorf("missing params: %s", strings.Join(missingParams, ", "))
+	}
+
+	placementMap, parseError := parsePlacementMap(params["placement_map"])
+	if parseError != nil {
+		return nil, fmt.Errorf("malformed placement_map; %v", parseError)
+	}
+
+	prefixBytes, prefixError := strconv.Atoi(params["placement_prefix_bytes"])
+	if prefixError != nil {
+		return nil, prefixError
+	}
+
+	if prefixBytes < 1 || prefixBytes > 7 {
+		return nil, fmt.Errorf("invalid placement_prefix_bytes: %v; expected integer between 1 and 7", prefixBytes)
+	}
+
+	subVindexType := params["placement_sub_vindex_type"]
+	subVindexName := fmt.Sprintf("%s_sub_vindex", name)
+	subVindex, createVindexError := CreateVindex(subVindexType, subVindexName, params)
+	if createVindexError != nil {
+		return nil, fmt.Errorf("invalid placement_sub_vindex_type: %v", createVindexError)
+	}
+
+	// TODO: Should we support MultiColumn Vindex?
+	if _, subVindexSupportsHashing := subVindex.(Hashing); !subVindexSupportsHashing {
+		return nil, fmt.Errorf("invalid placement_sub_vindex_type: %v; does not support the Hashing interface", subVindexType)
+	}
+
+	return &Placement{
+		name:          name,
+		placementMap:  *placementMap,
+		subVindex:     subVindex,
+		subVindexType: subVindexType,
+		subVindexName: subVindexName,
+		prefixBytes:   prefixBytes,
+	}, nil
+}
+
+func (p *Placement) String() string {
+	return p.name
+}
+
+func (p *Placement) Cost() int {
+	return 1
+}
+
+func (p *Placement) IsUnique() bool {
+	return true
+}
+
+func (p *Placement) NeedsVCursor() bool {
+	return false
+}
+
+func (p *Placement) PartialVindex() bool {
+	return true
+}
+
+func makeDestinationPrefix(value uint64, prefixBytes int) []byte {
+	destinationPrefix := make([]byte, 8)
+	binary.BigEndian.PutUint64(destinationPrefix, value)
+	if prefixBytes < 8 {
+		// Shorten the prefix to the desired length.
+		destinationPrefix = destinationPrefix[(8 - prefixBytes):]
+	}
+
+	return destinationPrefix
+}
+
+func (p *Placement) Map(ctx context.Context, vcursor VCursor, rowsColValues [][]sqltypes.Value) ([]key.Destination, error) {
+	destinations := make([]key.Destination, 0, len(rowsColValues))
+
+	for _, row := range rowsColValues {
+		if len(row) != 1 && len(row) != 2 {
+			return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "wrong number of column values were passed: expected 1-2, got %d", len(row))
+		}
+
+		// Calculate the destination prefix from the placement key which will be the same whether this is a partial
+		// or full usage of the Vindex.
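+		// For example (illustrative values): with placement_prefix_bytes=1 and a map entry "foo:1", a
+		// one-column row ("foo") maps to the key range with prefix 0x01, while a two-column row ("foo", x)
+		// maps to the keyspace ID 0x01 followed by the first seven bytes of the sub-Vindex hash of x.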
+ placementKey := row[0].ToString() + placementDestinationValue, placementMappingFound := p.placementMap[placementKey] + if !placementMappingFound { + destinations = append(destinations, key.DestinationNone{}) + continue + } + + placementDestinationPrefix := makeDestinationPrefix(placementDestinationValue, p.prefixBytes) + + if len(row) == 1 { // Partial Vindex usage with only the placement column provided. + destinations = append(destinations, NewKeyRangeFromPrefix(placementDestinationPrefix)) + } else if len(row) == 2 { // Full Vindex usage with the placement column and subVindex column provided. + subVindexValue, hashingError := p.subVindex.(Hashing).Hash(row[1]) + if hashingError != nil { + return nil, hashingError // TODO: Should we be less fatal here and use DestinationNone? + } + + // Concatenate and add to destinations. + rowDestination := append(placementDestinationPrefix, subVindexValue...) + destinations = append(destinations, key.DestinationKeyspaceID(rowDestination[0:8])) + } + } + + return destinations, nil +} + +func (p *Placement) Verify(ctx context.Context, vcursor VCursor, rowsColValues [][]sqltypes.Value, keyspaceIDs [][]byte) ([]bool, error) { + result := make([]bool, len(rowsColValues)) + destinations, _ := p.Map(ctx, vcursor, rowsColValues) + for i, destination := range destinations { + switch d := destination.(type) { + case key.DestinationKeyspaceID: + result[i] = bytes.Equal(d, keyspaceIDs[i]) + default: + result[i] = false + } + } + return result, nil +} diff --git a/go/vt/vtgate/vindexes/placement_test.go b/go/vt/vtgate/vindexes/placement_test.go new file mode 100644 index 00000000000..c590f1de3e8 --- /dev/null +++ b/go/vt/vtgate/vindexes/placement_test.go @@ -0,0 +1,354 @@ +/* +Copyright 2022 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package vindexes + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/vt/key" +) + +func createBasicPlacementVindex(t *testing.T) (Vindex, error) { + return CreateVindex("placement", "placement", map[string]string{ + "table": "t", + "from": "f1,f2", + "to": "toc", + "placement_prefix_bytes": "1", + "placement_map": "foo:1,bar:2", + "placement_sub_vindex_type": "xxhash", + }) +} + +func TestPlacementName(t *testing.T) { + vindex, err := createBasicPlacementVindex(t) + require.NoError(t, err) + assert.Equal(t, "placement", vindex.String()) +} + +func TestPlacementCost(t *testing.T) { + vindex, err := createBasicPlacementVindex(t) + require.NoError(t, err) + assert.Equal(t, 1, vindex.Cost()) +} + +func TestPlacementIsUnique(t *testing.T) { + vindex, err := createBasicPlacementVindex(t) + require.NoError(t, err) + assert.True(t, vindex.IsUnique()) +} + +func TestPlacementNeedsVCursor(t *testing.T) { + vindex, err := createBasicPlacementVindex(t) + require.NoError(t, err) + assert.False(t, vindex.NeedsVCursor()) +} + +func TestPlacementNoParams(t *testing.T) { + _, err := CreateVindex("placement", "placement", nil) + assert.EqualError(t, err, "missing params: placement_map, placement_prefix_bytes, placement_sub_vindex_type") +} + +func TestPlacementPlacementMapMissing(t *testing.T) { + _, err := CreateVindex("placement", "placement", map[string]string{ + "placement_prefix_bytes": "1", + "placement_sub_vindex_type": "hash", + }) + assert.EqualError(t, err, "missing params: placement_map") +} + +func TestPlacementPlacementMapMalformedMap(t *testing.T) { + _, err := CreateVindex("placement", "placement", map[string]string{ + "placement_map": "xyz", + "placement_prefix_bytes": "1", + "placement_sub_vindex_type": "hash", + }) + assert.EqualError(t, err, "malformed placement_map; entry: xyz; expected key:value") +} + +func TestPlacementPlacementMapMissingKey(t *testing.T) { + _, err := CreateVindex("placement", "placement", map[string]string{ + "placement_map": "abc:1,:2,ghi:3", + "placement_prefix_bytes": "1", + "placement_sub_vindex_type": "hash", + }) + assert.EqualError(t, err, "malformed placement_map; entry: :2; unexpected empty key") +} + +func TestPlacementPlacementMapMissingValue(t *testing.T) { + _, err := CreateVindex("placement", "placement", map[string]string{ + "placement_map": "abc:1,def:,ghi:3", + "placement_prefix_bytes": "1", + "placement_sub_vindex_type": "hash", + }) + assert.EqualError(t, err, "malformed placement_map; entry: def:; unexpected empty value") +} + +func TestPlacementPlacementMapMalformedValue(t *testing.T) { + _, err := CreateVindex("placement", "placement", map[string]string{ + "placement_map": "abc:xyz", + "placement_prefix_bytes": "1", + "placement_sub_vindex_type": "hash", + }) + assert.EqualError(t, err, "malformed placement_map; entry: abc:xyz; strconv.ParseUint: parsing \"xyz\": invalid syntax") +} + +func TestPlacementPrefixBytesMissing(t *testing.T) { + _, err := CreateVindex("placement", "placement", map[string]string{ + "placement_map": "foo:1,bar:2", + "placement_sub_vindex_type": "hash", + }) + assert.EqualError(t, err, "missing params: placement_prefix_bytes") +} + +func TestPlacementPrefixBytesTooLow(t *testing.T) { + _, err := CreateVindex("placement", "placement", map[string]string{ + "placement_map": "foo:1,bar:2", + "placement_prefix_bytes": "0", + "placement_sub_vindex_type": "hash", + }) + assert.EqualError(t, err, 
"invalid placement_prefix_bytes: 0; expected integer between 1 and 7") +} + +func TestPlacementPrefixBytesTooHigh(t *testing.T) { + _, err := CreateVindex("placement", "placement", map[string]string{ + "placement_map": "foo:1,bar:2", + "placement_prefix_bytes": "17", + "placement_sub_vindex_type": "hash", + }) + assert.EqualError(t, err, "invalid placement_prefix_bytes: 17; expected integer between 1 and 7") +} + +func TestPlacementSubVindexTypeMissing(t *testing.T) { + _, err := CreateVindex("placement", "placement", map[string]string{ + "placement_map": "foo:1,bar:2", + "placement_prefix_bytes": "1", + }) + assert.EqualError(t, err, "missing params: placement_sub_vindex_type") +} + +func TestPlacementSubVindexTypeIncorrect(t *testing.T) { + _, err := CreateVindex("placement", "placement", map[string]string{ + "placement_map": "foo:1,bar:2", + "placement_prefix_bytes": "1", + "placement_sub_vindex_type": "doesnotexist", + }) + assert.EqualError(t, err, "invalid placement_sub_vindex_type: vindexType \"doesnotexist\" not found") +} + +func TestPlacementMapSqlTypeVarChar(t *testing.T) { + vindex, err := CreateVindex("placement", "placement", map[string]string{ + "table": "t", + "from": "f1,f2", + "to": "toc", + "placement_prefix_bytes": "1", + "placement_map": "foo:1,bar:2", + "placement_sub_vindex_type": "xxhash", + }) + assert.NoError(t, err) + actualDestinations, err := vindex.(MultiColumn).Map(context.Background(), nil, [][]sqltypes.Value{{ + sqltypes.NewVarChar("foo"), sqltypes.NewVarChar("hello world"), + }, { + sqltypes.NewVarChar("bar"), sqltypes.NewVarChar("hello world"), + }, { + sqltypes.NewVarChar("xyz"), sqltypes.NewVarChar("hello world"), + }}) + assert.NoError(t, err) + + expectedDestinations := []key.Destination{ + key.DestinationKeyspaceID{0x01, 0x68, 0x69, 0x1e, 0xb2, 0x34, 0x67, 0xab}, + key.DestinationKeyspaceID{0x02, 0x68, 0x69, 0x1e, 0xb2, 0x34, 0x67, 0xab}, + key.DestinationNone{}, + } + assert.Equal(t, expectedDestinations, actualDestinations) +} + +func TestPlacementMapSqlTypeInt64(t *testing.T) { + vindex, err := CreateVindex("placement", "placement", map[string]string{ + "table": "t", + "from": "f1,f2", + "to": "toc", + "placement_prefix_bytes": "1", + "placement_map": "foo:1,bar:2", + "placement_sub_vindex_type": "xxhash", + }) + assert.NoError(t, err) + actualDestinations, err := vindex.(MultiColumn).Map(context.Background(), nil, [][]sqltypes.Value{{ + sqltypes.NewVarChar("foo"), sqltypes.NewInt64(1), + }, { + sqltypes.NewVarChar("bar"), sqltypes.NewInt64(1), + }, { + sqltypes.NewVarChar("xyz"), sqltypes.NewInt64(1), + }}) + assert.NoError(t, err) + + expectedDestinations := []key.Destination{ + key.DestinationKeyspaceID{0x01, 0xd4, 0x64, 0x05, 0x36, 0x76, 0x12, 0xb4}, + key.DestinationKeyspaceID{0x02, 0xd4, 0x64, 0x05, 0x36, 0x76, 0x12, 0xb4}, + key.DestinationNone{}, + } + assert.Equal(t, expectedDestinations, actualDestinations) +} + +func TestPlacementMapWithHexValues(t *testing.T) { + vindex, err := CreateVindex("placement", "placement", map[string]string{ + "table": "t", + "from": "f1,f2", + "to": "toc", + "placement_prefix_bytes": "1", + "placement_map": "foo:0x01,bar:0x02", + "placement_sub_vindex_type": "xxhash", + }) + assert.NoError(t, err) + actualDestinations, err := vindex.(MultiColumn).Map(context.Background(), nil, [][]sqltypes.Value{{ + sqltypes.NewVarChar("foo"), sqltypes.NewInt64(1), + }, { + sqltypes.NewVarChar("bar"), sqltypes.NewInt64(1), + }, { + sqltypes.NewVarChar("xyz"), sqltypes.NewInt64(1), + }}) + assert.NoError(t, err) + + 
expectedDestinations := []key.Destination{ + key.DestinationKeyspaceID{0x01, 0xd4, 0x64, 0x05, 0x36, 0x76, 0x12, 0xb4}, + key.DestinationKeyspaceID{0x02, 0xd4, 0x64, 0x05, 0x36, 0x76, 0x12, 0xb4}, + key.DestinationNone{}, + } + assert.Equal(t, expectedDestinations, actualDestinations) +} + +func TestPlacementMapMultiplePrefixBytes(t *testing.T) { + vindex, err := CreateVindex("placement", "placement", map[string]string{ + "table": "t", + "from": "f1,f2", + "to": "toc", + "placement_prefix_bytes": "4", + "placement_map": "foo:1,bar:2", + "placement_sub_vindex_type": "xxhash", + }) + assert.NoError(t, err) + actualDestinations, err := vindex.(MultiColumn).Map(context.Background(), nil, [][]sqltypes.Value{{ + sqltypes.NewVarChar("foo"), sqltypes.NewInt64(1), + }, { + sqltypes.NewVarChar("bar"), sqltypes.NewInt64(1), + }, { + sqltypes.NewVarChar("xyz"), sqltypes.NewInt64(1), + }}) + assert.NoError(t, err) + + expectedDestinations := []key.Destination{ + key.DestinationKeyspaceID{0x00, 0x00, 0x00, 0x01, 0xd4, 0x64, 0x05, 0x36}, + key.DestinationKeyspaceID{0x00, 0x00, 0x00, 0x02, 0xd4, 0x64, 0x05, 0x36}, + key.DestinationNone{}, + } + assert.Equal(t, expectedDestinations, actualDestinations) +} + +func TestPlacementSubVindexNumeric(t *testing.T) { + vindex, err := CreateVindex("placement", "placement", map[string]string{ + "table": "t", + "from": "f1,f2", + "to": "toc", + "placement_prefix_bytes": "1", + "placement_map": "foo:1,bar:2", + "placement_sub_vindex_type": "numeric", + }) + assert.NoError(t, err) + actualDestinations, err := vindex.(MultiColumn).Map(context.Background(), nil, [][]sqltypes.Value{{ + sqltypes.NewVarChar("foo"), sqltypes.NewInt64(0x01234567deadbeef), + }, { + sqltypes.NewVarChar("bar"), sqltypes.NewInt64(0x01234567deadbeef), + }, { + sqltypes.NewVarChar("xyz"), sqltypes.NewInt64(0x01234567deadbeef), + }}) + assert.NoError(t, err) + + expectedDestinations := []key.Destination{ + key.DestinationKeyspaceID{0x01, 0x01, 0x23, 0x45, 0x67, 0xde, 0xad, 0xbe}, + key.DestinationKeyspaceID{0x02, 0x01, 0x23, 0x45, 0x67, 0xde, 0xad, 0xbe}, + key.DestinationNone{}, + } + assert.Equal(t, expectedDestinations, actualDestinations) +} + +func TestPlacementMapPrefixOnly(t *testing.T) { + vindex, err := CreateVindex("placement", "placement", map[string]string{ + "table": "t", + "from": "f1,f2", + "to": "toc", + "placement_prefix_bytes": "1", + "placement_map": "foo:1,bar:2", + "placement_sub_vindex_type": "xxhash", + }) + assert.NoError(t, err) + actualDestinations, err := vindex.(MultiColumn).Map(context.Background(), nil, [][]sqltypes.Value{{ + sqltypes.NewVarChar("foo"), + }, { + sqltypes.NewVarChar("bar"), + }, { + sqltypes.NewVarChar("xyz"), + }}) + assert.NoError(t, err) + + expectedDestinations := []key.Destination{ + NewKeyRangeFromPrefix([]byte{0x01}), + NewKeyRangeFromPrefix([]byte{0x02}), + key.DestinationNone{}, + } + assert.Equal(t, expectedDestinations, actualDestinations) +} + +func TestPlacementMapTooManyColumns(t *testing.T) { + vindex, err := CreateVindex("placement", "placement", map[string]string{ + "table": "t", + "from": "f1,f2", + "to": "toc", + "placement_prefix_bytes": "1", + "placement_map": "foo:1,bar:2", + "placement_sub_vindex_type": "xxhash", + }) + assert.NoError(t, err) + actualDestinations, err := vindex.(MultiColumn).Map(context.Background(), nil, [][]sqltypes.Value{{ + // Too many columns; expecting two, providing three. 
+ sqltypes.NewVarChar("a"), sqltypes.NewVarChar("b"), sqltypes.NewVarChar("c"), + }}) + assert.EqualError(t, err, "wrong number of column values were passed: expected 1-2, got 3") + + assert.Nil(t, actualDestinations) +} + +func TestPlacementMapNoColumns(t *testing.T) { + vindex, err := CreateVindex("placement", "placement", map[string]string{ + "table": "t", + "from": "f1,f2", + "to": "toc", + "placement_prefix_bytes": "1", + "placement_map": "foo:1,bar:2", + "placement_sub_vindex_type": "xxhash", + }) + assert.NoError(t, err) + actualDestinations, err := vindex.(MultiColumn).Map(context.Background(), nil, [][]sqltypes.Value{{ + // Empty column list. + }}) + assert.EqualError(t, err, "wrong number of column values were passed: expected 1-2, got 0") + + assert.Nil(t, actualDestinations) +} From 330f36f9306641816b166c0679caf2ca95789d16 Mon Sep 17 00:00:00 2001 From: Jeremy Cole Date: Tue, 7 Feb 2023 15:30:12 -0800 Subject: [PATCH 02/12] Refactor and cleanup treatment of keyspace IDs and KeyRange Signed-off-by: Jeremy Cole Address internal review comments Signed-off-by: Jeremy Cole Fix apparent bug in KeyRangeContiguous when a or b are full-range Signed-off-by: Jeremy Cole Add test for bug in comparing "0003" vs "000300" Signed-off-by: Hormoz Kheradmand Remove trailing zeroes in key.Normalize instead of adding padding Signed-off-by: Jeremy Cole Address review feedback; test formatting, comments, function naming Signed-off-by: Jeremy Cole Refactor tests for TestKeyRangesIntersect Signed-off-by: Jeremy Cole Rename KeyRangesIntersect to KeyRangeIntersect for consistency Signed-off-by: Jeremy Cole Remove unused KeyRangesOverlap function Signed-off-by: Jeremy Cole Rename KeyRangeIncludes to KeyRangeContainsKeyRange, clean up and add tests Signed-off-by: Jeremy Cole --- go/test/endtoend/keyspace/keyspace_test.go | 7 +- go/vt/discovery/topology_watcher.go | 2 +- go/vt/key/destination.go | 6 +- go/vt/key/key.go | 413 +++--- go/vt/key/key_test.go | 1112 ++++++++++++++--- go/vt/topo/shard.go | 2 +- go/vt/topo/topoproto/srvkeyspace.go | 15 +- go/vt/topotools/split.go | 4 +- go/vt/topotools/split_test.go | 4 + go/vt/vtctl/workflow/stream_migrator.go | 2 +- .../tabletmanager/vdiff/workflow_differ.go | 2 +- .../vreplication/table_plan_builder.go | 2 +- .../tabletserver/vstreamer/uvstreamer.go | 2 +- go/vt/wrangler/materializer.go | 2 +- go/vt/wrangler/resharder.go | 2 +- go/vt/wrangler/traffic_switcher.go | 4 +- go/vt/wrangler/traffic_switcher_env_test.go | 4 +- go/vt/wrangler/vdiff.go | 2 +- 18 files changed, 1187 insertions(+), 400 deletions(-) diff --git a/go/test/endtoend/keyspace/keyspace_test.go b/go/test/endtoend/keyspace/keyspace_test.go index 82477dacee7..c8ff519910f 100644 --- a/go/test/endtoend/keyspace/keyspace_test.go +++ b/go/test/endtoend/keyspace/keyspace_test.go @@ -17,7 +17,6 @@ limitations under the License. 
package sequence import ( - "bytes" "encoding/binary" "encoding/json" "flag" @@ -25,6 +24,8 @@ import ( "strings" "testing" + "vitess.io/vitess/go/vt/key" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -392,8 +393,8 @@ func TestKeyspaceToShardName(t *testing.T) { shardKIDs := shardKIdMap[shardRef.Name] for _, kid := range shardKIDs { id = packKeyspaceID(kid) - assert.True(t, bytes.Compare(shardRef.KeyRange.Start, id) <= 0 && - (len(shardRef.KeyRange.End) == 0 || bytes.Compare(id, shardRef.KeyRange.End) < 0)) + assert.True(t, key.Compare(shardRef.KeyRange.Start, id) <= 0 && + (key.Empty(shardRef.KeyRange.End) || key.Compare(id, shardRef.KeyRange.End) < 0)) } } } diff --git a/go/vt/discovery/topology_watcher.go b/go/vt/discovery/topology_watcher.go index 5e5c54ab57a..8a4dcb3213e 100644 --- a/go/vt/discovery/topology_watcher.go +++ b/go/vt/discovery/topology_watcher.go @@ -362,7 +362,7 @@ func (fbs *FilterByShard) IsIncluded(tablet *topodata.Tablet) bool { // Exact match (probably a non-sharded keyspace). return true } - if kr != nil && c.keyRange != nil && key.KeyRangeIncludes(c.keyRange, kr) { + if kr != nil && c.keyRange != nil && key.KeyRangeContainsKeyRange(c.keyRange, kr) { // Our filter's KeyRange includes the provided KeyRange return true } diff --git a/go/vt/key/destination.go b/go/vt/key/destination.go index c4fb37b7a9d..437e980f480 100644 --- a/go/vt/key/destination.go +++ b/go/vt/key/destination.go @@ -128,7 +128,7 @@ func (d DestinationExactKeyRange) String() string { func processExactKeyRange(allShards []*topodatapb.ShardReference, kr *topodatapb.KeyRange, addShard func(shard string) error) error { sort.SliceStable(allShards, func(i, j int) bool { - return KeyRangeStartSmaller(allShards[i].GetKeyRange(), allShards[j].GetKeyRange()) + return KeyRangeLess(allShards[i].GetKeyRange(), allShards[j].GetKeyRange()) }) shardnum := 0 @@ -139,7 +139,7 @@ func processExactKeyRange(allShards []*topodatapb.ShardReference, kr *topodatapb shardnum++ } for shardnum < len(allShards) { - if !KeyRangesIntersect(kr, allShards[shardnum].KeyRange) { + if !KeyRangeIntersect(kr, allShards[shardnum].KeyRange) { // If we are over the requested keyrange, we // can stop now, we won't find more. break @@ -215,7 +215,7 @@ func (d DestinationKeyRange) String() string { func processKeyRange(allShards []*topodatapb.ShardReference, kr *topodatapb.KeyRange, addShard func(shard string) error) error { for _, shard := range allShards { - if !KeyRangesIntersect(kr, shard.KeyRange) { + if !KeyRangeIntersect(kr, shard.KeyRange) { // We don't need that shard. continue } diff --git a/go/vt/key/key.go b/go/vt/key/key.go index 6617be18818..dcdcda47f81 100644 --- a/go/vt/key/key.go +++ b/go/vt/key/key.go @@ -26,16 +26,18 @@ import ( "regexp" "strings" - "google.golang.org/protobuf/proto" - topodatapb "vitess.io/vitess/go/vt/proto/topodata" ) +var ( + KeyRangePattern = regexp.MustCompile(`^(0|([0-9a-fA-F]{2})*-([0-9a-fA-F]{2})*)$`) +) + // // Uint64Key definitions // -// Uint64Key is a uint64 that can be converted into a KeyspaceId. +// Uint64Key is a uint64 that can be converted into a keyspace id. type Uint64Key uint64 func (i Uint64Key) String() string { @@ -49,261 +51,214 @@ func (i Uint64Key) Bytes() []byte { return buf } -// -// KeyRange helper methods -// +// Helper methods for keyspace id values. -// EvenShardsKeyRange returns a key range definition for a shard at index "i", -// assuming range based sharding with "n" equal-width shards in total. -// i starts at 0. 
-//
-// Example: (1, 2) returns the second out of two shards in total i.e. "80-".
-//
-// This function must not be used in the Vitess code base because Vitess also
-// supports shards with different widths. In that case, the output of this
-// function would be wrong.
-//
-// Note: start and end values have trailing zero bytes omitted.
-// For example, "80-" has only the first byte (0x80) set.
-// We do this to produce the same KeyRange objects as ParseKeyRangeParts() does.
-// Because it's using the Go hex methods, it's omitting trailing zero bytes as
-// well.
-func EvenShardsKeyRange(i, n int) (*topodatapb.KeyRange, error) {
-	if n <= 0 {
-		return nil, fmt.Errorf("the shard count must be > 0: %v", n)
-	}
-	if i >= n {
-		return nil, fmt.Errorf("the index of the shard must be less than the total number of shards: %v < %v", i, n)
-	}
-	if n&(n-1) != 0 {
-		return nil, fmt.Errorf("the shard count must be a power of two: %v", n)
+// Normalize removes any trailing zero bytes from id. This allows two id values to be compared even if they are
+// different lengths.
+// From a key range perspective, -80 == 00-80 == 0000-8000 == 000000-800000, etc. and they should
+// always be treated the same even if they are different lengths.
+func Normalize(id []byte) []byte {
+	trailingZeroes := 0
+	for i := len(id) - 1; i >= 0 && id[i] == 0x00; i-- {
+		trailingZeroes += 1
 	}
 
-	// Determine the number of bytes which are required to represent any
-	// KeyRange start or end for the given n.
-	// This is required to trim the returned values to the same length e.g.
-	// (256, 512) should return 8000-8080 as shard key range.
-	minBytes := 0
-	for nn := Uint64Key(n - 1); nn > 0; nn >>= 8 {
-		minBytes++
-	}
+	return id[:len(id)-trailingZeroes]
+}
 
-	width := Uint64Key(math.MaxUint64)/Uint64Key(n) + 1
-	start := Uint64Key(i) * width
-	end := start + width
+// Compare compares two keyspace IDs while taking care to normalize them; returns -1 if a<b, 1 if a>b, 0 if equal.
+func Compare(a, b []byte) int {
+	return bytes.Compare(Normalize(a), Normalize(b))
+}
 
-	// Note: The byte value is empty if start or end is the min or the max
-	// respectively.
-	startBytes := start.Bytes()[:minBytes]
-	endBytes := end.Bytes()[:minBytes]
-	if start == 0 {
-		startBytes = []byte{}
-	}
-	if end == 0 {
-		// Always set the end except for the last shard. In that case, the
-		// end value (2^64) flows over and is the same as 0.
-		endBytes = []byte{}
-	}
-	return &topodatapb.KeyRange{Start: startBytes, End: endBytes}, nil
+// Less returns true if a is less than b.
+func Less(a, b []byte) bool {
+	return Compare(a, b) < 0
+}
+
+// Equal returns true if a is equal to b.
+func Equal(a, b []byte) bool {
+	return Compare(a, b) == 0
+}
+
+// Empty returns true if id is an empty keyspace ID.
+func Empty(id []byte) bool {
+	return len(Normalize(id)) == 0
 }
 
-// KeyRangeAdd adds two adjacent keyranges into a single value.
-// If the values are not adjacent, it returns false.
-func KeyRangeAdd(first, second *topodatapb.KeyRange) (*topodatapb.KeyRange, bool) {
-	if first == nil || second == nil {
+//
+// KeyRange helper methods
+//
+
+// KeyRangeAdd adds two adjacent KeyRange values (in any order) into a single value. If the values are not adjacent,
+// it returns false.
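+// An illustrative sketch, using hex key range shorthand: adding 40-80 and 80-c0 in either order yields
+// 40-c0 and true, while 40-80 and a0-c0 are not adjacent and yield nil and false.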
+func KeyRangeAdd(a, b *topodatapb.KeyRange) (*topodatapb.KeyRange, bool) {
+	if a == nil || b == nil {
 		return nil, false
 	}
-	if len(first.End) != 0 && bytes.Equal(addPadding(first.End), addPadding(second.Start)) {
-		return &topodatapb.KeyRange{Start: first.Start, End: second.End}, true
+	if !Empty(a.End) && Equal(a.End, b.Start) {
+		return &topodatapb.KeyRange{Start: a.Start, End: b.End}, true
 	}
-	if len(second.End) != 0 && bytes.Equal(addPadding(second.End), addPadding(first.Start)) {
-		return &topodatapb.KeyRange{Start: second.Start, End: first.End}, true
+	if !Empty(b.End) && Equal(b.End, a.Start) {
+		return &topodatapb.KeyRange{Start: b.Start, End: a.End}, true
 	}
 	return nil, false
 }
 
 // KeyRangeContains returns true if the provided id is in the keyrange.
-func KeyRangeContains(kr *topodatapb.KeyRange, id []byte) bool {
-	if kr == nil {
+func KeyRangeContains(keyRange *topodatapb.KeyRange, id []byte) bool {
+	if KeyRangeIsComplete(keyRange) {
 		return true
 	}
-	return bytes.Compare(addPadding(kr.Start), addPadding(id)) <= 0 &&
-		(len(kr.End) == 0 || bytes.Compare(addPadding(id), addPadding(kr.End)) < 0)
+	return (Empty(keyRange.Start) || Compare(id, keyRange.Start) >= 0) && (Empty(keyRange.End) || Compare(id, keyRange.End) < 0)
 }
 
-// ParseKeyRangeParts parses a start and end hex values and build a proto KeyRange
+// ParseKeyRangeParts parses a Start and End as hex encoded strings and builds a proto KeyRange.
 func ParseKeyRangeParts(start, end string) (*topodatapb.KeyRange, error) {
-	s, err := hex.DecodeString(start)
+	startKey, err := hex.DecodeString(start)
 	if err != nil {
 		return nil, err
 	}
-	e, err := hex.DecodeString(end)
+	endKey, err := hex.DecodeString(end)
 	if err != nil {
 		return nil, err
 	}
-	return &topodatapb.KeyRange{Start: s, End: e}, nil
+	return &topodatapb.KeyRange{Start: startKey, End: endKey}, nil
 }
 
-// KeyRangeString prints a topodatapb.KeyRange
-func KeyRangeString(k *topodatapb.KeyRange) string {
-	if k == nil {
+// KeyRangeString formats a topodatapb.KeyRange into a hex encoded string.
+func KeyRangeString(keyRange *topodatapb.KeyRange) string {
+	if KeyRangeIsComplete(keyRange) {
 		return "-"
 	}
-	return hex.EncodeToString(k.Start) + "-" + hex.EncodeToString(k.End)
+	return hex.EncodeToString(keyRange.Start) + "-" + hex.EncodeToString(keyRange.End)
 }
 
-// KeyRangeIsPartial returns true if the KeyRange does not cover the entire space.
-func KeyRangeIsPartial(kr *topodatapb.KeyRange) bool {
-	if kr == nil {
-		return false
+// KeyRangeStartCompare compares the Start of two KeyRange values using semantics unique to Start values where an
+// empty Start means the *minimum* value; returns -1 if a<b, 1 if a>b, 0 if equal.
+func KeyRangeStartCompare(a, b *topodatapb.KeyRange) int {
+	aIsMinimum := a == nil || Empty(a.Start)
+	bIsMinimum := b == nil || Empty(b.Start)
+
+	if aIsMinimum && bIsMinimum {
+		return 0
+	} else if aIsMinimum {
+		return -1
+	} else if bIsMinimum {
+		return 1
 	}
-	return !(len(kr.Start) == 0 && len(kr.End) == 0)
+
+	return Compare(a.Start, b.Start)
 }
 
-// KeyRangeEqual returns true if both key ranges cover the same area
-func KeyRangeEqual(left, right *topodatapb.KeyRange) bool {
-	if left == nil {
-		return right == nil || (len(right.Start) == 0 && len(right.End) == 0)
-	}
-	if right == nil {
-		return len(left.Start) == 0 && len(left.End) == 0
-	}
-	return bytes.Equal(addPadding(left.Start), addPadding(right.Start)) &&
-		bytes.Equal(addPadding(left.End), addPadding(right.End))
+// KeyRangeStartEqual returns true if both KeyRange values have the same Start.
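+// For example, 80- and 8000-a000 have equal Starts after normalization, while -40 and 40-80 do not,
+// because an empty Start means the minimum value.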
+func KeyRangeStartEqual(a, b *topodatapb.KeyRange) bool {
+	return KeyRangeStartCompare(a, b) == 0
 }
 
-// addPadding adds padding to make sure keyrange represents an 8 byte integer.
-// From Vitess docs:
-// A hash vindex produces an 8-byte number.
-// This means that all numbers less than 0x8000000000000000 will fall in shard -80.
-// Any number with the highest bit set will be >= 0x8000000000000000, and will therefore
-// belong to shard 80-.
-// This means that from a keyrange perspective -80 == 00-80 == 0000-8000 == 000000-800000
-// If we don't add this padding, we could run into issues when transitioning from keyranges
-// that use 2 bytes to 4 bytes.
-func addPadding(kr []byte) []byte {
-	paddedKr := make([]byte, 8)
-
-	for i := 0; i < len(kr); i++ {
-		paddedKr = append(paddedKr, kr[i])
-	}
+// KeyRangeEndCompare compares the End of two KeyRange values using semantics unique to End values where an
+// empty End means the *maximum* value; returns -1 if a<b, 1 if a>b, 0 if equal.
+func KeyRangeEndCompare(a, b *topodatapb.KeyRange) int {
+	aIsMaximum := a == nil || Empty(a.End)
+	bIsMaximum := b == nil || Empty(b.End)
 
-	for i := len(kr); i < 8; i++ {
-		paddedKr = append(paddedKr, 0)
+	if aIsMaximum && bIsMaximum {
+		return 0
+	} else if aIsMaximum {
+		return 1
+	} else if bIsMaximum {
+		return -1
 	}
-	return paddedKr
+
+	return Compare(a.End, b.End)
 }
 
-// KeyRangeStartSmaller returns true if right's keyrange start is _after_ left's start
-func KeyRangeStartSmaller(left, right *topodatapb.KeyRange) bool {
-	if left == nil {
-		return right != nil
-	}
-	if right == nil {
-		return false
-	}
-	return bytes.Compare(addPadding(left.Start), addPadding(right.Start)) < 0
+// KeyRangeEndEqual returns true if both KeyRange values have the same End.
+func KeyRangeEndEqual(a, b *topodatapb.KeyRange) bool {
+	return KeyRangeEndCompare(a, b) == 0
 }
 
-// KeyRangeStartEqual returns true if both key ranges have the same start
-func KeyRangeStartEqual(left, right *topodatapb.KeyRange) bool {
-	if left == nil {
-		return right == nil || len(right.Start) == 0
-	}
-	if right == nil {
-		return len(left.Start) == 0
-	}
-	return bytes.Equal(addPadding(left.Start), addPadding(right.Start))
+// KeyRangeCompare compares two KeyRange values, taking into account both the Start and End fields and their
+// field-specific comparison logic; returns -1 if a<b, 1 if a>b, 0 if equal. Specifically:
+//
+// - The Start-specific KeyRangeStartCompare and End-specific KeyRangeEndCompare are used for proper comparison
+// of an empty value for either Start or End.
+// - The Start is compared first and End is only compared if Start is equal.
+func KeyRangeCompare(a, b *topodatapb.KeyRange) int {
+	// First, compare the Start field.
+	if v := KeyRangeStartCompare(a, b); v != 0 {
+		// The Start field for a and b differ, and that is enough; return that comparison.
+		return v
+	}
+
+	// The Start field was equal, so compare the End field and return that comparison.
+	return KeyRangeEndCompare(a, b)
 }
 
-// KeyRangeContiguous returns true if the end of the left key range exactly
-// matches the start of the right key range (i.e they are contigious)
-func KeyRangeContiguous(left, right *topodatapb.KeyRange) bool {
-	if left == nil {
-		return right == nil || (len(right.Start) == 0 && len(right.End) == 0)
-	}
-	if right == nil {
-		return len(left.Start) == 0 && len(left.End) == 0
-	}
-	return bytes.Equal(addPadding(left.End), addPadding(right.Start))
+// KeyRangeEqual returns true if a and b are equal.
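+// For example, -80 equals -8000 and 10-30 equals 1000-3000 after normalization, while -80 and 80- are not equal.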
+func KeyRangeEqual(a, b *topodatapb.KeyRange) bool {
+	return KeyRangeCompare(a, b) == 0
 }
 
-// KeyRangeEndEqual returns true if both key ranges have the same end
-func KeyRangeEndEqual(left, right *topodatapb.KeyRange) bool {
-	if left == nil {
-		return right == nil || len(right.End) == 0
-	}
-	if right == nil {
-		return len(left.End) == 0
-	}
-	return bytes.Equal(addPadding(left.End), addPadding(right.End))
+// KeyRangeLess returns true if a is less than b.
+func KeyRangeLess(a, b *topodatapb.KeyRange) bool {
+	return KeyRangeCompare(a, b) < 0
 }
 
-// For more info on the following functions, see:
-// See: http://stackoverflow.com/questions/4879315/what-is-a-tidy-algorithm-to-find-overlapping-intervals
-// two segments defined as (a,b) and (c,d) (with a < b and c < d)
-// intersects = (b > c) && (a < d)
-// overlap = min(b, d) - max(c, a)
-
-// KeyRangesIntersect returns true if some Keyspace values exist in both ranges.
-func KeyRangesIntersect(first, second *topodatapb.KeyRange) bool {
-	if first == nil || second == nil {
-		return true
-	}
-	return (len(first.End) == 0 || bytes.Compare(addPadding(second.Start), addPadding(first.End)) < 0) &&
-		(len(second.End) == 0 || bytes.Compare(addPadding(first.Start), addPadding(second.End)) < 0)
+// KeyRangeIsComplete returns true if the KeyRange covers the entire keyspace.
+func KeyRangeIsComplete(keyRange *topodatapb.KeyRange) bool {
+	return keyRange == nil || (Empty(keyRange.Start) && Empty(keyRange.End))
 }
 
-// KeyRangesOverlap returns the overlap between two KeyRanges.
-// They need to overlap, otherwise an error is returned.
-func KeyRangesOverlap(first, second *topodatapb.KeyRange) (*topodatapb.KeyRange, error) {
-	if !KeyRangesIntersect(first, second) {
-		return nil, fmt.Errorf("KeyRanges %v and %v don't overlap", first, second)
-	}
-	if first == nil {
-		return second, nil
-	}
-	if second == nil {
-		return first, nil
-	}
-	// compute max(c,a) and min(b,d)
-	// start with (a,b)
-	result := proto.Clone(first).(*topodatapb.KeyRange)
-	// if c > a, then use c
-	if bytes.Compare(addPadding(second.Start), addPadding(first.Start)) > 0 {
-		result.Start = second.Start
+// KeyRangeIsPartial returns true if the KeyRange does not cover the entire keyspace.
+func KeyRangeIsPartial(keyRange *topodatapb.KeyRange) bool {
+	return !KeyRangeIsComplete(keyRange)
+}
+
+// KeyRangeContiguous returns true if the End of KeyRange a is equivalent to the Start of the KeyRange b,
+// which means that they are contiguous.
+func KeyRangeContiguous(a, b *topodatapb.KeyRange) bool {
+	if KeyRangeIsComplete(a) || KeyRangeIsComplete(b) {
+		return false // no two KeyRange values can be contiguous if either is the complete range
 	}
-	// if b is maxed out, or
-	// (d is not maxed out and d < b)
-	//   ^ valid test as neither b nor d are max
-	// then use d
-	if len(first.End) == 0 || (len(second.End) != 0 && bytes.Compare(addPadding(second.End), addPadding(first.End)) < 0) {
-		result.End = second.End
+
+	return Equal(a.End, b.Start)
+}
+
+// For more info on the following functions, see:
+// http://stackoverflow.com/questions/4879315/what-is-a-tidy-algorithm-to-find-overlapping-intervals
+// Two segments defined as (a,b) and (c,d) (with a < b and c < d):
+// * intersects = (b > c) && (a < d)
+// * overlap = min(b, d) - max(c, a)
+
+// KeyRangeIntersect returns true if some part of KeyRange a and b overlap, meaning that some keyspace ID values
+// exist in both a and b.
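+// For example, 40-80 and 60-a0 intersect, 40-80 and 80-c0 do not (adjacent ranges share no keyspace IDs),
+// and the complete range - intersects every KeyRange.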
+func KeyRangeIntersect(a, b *topodatapb.KeyRange) bool { + if KeyRangeIsComplete(a) || KeyRangeIsComplete(b) { + return true // if either KeyRange is complete, there must be an intersection } - return result, nil + + return (Empty(a.End) || Less(b.Start, a.End)) && (Empty(b.End) || Less(a.Start, b.End)) } -// KeyRangeIncludes returns true if the first provided KeyRange, big, -// contains the second KeyRange, small. If they intersect, but small -// spills out, this returns false. -func KeyRangeIncludes(big, small *topodatapb.KeyRange) bool { - if big == nil { - // The outside one covers everything, we're good. +// KeyRangeContainsKeyRange returns true if KeyRange a fully contains KeyRange b. +func KeyRangeContainsKeyRange(a, b *topodatapb.KeyRange) bool { + // If a covers the entire KeyRange, it always contains b. + if KeyRangeIsComplete(a) { return true } - if small == nil { - // The smaller one covers everything, better have the - // bigger one also cover everything. - return len(big.Start) == 0 && len(big.End) == 0 - } - // Now we check small.Start >= big.Start, and small.End <= big.End - if len(big.Start) != 0 && bytes.Compare(addPadding(small.Start), addPadding(big.Start)) < 0 { - return false + + // If b covers the entire KeyRange, a must also cover the entire KeyRange. + if KeyRangeIsComplete(b) { + return KeyRangeIsComplete(a) } - if len(big.End) != 0 && (len(small.End) == 0 || bytes.Compare(addPadding(small.End), addPadding(big.End)) > 0) { - return false + + // Ensure b.Start >= a.Start and b.End <= a.End. + if KeyRangeStartCompare(b, a) >= 0 && KeyRangeEndCompare(b, a) <= 0 { + return true } - return true + + return false } // ParseShardingSpec parses a string that describes a sharding @@ -351,11 +306,63 @@ func ParseShardingSpec(spec string) ([]*topodatapb.KeyRange, error) { return ranges, nil } -var krRegexp = regexp.MustCompile(`^[0-9a-fA-F]*-[0-9a-fA-F]*$`) +// IsValidKeyRange returns true if the string represents a valid key range. +func IsValidKeyRange(keyRangeString string) bool { + return KeyRangePattern.MatchString(keyRangeString) +} -// IsKeyRange returns true if the string represents a keyrange. -func IsKeyRange(kr string) bool { - return krRegexp.MatchString(kr) +// EvenShardsKeyRange returns a key range definition for a shard at index "i", +// assuming range based sharding with "n" equal-width shards in total. +// i starts at 0. +// +// Example: (1, 2) returns the second out of two shards in total i.e. "80-". +// +// This function must not be used in the Vitess code base because Vitess also +// supports shards with different widths. In that case, the output of this +// function would be wrong. +// +// Note: start and end values have trailing zero bytes omitted. +// For example, "80-" has only the first byte (0x80) set. +// We do this to produce the same KeyRange objects as ParseKeyRangeParts() does. +// Because it's using the Go hex methods, it's omitting trailing zero bytes as +// well. +func EvenShardsKeyRange(i, n int) (*topodatapb.KeyRange, error) { + if n <= 0 { + return nil, fmt.Errorf("the shard count must be > 0: %v", n) + } + if i >= n { + return nil, fmt.Errorf("the index of the shard must be less than the total number of shards: %v < %v", i, n) + } + if n&(n-1) != 0 { + return nil, fmt.Errorf("the shard count must be a power of two: %v", n) + } + + // Determine the number of bytes which are required to represent any + // KeyRange start or end for the given n. + // This is required to trim the returned values to the same length e.g. 
+	// (256, 512) should return 8000-8080 as shard key range.
+	minBytes := 0
+	for nn := Uint64Key(n - 1); nn > 0; nn >>= 8 {
+		minBytes++
+	}
+
+	width := Uint64Key(math.MaxUint64)/Uint64Key(n) + 1
+	start := Uint64Key(i) * width
+	end := start + width
+
+	// Note: The byte value is empty if start or end is the min or the max
+	// respectively.
+	startBytes := start.Bytes()[:minBytes]
+	endBytes := end.Bytes()[:minBytes]
+	if start == 0 {
+		startBytes = []byte{}
+	}
+	if end == 0 {
+		// Always set the end except for the last shard. In that case, the
+		// end value (2^64) flows over and is the same as 0.
+		endBytes = []byte{}
+	}
+	return &topodatapb.KeyRange{Start: startBytes, End: endBytes}, nil
 }
 
 // GenerateShardRanges returns shard ranges assuming a keyspace with N shards.
diff --git a/go/vt/key/key_test.go b/go/vt/key/key_test.go
index 639b81c5f18..8db45aa79b9 100644
--- a/go/vt/key/key_test.go
+++ b/go/vt/key/key_test.go
@@ -28,7 +28,222 @@ import (
 	topodatapb "vitess.io/vitess/go/vt/proto/topodata"
 )
 
-func TestKey(t *testing.T) {
+func TestNormalize(t *testing.T) {
+	type args struct {
+		id []byte
+	}
+	tests := []struct {
+		name string
+		args args
+		want []byte
+	}{
+		{
+			"empty should stay empty",
+			args{[]byte{}},
+			[]byte{},
+		},
+		{
+			"one zero should be empty",
+			args{[]byte{0x00}},
+			[]byte{},
+		},
+		{
+			"any number of zeroes should be empty",
+			args{[]byte{0x00, 0x00, 0x00}},
+			[]byte{},
+		},
+		{
+			"one non-zero byte should be left alone",
+			args{[]byte{0x11}},
+			[]byte{0x11},
+		},
+		{
+			"multiple non-zero bytes should be left alone",
+			args{[]byte{0x11, 0x22, 0x33}},
+			[]byte{0x11, 0x22, 0x33},
+		},
+		{
+			"zeroes that aren't trailing should be left alone",
+			args{[]byte{0x11, 0x00, 0x22, 0x00, 0x33, 0x00}},
+			[]byte{0x11, 0x00, 0x22, 0x00, 0x33},
+		},
+		{
+			"excess zero bytes should be removed after a non-zero byte",
+			args{[]byte{0x11, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}},
+			[]byte{0x11},
+		},
+		{
+			"excess zero bytes should be removed after multiple non-zero bytes",
+			args{[]byte{0x11, 0x22, 0x00, 0x00, 0x00}},
+			[]byte{0x11, 0x22},
+		},
+		{
+			"values longer than eight bytes should be supported",
+			args{[]byte{0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0x00}},
+			[]byte{0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa},
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			assert.Equalf(t, tt.want, Normalize(tt.args.id), "Normalize(%v)", tt.args.id)
+		})
+	}
+}
+
+func TestCompare(t *testing.T) {
+	type args struct {
+		a []byte
+		b []byte
+	}
+	tests := []struct {
+		name string
+		args args
+		want int
+	}{
+		{
+			"empty ids are equal",
+			args{[]byte{}, []byte{}},
+			0,
+		},
+		{
+			"equal full id values are equal",
+			args{
+				[]byte{0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88},
+				[]byte{0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88},
+			},
+			0,
+		},
+		{
+			"equal partial id values are equal",
+			args{
+				[]byte{0x11, 0x22},
+				[]byte{0x11, 0x22},
+			},
+			0,
+		},
+		{
+			"equal full and partial id values are equal",
+			args{[]byte{0x11, 0x22, 0x33, 0x44}, []byte{0x11, 0x22, 0x33, 0x44, 0x00, 0x00, 0x00, 0x00}},
+			0,
+		},
+		{
+			"equal partial and full id values are equal",
+			args{[]byte{0x11, 0x22, 0x33, 0x44, 0x00, 0x00, 0x00, 0x00}, []byte{0x11, 0x22, 0x33, 0x44}},
+			0,
+		},
+		{"a less than b", args{[]byte{0x01}, []byte{0x02}}, -1},
+		{"a greater than b", args{[]byte{0x02}, []byte{0x01}}, +1},
+		{
+			"a greater than b, with different lengths after normalization",
+			args{[]byte{0x30, 0x00}, []byte{0x20}},
+			1,
+		},
+	}
+	for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) { + assert.Equalf(t, tt.want, Compare(tt.args.a, tt.args.b), "Compare(%v, %v)", tt.args.a, tt.args.b) + }) + } +} + +func TestLess(t *testing.T) { + type args struct { + a []byte + b []byte + } + tests := []struct { + name string + args args + want bool + }{ + // Less uses Compare which is already robustly tested, so we're just aiming to ensure that the result + // of the Compare is used correctly in context and not e.g. reversed, so test a few obvious cases. + { + "a is less than b", + args{[]byte{0x01}, []byte{0x02}}, + true, + }, + { + "a is equal to b", + args{[]byte{0x01}, []byte{0x01}}, + false, + }, + { + "a is greater than b", + args{[]byte{0x02}, []byte{0x01}}, + false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + assert.Equalf(t, tt.want, Less(tt.args.a, tt.args.b), "Less(%v, %v)", tt.args.a, tt.args.b) + }) + } +} + +func TestEqual(t *testing.T) { + type args struct { + a []byte + b []byte + } + tests := []struct { + name string + args args + want bool + }{ + // Equal uses Compare which is already robustly tested, so we're just aiming to ensure that the result + // of the Compare is used correctly in context and not e.g. reversed, so test a few obvious cases. + { + "a is less than b", + args{[]byte{0x01}, []byte{0x02}}, + false, + }, + { + "a is equal to b", + args{[]byte{0x01}, []byte{0x01}}, + true, + }, + { + "a is greater than b", + args{[]byte{0x02}, []byte{0x01}}, + false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + assert.Equalf(t, tt.want, Equal(tt.args.a, tt.args.b), "Equal(%v, %v)", tt.args.a, tt.args.b) + }) + } +} + +func TestEmpty(t *testing.T) { + type args struct { + id []byte + } + tests := []struct { + name string + args args + want bool + }{ + { + "empty", + args{[]byte{}}, + true, + }, + { + "not empty", + args{[]byte{0x11, 0x22, 0x33}}, + false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + assert.Equalf(t, tt.want, Empty(tt.args.id), "Empty(%v)", tt.args.id) + }) + } +} + +func TestUint64Key(t *testing.T) { k0 := Uint64Key(0) k1 := Uint64Key(1) k2 := Uint64Key(0x7FFFFFFFFFFFFFFF) @@ -362,7 +577,7 @@ func TestKeyRangeContiguous(t *testing.T) { }, { first: "-", second: "-40", - out: true, + out: false, }, { first: "40-80", second: "c0-", @@ -460,7 +675,398 @@ func TestParseShardingSpec(t *testing.T) { } } -func TestContains(t *testing.T) { +func TestKeyRangeComparisons(t *testing.T) { + type args struct { + a *topodatapb.KeyRange + b *topodatapb.KeyRange + } + type wants struct { + wantStartCompare int + wantStartEqual bool + wantEndCompare int + wantEndEqual bool + wantCompare int + wantEqual bool + } + tests := []struct { + name string + args args + wants wants + }{ + { + name: "a and b are both full range", + args: args{ + a: stringToKeyRange("-"), + b: stringToKeyRange("-"), + }, + wants: wants{ + wantStartCompare: 0, + wantStartEqual: true, + wantEndCompare: 0, + wantEndEqual: true, + wantCompare: 0, + wantEqual: true, + }, + }, + { + name: "a is equal to b", + args: args{ + a: stringToKeyRange("10-30"), + b: stringToKeyRange("10-30"), + }, + wants: wants{ + wantStartCompare: 0, + wantStartEqual: true, + wantEndCompare: 0, + wantEndEqual: true, + wantCompare: 0, + wantEqual: true, + }, + }, + { + name: "a (2 digit, end only) but equal to b (2 digits, end only)", + args: args{ + a: stringToKeyRange("-80"), + b: stringToKeyRange("-80"), + }, + wants: wants{ + wantStartCompare: 0, + wantStartEqual: true, + wantEndCompare: 0, + 
wantEndEqual: true, + wantCompare: 0, + wantEqual: true, + }, + }, + { + name: "a (2 digit, end only) but equal to b (4 digits, end only)", + args: args{ + a: stringToKeyRange("-80"), + b: stringToKeyRange("-8000"), + }, + wants: wants{ + wantStartCompare: 0, + wantStartEqual: true, + wantEndCompare: 0, + wantEndEqual: true, + wantCompare: 0, + wantEqual: true, + }, + }, + { + name: "a (2 digit, end only) but equal to b (6 digits, end only)", + args: args{ + a: stringToKeyRange("-80"), + b: stringToKeyRange("-800000"), + }, + wants: wants{ + wantStartCompare: 0, + wantStartEqual: true, + wantEndCompare: 0, + wantEndEqual: true, + wantCompare: 0, + wantEqual: true, + }, + }, + { + name: "a (2 digit, end only) but equal to b (8 digits, end only)", + args: args{ + a: stringToKeyRange("-80"), + b: stringToKeyRange("-80000000"), + }, + wants: wants{ + wantStartCompare: 0, + wantStartEqual: true, + wantEndCompare: 0, + wantEndEqual: true, + wantCompare: 0, + wantEqual: true, + }, + }, + { + name: "a (2 digit, start only) but equal to b (2 digits, start only)", + args: args{ + stringToKeyRange("80-"), + stringToKeyRange("80-"), + }, + wants: wants{ + wantStartCompare: 0, + wantStartEqual: true, + wantEndCompare: 0, + wantEndEqual: true, + wantCompare: 0, + wantEqual: true, + }, + }, + { + name: "a (2 digit, start only) but equal to b (4 digits, start only)", + args: args{ + a: stringToKeyRange("80-"), + b: stringToKeyRange("8000-"), + }, + wants: wants{ + wantStartCompare: 0, + wantStartEqual: true, + wantEndCompare: 0, + wantEndEqual: true, + wantCompare: 0, + wantEqual: true, + }, + }, + { + name: "a (2 digit, start only) but equal to b (6 digits, start only)", + args: args{ + a: stringToKeyRange("80-"), + b: stringToKeyRange("800000-"), + }, + wants: wants{ + wantStartCompare: 0, + wantStartEqual: true, + wantEndCompare: 0, + wantEndEqual: true, + wantCompare: 0, + wantEqual: true, + }, + }, + { + name: "a (2 digit, start only) but equal to b (8 digits, start only)", + args: args{ + a: stringToKeyRange("80-"), + b: stringToKeyRange("80000000-"), + }, + wants: wants{ + wantStartCompare: 0, + wantStartEqual: true, + wantEndCompare: 0, + wantEndEqual: true, + wantCompare: 0, + wantEqual: true, + }, + }, + { + name: "a (4 digits) but equal to b (2 digits)", + args: args{ + a: stringToKeyRange("1000-3000"), + b: stringToKeyRange("10-30"), + }, + wants: wants{ + wantStartCompare: 0, + wantStartEqual: true, + wantEndCompare: 0, + wantEndEqual: true, + wantCompare: 0, + wantEqual: true, + }, + }, + { + name: "a (8 digits) but equal to b (4 digits)", + args: args{ + a: stringToKeyRange("10000000-30000000"), + b: stringToKeyRange("1000-3000"), + }, + wants: wants{ + wantStartCompare: 0, + wantStartEqual: true, + wantEndCompare: 0, + wantEndEqual: true, + wantCompare: 0, + wantEqual: true, + }, + }, + { + name: "b (4 digits) but equal to a (2 digits)", + args: args{ + a: stringToKeyRange("10-30"), + b: stringToKeyRange("1000-3000"), + }, + wants: wants{ + wantStartCompare: 0, + wantStartEqual: true, + wantEndCompare: 0, + wantEndEqual: true, + wantCompare: 0, + wantEqual: true, + }, + }, + { + name: "b (8 digits) but equal to a (4 digits)", + args: args{ + a: stringToKeyRange("10-30"), + b: stringToKeyRange("10000000-30000000"), + }, + wants: wants{ + wantStartCompare: 0, + wantStartEqual: true, + wantEndCompare: 0, + wantEndEqual: true, + wantCompare: 0, + wantEqual: true, + }, + }, + { + name: "a is full range, b is not", + args: args{ + a: stringToKeyRange("-"), + b: stringToKeyRange("20-30"), + }, + 
wants: wants{
+				wantStartCompare: -1,
+				wantStartEqual:   false,
+				wantEndCompare:   1,
+				wantEndEqual:     false,
+				wantCompare:      -1,
+				wantEqual:        false,
+			},
+		},
+		{
+			name: "b is full range, a is not",
+			args: args{
+				a: stringToKeyRange("10-30"),
+				b: stringToKeyRange("-"),
+			},
+			wants: wants{
+				wantStartCompare: 1,
+				wantStartEqual:   false,
+				wantEndCompare:   -1,
+				wantEndEqual:     false,
+				wantCompare:      1,
+				wantEqual:        false,
+			},
+		},
+		{
+			name: "b start is greater than a start",
+			args: args{
+				a: stringToKeyRange("10-30"),
+				b: stringToKeyRange("20-30"),
+			},
+			wants: wants{
+				wantStartCompare: -1,
+				wantStartEqual:   false,
+				wantEndCompare:   0,
+				wantEndEqual:     true,
+				wantCompare:      -1,
+				wantEqual:        false,
+			},
+		},
+		{
+			name: "a start is greater than b start",
+			args: args{
+				a: stringToKeyRange("20-30"),
+				b: stringToKeyRange("10-30"),
+			},
+			wants: wants{
+				wantStartCompare: 1,
+				wantStartEqual:   false,
+				wantEndCompare:   0,
+				wantEndEqual:     true,
+				wantCompare:      1,
+				wantEqual:        false,
+			},
+		},
+		{
+			name: "a start is empty, b start is not",
+			args: args{
+				a: stringToKeyRange("-30"),
+				b: stringToKeyRange("10-30"),
+			},
+			wants: wants{
+				wantStartCompare: -1,
+				wantStartEqual:   false,
+				wantEndCompare:   0,
+				wantEndEqual:     true,
+				wantCompare:      -1,
+				wantEqual:        false,
+			},
+		},
+		{
+			name: "b start is empty, a start is not",
+			args: args{
+				a: stringToKeyRange("10-30"),
+				b: stringToKeyRange("-30"),
+			},
+			wants: wants{
+				wantStartCompare: 1,
+				wantStartEqual:   false,
+				wantEndCompare:   0,
+				wantEndEqual:     true,
+				wantCompare:      1,
+				wantEqual:        false,
+			},
+		},
+		{
+			name: "a end is greater than b end",
+			args: args{
+				a: stringToKeyRange("10-30"),
+				b: stringToKeyRange("10-20"),
+			},
+			wants: wants{
+				wantStartCompare: 0,
+				wantStartEqual:   true,
+				wantEndCompare:   1,
+				wantEndEqual:     false,
+				wantCompare:      1,
+				wantEqual:        false,
+			},
+		},
+		{
+			name: "b end is greater than a end",
+			args: args{
+				a: stringToKeyRange("10-20"),
+				b: stringToKeyRange("10-30"),
+			},
+			wants: wants{
+				wantStartCompare: 0,
+				wantStartEqual:   true,
+				wantEndCompare:   -1,
+				wantEndEqual:     false,
+				wantCompare:      -1,
+				wantEqual:        false,
+			},
+		},
+		{
+			name: "a end is empty, b end is not",
+			args: args{
+				a: stringToKeyRange("10-"),
+				b: stringToKeyRange("10-30"),
+			},
+			wants: wants{
+				wantStartCompare: 0,
+				wantStartEqual:   true,
+				wantEndCompare:   1,
+				wantEndEqual:     false,
+				wantCompare:      1,
+				wantEqual:        false,
+			},
+		},
+		{
+			name: "b end is empty, a end is not",
+			args: args{
+				a: stringToKeyRange("10-30"),
+				b: stringToKeyRange("10-"),
+			},
+			wants: wants{
+				wantStartCompare: 0,
+				wantStartEqual:   true,
+				wantEndCompare:   -1,
+				wantEndEqual:     false,
+				wantCompare:      -1,
+				wantEqual:        false,
+			},
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			assert.Equalf(t, tt.wants.wantStartCompare, KeyRangeStartCompare(tt.args.a, tt.args.b), "KeyRangeStartCompare(%v, %v)", tt.args.a, tt.args.b)
+			assert.Equalf(t, tt.wants.wantStartEqual, KeyRangeStartEqual(tt.args.a, tt.args.b), "KeyRangeStartEqual(%v, %v)", tt.args.a, tt.args.b)
+			assert.Equalf(t, tt.wants.wantEndCompare, KeyRangeEndCompare(tt.args.a, tt.args.b), "KeyRangeEndCompare(%v, %v)", tt.args.a, tt.args.b)
+			assert.Equalf(t, tt.wants.wantEndEqual, KeyRangeEndEqual(tt.args.a, tt.args.b), "KeyRangeEndEqual(%v, %v)", tt.args.a, tt.args.b)
+			assert.Equalf(t, tt.wants.wantCompare, KeyRangeCompare(tt.args.a, tt.args.b), "KeyRangeCompare(%v, %v)", tt.args.a, tt.args.b)
+			assert.Equalf(t, tt.wants.wantEqual, KeyRangeEqual(tt.args.a, tt.args.b), "KeyRangeEqual(%v, %v)",
tt.args.a, tt.args.b) + }) + } +} + +func TestKeyRangeContains(t *testing.T) { var table = []struct { kid string start string @@ -499,120 +1105,321 @@ func TestContains(t *testing.T) { } } -func TestIntersectOverlap(t *testing.T) { - var table = []struct { - a string - b string - c string - d string - intersects bool - overlap string - }{ - {a: "40", b: "80", c: "c0", d: "d0", intersects: false}, - {a: "", b: "80", c: "80", d: "", intersects: false}, - {a: "", b: "80", c: "", d: "40", intersects: true, overlap: "-40"}, - {a: "80", b: "", c: "c0", d: "", intersects: true, overlap: "c0-"}, - {a: "", b: "80", c: "40", d: "80", intersects: true, overlap: "40-80"}, - {a: "40", b: "80", c: "60", d: "a0", intersects: true, overlap: "60-80"}, - {a: "40", b: "80", c: "50", d: "60", intersects: true, overlap: "50-60"}, - {a: "40", b: "80", c: "10", d: "50", intersects: true, overlap: "40-50"}, - {a: "40", b: "80", c: "40", d: "80", intersects: true, overlap: "40-80"}, - {a: "", b: "80", c: "", d: "80", intersects: true, overlap: "-80"}, - {a: "40", b: "", c: "40", d: "", intersects: true, overlap: "40-"}, - {a: "40", b: "80", c: "20", d: "40", intersects: false}, - {a: "80", b: "", c: "80", d: "c0", intersects: true, overlap: "80-c0"}, - {a: "", b: "", c: "c0", d: "d0", intersects: true, overlap: "c0-d0"}, +func TestKeyRangeIntersect(t *testing.T) { + type args struct { + a *topodatapb.KeyRange + b *topodatapb.KeyRange } + tests := []struct { + name string + args args + want bool + }{ + // non-intersecting cases + { + name: "typical half-range split, ascending order", + args: args{a: stringToKeyRange("-80"), b: stringToKeyRange("80-")}, + want: false, + }, + { + name: "typical half-range split, descending order", + args: args{a: stringToKeyRange("80-"), b: stringToKeyRange("-80")}, + want: false, + }, + { + name: "partial ranges, ascending order", + args: args{a: stringToKeyRange("40-80"), b: stringToKeyRange("c0-d0")}, + want: false, + }, + { + name: "partial ranges, descending order", + args: args{a: stringToKeyRange("40-80"), b: stringToKeyRange("20-40")}, + want: false, + }, + { + name: "partial ranges, different key lengths", + args: args{a: stringToKeyRange("4000-8000"), b: stringToKeyRange("20-40")}, + want: false, + }, - for _, el := range table { - a, err := hex.DecodeString(el.a) - if err != nil { - t.Errorf("Unexpected error: %v", err) - } - b, err := hex.DecodeString(el.b) - if err != nil { - t.Errorf("Unexpected error: %v", err) - } - left := &topodatapb.KeyRange{Start: a, End: b} - c, err := hex.DecodeString(el.c) - if err != nil { - t.Errorf("Unexpected error: %v", err) - } - d, err := hex.DecodeString(el.d) - if err != nil { - t.Errorf("Unexpected error: %v", err) - } - right := &topodatapb.KeyRange{Start: c, End: d} - if c := KeyRangesIntersect(left, right); c != el.intersects { - t.Errorf("Unexpected result: KeyRangesIntersect for %v and %v yields %v.", left, right, c) - } - overlap, err := KeyRangesOverlap(left, right) - if el.intersects { - if err != nil { - t.Errorf("Unexpected result: KeyRangesOverlap for overlapping %v and %v returned an error: %v", left, right, err) - } else { - got := hex.EncodeToString(overlap.Start) + "-" + hex.EncodeToString(overlap.End) - if got != el.overlap { - t.Errorf("Unexpected result: KeyRangesOverlap for overlapping %v and %v should have returned: %v but got: %v", left, right, el.overlap, got) - } - } - } else { - if err == nil { - t.Errorf("Unexpected result: KeyRangesOverlap for non-overlapping %v and %v should have returned an error", left, 
right) - } - } + // intersecting cases with a full range + { + name: "full range with full range", + args: args{a: stringToKeyRange("-"), b: stringToKeyRange("-")}, + want: true, + }, + { + name: "full range with maximum key partial range", + args: args{a: stringToKeyRange("-"), b: stringToKeyRange("80-")}, + want: true, + }, + { + name: "full range with partial range", + args: args{a: stringToKeyRange("-"), b: stringToKeyRange("c0-d0")}, + want: true, + }, + { + name: "minimum key partial range with full range", + args: args{a: stringToKeyRange("-80"), b: stringToKeyRange("-")}, + want: true, + }, + { + name: "partial range with full range", + args: args{a: stringToKeyRange("a0-b0"), b: stringToKeyRange("-")}, + want: true, + }, + + // intersecting cases with only partial ranges + { + name: "the same range, both from minimum key", + args: args{a: stringToKeyRange("-80"), b: stringToKeyRange("-80")}, + want: true, + }, + { + name: "the same range, both from minimum key, different key lengths", + args: args{a: stringToKeyRange("-8000"), b: stringToKeyRange("-80")}, + want: true, + }, + { + name: "the same range, both to maximum key", + args: args{a: stringToKeyRange("40-"), b: stringToKeyRange("40-")}, + want: true, + }, + { + name: "the same range, both to maximum key, different key lengths", + args: args{a: stringToKeyRange("4000-"), b: stringToKeyRange("40-")}, + want: true, + }, + { + name: "the same range, both with mid-range keys", + args: args{a: stringToKeyRange("40-80"), b: stringToKeyRange("40-80")}, + want: true, + }, + { + name: "the same range, both with mid-range keys, different key lengths", + args: args{a: stringToKeyRange("40-80"), b: stringToKeyRange("4000-8000")}, + want: true, + }, + { + name: "different-sized partial ranges, both from minimum key", + args: args{a: stringToKeyRange("-80"), b: stringToKeyRange("-40")}, + want: true, + }, + { + name: "different-sized partial ranges, both to maximum key", + args: args{a: stringToKeyRange("80-"), b: stringToKeyRange("c0-")}, + want: true, + }, + { + name: "different-sized partial ranges, from minimum key with mid-range key", + args: args{a: stringToKeyRange("-80"), b: stringToKeyRange("40-80")}, + want: true, + }, + { + name: "different-sized partial ranges, from minimum key with mid-range key, different key lengths", + args: args{a: stringToKeyRange("-80"), b: stringToKeyRange("4000-8000")}, + want: true, + }, + { + name: "different-sized partial ranges, to maximum key with mid-range key", + args: args{a: stringToKeyRange("80-"), b: stringToKeyRange("80-c0")}, + want: true, + }, + { + name: "different-sized partial ranges, to maximum key with mid-range key, different key lengths", + args: args{a: stringToKeyRange("80-"), b: stringToKeyRange("8000-c000")}, + want: true, + }, + { + name: "partially overlapping ranges, in ascending order", + args: args{a: stringToKeyRange("40-80"), b: stringToKeyRange("60-a0")}, + want: true, + }, + { + name: "partially overlapping ranges, in descending order", + args: args{a: stringToKeyRange("40-80"), b: stringToKeyRange("10-50")}, + want: true, + }, + { + name: "partially overlapping ranges, one fully containing the other, in ascending order", + args: args{a: stringToKeyRange("40-80"), b: stringToKeyRange("50-60")}, + want: true, + }, + { + name: "partially overlapping ranges, one fully containing the other, in descending order", + args: args{a: stringToKeyRange("40-80"), b: stringToKeyRange("30-90")}, + want: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { 
+ assert.Equalf(t, tt.want, KeyRangeIntersect(tt.args.a, tt.args.b), "KeyRangeIntersect(%v, %v)", tt.args.a, tt.args.b) + }) } } -func TestKeyRangeIncludes(t *testing.T) { - var table = []struct { - name string - big string - small string - expected bool +func TestKeyRangeContainsKeyRange(t *testing.T) { + type args struct { + a *topodatapb.KeyRange + b *topodatapb.KeyRange + } + var tests = []struct { + name string + args args + want bool }{ - {"big nil, small nil", "nil", "nil", true}, - {"big nil, small non nil, fully partial", "nil", "80-c0", true}, - {"big nil, small non nil, full start", "nil", "-c0", true}, - {"big nil, small non nil, full end", "nil", "80-", true}, - {"big non-nil, fully partial, small nil", "80-c0", "nil", false}, - {"big non-nil, full start, small nil", "-c0", "nil", false}, - {"big non-nil, full end, small nil", "80-", "nil", false}, - {"big full, small full", "-", "-", true}, - {"big full, small partial", "-", "40-60", true}, - {"big partial, small full", "40-60", "-", false}, - - {"big partial, small to the end", "40-60", "40-", false}, - {"big partial, small bigger to the right", "40-60", "40-80", false}, - {"big partial, small equal", "40-60", "40-60", true}, - {"big partial, small smaller right", "40-60", "40-50", true}, - - {"big partial, small to the beginning", "40-60", "-60", false}, - {"big partial, small smaller to the left", "40-60", "20-60", false}, - {"big partial, small bigger left", "40-60", "50-60", true}, - } - - var err error - for _, tc := range table { - var big, small *topodatapb.KeyRange - if tc.big != "nil" { - parts := strings.Split(tc.big, "-") - big, err = ParseKeyRangeParts(parts[0], parts[1]) - if err != nil { - t.Fatalf("test data error in %v: %v", tc.big, err) - } - } - if tc.small != "nil" { - parts := strings.Split(tc.small, "-") - small, err = ParseKeyRangeParts(parts[0], parts[1]) - if err != nil { - t.Fatalf("test data error in %v: %v", tc.small, err) - } - } - got := KeyRangeIncludes(big, small) - if got != tc.expected { - t.Errorf("KeyRangeIncludes for test case '%v' returned %v but expected %v", tc.name, got, tc.expected) - } + // full range contains itself + { + name: "both full range", + args: args{a: stringToKeyRange("-"), b: stringToKeyRange("-")}, + want: true, + }, + + // full range always contains a partial range + { + name: "full range, partial range from minimum key", + args: args{a: stringToKeyRange("-"), b: stringToKeyRange("-c0")}, + want: true, + }, + { + name: "full range, partial range to maximum key", + args: args{a: stringToKeyRange("-"), b: stringToKeyRange("80-")}, + want: true, + }, + { + name: "full range, partial mid-key range", + args: args{a: stringToKeyRange("-"), b: stringToKeyRange("80-c0")}, + want: true, + }, + + // equal partial ranges contain each other + { + name: "equal partial ranges", + args: args{a: stringToKeyRange("40-60"), b: stringToKeyRange("40-60")}, + want: true, + }, + { + name: "equal partial ranges, different size keys", + args: args{a: stringToKeyRange("40-60"), b: stringToKeyRange("4000-6000")}, + want: true, + }, + { + name: "equal partial ranges, different size keys", + args: args{a: stringToKeyRange("4000-6000"), b: stringToKeyRange("40-60")}, + want: true, + }, + + // partial ranges may contain smaller partial ranges + { + name: "partial range, partial touching start", + args: args{a: stringToKeyRange("40-80"), b: stringToKeyRange("40-50")}, + want: true, + }, + { + name: "partial range, partial touching start, different size keys", + args: args{a: 
stringToKeyRange("40-80"), b: stringToKeyRange("4000-5000")}, + want: true, + }, + { + name: "partial range, partial touching start, different size keys", + args: args{a: stringToKeyRange("4000-8000"), b: stringToKeyRange("40-50")}, + want: true, + }, + { + name: "partial range, partial touching end", + args: args{a: stringToKeyRange("40-80"), b: stringToKeyRange("70-80")}, + want: true, + }, + { + name: "partial range, partial touching end, different size keys", + args: args{a: stringToKeyRange("40-80"), b: stringToKeyRange("7000-8000")}, + want: true, + }, + { + name: "partial range, partial touching end, different size keys", + args: args{a: stringToKeyRange("4000-8000"), b: stringToKeyRange("70-80")}, + want: true, + }, + { + name: "partial range, partial in the middle", + args: args{a: stringToKeyRange("40-80"), b: stringToKeyRange("50-70")}, + want: true, + }, + { + name: "partial range, partial in the middle, different size keys", + args: args{a: stringToKeyRange("40-80"), b: stringToKeyRange("5000-7000")}, + want: true, + }, + { + name: "partial range, partial in the middle, different size keys", + args: args{a: stringToKeyRange("4000-8000"), b: stringToKeyRange("50-70")}, + want: true, + }, + + // partial ranges do not contain the full range + { + name: "partial range from minimum key, full range", + args: args{a: stringToKeyRange("-c0"), b: stringToKeyRange("-")}, + want: false, + }, + { + name: "partial range to maximum key, full range", + args: args{a: stringToKeyRange("80-"), b: stringToKeyRange("-")}, + want: false, + }, + { + name: "partial mid-key range, full range", + args: args{a: stringToKeyRange("80-c0"), b: stringToKeyRange("-")}, + want: false, + }, + + // partial ranges do not contain overlapping but boundary-crossing partial ranges + { + name: "partial range mid-key range, overlapping partial range to maximum key", + args: args{a: stringToKeyRange("40-60"), b: stringToKeyRange("50-")}, + want: false, + }, + { + name: "partial range mid-key range, overlapping partial range to maximum key", + args: args{a: stringToKeyRange("40-60"), b: stringToKeyRange("5000-")}, + want: false, + }, + { + name: "partial range mid-key range, overlapping partial range to maximum key, different size keys", + args: args{a: stringToKeyRange("4000-6000"), b: stringToKeyRange("50-")}, + want: false, + }, + { + name: "partial range mid-key range, overlapping partial range to maximum key, different size keys", + args: args{a: stringToKeyRange("40-60"), b: stringToKeyRange("5000-")}, + want: false, + }, + { + name: "partial range mid-key range, overlapping partial range from minimum key", + args: args{a: stringToKeyRange("40-60"), b: stringToKeyRange("-50")}, + want: false, + }, + { + name: "partial range mid-key range, overlapping partial range from minimum key", + args: args{a: stringToKeyRange("40-60"), b: stringToKeyRange("-5000")}, + want: false, + }, + { + name: "partial range mid-key range, overlapping partial range from minimum key, different size keys", + args: args{a: stringToKeyRange("4000-6000"), b: stringToKeyRange("-50")}, + want: false, + }, + { + name: "partial range mid-key range, overlapping partial range from minimum key, different size keys", + args: args{a: stringToKeyRange("40-60"), b: stringToKeyRange("-5000")}, + want: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + assert.Equalf(t, tt.want, KeyRangeContainsKeyRange(tt.args.a, tt.args.b), "KeyRangeContainsKeyRange(%v, %v)", tt.args.a, tt.args.b) + }) } } @@ -671,65 +1478,40 @@ func 
BenchmarkKeyRangesIntersect(b *testing.B) { } for i := 0; i < b.N; i++ { - KeyRangesIntersect(kr1, kr2) + KeyRangeIntersect(kr1, kr2) } } -func BenchmarkKeyRangesOverlap(b *testing.B) { - kr1 := &topodatapb.KeyRange{ - Start: []byte{0x40, 0, 0, 0, 0, 0, 0, 0}, - End: []byte{0x80, 0, 0, 0, 0, 0, 0, 0}, - } - kr2 := &topodatapb.KeyRange{ - Start: []byte{0x30, 0, 0, 0, 0, 0, 0, 0}, - End: []byte{0x50, 0, 0, 0, 0, 0, 0, 0}, - } - - for i := 0; i < b.N; i++ { - if _, err := KeyRangesOverlap(kr1, kr2); err != nil { - b.Fatal(err) - } - } -} +func TestIsValidKeyRange(t *testing.T) { + tests := []struct { + arg string + want bool + }{ + // normal cases + {"-", true}, + {"00-", true}, + {"-80", true}, + {"40-80", true}, + {"80-", true}, + {"a0-", true}, + {"-A0", true}, -func TestIsKeyRange(t *testing.T) { - testcases := []struct { - in string - out bool - }{{ - in: "-", - out: true, - }, { - in: "-80", - out: true, - }, { - in: "40-80", - out: true, - }, { - in: "80-", - out: true, - }, { - in: "a0-", - out: true, - }, { - in: "-A0", - out: true, - }, { - in: "", - out: false, - }, { - in: "x-80", - out: false, - }, { - in: "-80x", - out: false, - }, { - in: "select", - out: false, - }} + // special cases + {"0", true}, // equal to "-" - for _, tcase := range testcases { - assert.Equal(t, IsKeyRange(tcase.in), tcase.out, tcase.in) + // invalid cases + {"", false}, // empty is not allowed + {"11", false}, // no hyphen + {"-1", false}, // odd number of digits + {"-111", false}, // odd number of digits + {"1-2", false}, // odd number of digits + {"x-80", false}, // invalid character + {"-80x", false}, // invalid character + {"select", false}, // nonsense + {"+", false}, // nonsense + } + for _, tt := range tests { + assert.Equalf(t, tt.want, IsValidKeyRange(tt.arg), "IsValidKeyRange(%v)", tt.arg) } } diff --git a/go/vt/topo/shard.go b/go/vt/topo/shard.go index 2599c7de962..37e810b966f 100644 --- a/go/vt/topo/shard.go +++ b/go/vt/topo/shard.go @@ -308,7 +308,7 @@ func (ts *Server) CreateShard(ctx context.Context, keyspace, shard string) (err return err } for _, si := range sis { - if si.KeyRange == nil || key.KeyRangesIntersect(si.KeyRange, keyRange) { + if si.KeyRange == nil || key.KeyRangeIntersect(si.KeyRange, keyRange) { value.IsPrimaryServing = false break } diff --git a/go/vt/topo/topoproto/srvkeyspace.go b/go/vt/topo/topoproto/srvkeyspace.go index 24618233fb2..88cfe4b9986 100644 --- a/go/vt/topo/topoproto/srvkeyspace.go +++ b/go/vt/topo/topoproto/srvkeyspace.go @@ -17,9 +17,10 @@ limitations under the License. 
package topoproto import ( - "bytes" "sort" + "vitess.io/vitess/go/vt/key" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" ) @@ -29,18 +30,12 @@ type ShardReferenceArray []*topodatapb.ShardReference // Len implements sort.Interface func (sra ShardReferenceArray) Len() int { return len(sra) } -// Len implements sort.Interface +// Less implements sort.Interface func (sra ShardReferenceArray) Less(i, j int) bool { - if sra[i].KeyRange == nil || len(sra[i].KeyRange.Start) == 0 { - return true - } - if sra[j].KeyRange == nil || len(sra[j].KeyRange.Start) == 0 { - return false - } - return bytes.Compare(sra[i].KeyRange.Start, sra[j].KeyRange.Start) < 0 + return key.KeyRangeLess(sra[i].KeyRange, sra[j].KeyRange) } -// Len implements sort.Interface +// Swap implements sort.Interface func (sra ShardReferenceArray) Swap(i, j int) { sra[i], sra[j] = sra[j], sra[i] } diff --git a/go/vt/topotools/split.go b/go/vt/topotools/split.go index 00cfd49cdae..ace3dda94a7 100644 --- a/go/vt/topotools/split.go +++ b/go/vt/topotools/split.go @@ -222,7 +222,7 @@ func findOverlappingShards(shardMap map[string]*topo.ShardInfo) ([]*OverlappingS func findIntersectingShard(shardMap map[string]*topo.ShardInfo, sourceArray []*topo.ShardInfo) *topo.ShardInfo { for name, si := range shardMap { for _, sourceShardInfo := range sourceArray { - if si.KeyRange == nil || sourceShardInfo.KeyRange == nil || key.KeyRangesIntersect(si.KeyRange, sourceShardInfo.KeyRange) { + if si.KeyRange == nil || sourceShardInfo.KeyRange == nil || key.KeyRangeIntersect(si.KeyRange, sourceShardInfo.KeyRange) { delete(shardMap, name) return si } @@ -235,7 +235,7 @@ func findIntersectingShard(shardMap map[string]*topo.ShardInfo, sourceArray []*t // in the destination array func intersect(si *topo.ShardInfo, allShards []*topo.ShardInfo) bool { for _, shard := range allShards { - if key.KeyRangesIntersect(si.KeyRange, shard.KeyRange) { + if key.KeyRangeIntersect(si.KeyRange, shard.KeyRange) { return true } } diff --git a/go/vt/topotools/split_test.go b/go/vt/topotools/split_test.go index 0ba13a3524d..003dc767317 100644 --- a/go/vt/topotools/split_test.go +++ b/go/vt/topotools/split_test.go @@ -129,6 +129,10 @@ func TestValidateForReshard(t *testing.T) { sources: []string{"0"}, targets: []string{"-40", "40-"}, out: "", + }, { + sources: []string{"0003-"}, + targets: []string{"000300-000380", "000380-000400", "0004-"}, + out: "", }, { sources: []string{"-40", "40-80", "80-"}, targets: []string{"-40", "40-"}, diff --git a/go/vt/vtctl/workflow/stream_migrator.go b/go/vt/vtctl/workflow/stream_migrator.go index 8266b59aea9..17b447a68bb 100644 --- a/go/vt/vtctl/workflow/stream_migrator.go +++ b/go/vt/vtctl/workflow/stream_migrator.go @@ -659,7 +659,7 @@ func (sm *StreamMigrator) templatizeRule(ctx context.Context, rule *binlogdatapb switch { case rule.Filter == "": return StreamTypeUnknown, fmt.Errorf("rule %v does not have a select expression in vreplication", rule) - case key.IsKeyRange(rule.Filter): + case key.IsValidKeyRange(rule.Filter): rule.Filter = "{{.}}" return StreamTypeSharded, nil case rule.Filter == vreplication.ExcludeStr: diff --git a/go/vt/vttablet/tabletmanager/vdiff/workflow_differ.go b/go/vt/vttablet/tabletmanager/vdiff/workflow_differ.go index a5b3a89df56..1d13128088c 100644 --- a/go/vt/vttablet/tabletmanager/vdiff/workflow_differ.go +++ b/go/vt/vttablet/tabletmanager/vdiff/workflow_differ.go @@ -250,7 +250,7 @@ func (wd *workflowDiffer) buildPlan(dbClient binlogplayer.DBClient, filter *binl buf := sqlparser.NewTrackedBuffer(nil) 
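// Editor's sketch, not part of the patch: IsValidKeyRange is a pure rename of
// IsKeyRange, so branch selection in buildPlan here is unchanged. For a
// hypothetical table t1, the case branches build queries like:
//
//	select * from t1                            (rule.Filter is empty)
//	select * from t1 where in_keyrange('-80')   (rule.Filter is the key range "-80")
//
// "t1" and "-80" are made-up examples of what Myprintf renders via
// NewIdentifierCS and NewStrLiteral in the statements around this note.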
buf.Myprintf("select * from %v", sqlparser.NewIdentifierCS(table.Name)) sourceQuery = buf.String() - case key.IsKeyRange(rule.Filter): + case key.IsValidKeyRange(rule.Filter): buf := sqlparser.NewTrackedBuffer(nil) buf.Myprintf("select * from %v where in_keyrange(%v)", sqlparser.NewIdentifierCS(table.Name), sqlparser.NewStrLiteral(rule.Filter)) sourceQuery = buf.String() diff --git a/go/vt/vttablet/tabletmanager/vreplication/table_plan_builder.go b/go/vt/vttablet/tabletmanager/vreplication/table_plan_builder.go index 4e8141f164f..c34f179df5b 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/table_plan_builder.go +++ b/go/vt/vttablet/tabletmanager/vreplication/table_plan_builder.go @@ -201,7 +201,7 @@ func buildTablePlan(tableName string, rule *binlogdatapb.Rule, colInfos []*Colum buf := sqlparser.NewTrackedBuffer(nil) buf.Myprintf("select * from %v", sqlparser.NewIdentifierCS(tableName)) query = buf.String() - case key.IsKeyRange(filter): + case key.IsValidKeyRange(filter): buf := sqlparser.NewTrackedBuffer(nil) buf.Myprintf("select * from %v where in_keyrange(%v)", sqlparser.NewIdentifierCS(tableName), sqlparser.NewStrLiteral(filter)) query = buf.String() diff --git a/go/vt/vttablet/tabletserver/vstreamer/uvstreamer.go b/go/vt/vttablet/tabletserver/vstreamer/uvstreamer.go index 34f13268a06..2f9f5e972b9 100644 --- a/go/vt/vttablet/tabletserver/vstreamer/uvstreamer.go +++ b/go/vt/vttablet/tabletserver/vstreamer/uvstreamer.go @@ -220,7 +220,7 @@ func getQuery(tableName string, filter string) string { buf := sqlparser.NewTrackedBuffer(nil) buf.Myprintf("select * from %v", sqlparser.NewIdentifierCS(tableName)) query = buf.String() - case key.IsKeyRange(filter): + case key.IsValidKeyRange(filter): buf := sqlparser.NewTrackedBuffer(nil) buf.Myprintf("select * from %v where in_keyrange(%v)", sqlparser.NewIdentifierCS(tableName), sqlparser.NewStrLiteral(filter)) diff --git a/go/vt/wrangler/materializer.go b/go/vt/wrangler/materializer.go index a44d15b36e2..1b6aa2f5909 100644 --- a/go/vt/wrangler/materializer.go +++ b/go/vt/wrangler/materializer.go @@ -1226,7 +1226,7 @@ func (mz *materializer) generateInserts(ctx context.Context, targetShard *topo.S // We only do it for MoveTables for now since this doesn't hold for materialize flows // where the target's sharding key might differ from that of the source if mz.ms.MaterializationIntent == vtctldatapb.MaterializationIntent_MOVETABLES && - !key.KeyRangesIntersect(sourceShard.KeyRange, targetShard.KeyRange) { + !key.KeyRangeIntersect(sourceShard.KeyRange, targetShard.KeyRange) { continue } bls := &binlogdatapb.BinlogSource{ diff --git a/go/vt/wrangler/resharder.go b/go/vt/wrangler/resharder.go index 3df9d3f586f..d4e26a3dbb1 100644 --- a/go/vt/wrangler/resharder.go +++ b/go/vt/wrangler/resharder.go @@ -317,7 +317,7 @@ func (rs *resharder) createStreams(ctx context.Context) error { // copy excludeRules to prevent data race. copyExcludeRules := append([]*binlogdatapb.Rule(nil), excludeRules...) 
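// Editor's sketch, not part of the patch: KeyRangeIntersect, used in the loop
// below, now runs Start/End values through addPadding (see the key.go hunks in
// the first patch of this series), so ranges written with different key lengths
// compare sanely. A minimal model of that comparison, assuming addPadding
// right-pads with zero bytes to a fixed width (the 8-byte width here is an
// assumption for illustration, not the key package's documented constant):
//
//	pad := func(b []byte) []byte { p := make([]byte, 8); copy(p, b); return p }
//	bytes.Compare(pad([]byte{0x40}), pad([]byte{0x40, 0x00})) // 0: "40" and "4000" are the same boundary
//
// which is why the key_test.go cases above treat "40-80" and "4000-8000" as
// the same range.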
for _, source := range rs.sourceShards { - if !key.KeyRangesIntersect(target.KeyRange, source.KeyRange) { + if !key.KeyRangeIntersect(target.KeyRange, source.KeyRange) { continue } filter := &binlogdatapb.Filter{ diff --git a/go/vt/wrangler/traffic_switcher.go b/go/vt/wrangler/traffic_switcher.go index ec7914e51ce..7c4a7226065 100644 --- a/go/vt/wrangler/traffic_switcher.go +++ b/go/vt/wrangler/traffic_switcher.go @@ -949,9 +949,7 @@ func (ts *trafficSwitcher) isPartialMoveTables(sourceShards, targetShards []stri return false, err } - if !key.KeyRangeIsPartial(skr) || !key.KeyRangeIsPartial(tkr) || // both cover full range - len(sourceShards) != len(targetShards) { - + if key.KeyRangeIsComplete(skr) || key.KeyRangeIsComplete(tkr) || len(sourceShards) != len(targetShards) { return false, nil } diff --git a/go/vt/wrangler/traffic_switcher_env_test.go b/go/vt/wrangler/traffic_switcher_env_test.go index 7d886961e67..aac00eec872 100644 --- a/go/vt/wrangler/traffic_switcher_env_test.go +++ b/go/vt/wrangler/traffic_switcher_env_test.go @@ -348,7 +348,7 @@ func newTestShardMigrater(ctx context.Context, t *testing.T, sourceShards, targe var rows, rowsRdOnly []string var streamExtInfoRows []string for j, sourceShard := range sourceShards { - if !key.KeyRangesIntersect(tme.targetKeyRanges[i], tme.sourceKeyRanges[j]) { + if !key.KeyRangeIntersect(tme.targetKeyRanges[i], tme.sourceKeyRanges[j]) { continue } bls := &binlogdatapb.BinlogSource{ @@ -499,7 +499,7 @@ func (tme *testMigraterEnv) expectNoPreviousReverseJournals() { func (tme *testShardMigraterEnv) forAllStreams(f func(i, j int)) { for i := range tme.targetShards { for j := range tme.sourceShards { - if !key.KeyRangesIntersect(tme.targetKeyRanges[i], tme.sourceKeyRanges[j]) { + if !key.KeyRangeIntersect(tme.targetKeyRanges[i], tme.sourceKeyRanges[j]) { continue } f(i, j) diff --git a/go/vt/wrangler/vdiff.go b/go/vt/wrangler/vdiff.go index cfecefaeabd..e77ca253a84 100644 --- a/go/vt/wrangler/vdiff.go +++ b/go/vt/wrangler/vdiff.go @@ -398,7 +398,7 @@ func (df *vdiff) buildVDiffPlan(ctx context.Context, filter *binlogdatapb.Filter continue } query := rule.Filter - if rule.Filter == "" || key.IsKeyRange(rule.Filter) { + if rule.Filter == "" || key.IsValidKeyRange(rule.Filter) { buf := sqlparser.NewTrackedBuffer(nil) buf.Myprintf("select * from %v", sqlparser.NewIdentifierCS(table.Name)) query = buf.String() From efb0ee5e747b4bcc17d69c2bf70824f40e9f6d6e Mon Sep 17 00:00:00 2001 From: austenLacy Date: Fri, 14 Apr 2023 10:15:55 -0400 Subject: [PATCH 03/12] remove tablet server's ACL check on the /healthz HTTP api route Signed-off-by: austenLacy --- go/vt/vttablet/tabletserver/tabletserver.go | 4 ---- 1 file changed, 4 deletions(-) diff --git a/go/vt/vttablet/tabletserver/tabletserver.go b/go/vt/vttablet/tabletserver/tabletserver.go index 681c1c33fd0..c7c66233284 100644 --- a/go/vt/vttablet/tabletserver/tabletserver.go +++ b/go/vt/vttablet/tabletserver/tabletserver.go @@ -1724,10 +1724,6 @@ func (tsv *TabletServer) registerHealthzHealthHandler() { } func (tsv *TabletServer) healthzHandler(w http.ResponseWriter, r *http.Request) { - if err := acl.CheckAccessHTTP(r, acl.MONITORING); err != nil { - acl.SendError(w, err) - return - } if (tsv.sm.wantState == StateServing || tsv.sm.wantState == StateNotConnected) && !tsv.sm.IsServing() { http.Error(w, "500 internal server error: vttablet is not serving", http.StatusInternalServerError) return From 5db88fcc66bdab6d7156ce251ea493de4b21cb0b Mon Sep 17 00:00:00 2001 From: Austen Lacy Date: Tue, 9 May 2023 
16:17:48 -0400 Subject: [PATCH 04/12] use dry-run switcher in switchreads (#91) Signed-off-by: Austen Lacy Co-authored-by: Austen Lacy --- go/vt/wrangler/traffic_switcher.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/go/vt/wrangler/traffic_switcher.go b/go/vt/wrangler/traffic_switcher.go index 7c4a7226065..773177b5829 100644 --- a/go/vt/wrangler/traffic_switcher.go +++ b/go/vt/wrangler/traffic_switcher.go @@ -397,7 +397,7 @@ func (wr *Wrangler) SwitchReads(ctx context.Context, targetKeyspace, workflowNam return sw.logs(), nil } wr.Logger().Infof("About to switchShardReads: %+v, %+v, %+v", cells, servedTypes, direction) - if err := ts.switchShardReads(ctx, cells, servedTypes, direction); err != nil { + if err := sw.switchShardReads(ctx, cells, servedTypes, direction); err != nil { ts.Logger().Errorf("switchShardReads failed: %v", err) return nil, err } From 0b2120fcb11bbbec1fefa95dbff8d04b810e1f36 Mon Sep 17 00:00:00 2001 From: Austen Lacy Date: Fri, 19 May 2023 13:48:51 -0400 Subject: [PATCH 05/12] Purge old schema versions from memory in historian (#13056) * purge old schema versions from memory in historian Signed-off-by: austenLacy * fix vttablet arg e2e test Signed-off-by: Austen Lacy * only reassign historian schemas if necessary Signed-off-by: Austen Lacy * dont use from_unixtime in query because already in unix Signed-off-by: Austen Lacy --------- Signed-off-by: austenLacy Signed-off-by: Austen Lacy Co-authored-by: Austen Lacy --- go/flags/endtoend/vttablet.txt | 1 + go/vt/vttablet/tabletserver/schema/engine.go | 4 +- .../tabletserver/schema/engine_test.go | 19 ++-- .../vttablet/tabletserver/schema/historian.go | 87 +++++++++++++++---- .../tabletserver/schema/historian_test.go | 77 +++++++++++++++- .../vttablet/tabletserver/schema/main_test.go | 4 +- .../tabletserver/schema/tracker_test.go | 4 +- .../vttablet/tabletserver/tabletenv/config.go | 2 + 8 files changed, 163 insertions(+), 35 deletions(-) diff --git a/go/flags/endtoend/vttablet.txt b/go/flags/endtoend/vttablet.txt index c8fa87920ca..a8d9ce87bf0 100644 --- a/go/flags/endtoend/vttablet.txt +++ b/go/flags/endtoend/vttablet.txt @@ -261,6 +261,7 @@ Usage of vttablet: --s3_backup_storage_root string root prefix for all backup-related object names. --s3_backup_tls_skip_verify_cert skip the 'certificate is valid' check for SSL connections. --sanitize_log_messages Remove potentially sensitive information in tablet INFO, WARNING, and ERROR log messages such as query parameters. 
+ --schema-version-max-age-seconds int max age of schema version records to keep in memory by the vreplication historian --security_policy string the name of a registered security policy to use for controlling access to URLs - empty means allow all for anyone (built-in policies: deny-all, read-only) --service_map strings comma separated list of services to enable (or disable if prefixed with '-') Example: grpc-queryservice --serving_state_grace_period duration how long to pause after broadcasting health to vtgate, before enforcing a new serving state diff --git a/go/vt/vttablet/tabletserver/schema/engine.go b/go/vt/vttablet/tabletserver/schema/engine.go index 215b37a1c6a..8d830a82d8f 100644 --- a/go/vt/vttablet/tabletserver/schema/engine.go +++ b/go/vt/vttablet/tabletserver/schema/engine.go @@ -122,7 +122,7 @@ func NewEngine(env tabletenv.Env) *Engine { schemazHandler(se.GetSchema(), w, r) }) - se.historian = newHistorian(env.Config().TrackSchemaVersions, se.conns) + se.historian = newHistorian(env.Config().TrackSchemaVersions, env.Config().SchemaVersionMaxAgeSeconds, se.conns) return se } @@ -741,7 +741,7 @@ func NewEngineForTests() *Engine { se := &Engine{ isOpen: true, tables: make(map[string]*Table), - historian: newHistorian(false, nil), + historian: newHistorian(false, 0, nil), } return se } diff --git a/go/vt/vttablet/tabletserver/schema/engine_test.go b/go/vt/vttablet/tabletserver/schema/engine_test.go index 79f3bf5d3b7..9b62c595b6e 100644 --- a/go/vt/vttablet/tabletserver/schema/engine_test.go +++ b/go/vt/vttablet/tabletserver/schema/engine_test.go @@ -75,7 +75,7 @@ func TestOpenAndReload(t *testing.T) { )) firstReadRowsValue := 12 AddFakeInnoDBReadRowsResult(db, firstReadRowsValue) - se := newEngine(10, 10*time.Second, 10*time.Second, db) + se := newEngine(10, 10*time.Second, 10*time.Second, 0, db) se.Open() defer se.Close() @@ -266,7 +266,7 @@ func TestReloadWithSwappedTables(t *testing.T) { firstReadRowsValue := 12 AddFakeInnoDBReadRowsResult(db, firstReadRowsValue) - se := newEngine(10, 10*time.Second, 10*time.Second, db) + se := newEngine(10, 10*time.Second, 10*time.Second, 0, db) se.Open() defer se.Close() want := initialSchema() @@ -416,7 +416,7 @@ func TestOpenFailedDueToExecErr(t *testing.T) { schematest.AddDefaultQueries(db) want := "injected error" db.RejectQueryPattern(baseShowTablesPattern, want) - se := newEngine(10, 1*time.Second, 1*time.Second, db) + se := newEngine(10, 1*time.Second, 1*time.Second, 0, db) err := se.Open() if err == nil || !strings.Contains(err.Error(), want) { t.Errorf("se.Open: %v, want %s", err, want) @@ -447,7 +447,7 @@ func TestOpenFailedDueToLoadTableErr(t *testing.T) { db.AddRejectedQuery("SELECT * FROM `fakesqldb`.`test_view` WHERE 1 != 1", mysql.NewSQLErrorFromError(errors.New("The user specified as a definer ('root'@'%') does not exist (errno 1449) (sqlstate HY000)"))) AddFakeInnoDBReadRowsResult(db, 0) - se := newEngine(10, 1*time.Second, 1*time.Second, db) + se := newEngine(10, 1*time.Second, 1*time.Second, 0, db) err := se.Open() // failed load should return an error because of test_table assert.ErrorContains(t, err, "Row count exceeded") @@ -482,7 +482,7 @@ func TestOpenNoErrorDueToInvalidViews(t *testing.T) { db.AddRejectedQuery("SELECT `col1`, `col2` FROM `fakesqldb`.`bar_view` WHERE 1 != 1", mysql.NewSQLError(mysql.ERWrongFieldWithGroup, mysql.SSClientError, "random error for table bar_view")) AddFakeInnoDBReadRowsResult(db, 0) - se := newEngine(10, 1*time.Second, 1*time.Second, db) + se := newEngine(10, 1*time.Second,
1*time.Second, 0, db) err := se.Open() require.NoError(t, err) @@ -498,7 +498,7 @@ func TestExportVars(t *testing.T) { db := fakesqldb.New(t) defer db.Close() schematest.AddDefaultQueries(db) - se := newEngine(10, 1*time.Second, 1*time.Second, db) + se := newEngine(10, 1*time.Second, 1*time.Second, 0, db) se.Open() defer se.Close() expvar.Do(func(kv expvar.KeyValue) { @@ -510,7 +510,7 @@ func TestStatsURL(t *testing.T) { db := fakesqldb.New(t) defer db.Close() schematest.AddDefaultQueries(db) - se := newEngine(10, 1*time.Second, 1*time.Second, db) + se := newEngine(10, 1*time.Second, 1*time.Second, 0, db) se.Open() defer se.Close() @@ -540,7 +540,7 @@ func TestSchemaEngineCloseTickRace(t *testing.T) { }) AddFakeInnoDBReadRowsResult(db, 12) // Start the engine with a small reload tick - se := newEngine(10, 100*time.Millisecond, 1*time.Second, db) + se := newEngine(10, 100*time.Millisecond, 1*time.Second, 0, db) err := se.Open() require.NoError(t, err) @@ -567,13 +567,14 @@ func TestSchemaEngineCloseTickRace(t *testing.T) { } } -func newEngine(queryCacheSize int, reloadTime time.Duration, idleTimeout time.Duration, db *fakesqldb.DB) *Engine { +func newEngine(queryCacheSize int, reloadTime time.Duration, idleTimeout time.Duration, schemaMaxAgeSeconds int64, db *fakesqldb.DB) *Engine { config := tabletenv.NewDefaultConfig() config.QueryCacheSize = queryCacheSize config.SchemaReloadIntervalSeconds.Set(reloadTime) config.OltpReadPool.IdleTimeoutSeconds.Set(idleTimeout) config.OlapReadPool.IdleTimeoutSeconds.Set(idleTimeout) config.TxPool.IdleTimeoutSeconds.Set(idleTimeout) + config.SchemaVersionMaxAgeSeconds = schemaMaxAgeSeconds se := NewEngine(tabletenv.NewEnv(config, "SchemaTest")) se.InitDBConfig(newDBConfigs(db).DbaWithDB()) return se diff --git a/go/vt/vttablet/tabletserver/schema/historian.go b/go/vt/vttablet/tabletserver/schema/historian.go index 75f48dc0518..d503b2826a6 100644 --- a/go/vt/vttablet/tabletserver/schema/historian.go +++ b/go/vt/vttablet/tabletserver/schema/historian.go @@ -18,9 +18,9 @@ package schema import ( "context" - "fmt" "sort" "sync" + "time" "google.golang.org/protobuf/proto" @@ -35,36 +35,40 @@ import ( "vitess.io/vitess/go/vt/sqlparser" ) -const getSchemaVersions = "select id, pos, ddl, time_updated, schemax from _vt.schema_version where id > %d order by id asc" +const getInitialSchemaVersions = "select id, pos, ddl, time_updated, schemax from _vt.schema_version where time_updated > %d order by id asc" +const getNextSchemaVersions = "select id, pos, ddl, time_updated, schemax from _vt.schema_version where id > %d order by id asc" // vl defines the glog verbosity level for the package const vl = 10 // trackedSchema has the snapshot of the table at a given pos (reached by ddl) type trackedSchema struct { - schema map[string]*binlogdatapb.MinimalTable - pos mysql.Position - ddl string + schema map[string]*binlogdatapb.MinimalTable + pos mysql.Position + ddl string + timeUpdated int64 } // historian implements the Historian interface by calling schema.Engine for the underlying schema // and supplying a schema for a specific version by loading the cached values from the schema_version table // The schema version table is populated by the Tracker type historian struct { - conns *connpool.Pool - lastID int64 - schemas []*trackedSchema - mu sync.Mutex - enabled bool - isOpen bool + conns *connpool.Pool + lastID int64 + schemas []*trackedSchema + mu sync.Mutex + enabled bool + isOpen bool + schemaMaxAgeSeconds int64 } // newHistorian creates a new historian. 
It expects a schema.Engine instance -func newHistorian(enabled bool, conns *connpool.Pool) *historian { +func newHistorian(enabled bool, schemaMaxAgeSeconds int64, conns *connpool.Pool) *historian { sh := historian{ - conns: conns, - lastID: 0, - enabled: enabled, + conns: conns, + lastID: 0, + enabled: enabled, + schemaMaxAgeSeconds: schemaMaxAgeSeconds, } return &sh } @@ -164,7 +168,17 @@ func (h *historian) loadFromDB(ctx context.Context) error { return err } defer conn.Recycle() - tableData, err := conn.Exec(ctx, fmt.Sprintf(getSchemaVersions, h.lastID), 10000, true) + + var tableData *sqltypes.Result + if h.lastID == 0 && h.schemaMaxAgeSeconds > 0 { // only at vttablet start + schemaMaxAge := time.Now().UTC().Add(time.Duration(-h.schemaMaxAgeSeconds) * time.Second) + tableData, err = conn.Exec(ctx, sqlparser.BuildParsedQuery(getInitialSchemaVersions, + schemaMaxAge.Unix()).Query, 10000, true) + } else { + tableData, err = conn.Exec(ctx, sqlparser.BuildParsedQuery(getNextSchemaVersions, + h.lastID).Query, 10000, true) + } + if err != nil { log.Infof("Error reading schema_tracking table %v, will operate with the latest available schema", err) return nil @@ -177,6 +191,14 @@ func (h *historian) loadFromDB(ctx context.Context) error { h.schemas = append(h.schemas, trackedSchema) h.lastID = id } + + if h.lastID != 0 && h.schemaMaxAgeSeconds > 0 { + // To avoid keeping old schemas in memory which can lead to an eventual memory leak + // we purge any older than h.schemaMaxAgeSeconds. Only needs to be done when adding + // new schema rows. + h.purgeOldSchemas() + } + h.sortSchemas() return nil } @@ -217,13 +239,40 @@ func (h *historian) readRow(row []sqltypes.Value) (*trackedSchema, int64, error) tables[t.Name] = t } tSchema := &trackedSchema{ - schema: tables, - pos: pos, - ddl: ddl, + schema: tables, + pos: pos, + ddl: ddl, + timeUpdated: timeUpdated, } return tSchema, id, nil } +func (h *historian) purgeOldSchemas() { + maxAgeDuration := time.Duration(h.schemaMaxAgeSeconds) * time.Second + shouldPurge := false + + // check if we have any schemas we need to purge and only create the filtered + // slice if necessary + for _, s := range h.schemas { + if time.Since(time.Unix(s.timeUpdated, 0)) > maxAgeDuration { + shouldPurge = true + break + } + } + + if !shouldPurge { + return + } + + filtered := make([]*trackedSchema, 0) + for _, s := range h.schemas { + if time.Since(time.Unix(s.timeUpdated, 0)) < maxAgeDuration { + filtered = append(filtered, s) + } + } + h.schemas = filtered +} + // sortSchemas sorts entries in ascending order of gtid, ex: 40,44,48 func (h *historian) sortSchemas() { sort.Slice(h.schemas, func(i int, j int) bool { diff --git a/go/vt/vttablet/tabletserver/schema/historian_test.go b/go/vt/vttablet/tabletserver/schema/historian_test.go index c1a9a6416d4..66c89feeb37 100644 --- a/go/vt/vttablet/tabletserver/schema/historian_test.go +++ b/go/vt/vttablet/tabletserver/schema/historian_test.go @@ -19,6 +19,7 @@ package schema import ( "fmt" "testing" + "time" "github.com/stretchr/testify/require" "google.golang.org/protobuf/proto" @@ -71,7 +72,7 @@ func getDbSchemaBlob(t *testing.T, tables map[string]*binlogdatapb.MinimalTable) } func TestHistorian(t *testing.T) { - se, db, cancel := getTestSchemaEngine(t) + se, db, cancel := getTestSchemaEngine(t, 0) defer cancel() se.EnableHistorian(false) @@ -174,3 +175,77 @@ func TestHistorian(t *testing.T) { require.NoError(t, err) require.Equal(t, exp3, fmt.Sprintf("%v", tab)) } + +func TestHistorianPurgeOldSchemas(t *testing.T) { + 
schemaVersionMaxAgeSeconds := 3600 // 1 hour + se, db, cancel := getTestSchemaEngine(t, int64(schemaVersionMaxAgeSeconds)) + defer cancel() + + gtidPrefix := "MySQL56/7b04699f-f5e9-11e9-bf88-9cb6d089e1c3:" + gtid1 := gtidPrefix + "1-10" + ddl1 := "create table tracker_test (id int)" + // create the first record 1 day ago so it gets purged from memory + ts1 := time.Now().Add(time.Duration(-24) * time.Hour) + _, _, _ = ddl1, ts1, db + se.EnableHistorian(true) + _, err := se.GetTableForPos(sqlparser.NewIdentifierCS("t1"), gtid1) + require.Equal(t, "table t1 not found in vttablet schema", err.Error()) + var blob1 string + + fields := []*querypb.Field{{ + Name: "id", + Type: sqltypes.Int32, + }, { + Name: "pos", + Type: sqltypes.VarBinary, + }, { + Name: "ddl", + Type: sqltypes.VarBinary, + }, { + Name: "time_updated", + Type: sqltypes.Int32, + }, { + Name: "schemax", + Type: sqltypes.Blob, + }} + + table := getTable("t1", []string{"id1", "id2"}, []querypb.Type{querypb.Type_INT32, querypb.Type_INT32}, []int64{0}) + tables := make(map[string]*binlogdatapb.MinimalTable) + tables["t1"] = table + blob1 = getDbSchemaBlob(t, tables) + db.AddQueryPattern("select id, pos, ddl, time_updated, schemax from _vt\\.schema_version where time_updated \\>.*", &sqltypes.Result{ + Fields: fields, + Rows: [][]sqltypes.Value{ + {sqltypes.NewInt32(1), sqltypes.NewVarBinary(gtid1), sqltypes.NewVarBinary(ddl1), sqltypes.NewInt32(int32(ts1.Unix())), sqltypes.NewVarBinary(blob1)}, + }, + }) + require.Nil(t, se.RegisterVersionEvent()) + _, err = se.GetTableForPos(sqlparser.NewIdentifierCS("t1"), gtid1) + // validate the old schema has been purged + require.Equal(t, "table t1 not found in vttablet schema", err.Error()) + require.Equal(t, 0, len(se.historian.schemas)) + + // add a second schema record row with a time_updated that won't be purged + gtid2 := gtidPrefix + "1-20" + _, err = se.GetTableForPos(sqlparser.NewIdentifierCS("t1"), gtid2) + require.Equal(t, "table t1 not found in vttablet schema", err.Error()) + + table = getTable("t1", []string{"id1", "id2"}, []querypb.Type{querypb.Type_INT32, querypb.Type_VARBINARY}, []int64{0}) + tables["t1"] = table + blob2 := getDbSchemaBlob(t, tables) + ddl2 := "alter table t1 modify column id2 varbinary" + // set time_updated younger than the cutoff from historian.schemaMaxAgeSeconds + ts2 := time.Now().Add(time.Duration(-60) * time.Second) + db.AddQuery("select id, pos, ddl, time_updated, schemax from _vt.schema_version where id > 1 order by id asc", &sqltypes.Result{ + Fields: fields, + Rows: [][]sqltypes.Value{ + {sqltypes.NewInt32(2), sqltypes.NewVarBinary(gtid2), sqltypes.NewVarBinary(ddl2), sqltypes.NewInt32(int32(ts2.Unix())), sqltypes.NewVarBinary(blob2)}, + }, + }) + require.Nil(t, se.RegisterVersionEvent()) + exp2 := `name:"t1" fields:{name:"id1" type:INT32 table:"t1"} fields:{name:"id2" type:VARBINARY table:"t1"} p_k_columns:0` + tab, err := se.GetTableForPos(sqlparser.NewIdentifierCS("t1"), gtid2) + require.NoError(t, err) + require.Equal(t, exp2, fmt.Sprintf("%v", tab)) + require.Equal(t, 1, len(se.historian.schemas)) +} diff --git a/go/vt/vttablet/tabletserver/schema/main_test.go b/go/vt/vttablet/tabletserver/schema/main_test.go index ada5c8085a1..19fc66c36d1 100644 --- a/go/vt/vttablet/tabletserver/schema/main_test.go +++ b/go/vt/vttablet/tabletserver/schema/main_test.go @@ -27,7 +27,7 @@ import ( "vitess.io/vitess/go/sqltypes" ) -func getTestSchemaEngine(t *testing.T) (*Engine, *fakesqldb.DB, func()) { +func getTestSchemaEngine(t *testing.T, schemaMaxAgeSeconds 
int64) (*Engine, *fakesqldb.DB, func()) { db := fakesqldb.New(t) db.AddQuery("select unix_timestamp()", sqltypes.MakeTestResult(sqltypes.MakeTestFields( "t", @@ -37,7 +37,7 @@ func getTestSchemaEngine(t *testing.T) (*Engine, *fakesqldb.DB, func()) { db.AddQueryPattern(baseShowTablesPattern, &sqltypes.Result{}) db.AddQuery(mysql.BaseShowPrimary, &sqltypes.Result{}) AddFakeInnoDBReadRowsResult(db, 1) - se := newEngine(10, 10*time.Second, 10*time.Second, db) + se := newEngine(10, 10*time.Second, 10*time.Second, schemaMaxAgeSeconds, db) require.NoError(t, se.Open()) cancel := func() { defer db.Close() diff --git a/go/vt/vttablet/tabletserver/schema/tracker_test.go b/go/vt/vttablet/tabletserver/schema/tracker_test.go index 2b30ee47e55..9822b6bfe5a 100644 --- a/go/vt/vttablet/tabletserver/schema/tracker_test.go +++ b/go/vt/vttablet/tabletserver/schema/tracker_test.go @@ -30,7 +30,7 @@ import ( func TestTracker(t *testing.T) { initialSchemaInserted := false - se, db, cancel := getTestSchemaEngine(t) + se, db, cancel := getTestSchemaEngine(t, 0) defer cancel() gtid1 := "MySQL56/7b04699f-f5e9-11e9-bf88-9cb6d089e1c3:1-10" ddl1 := "create table tracker_test (id int)" @@ -91,7 +91,7 @@ func TestTracker(t *testing.T) { func TestTrackerShouldNotInsertInitialSchema(t *testing.T) { initialSchemaInserted := false - se, db, cancel := getTestSchemaEngine(t) + se, db, cancel := getTestSchemaEngine(t, 0) gtid1 := "MySQL56/7b04699f-f5e9-11e9-bf88-9cb6d089e1c3:1-10" defer cancel() diff --git a/go/vt/vttablet/tabletserver/tabletenv/config.go b/go/vt/vttablet/tabletserver/tabletenv/config.go index 0dbf334d50e..f4961bc4109 100644 --- a/go/vt/vttablet/tabletserver/tabletenv/config.go +++ b/go/vt/vttablet/tabletserver/tabletenv/config.go @@ -134,6 +134,7 @@ func registerTabletEnvFlags(fs *pflag.FlagSet) { fs.BoolVar(&currentConfig.AnnotateQueries, "queryserver-config-annotate-queries", defaultConfig.AnnotateQueries, "prefix queries to MySQL backend with comment indicating vtgate principal (user) and target tablet type") fs.BoolVar(&currentConfig.WatchReplication, "watch_replication_stream", false, "When enabled, vttablet will stream the MySQL replication stream from the local server, and use it to update schema when it sees a DDL.") fs.BoolVar(&currentConfig.TrackSchemaVersions, "track_schema_versions", false, "When enabled, vttablet will store versions of schemas at each position that a DDL is applied and allow retrieval of the schema corresponding to a position") + fs.Int64Var(&currentConfig.SchemaVersionMaxAgeSeconds, "schema-version-max-age-seconds", 0, "max age of schema version records to keep in memory by the vreplication historian") fs.BoolVar(&currentConfig.TwoPCEnable, "twopc_enable", defaultConfig.TwoPCEnable, "if the flag is on, 2pc is enabled. Other 2pc flags must be supplied.") fs.StringVar(&currentConfig.TwoPCCoordinatorAddress, "twopc_coordinator_address", defaultConfig.TwoPCCoordinatorAddress, "address of the (VTGate) process(es) that will be used to notify of abandoned transactions.") SecondsVar(fs, &currentConfig.TwoPCAbandonAge, "twopc_abandon_age", defaultConfig.TwoPCAbandonAge, "time in seconds.
Any unresolved transaction older than this time will be sent to the coordinator to be resolved.") @@ -294,6 +295,7 @@ type TabletConfig struct { SignalSchemaChangeReloadIntervalSeconds Seconds `json:"signalSchemaChangeReloadIntervalSeconds,omitempty"` WatchReplication bool `json:"watchReplication,omitempty"` TrackSchemaVersions bool `json:"trackSchemaVersions,omitempty"` + SchemaVersionMaxAgeSeconds int64 `json:"schemaVersionMaxAgeSeconds,omitempty"` TerseErrors bool `json:"terseErrors,omitempty"` AnnotateQueries bool `json:"annotateQueries,omitempty"` MessagePostponeParallelism int `json:"messagePostponeParallelism,omitempty"` From b4c9ce3805d97232ed9f1b8a5d76375cf5e46e58 Mon Sep 17 00:00:00 2001 From: Brendan Dougherty Date: Thu, 15 Jun 2023 17:09:28 -0400 Subject: [PATCH 06/12] Merge pull request #102 from Shopify/generate-cachedsize-for-placement-vindex Generate CachedSize function for Placement vindex --- go/vt/vtgate/vindexes/cached_size.go | 36 ++++++++++++++++++++++++++++ 1 file changed, 36 insertions(+) diff --git a/go/vt/vtgate/vindexes/cached_size.go b/go/vt/vtgate/vindexes/cached_size.go index 76fe7f4abf5..50817e22352 100644 --- a/go/vt/vtgate/vindexes/cached_size.go +++ b/go/vt/vtgate/vindexes/cached_size.go @@ -342,6 +342,42 @@ func (cached *NumericStaticMap) CachedSize(alloc bool) int64 { } return size } + +//go:nocheckptr +func (cached *Placement) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(80) + } + // field name string + size += hack.RuntimeAllocSize(int64(len(cached.name))) + // field placementMap vitess.io/vitess/go/vt/vtgate/vindexes.PlacementMap + if cached.placementMap != nil { + size += int64(48) + hmap := reflect.ValueOf(cached.placementMap) + numBuckets := int(math.Pow(2, float64((*(*uint8)(unsafe.Pointer(hmap.Pointer() + uintptr(9))))))) + numOldBuckets := (*(*uint16)(unsafe.Pointer(hmap.Pointer() + uintptr(10)))) + size += hack.RuntimeAllocSize(int64(numOldBuckets * 208)) + if len(cached.placementMap) > 0 || numBuckets > 1 { + size += hack.RuntimeAllocSize(int64(numBuckets * 208)) + } + for k := range cached.placementMap { + size += hack.RuntimeAllocSize(int64(len(k))) + } + } + // field subVindex vitess.io/vitess/go/vt/vtgate/vindexes.Vindex + if cc, ok := cached.subVindex.(cachedObject); ok { + size += cc.CachedSize(true) + } + // field subVindexType string + size += hack.RuntimeAllocSize(int64(len(cached.subVindexType))) + // field subVindexName string + size += hack.RuntimeAllocSize(int64(len(cached.subVindexName))) + return size +} func (cached *RegionExperimental) CachedSize(alloc bool) int64 { if cached == nil { return int64(0) From 224735160eb043a7467fa494c3cef159c8cd49c1 Mon Sep 17 00:00:00 2001 From: Austen Lacy Date: Tue, 20 Jun 2023 09:34:55 -0400 Subject: [PATCH 07/12] return 200 in throttler if throttler is not open (#100) --- go/vt/vttablet/tabletserver/throttle/throttler.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/go/vt/vttablet/tabletserver/throttle/throttler.go b/go/vt/vttablet/tabletserver/throttle/throttler.go index 55a86a5e683..6f48694fe53 100644 --- a/go/vt/vttablet/tabletserver/throttle/throttler.go +++ b/go/vt/vttablet/tabletserver/throttle/throttler.go @@ -994,7 +994,7 @@ func (throttler *Throttler) AppRequestMetricResult(ctx context.Context, appName // checkStore checks the aggregated value of given MySQL store func (throttler *Throttler) checkStore(ctx context.Context, appName string, storeName string, remoteAddr string, flags 
*CheckFlags) (checkResult *CheckResult) { - if !throttler.IsEnabled() { + if !throttler.IsEnabled() || atomic.LoadInt64(&throttler.isOpen) == 0 { return okMetricCheckResult } return throttler.check.Check(ctx, appName, "mysql", storeName, remoteAddr, flags) From 9afb1580f4816e2820df2468e6840b096fa0ac85 Mon Sep 17 00:00:00 2001 From: Austen Lacy Date: Wed, 5 Jul 2023 14:29:40 -0400 Subject: [PATCH 08/12] `vttestserver`: persist vschema changes in `--persistent_mode` (#13065) (#107) Co-authored-by: Hormoz Kheradmand --- go/cmd/vtcombo/main.go | 12 ++- go/cmd/vtcombo/vschema_watcher.go | 111 +++++++++++++++++++++++ go/cmd/vttestserver/main.go | 6 +- go/cmd/vttestserver/vttestserver_test.go | 14 ++- go/flags/endtoend/vttestserver.txt | 2 +- go/vt/vttest/local_cluster.go | 34 ++++--- go/vt/vttest/vtprocess.go | 4 + 7 files changed, 157 insertions(+), 26 deletions(-) create mode 100644 go/cmd/vtcombo/vschema_watcher.go diff --git a/go/cmd/vtcombo/main.go b/go/cmd/vtcombo/main.go index 61e862475dc..60ffdb12f21 100644 --- a/go/cmd/vtcombo/main.go +++ b/go/cmd/vtcombo/main.go @@ -61,7 +61,13 @@ var ( mysqlPort = flags.Int("mysql_port", 3306, "mysql port") externalTopoServer = flags.Bool("external_topo_server", false, "Should vtcombo use an external topology server instead of starting its own in-memory topology server. "+ "If true, vtcombo will use the flags defined in topo/server.go to open topo server") - plannerName = flags.String("planner-version", "", "Sets the default planner to use when the session has not changed it. Valid values are: V3, Gen4, Gen4Greedy and Gen4Fallback. Gen4Fallback tries the gen4 planner and falls back to the V3 planner if the gen4 fails.") + plannerName = flags.String("planner-version", "", "Sets the default planner to use when the session has not changed it. Valid values are: V3, V3Insert, Gen4, Gen4Greedy and Gen4Fallback. Gen4Fallback tries the gen4 planner and falls back to the V3 planner if the gen4 fails.") + vschemaPersistenceDir = flags.String("vschema-persistence-dir", "", "If set, per-keyspace vschema will be persisted in this directory "+ + "and reloaded into the in-memory topology server across restarts. Bookkeeping is performed using a simple watcher goroutine. "+ + "This is useful when running vtcombo as an application development container (e.g. vttestserver) where you want to keep the same "+ + "vschema even if developer's machine reboots. This works in tandem with vttestserver's --persistent_mode flag. Needless to say, "+ + "this is neither a perfect nor a production solution for vschema persistence. Consider using the --external_topo_server flag if "+ + "you require a more complete solution. This flag is ignored if --external_topo_server is set.") tpb vttestpb.VTTestTopology ts *topo.Server @@ -292,6 +298,10 @@ func main() { exit.Return(1) } + if *vschemaPersistenceDir != "" && !*externalTopoServer { + startVschemaWatcher(*vschemaPersistenceDir, tpb.Keyspaces, ts) + } + servenv.OnRun(func() { addStatusParts(vtg) }) diff --git a/go/cmd/vtcombo/vschema_watcher.go b/go/cmd/vtcombo/vschema_watcher.go new file mode 100644 index 00000000000..9194f61e774 --- /dev/null +++ b/go/cmd/vtcombo/vschema_watcher.go @@ -0,0 +1,111 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "context" + "encoding/json" + "os" + "path" + + "vitess.io/vitess/go/vt/log" + vschemapb "vitess.io/vitess/go/vt/proto/vschema" + vttestpb "vitess.io/vitess/go/vt/proto/vttest" + "vitess.io/vitess/go/vt/topo" +) + +func startVschemaWatcher(vschemaPersistenceDir string, keyspaces []*vttestpb.Keyspace, ts *topo.Server) { + // Create the directory if it doesn't exist. + if err := createDirectoryIfNotExists(vschemaPersistenceDir); err != nil { + log.Fatalf("Unable to create vschema persistence directory %v: %v", vschemaPersistenceDir, err) + } + + // If there are keyspace files, load them. + loadKeyspacesFromDir(vschemaPersistenceDir, keyspaces, ts) + + // Rebuild the SrvVSchema object in case we loaded vschema from file + if err := ts.RebuildSrvVSchema(context.Background(), tpb.Cells); err != nil { + log.Fatalf("RebuildSrvVSchema failed: %v", err) + } + + // Now watch for changes in the SrvVSchema object and persist them to disk. + go watchSrvVSchema(context.Background(), ts, tpb.Cells[0]) +} + +func loadKeyspacesFromDir(dir string, keyspaces []*vttestpb.Keyspace, ts *topo.Server) { + for _, ks := range tpb.Keyspaces { + ksFile := path.Join(dir, ks.Name+".json") + if _, err := os.Stat(ksFile); err == nil { + jsonData, err := os.ReadFile(ksFile) + if err != nil { + log.Fatalf("Unable to read keyspace file %v: %v", ksFile, err) + } + + keyspace := &vschemapb.Keyspace{} + err = json.Unmarshal(jsonData, keyspace) + if err != nil { + log.Fatalf("Unable to parse keyspace file %v: %v", ksFile, err) + } + + ts.SaveVSchema(context.Background(), ks.Name, keyspace) + log.Infof("Loaded keyspace %v from %v\n", ks.Name, ksFile) + } + } +} + +func watchSrvVSchema(ctx context.Context, ts *topo.Server, cell string) { + data, ch, err := ts.WatchSrvVSchema(context.Background(), tpb.Cells[0]) + if err != nil { + log.Fatalf("WatchSrvVSchema failed: %v", err) + } + + if data.Err != nil { + log.Fatalf("WatchSrvVSchema could not retrieve initial vschema: %v", data.Err) + } + persistNewSrvVSchema(data.Value) + + for update := range ch { + if update.Err != nil { + log.Errorf("WatchSrvVSchema returned an error: %v", update.Err) + } else { + persistNewSrvVSchema(update.Value) + } + } +} + +func persistNewSrvVSchema(srvVSchema *vschemapb.SrvVSchema) { + for ksName, ks := range srvVSchema.Keyspaces { + jsonBytes, err := json.MarshalIndent(ks, "", " ") + if err != nil { + log.Errorf("Error marshaling keyspace: %v", err) + continue + } + + err = os.WriteFile(path.Join(*vschemaPersistenceDir, ksName+".json"), jsonBytes, 0644) + if err != nil { + log.Errorf("Error writing keyspace file: %v", err) + } + log.Infof("Persisted keyspace %v to %v", ksName, *vschemaPersistenceDir) + } +} + +func createDirectoryIfNotExists(dir string) error { + if _, err := os.Stat(dir); os.IsNotExist(err) { + return os.Mkdir(dir, 0755) + } + return nil +} diff --git a/go/cmd/vttestserver/main.go b/go/cmd/vttestserver/main.go index a91005f841c..c14fb0960c8 100644 --- a/go/cmd/vttestserver/main.go +++ b/go/cmd/vttestserver/main.go @@ -93,9 +93,9 @@ func registerFlags(fs *pflag.FlagSet) { " vttestserver as a database 
container in local developer environments. Note"+ " that db migration files (--schema_dir option) and seeding of"+ " random data (--initialize_with_random_data option) will only run during"+ - " cluster startup if the data directory does not already exist. vschema"+ - " migrations are run every time the cluster starts, since persistence"+ - " for the topology server has not been implemented yet") + " cluster startup if the data directory does not already exist. "+ + " Changes to VSchema are persisted across cluster restarts using a simple"+ + " watcher if the --data_dir argument is specified.") fs.BoolVar(&doSeed, "initialize_with_random_data", false, "If this flag is each table-shard will be initialized"+ diff --git a/go/cmd/vttestserver/vttestserver_test.go b/go/cmd/vttestserver/vttestserver_test.go index 0665d5f9c46..9313182d9b8 100644 --- a/go/cmd/vttestserver/vttestserver_test.go +++ b/go/cmd/vttestserver/vttestserver_test.go @@ -81,8 +81,14 @@ func TestPersistentMode(t *testing.T) { cluster, err := startPersistentCluster(dir) assert.NoError(t, err) - // basic sanity checks similar to TestRunsVschemaMigrations + // Add a new "ad-hoc" vindex via vtgate once the cluster is up, to later make sure it is persisted across teardowns + err = addColumnVindex(cluster, "test_keyspace", "alter vschema on persistence_test add vindex my_vdx(id)") + assert.NoError(t, err) + + // Basic sanity checks similar to TestRunsVschemaMigrations + // See go/cmd/vttestserver/data/schema/app_customer/* and go/cmd/vttestserver/data/schema/test_keyspace/* assertColumnVindex(t, cluster, columnVindex{keyspace: "test_keyspace", table: "test_table", vindex: "my_vdx", vindexType: "hash", column: "id"}) + assertColumnVindex(t, cluster, columnVindex{keyspace: "test_keyspace", table: "persistence_test", vindex: "my_vdx", vindexType: "hash", column: "id"}) assertColumnVindex(t, cluster, columnVindex{keyspace: "app_customer", table: "customers", vindex: "hash", vindexType: "hash", column: "id"}) // insert some data to ensure persistence across teardowns @@ -111,8 +117,9 @@ func TestPersistentMode(t *testing.T) { defer cluster.TearDown() assert.NoError(t, err) - // rerun our sanity checks to make sure vschema migrations are run during every startup + // rerun our sanity checks to make sure vschema is persisted correctly assertColumnVindex(t, cluster, columnVindex{keyspace: "test_keyspace", table: "test_table", vindex: "my_vdx", vindexType: "hash", column: "id"}) + assertColumnVindex(t, cluster, columnVindex{keyspace: "test_keyspace", table: "persistence_test", vindex: "my_vdx", vindexType: "hash", column: "id"}) assertColumnVindex(t, cluster, columnVindex{keyspace: "app_customer", table: "customers", vindex: "hash", vindexType: "hash", column: "id"}) // ensure previous data was successfully persisted @@ -316,7 +323,8 @@ func startCluster(flags ...string) (vttest.LocalCluster, error) { keyspaceArg := "--keyspaces=" + strings.Join(clusterKeyspaces, ",") numShardsArg := "--num_shards=2,2" vschemaDDLAuthorizedUsers := "--vschema_ddl_authorized_users=%" - os.Args = append(os.Args, []string{schemaDirArg, keyspaceArg, numShardsArg, tabletHostname, vschemaDDLAuthorizedUsers}...) + alsoLogToStderr := "--alsologtostderr" // better debugging + os.Args = append(os.Args, []string{schemaDirArg, keyspaceArg, numShardsArg, tabletHostname, vschemaDDLAuthorizedUsers, alsoLogToStderr}...) os.Args = append(os.Args, flags...) 
return runCluster() } diff --git a/go/flags/endtoend/vttestserver.txt b/go/flags/endtoend/vttestserver.txt index b63bc1ad30f..231a50e2d3c 100644 --- a/go/flags/endtoend/vttestserver.txt +++ b/go/flags/endtoend/vttestserver.txt @@ -80,7 +80,7 @@ Usage of vttestserver: --num_shards strings Comma separated shard count (one per keyspace) (default [2]) --onclose_timeout duration wait no more than this for OnClose handlers before stopping (default 10s) --onterm_timeout duration wait no more than this for OnTermSync handlers before stopping (default 10s) - --persistent_mode If this flag is set, the MySQL data directory is not cleaned up when LocalCluster.TearDown() is called. This is useful for running vttestserver as a database container in local developer environments. Note that db migration files (--schema_dir option) and seeding of random data (--initialize_with_random_data option) will only run during cluster startup if the data directory does not already exist. vschema migrations are run every time the cluster starts, since persistence for the topology server has not been implemented yet + --persistent_mode If this flag is set, the MySQL data directory is not cleaned up when LocalCluster.TearDown() is called. This is useful for running vttestserver as a database container in local developer environments. Note that db migration files (--schema_dir option) and seeding of random data (--initialize_with_random_data option) will only run during cluster startup if the data directory does not already exist. Changes to VSchema are persisted across cluster restarts using a simple watcher if the --data_dir argument is specified. --pid_file string If set, the process will write its pid to the named file, and delete it on graceful shutdown. --planner-version string Sets the default planner to use when the session has not changed it. Valid values are: V3, Gen4, Gen4Greedy and Gen4Fallback. Gen4Fallback tries the new gen4 planner and falls back to the V3 planner if the gen4 fails. 
--pool_hostname_resolve_interval duration if set force an update to all hostnames and reconnect if changed, defaults to 0 (disabled) diff --git a/go/vt/vttest/local_cluster.go b/go/vt/vttest/local_cluster.go index 7dcbdc67afa..4db10d9f941 100644 --- a/go/vt/vttest/local_cluster.go +++ b/go/vt/vttest/local_cluster.go @@ -463,28 +463,26 @@ func (db *LocalCluster) loadSchema(shouldRunDatabaseMigrations bool) error { } } - glob, _ := filepath.Glob(path.Join(schemaDir, "*.sql")) - for _, filepath := range glob { - cmds, err := LoadSQLFile(filepath, schemaDir) - if err != nil { - return err - } - - // One single vschema migration per file - if !db.OnlyMySQL && len(cmds) == 1 && strings.HasPrefix(strings.ToUpper(cmds[0]), "ALTER VSCHEMA") { - if err = db.applyVschema(keyspace, cmds[0]); err != nil { + if shouldRunDatabaseMigrations { + glob, _ := filepath.Glob(path.Join(schemaDir, "*.sql")) + for _, filepath := range glob { + cmds, err := LoadSQLFile(filepath, schemaDir) + if err != nil { return err } - continue - } - if !shouldRunDatabaseMigrations { - continue - } + // One single vschema migration per file + if !db.OnlyMySQL && len(cmds) == 1 && strings.HasPrefix(strings.ToUpper(cmds[0]), "ALTER VSCHEMA") { + if err = db.applyVschema(keyspace, cmds[0]); err != nil { + return err + } + continue + } - for _, dbname := range db.shardNames(kpb) { - if err := db.Execute(cmds, dbname); err != nil { - return err + for _, dbname := range db.shardNames(kpb) { + if err := db.Execute(cmds, dbname); err != nil { + return err + } } } } diff --git a/go/vt/vttest/vtprocess.go b/go/vt/vttest/vtprocess.go index 11761cfa2a2..c2cdc4e59e6 100644 --- a/go/vt/vttest/vtprocess.go +++ b/go/vt/vttest/vtprocess.go @@ -23,6 +23,7 @@ import ( "net/http" "os" "os/exec" + "path" "strings" "syscall" "time" @@ -243,6 +244,9 @@ func VtcomboProcess(environment Environment, args *Config, mysql MySQLManager) ( if args.SchemaDir != "" { vt.ExtraArgs = append(vt.ExtraArgs, []string{"--schema_dir", args.SchemaDir}...) } + if args.PersistentMode && args.DataDir != "" { + vt.ExtraArgs = append(vt.ExtraArgs, []string{"--vschema-persistence-dir", path.Join(args.DataDir, "vschema_data")}...) + } if args.TransactionMode != "" { vt.ExtraArgs = append(vt.ExtraArgs, []string{"--transaction_mode", args.TransactionMode}...) 
 	}

From e1722e13c4913a55457160093ffbc7c6d1b399a0 Mon Sep 17 00:00:00 2001
From: Austen Lacy
Date: Tue, 19 Sep 2023 13:11:25 -0400
Subject: [PATCH 09/12] [BACK-PORT] Properly support ignore_nulls in CreateLookupVindex (#13913) (#122)

* Properly support ignore_nulls in CreateLookupVindex (#13913)

Signed-off-by: Matt Lord

* remove new vreplication tests that don't work with v15

Signed-off-by: Austen Lacy

---------

Signed-off-by: Matt Lord
Signed-off-by: Austen Lacy
Co-authored-by: Matt Lord
Co-authored-by: Austen Lacy
---
 go/test/endtoend/vreplication/config_test.go |  31 ++++-
 .../tabletserver/vstreamer/planbuilder.go    |  25 ++++
 go/vt/wrangler/materializer.go               |  50 +++++--
 go/vt/wrangler/materializer_test.go          | 119 ++++++++++++++++++
 4 files changed, 211 insertions(+), 14 deletions(-)
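
[ Reviewer note, not part of the patch: with ignore_nulls enabled on a
multi-column lookup vindex, the generated backfill query gains a WHERE
clause that skips rows whose lookup-key columns are NULL. The new
TestCreateLookupVindexIgnoreNulls below pins the exact source expression:

	select col2 as col2, col1 as col1, keyspace_id() as keyspace_id from t1 where col2 is not null and col1 is not null group by col2, col1, keyspace_id

The IsNotNull filter added to the vstreamer planbuilder is what lets
VReplication evaluate that clause during the copy phase. ]
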
diff --git a/go/test/endtoend/vreplication/config_test.go b/go/test/endtoend/vreplication/config_test.go
index 9cb05fa1044..74fe420f62d 100644
--- a/go/test/endtoend/vreplication/config_test.go
+++ b/go/test/endtoend/vreplication/config_test.go
@@ -36,8 +36,8 @@ var (
 	initialProductSchema = `
 create table product(pid int, description varbinary(128), date1 datetime not null default '0000-00-00 00:00:00', date2 datetime not null default '2021-00-01 00:00:00', primary key(pid), key(date1,date2)) CHARSET=utf8mb4;
 create table customer(cid int, name varchar(128) collate utf8mb4_bin, meta json default null, typ enum('individual','soho','enterprise'), sport set('football','cricket','baseball'),
-	ts timestamp not null default current_timestamp, bits bit(2) default b'11', date1 datetime not null default '0000-00-00 00:00:00',
-	date2 datetime not null default '2021-00-01 00:00:00', dec80 decimal(8,0), primary key(cid,typ), key(name)) CHARSET=utf8mb4;
+	ts timestamp not null default current_timestamp, bits bit(2) default b'11', date1 datetime not null default '0000-00-00 00:00:00',
+	date2 datetime not null default '2021-00-01 00:00:00', dec80 decimal(8,0), primary key(cid,typ)) CHARSET=utf8mb4;
 create table customer_seq(id int, next_id bigint, cache bigint, primary key(id)) comment 'vitess_sequence';
 create table merchant(mname varchar(128), category varchar(128), primary key(mname), key(category)) CHARSET=utf8mb4 COLLATE=utf8mb4_general_ci;
 create table orders(oid int, cid int, pid int, mname varchar(128), price int, qty int, total int as (qty * price), total2 int as (qty * price) stored, primary key(oid), key(pid), key(cid)) CHARSET=utf8;
@@ -85,6 +85,33 @@ create table datze (id int, dt1 datetime not null default current_timestamp, dt2
 }
 }
 `
+
+	createLookupVindexVSchema = `
+{
+  "sharded": true,
+  "vindexes": {
+    "customer_name_keyspace_id": {
+      "type": "consistent_lookup",
+      "params": {
+        "table": "product.customer_name_keyspace_id",
+        "from": "name,cid",
+        "to": "keyspace_id",
+        "ignore_nulls": "true"
+      },
+      "owner": "customer"
+    }
+  },
+  "tables": {
+    "customer": {
+      "column_vindexes": [{
+        "columns": ["name", "cid"],
+        "name": "customer_name_keyspace_id"
+      }]
+    }
+  }
+}
+`
+
 	customerSchema  = ""
 	customerVSchema = `
 {
diff --git a/go/vt/vttablet/tabletserver/vstreamer/planbuilder.go b/go/vt/vttablet/tabletserver/vstreamer/planbuilder.go
index 0378d04c373..0f72ac4a3d7 100644
--- a/go/vt/vttablet/tabletserver/vstreamer/planbuilder.go
+++ b/go/vt/vttablet/tabletserver/vstreamer/planbuilder.go
@@ -81,6 +81,8 @@ const (
 	GreaterThanEqual
 	// NotEqual is used to filter a comparable column if != specific value
 	NotEqual
+	// IsNotNull is used to filter out rows where the column is NULL
+	IsNotNull
 )

 // Filter contains opcodes for filtering.
@@ -226,6 +228,10 @@ func (plan *Plan) filter(values, result []sqltypes.Value, charsets []collations.
 		if !key.KeyRangeContains(filter.KeyRange, ksid) {
 			return false, nil
 		}
+	case IsNotNull:
+		if values[filter.ColNum].IsNull() {
+			return false, nil
+		}
 	default:
 		match, err := compare(filter.Opcode, values[filter.ColNum], filter.Value, charsets[filter.ColNum])
 		if err != nil {
@@ -552,6 +558,25 @@ func (plan *Plan) analyzeWhere(vschema *localVSchema, where *sqlparser.Where) er
 		if err := plan.analyzeInKeyRange(vschema, expr.Exprs); err != nil {
 			return err
 		}
+	case *sqlparser.IsExpr: // Needed for CreateLookupVindex with ignore_nulls
+		if expr.Right != sqlparser.IsNotNullOp {
+			return fmt.Errorf("unsupported constraint: %v", sqlparser.String(expr))
+		}
+		qualifiedName, ok := expr.Left.(*sqlparser.ColName)
+		if !ok {
+			return fmt.Errorf("unexpected: %v", sqlparser.String(expr))
+		}
+		if !qualifiedName.Qualifier.IsEmpty() {
+			return fmt.Errorf("unsupported qualifier for column: %v", sqlparser.String(qualifiedName))
+		}
+		colnum, err := findColumn(plan.Table, qualifiedName.Name)
+		if err != nil {
+			return err
+		}
+		plan.Filters = append(plan.Filters, Filter{
+			Opcode: IsNotNull,
+			ColNum: colnum,
+		})
 	default:
 		return fmt.Errorf("unsupported constraint: %v", sqlparser.String(expr))
 	}
diff --git a/go/vt/wrangler/materializer.go b/go/vt/wrangler/materializer.go
index 1b6aa2f5909..e5c92c13f3c 100644
--- a/go/vt/wrangler/materializer.go
+++ b/go/vt/wrangler/materializer.go
@@ -54,6 +54,7 @@ import (
 	tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata"
 	vschemapb "vitess.io/vitess/go/vt/proto/vschema"
 	vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata"
+	vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc"
 )

 type materializer struct {
@@ -442,12 +443,13 @@ func (wr *Wrangler) prepareCreateLookup(ctx context.Context, keyspace string, sp
 	// Important variables are pulled out here.
 	var (
 		// lookup vindex info
-		vindexName      string
-		vindex          *vschemapb.Vindex
-		targetKeyspace  string
-		targetTableName string
-		vindexFromCols  []string
-		vindexToCol     string
+		vindexName        string
+		vindex            *vschemapb.Vindex
+		targetKeyspace    string
+		targetTableName   string
+		vindexFromCols    []string
+		vindexToCol       string
+		vindexIgnoreNulls bool

 		// source table info
 		sourceTableName string
@@ -498,6 +500,20 @@ func (wr *Wrangler) prepareCreateLookup(ctx context.Context, keyspace string, sp
 	if _, err := vindexes.CreateVindex(vindex.Type, vindexName, vindex.Params); err != nil {
 		return nil, nil, nil, err
 	}
+	if ignoreNullsStr, ok := vindex.Params["ignore_nulls"]; ok {
+		// This mirrors the behavior of vindexes.boolFromMap().
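+		// Only the exact strings "true" and "false" are accepted;
+		// e.g. "TRUE" or "1" is rejected with INVALID_ARGUMENT below.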
+ switch ignoreNullsStr { + case "true": + vindexIgnoreNulls = true + case "false": + vindexIgnoreNulls = false + default: + return nil, nil, nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "ignore_nulls value must be 'true' or 'false': '%s'", + ignoreNullsStr) + } + } // Validate input table if len(specs.Tables) != 1 { @@ -634,21 +648,31 @@ func (wr *Wrangler) prepareCreateLookup(ctx context.Context, keyspace string, sp buf = sqlparser.NewTrackedBuffer(nil) buf.Myprintf("select ") for i := range vindexFromCols { - buf.Myprintf("%v as %v, ", sqlparser.NewIdentifierCI(sourceVindexColumns[i]), sqlparser.NewIdentifierCI(vindexFromCols[i])) + buf.Myprintf("%s as %s, ", sqlparser.String(sqlparser.NewIdentifierCI(sourceVindexColumns[i])), sqlparser.String(sqlparser.NewIdentifierCI(vindexFromCols[i]))) } if strings.EqualFold(vindexToCol, "keyspace_id") || strings.EqualFold(vindex.Type, "consistent_lookup_unique") || strings.EqualFold(vindex.Type, "consistent_lookup") { - buf.Myprintf("keyspace_id() as %v ", sqlparser.NewIdentifierCI(vindexToCol)) + buf.Myprintf("keyspace_id() as %s ", sqlparser.String(sqlparser.NewIdentifierCI(vindexToCol))) } else { - buf.Myprintf("%v as %v ", sqlparser.NewIdentifierCI(vindexToCol), sqlparser.NewIdentifierCI(vindexToCol)) + buf.Myprintf("%s as %s ", sqlparser.String(sqlparser.NewIdentifierCI(vindexToCol)), sqlparser.String(sqlparser.NewIdentifierCI(vindexToCol))) + } + buf.Myprintf("from %s", sqlparser.String(sqlparser.NewIdentifierCS(sourceTableName))) + if vindexIgnoreNulls { + buf.Myprintf(" where ") + lastValIdx := len(vindexFromCols) - 1 + for i := range vindexFromCols { + buf.Myprintf("%s is not null", sqlparser.String(sqlparser.NewIdentifierCI(vindexFromCols[i]))) + if i != lastValIdx { + buf.Myprintf(" and ") + } + } } - buf.Myprintf("from %v", sqlparser.NewIdentifierCS(sourceTableName)) if vindex.Owner != "" { // Only backfill buf.Myprintf(" group by ") for i := range vindexFromCols { - buf.Myprintf("%v, ", sqlparser.NewIdentifierCI(vindexFromCols[i])) + buf.Myprintf("%s, ", sqlparser.String(sqlparser.NewIdentifierCI(vindexFromCols[i]))) } - buf.Myprintf("%v", sqlparser.NewIdentifierCI(vindexToCol)) + buf.Myprintf("%s", sqlparser.String(sqlparser.NewIdentifierCI(vindexToCol))) } materializeQuery = buf.String() diff --git a/go/vt/wrangler/materializer_test.go b/go/vt/wrangler/materializer_test.go index 8a7e68f9d82..d05cbe602af 100644 --- a/go/vt/wrangler/materializer_test.go +++ b/go/vt/wrangler/materializer_test.go @@ -1278,6 +1278,125 @@ func TestCreateCustomizedVindex(t *testing.T) { } } +func TestCreateLookupVindexIgnoreNulls(t *testing.T) { + ms := &vtctldatapb.MaterializeSettings{ + SourceKeyspace: "ks", + TargetKeyspace: "ks", + } + + env := newTestMaterializerEnv(t, ms, []string{"0"}, []string{"0"}) + defer env.close() + + specs := &vschemapb.Keyspace{ + Vindexes: map[string]*vschemapb.Vindex{ + "v": { + Type: "consistent_lookup", + Params: map[string]string{ + "table": "ks.lkp", + "from": "col2,col1", + "to": "keyspace_id", + "ignore_nulls": "true", + }, + Owner: "t1", + }, + }, + Tables: map[string]*vschemapb.Table{ + "t1": { + ColumnVindexes: []*vschemapb.ColumnVindex{{ + Name: "v", + Columns: []string{"col2", "col1"}, + }}, + }, + }, + } + // Dummy sourceSchema + sourceSchema := "CREATE TABLE `t1` (\n" + + " `col1` int(11) NOT NULL AUTO_INCREMENT,\n" + + " `col2` int(11) DEFAULT NULL,\n" + + " PRIMARY KEY (`id`)\n" + + ") ENGINE=InnoDB AUTO_INCREMENT=3 DEFAULT CHARSET=latin1" + + vschema := &vschemapb.Keyspace{ + Sharded: true, + 
Vindexes: map[string]*vschemapb.Vindex{ + "hash": { + Type: "hash", + }, + }, + Tables: map[string]*vschemapb.Table{ + "t1": { + ColumnVindexes: []*vschemapb.ColumnVindex{{ + Name: "hash", + Column: "col1", + }}, + }, + }, + } + + wantKs := &vschemapb.Keyspace{ + Sharded: true, + Vindexes: map[string]*vschemapb.Vindex{ + "hash": { + Type: "hash", + }, + "v": { + Type: "consistent_lookup", + Params: map[string]string{ + "table": "ks.lkp", + "from": "col2,col1", + "to": "keyspace_id", + "write_only": "true", + "ignore_nulls": "true", + }, + Owner: "t1", + }, + }, + Tables: map[string]*vschemapb.Table{ + "t1": { + ColumnVindexes: []*vschemapb.ColumnVindex{{ + Name: "hash", + Column: "col1", + }, { + Name: "v", + Columns: []string{"col2", "col1"}, + }}, + }, + "lkp": { + ColumnVindexes: []*vschemapb.ColumnVindex{{ + Column: "col2", + Name: "hash", + }}, + }, + }, + } + wantQuery := "select col2 as col2, col1 as col1, keyspace_id() as keyspace_id from t1 where col2 is not null and col1 is not null group by col2, col1, keyspace_id" + + env.tmc.schema[ms.SourceKeyspace+".t1"] = &tabletmanagerdatapb.SchemaDefinition{ + TableDefinitions: []*tabletmanagerdatapb.TableDefinition{{ + Fields: []*querypb.Field{{ + Name: "col1", + Type: querypb.Type_INT64, + }, { + Name: "col2", + Type: querypb.Type_INT64, + }}, + Schema: sourceSchema, + }}, + } + if err := env.topoServ.SaveVSchema(context.Background(), ms.SourceKeyspace, vschema); err != nil { + t.Fatal(err) + } + + ms, ks, _, err := env.wr.prepareCreateLookup(context.Background(), ms.SourceKeyspace, specs, false) + require.NoError(t, err) + if !proto.Equal(wantKs, ks) { + t.Errorf("unexpected keyspace value: got:\n%v, want\n%v", ks, wantKs) + } + require.NotNil(t, ms) + require.GreaterOrEqual(t, len(ms.TableSettings), 1) + require.Equal(t, wantQuery, ms.TableSettings[0].SourceExpression, "unexpected query") +} + func TestStopAfterCopyFlag(t *testing.T) { ms := &vtctldatapb.MaterializeSettings{ SourceKeyspace: "ks", From 8e961dd41081f49e5672fd19c944778c4c68668e Mon Sep 17 00:00:00 2001 From: Austen Lacy Date: Mon, 6 Nov 2023 11:24:22 -0500 Subject: [PATCH 10/12] Bug fix: Use target tablet from health stats cache when checking replication status (#14436) (#128) Signed-off-by: Austen Lacy Co-authored-by: Austen Lacy --- go/test/endtoend/cluster/cluster_process.go | 5 ++++ go/test/endtoend/tabletgateway/vtgate_test.go | 29 +++++++++++++++++++ go/vt/vtgate/executor.go | 4 +-- 3 files changed, 36 insertions(+), 2 deletions(-) diff --git a/go/test/endtoend/cluster/cluster_process.go b/go/test/endtoend/cluster/cluster_process.go index 21a21202811..f50eb7038d3 100644 --- a/go/test/endtoend/cluster/cluster_process.go +++ b/go/test/endtoend/cluster/cluster_process.go @@ -896,6 +896,11 @@ func (cluster *LocalProcessCluster) VtctlclientGetTablet(tablet *Vttablet) (*top return &ti, nil } +func (cluster *LocalProcessCluster) VtctlclientChangeTabletType(tablet *Vttablet, tabletType topodatapb.TabletType) error { + _, err := cluster.VtctlclientProcess.ExecuteCommandWithOutput("ChangeTabletType", "--", tablet.Alias, tabletType.String()) + return err +} + // Teardown brings down the cluster by invoking teardown for individual processes func (cluster *LocalProcessCluster) Teardown() { PanicHandler(nil) diff --git a/go/test/endtoend/tabletgateway/vtgate_test.go b/go/test/endtoend/tabletgateway/vtgate_test.go index a3876b259f3..be227927981 100644 --- a/go/test/endtoend/tabletgateway/vtgate_test.go +++ b/go/test/endtoend/tabletgateway/vtgate_test.go @@ -29,6 +29,7 @@ import ( 
"time" "vitess.io/vitess/go/test/endtoend/utils" + "vitess.io/vitess/go/vt/proto/topodata" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -69,6 +70,34 @@ func TestVtgateReplicationStatusCheck(t *testing.T) { assert.Equal(t, expectNumRows, numRows, fmt.Sprintf("wrong number of results from show vitess_replication_status. Expected %d, got %d", expectNumRows, numRows)) } +func TestVtgateReplicationStatusCheckWithTabletTypeChange(t *testing.T) { + defer cluster.PanicHandler(t) + // Healthcheck interval on tablet is set to 1s, so sleep for 2s + time.Sleep(2 * time.Second) + verifyVtgateVariables(t, clusterInstance.VtgateProcess.VerifyURL) + ctx := context.Background() + conn, err := mysql.Connect(ctx, &vtParams) + require.NoError(t, err) + defer conn.Close() + + // Only returns rows for REPLICA and RDONLY tablets -- so should be 2 of them + qr := utils.Exec(t, conn, "show vitess_replication_status like '%'") + expectNumRows := 2 + numRows := len(qr.Rows) + assert.Equal(t, expectNumRows, numRows, fmt.Sprintf("wrong number of results from show vitess_replication_status. Expected %d, got %d", expectNumRows, numRows)) + + // change the RDONLY tablet to SPARE + rdOnlyTablet := clusterInstance.Keyspaces[0].Shards[0].Rdonly() + err = clusterInstance.VtctlclientChangeTabletType(rdOnlyTablet, topodata.TabletType_SPARE) + require.NoError(t, err) + + // Only returns rows for REPLICA and RDONLY tablets -- so should be 1 of them since we updated 1 to spare + qr = utils.Exec(t, conn, "show vitess_replication_status like '%'") + expectNumRows = 1 + numRows = len(qr.Rows) + assert.Equal(t, expectNumRows, numRows, fmt.Sprintf("wrong number of results from show vitess_replication_status. Expected %d, got %d", expectNumRows, numRows)) +} + func verifyVtgateVariables(t *testing.T, url string) { resp, err := http.Get(url) require.NoError(t, err) diff --git a/go/vt/vtgate/executor.go b/go/vt/vtgate/executor.go index 8558eed1ed5..f8aa35e5b28 100644 --- a/go/vt/vtgate/executor.go +++ b/go/vt/vtgate/executor.go @@ -848,14 +848,14 @@ func (e *Executor) showVitessReplicationStatus(ctx context.Context, filter *sqlp for _, s := range status { for _, ts := range s.TabletsStats { // We only want to show REPLICA and RDONLY tablets - if ts.Tablet.Type != topodatapb.TabletType_REPLICA && ts.Tablet.Type != topodatapb.TabletType_RDONLY { + if ts.Target.TabletType != topodatapb.TabletType_REPLICA && ts.Target.TabletType != topodatapb.TabletType_RDONLY { continue } // Allow people to filter by Keyspace and Shard using a LIKE clause if filter != nil { ksFilterRegex := sqlparser.LikeToRegexp(filter.Like) - keyspaceShardStr := fmt.Sprintf("%s/%s", ts.Tablet.Keyspace, ts.Tablet.Shard) + keyspaceShardStr := fmt.Sprintf("%s/%s", ts.Target.Keyspace, ts.Target.Shard) if !ksFilterRegex.MatchString(keyspaceShardStr) { continue } From 9576f5792a33b4bc17d0ec6259e1c9f37aa9240f Mon Sep 17 00:00:00 2001 From: Brendan Dougherty Date: Tue, 16 Jan 2024 22:46:58 +0000 Subject: [PATCH 11/12] Merge pull request #136 from Shopify/backport-14653-fix-missing-caller-id-error-with-consistent-lookup-vindexes Backport: Vindexes: Pass context in consistent lookup handleDup (#14653) --- go/vt/vtgate/vindexes/consistent_lookup.go | 3 +- .../vtgate/vindexes/consistent_lookup_test.go | 90 ++++++++++++++----- 2 files changed, 67 insertions(+), 26 deletions(-) diff --git a/go/vt/vtgate/vindexes/consistent_lookup.go b/go/vt/vtgate/vindexes/consistent_lookup.go index 3c2166c0aaf..1f0f1d54af5 100644 --- 
a/go/vt/vtgate/vindexes/consistent_lookup.go +++ b/go/vt/vtgate/vindexes/consistent_lookup.go @@ -360,8 +360,7 @@ func (lu *clCommon) handleDup(ctx context.Context, vcursor VCursor, values []sql return err } // Lock the target row using normal transaction priority. - // TODO: context needs to be passed on. - qr, err = vcursor.ExecuteKeyspaceID(context.Background(), lu.keyspace, existingksid, lu.lockOwnerQuery, bindVars, false /* rollbackOnError */, false /* autocommit */) + qr, err = vcursor.ExecuteKeyspaceID(ctx, lu.keyspace, existingksid, lu.lockOwnerQuery, bindVars, false /* rollbackOnError */, false /* autocommit */) if err != nil { return err } diff --git a/go/vt/vtgate/vindexes/consistent_lookup_test.go b/go/vt/vtgate/vindexes/consistent_lookup_test.go index 297732325ea..bf209d35fd4 100644 --- a/go/vt/vtgate/vindexes/consistent_lookup_test.go +++ b/go/vt/vtgate/vindexes/consistent_lookup_test.go @@ -75,8 +75,9 @@ func TestConsistentLookupMap(t *testing.T) { lookup := createConsistentLookup(t, "consistent_lookup", false) vc := &loggingVCursor{} vc.AddResult(makeTestResultLookup([]int{2, 2}), nil) + ctx := newTestContext() - got, err := lookup.Map(context.Background(), vc, []sqltypes.Value{sqltypes.NewInt64(1), sqltypes.NewInt64(2)}) + got, err := lookup.Map(ctx, vc, []sqltypes.Value{sqltypes.NewInt64(1), sqltypes.NewInt64(2)}) require.NoError(t, err) want := []key.Destination{ key.DestinationKeyspaceIDs([][]byte{ @@ -94,10 +95,11 @@ func TestConsistentLookupMap(t *testing.T) { vc.verifyLog(t, []string{ "ExecutePre select fromc1, toc from t where fromc1 in ::fromc1 [{fromc1 }] false", }) + vc.verifyContext(t, ctx) // Test query fail. vc.AddResult(nil, fmt.Errorf("execute failed")) - _, err = lookup.Map(context.Background(), vc, []sqltypes.Value{sqltypes.NewInt64(1)}) + _, err = lookup.Map(ctx, vc, []sqltypes.Value{sqltypes.NewInt64(1)}) wantErr := "lookup.Map: execute failed" if err == nil || err.Error() != wantErr { t.Errorf("lookup(query fail) err: %v, want %s", err, wantErr) @@ -126,8 +128,9 @@ func TestConsistentLookupUniqueMap(t *testing.T) { lookup := createConsistentLookup(t, "consistent_lookup_unique", false) vc := &loggingVCursor{} vc.AddResult(makeTestResultLookup([]int{0, 1}), nil) + ctx := newTestContext() - got, err := lookup.Map(context.Background(), vc, []sqltypes.Value{sqltypes.NewInt64(1), sqltypes.NewInt64(2)}) + got, err := lookup.Map(ctx, vc, []sqltypes.Value{sqltypes.NewInt64(1), sqltypes.NewInt64(2)}) require.NoError(t, err) want := []key.Destination{ key.DestinationNone{}, @@ -139,10 +142,11 @@ func TestConsistentLookupUniqueMap(t *testing.T) { vc.verifyLog(t, []string{ "ExecutePre select fromc1, toc from t where fromc1 in ::fromc1 [{fromc1 }] false", }) + vc.verifyContext(t, ctx) // More than one result is invalid vc.AddResult(makeTestResultLookup([]int{2}), nil) - _, err = lookup.Map(context.Background(), vc, []sqltypes.Value{sqltypes.NewInt64(1)}) + _, err = lookup.Map(ctx, vc, []sqltypes.Value{sqltypes.NewInt64(1)}) wanterr := "Lookup.Map: unexpected multiple results from vindex t: INT64(1)" if err == nil || err.Error() != wanterr { t.Errorf("lookup(query fail) err: %v, want %s", err, wanterr) @@ -171,8 +175,9 @@ func TestConsistentLookupMapAbsent(t *testing.T) { lookup := createConsistentLookup(t, "consistent_lookup", false) vc := &loggingVCursor{} vc.AddResult(makeTestResultLookup([]int{0, 0}), nil) + ctx := newTestContext() - got, err := lookup.Map(context.Background(), vc, []sqltypes.Value{sqltypes.NewInt64(1), sqltypes.NewInt64(2)}) + got, err := 
lookup.Map(ctx, vc, []sqltypes.Value{sqltypes.NewInt64(1), sqltypes.NewInt64(2)}) require.NoError(t, err) want := []key.Destination{ key.DestinationNone{}, @@ -184,6 +189,7 @@ func TestConsistentLookupMapAbsent(t *testing.T) { vc.verifyLog(t, []string{ "ExecutePre select fromc1, toc from t where fromc1 in ::fromc1 [{fromc1 }] false", }) + vc.verifyContext(t, ctx) } func TestConsistentLookupVerify(t *testing.T) { @@ -191,17 +197,19 @@ func TestConsistentLookupVerify(t *testing.T) { vc := &loggingVCursor{} vc.AddResult(makeTestResult(1), nil) vc.AddResult(makeTestResult(1), nil) + ctx := newTestContext() - _, err := lookup.Verify(context.Background(), vc, []sqltypes.Value{sqltypes.NewInt64(1), sqltypes.NewInt64(2)}, [][]byte{[]byte("test1"), []byte("test2")}) + _, err := lookup.Verify(ctx, vc, []sqltypes.Value{sqltypes.NewInt64(1), sqltypes.NewInt64(2)}, [][]byte{[]byte("test1"), []byte("test2")}) require.NoError(t, err) vc.verifyLog(t, []string{ "ExecutePre select fromc1 from t where fromc1 = :fromc1 and toc = :toc [{fromc1 1} {toc test1}] false", "ExecutePre select fromc1 from t where fromc1 = :fromc1 and toc = :toc [{fromc1 2} {toc test2}] false", }) + vc.verifyContext(t, ctx) // Test query fail. vc.AddResult(nil, fmt.Errorf("execute failed")) - _, err = lookup.Verify(context.Background(), vc, []sqltypes.Value{sqltypes.NewInt64(1)}, [][]byte{[]byte("\x16k@\xb4J\xbaK\xd6")}) + _, err = lookup.Verify(ctx, vc, []sqltypes.Value{sqltypes.NewInt64(1)}, [][]byte{[]byte("\x16k@\xb4J\xbaK\xd6")}) want := "lookup.Verify: execute failed" if err == nil || err.Error() != want { t.Errorf("lookup(query fail) err: %v, want %s", err, want) @@ -209,7 +217,7 @@ func TestConsistentLookupVerify(t *testing.T) { // Test write_only. lookup = createConsistentLookup(t, "consistent_lookup", true) - got, err := lookup.Verify(context.Background(), nil, []sqltypes.Value{sqltypes.NewInt64(1), sqltypes.NewInt64(2)}, [][]byte{[]byte(""), []byte("")}) + got, err := lookup.Verify(ctx, nil, []sqltypes.Value{sqltypes.NewInt64(1), sqltypes.NewInt64(2)}, [][]byte{[]byte(""), []byte("")}) require.NoError(t, err) wantBools := []bool{true, true} if !reflect.DeepEqual(got, wantBools) { @@ -221,8 +229,9 @@ func TestConsistentLookupCreateSimple(t *testing.T) { lookup := createConsistentLookup(t, "consistent_lookup", false) vc := &loggingVCursor{} vc.AddResult(&sqltypes.Result{}, nil) + ctx := newTestContext() - if err := lookup.(Lookup).Create(context.Background(), vc, [][]sqltypes.Value{{ + if err := lookup.(Lookup).Create(ctx, vc, [][]sqltypes.Value{{ sqltypes.NewInt64(1), sqltypes.NewInt64(2), }, { @@ -234,6 +243,7 @@ func TestConsistentLookupCreateSimple(t *testing.T) { vc.verifyLog(t, []string{ "ExecutePre insert into t(fromc1, fromc2, toc) values(:fromc1_0, :fromc2_0, :toc_0), (:fromc1_1, :fromc2_1, :toc_1) [{fromc1_0 1} {fromc1_1 3} {fromc2_0 2} {fromc2_1 4} {toc_0 test1} {toc_1 test2}] true", }) + vc.verifyContext(t, ctx) } func TestConsistentLookupCreateThenRecreate(t *testing.T) { @@ -242,8 +252,9 @@ func TestConsistentLookupCreateThenRecreate(t *testing.T) { vc.AddResult(nil, mysql.NewSQLError(mysql.ERDupEntry, mysql.SSConstraintViolation, "Duplicate entry")) vc.AddResult(&sqltypes.Result{}, nil) vc.AddResult(&sqltypes.Result{}, nil) + ctx := newTestContext() - if err := lookup.(Lookup).Create(context.Background(), vc, [][]sqltypes.Value{{ + if err := lookup.(Lookup).Create(ctx, vc, [][]sqltypes.Value{{ sqltypes.NewInt64(1), sqltypes.NewInt64(2), }}, [][]byte{[]byte("test1")}, false); err != nil { @@ -254,6 +265,7 @@ func 
TestConsistentLookupCreateThenRecreate(t *testing.T) { "ExecutePre select toc from t where fromc1 = :fromc1 and fromc2 = :fromc2 for update [{fromc1 1} {fromc2 2} {toc test1}] false", "ExecutePre insert into t(fromc1, fromc2, toc) values(:fromc1, :fromc2, :toc) [{fromc1 1} {fromc2 2} {toc test1}] true", }) + vc.verifyContext(t, ctx) } func TestConsistentLookupCreateThenUpdate(t *testing.T) { @@ -263,8 +275,9 @@ func TestConsistentLookupCreateThenUpdate(t *testing.T) { vc.AddResult(makeTestResult(1), nil) vc.AddResult(&sqltypes.Result{}, nil) vc.AddResult(&sqltypes.Result{}, nil) + ctx := newTestContext() - if err := lookup.(Lookup).Create(context.Background(), vc, [][]sqltypes.Value{{ + if err := lookup.(Lookup).Create(ctx, vc, [][]sqltypes.Value{{ sqltypes.NewInt64(1), sqltypes.NewInt64(2), }}, [][]byte{[]byte("test1")}, false); err != nil { @@ -276,6 +289,7 @@ func TestConsistentLookupCreateThenUpdate(t *testing.T) { "ExecuteKeyspaceID select fc1 from `dot.t1` where fc1 = :fromc1 and fc2 = :fromc2 lock in share mode [{fromc1 1} {fromc2 2} {toc test1}] false", "ExecutePre update t set toc=:toc where fromc1 = :fromc1 and fromc2 = :fromc2 [{fromc1 1} {fromc2 2} {toc test1}] true", }) + vc.verifyContext(t, ctx) } func TestConsistentLookupCreateThenSkipUpdate(t *testing.T) { @@ -285,8 +299,9 @@ func TestConsistentLookupCreateThenSkipUpdate(t *testing.T) { vc.AddResult(makeTestResult(1), nil) vc.AddResult(&sqltypes.Result{}, nil) vc.AddResult(&sqltypes.Result{}, nil) + ctx := newTestContext() - if err := lookup.(Lookup).Create(context.Background(), vc, [][]sqltypes.Value{{ + if err := lookup.(Lookup).Create(ctx, vc, [][]sqltypes.Value{{ sqltypes.NewInt64(1), sqltypes.NewInt64(2), }}, [][]byte{[]byte("1")}, false); err != nil { @@ -297,6 +312,7 @@ func TestConsistentLookupCreateThenSkipUpdate(t *testing.T) { "ExecutePre select toc from t where fromc1 = :fromc1 and fromc2 = :fromc2 for update [{fromc1 1} {fromc2 2} {toc 1}] false", "ExecuteKeyspaceID select fc1 from `dot.t1` where fc1 = :fromc1 and fc2 = :fromc2 lock in share mode [{fromc1 1} {fromc2 2} {toc 1}] false", }) + vc.verifyContext(t, ctx) } func TestConsistentLookupCreateThenDupkey(t *testing.T) { @@ -306,8 +322,9 @@ func TestConsistentLookupCreateThenDupkey(t *testing.T) { vc.AddResult(makeTestResult(1), nil) vc.AddResult(makeTestResult(1), nil) vc.AddResult(&sqltypes.Result{}, nil) + ctx := newTestContext() - err := lookup.(Lookup).Create(context.Background(), vc, [][]sqltypes.Value{{ + err := lookup.(Lookup).Create(ctx, vc, [][]sqltypes.Value{{ sqltypes.NewInt64(1), sqltypes.NewInt64(2), }}, [][]byte{[]byte("test1")}, false) @@ -318,14 +335,16 @@ func TestConsistentLookupCreateThenDupkey(t *testing.T) { "ExecutePre select toc from t where fromc1 = :fromc1 and fromc2 = :fromc2 for update [{fromc1 1} {fromc2 2} {toc test1}] false", "ExecuteKeyspaceID select fc1 from `dot.t1` where fc1 = :fromc1 and fc2 = :fromc2 lock in share mode [{fromc1 1} {fromc2 2} {toc test1}] false", }) + vc.verifyContext(t, ctx) } func TestConsistentLookupCreateNonDupError(t *testing.T) { lookup := createConsistentLookup(t, "consistent_lookup", false) vc := &loggingVCursor{} vc.AddResult(nil, errors.New("general error")) + ctx := newTestContext() - err := lookup.(Lookup).Create(context.Background(), vc, [][]sqltypes.Value{{ + err := lookup.(Lookup).Create(ctx, vc, [][]sqltypes.Value{{ sqltypes.NewInt64(1), sqltypes.NewInt64(2), }}, [][]byte{[]byte("test1")}, false) @@ -336,6 +355,7 @@ func TestConsistentLookupCreateNonDupError(t *testing.T) { vc.verifyLog(t, 
[]string{ "ExecutePre insert into t(fromc1, fromc2, toc) values(:fromc1_0, :fromc2_0, :toc_0) [{fromc1_0 1} {fromc2_0 2} {toc_0 test1}] true", }) + vc.verifyContext(t, ctx) } func TestConsistentLookupCreateThenBadRows(t *testing.T) { @@ -343,8 +363,9 @@ func TestConsistentLookupCreateThenBadRows(t *testing.T) { vc := &loggingVCursor{} vc.AddResult(nil, vterrors.New(vtrpcpb.Code_ALREADY_EXISTS, "(errno 1062) (sqlstate 23000) Duplicate entry")) vc.AddResult(makeTestResult(2), nil) + ctx := newTestContext() - err := lookup.(Lookup).Create(context.Background(), vc, [][]sqltypes.Value{{ + err := lookup.(Lookup).Create(ctx, vc, [][]sqltypes.Value{{ sqltypes.NewInt64(1), sqltypes.NewInt64(2), }}, [][]byte{[]byte("test1")}, false) @@ -356,14 +377,16 @@ func TestConsistentLookupCreateThenBadRows(t *testing.T) { "ExecutePre insert into t(fromc1, fromc2, toc) values(:fromc1_0, :fromc2_0, :toc_0) [{fromc1_0 1} {fromc2_0 2} {toc_0 test1}] true", "ExecutePre select toc from t where fromc1 = :fromc1 and fromc2 = :fromc2 for update [{fromc1 1} {fromc2 2} {toc test1}] false", }) + vc.verifyContext(t, ctx) } func TestConsistentLookupDelete(t *testing.T) { lookup := createConsistentLookup(t, "consistent_lookup", false) vc := &loggingVCursor{} vc.AddResult(&sqltypes.Result{}, nil) + ctx := newTestContext() - if err := lookup.(Lookup).Delete(context.Background(), vc, [][]sqltypes.Value{{ + if err := lookup.(Lookup).Delete(ctx, vc, [][]sqltypes.Value{{ sqltypes.NewInt64(1), sqltypes.NewInt64(2), }}, []byte("test")); err != nil { @@ -372,6 +395,7 @@ func TestConsistentLookupDelete(t *testing.T) { vc.verifyLog(t, []string{ "ExecutePost delete from t where fromc1 = :fromc1 and fromc2 = :fromc2 and toc = :toc [{fromc1 1} {fromc2 2} {toc test}] true", }) + vc.verifyContext(t, ctx) } func TestConsistentLookupUpdate(t *testing.T) { @@ -379,8 +403,9 @@ func TestConsistentLookupUpdate(t *testing.T) { vc := &loggingVCursor{} vc.AddResult(&sqltypes.Result{}, nil) vc.AddResult(&sqltypes.Result{}, nil) + ctx := newTestContext() - if err := lookup.(Lookup).Update(context.Background(), vc, []sqltypes.Value{ + if err := lookup.(Lookup).Update(ctx, vc, []sqltypes.Value{ sqltypes.NewInt64(1), sqltypes.NewInt64(2), }, []byte("test"), []sqltypes.Value{ @@ -393,6 +418,7 @@ func TestConsistentLookupUpdate(t *testing.T) { "ExecutePost delete from t where fromc1 = :fromc1 and fromc2 = :fromc2 and toc = :toc [{fromc1 1} {fromc2 2} {toc test}] true", "ExecutePre insert into t(fromc1, fromc2, toc) values(:fromc1_0, :fromc2_0, :toc_0) [{fromc1_0 3} {fromc2_0 4} {toc_0 test}] true", }) + vc.verifyContext(t, ctx) } func TestConsistentLookupNoUpdate(t *testing.T) { @@ -469,13 +495,19 @@ func createConsistentLookup(t *testing.T, name string, writeOnly bool) SingleCol return l.(SingleColumn) } +func newTestContext() context.Context { + type testContextKey string // keep static checks from complaining about built-in types as context keys + return context.WithValue(context.Background(), (testContextKey)("test"), "foo") +} + var _ VCursor = (*loggingVCursor)(nil) type loggingVCursor struct { - results []*sqltypes.Result - errors []error - index int - log []string + results []*sqltypes.Result + errors []error + index int + log []string + contexts []context.Context } func (vc *loggingVCursor) LookupRowLockShardSession() vtgatepb.CommitOrder { @@ -508,14 +540,14 @@ func (vc *loggingVCursor) Execute(ctx context.Context, method string, query stri case vtgatepb.CommitOrder_AUTOCOMMIT: name = "ExecuteAutocommit" } - return vc.execute(name, query, 
bindvars, rollbackOnError) + return vc.execute(ctx, name, query, bindvars, rollbackOnError) } func (vc *loggingVCursor) ExecuteKeyspaceID(ctx context.Context, keyspace string, ksid []byte, query string, bindVars map[string]*querypb.BindVariable, rollbackOnError, autocommit bool) (*sqltypes.Result, error) { - return vc.execute("ExecuteKeyspaceID", query, bindVars, rollbackOnError) + return vc.execute(ctx, "ExecuteKeyspaceID", query, bindVars, rollbackOnError) } -func (vc *loggingVCursor) execute(method string, query string, bindvars map[string]*querypb.BindVariable, rollbackOnError bool) (*sqltypes.Result, error) { +func (vc *loggingVCursor) execute(ctx context.Context, method string, query string, bindvars map[string]*querypb.BindVariable, rollbackOnError bool) (*sqltypes.Result, error) { if vc.index >= len(vc.results) { return nil, fmt.Errorf("ran out of results to return: %s", query) } @@ -525,6 +557,7 @@ func (vc *loggingVCursor) execute(method string, query string, bindvars map[stri } sort.Slice(bvl, func(i, j int) bool { return bvl[i].Name < bvl[j].Name }) vc.log = append(vc.log, fmt.Sprintf("%s %s %v %v", method, query, bvl, rollbackOnError)) + vc.contexts = append(vc.contexts, ctx) idx := vc.index vc.index++ if vc.errors[idx] != nil { @@ -548,6 +581,15 @@ func (vc *loggingVCursor) verifyLog(t *testing.T, want []string) { } } +func (vc *loggingVCursor) verifyContext(t *testing.T, want context.Context) { + t.Helper() + for i, got := range vc.contexts { + if got != want { + t.Errorf("context(%d):\ngot: %v\nwant: %v", i, got, want) + } + } +} + // create lookup result with one to one mapping func makeTestResult(numRows int) *sqltypes.Result { result := &sqltypes.Result{ From 8cc24fa1b388d5ce5424d9da4e6c52f46b2a7120 Mon Sep 17 00:00:00 2001 From: shanth96 Date: Thu, 14 Mar 2024 14:37:04 -0400 Subject: [PATCH 12/12] Merge pull request #148 from Shopify/fix-tests Run all relevant tests on all PRs --- .../check_make_vtadmin_authz_testgen.yml | 2 +- .../check_make_vtadmin_web_proto.yml | 2 +- .github/workflows/cluster_endtoend_12.yml | 2 +- .github/workflows/cluster_endtoend_13.yml | 2 +- .github/workflows/cluster_endtoend_15.yml | 2 +- .github/workflows/cluster_endtoend_18.yml | 2 +- .github/workflows/cluster_endtoend_21.yml | 2 +- .github/workflows/cluster_endtoend_22.yml | 2 +- .../cluster_endtoend_backup_pitr.yml | 2 +- .../cluster_endtoend_backup_pitr_mysql57.yml | 2 +- ...ter_endtoend_ers_prs_newfeatures_heavy.yml | 2 +- .../workflows/cluster_endtoend_mysql80.yml | 2 +- .../cluster_endtoend_mysql_server_vault.yml | 2 +- .../cluster_endtoend_onlineddl_ghost.yml | 2 +- ...uster_endtoend_onlineddl_ghost_mysql57.yml | 2 +- .../cluster_endtoend_onlineddl_revert.yml | 2 +- ...ster_endtoend_onlineddl_revert_mysql57.yml | 2 +- .../cluster_endtoend_onlineddl_scheduler.yml | 2 +- ...r_endtoend_onlineddl_scheduler_mysql57.yml | 2 +- .../cluster_endtoend_onlineddl_vrepl.yml | 2 +- ...uster_endtoend_onlineddl_vrepl_mysql57.yml | 2 +- ...luster_endtoend_onlineddl_vrepl_stress.yml | 2 +- ...ndtoend_onlineddl_vrepl_stress_mysql57.yml | 2 +- ..._endtoend_onlineddl_vrepl_stress_suite.yml | 2 +- ...d_onlineddl_vrepl_stress_suite_mysql57.yml | 2 +- ...cluster_endtoend_onlineddl_vrepl_suite.yml | 2 +- ...endtoend_onlineddl_vrepl_suite_mysql57.yml | 2 +- .../cluster_endtoend_schemadiff_vrepl.yml | 2 +- ...ster_endtoend_schemadiff_vrepl_mysql57.yml | 2 +- .../cluster_endtoend_tabletmanager_consul.yml | 2 +- ...cluster_endtoend_tabletmanager_tablegc.yml | 2 +- ...endtoend_tabletmanager_tablegc_mysql57.yml | 2 
+- ...uster_endtoend_tabletmanager_throttler.yml | 2 +- ..._tabletmanager_throttler_custom_config.yml | 2 +- ..._endtoend_tabletmanager_throttler_topo.yml | 2 +- ...cluster_endtoend_topo_connection_cache.yml | 2 +- ...dtoend_vreplication_across_db_versions.yml | 2 +- .../cluster_endtoend_vreplication_basic.yml | 2 +- ...luster_endtoend_vreplication_cellalias.yml | 2 +- ...vreplication_migrate_vdiff2_convert_tz.yml | 2 +- ...luster_endtoend_vreplication_multicell.yml | 2 +- .../cluster_endtoend_vreplication_v2.yml | 2 +- .../cluster_endtoend_vstream_failover.yml | 2 +- ...r_endtoend_vstream_stoponreshard_false.yml | 2 +- ...er_endtoend_vstream_stoponreshard_true.yml | 2 +- ...dtoend_vstream_with_keyspaces_to_watch.yml | 2 +- .../workflows/cluster_endtoend_vtbackup.yml | 2 +- ..._vtctlbackup_sharded_clustertest_heavy.yml | 2 +- .../cluster_endtoend_vtgate_concurrentdml.yml | 2 +- .../cluster_endtoend_vtgate_gen4.yml | 2 +- .../cluster_endtoend_vtgate_general_heavy.yml | 2 +- .../cluster_endtoend_vtgate_godriver.yml | 2 +- ...uster_endtoend_vtgate_partial_keyspace.yml | 2 +- .../cluster_endtoend_vtgate_queries.yml | 2 +- ...cluster_endtoend_vtgate_readafterwrite.yml | 2 +- .../cluster_endtoend_vtgate_reservedconn.yml | 2 +- .../cluster_endtoend_vtgate_schema.yml | 2 +- ...cluster_endtoend_vtgate_schema_tracker.yml | 2 +- ...dtoend_vtgate_tablet_healthcheck_cache.yml | 2 +- .../cluster_endtoend_vtgate_topo.yml | 2 +- .../cluster_endtoend_vtgate_topo_consul.yml | 2 +- .../cluster_endtoend_vtgate_topo_etcd.yml | 2 +- .../cluster_endtoend_vtgate_transaction.yml | 2 +- .../cluster_endtoend_vtgate_unsharded.yml | 2 +- .../cluster_endtoend_vtgate_vindex_heavy.yml | 2 +- .../cluster_endtoend_vtgate_vschema.yml | 2 +- .github/workflows/cluster_endtoend_vtorc.yml | 2 +- .../cluster_endtoend_vtorc_mysql57.yml | 2 +- .../cluster_endtoend_vttablet_prscomplex.yml | 2 +- .../workflows/cluster_endtoend_xb_backup.yml | 2 +- .../cluster_endtoend_xb_backup_mysql57.yml | 2 +- .../cluster_endtoend_xb_recovery.yml | 2 +- .../cluster_endtoend_xb_recovery_mysql57.yml | 2 +- .github/workflows/docker_test_cluster_10.yml | 2 +- .github/workflows/docker_test_cluster_25.yml | 2 +- .github/workflows/e2e_race.yml | 4 +- .github/workflows/endtoend.yml | 2 +- .github/workflows/local_example.yml | 4 +- .github/workflows/region_example.yml | 2 +- .github/workflows/static_checks_etc.yml | 4 +- .github/workflows/unit_race.yml | 2 +- .github/workflows/unit_test_mysql57.yml | 4 +- .github/workflows/unit_test_mysql80.yml | 4 +- .../upgrade_downgrade_test_backups_e2e.yml | 4 +- ...owngrade_test_backups_e2e_next_release.yml | 4 +- .../upgrade_downgrade_test_backups_manual.yml | 4 +- ...grade_test_backups_manual_next_release.yml | 4 +- ...e_downgrade_test_query_serving_queries.yml | 4 +- ...est_query_serving_queries_next_release.yml | 4 +- ...de_downgrade_test_query_serving_schema.yml | 4 +- ...test_query_serving_schema_next_release.yml | 4 +- ...rade_downgrade_test_reparent_new_vtctl.yml | 4 +- ...e_downgrade_test_reparent_new_vttablet.yml | 4 +- ...rade_downgrade_test_reparent_old_vtctl.yml | 4 +- ...e_downgrade_test_reparent_old_vttablet.yml | 4 +- .github/workflows/vtadmin_web_build.yml | 6 +-- .github/workflows/vtadmin_web_lint.yml | 14 +++---- .github/workflows/vtadmin_web_unit_tests.yml | 8 ++-- test/templates/cluster_endtoend_test.tpl | 2 +- .../cluster_endtoend_test_docker.tpl | 2 +- .../cluster_endtoend_test_mysql57.tpl | 2 +- .../cluster_endtoend_test_self_hosted.tpl | 2 +- test/templates/unit_test.tpl | 4 +- 
test/templates/unit_test_self_hosted.tpl | 2 +- tools/get_next_release_shopify.sh | 38 +++++++++++++++++++ tools/get_previous_release_shopify.sh | 33 ++++++++++++++++ 106 files changed, 204 insertions(+), 133 deletions(-) create mode 100755 tools/get_next_release_shopify.sh create mode 100755 tools/get_previous_release_shopify.sh diff --git a/.github/workflows/check_make_vtadmin_authz_testgen.yml b/.github/workflows/check_make_vtadmin_authz_testgen.yml index 5c9e9c4ac45..11c325ba1cb 100644 --- a/.github/workflows/check_make_vtadmin_authz_testgen.yml +++ b/.github/workflows/check_make_vtadmin_authz_testgen.yml @@ -17,7 +17,7 @@ jobs: id: skip-workflow run: | skip='false' - if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then + if [[ "${{github.event.pull_request}}" == "" ]]; then skip='true' fi echo Skip ${skip} diff --git a/.github/workflows/check_make_vtadmin_web_proto.yml b/.github/workflows/check_make_vtadmin_web_proto.yml index 98b9343cfb4..eed79c18065 100644 --- a/.github/workflows/check_make_vtadmin_web_proto.yml +++ b/.github/workflows/check_make_vtadmin_web_proto.yml @@ -17,7 +17,7 @@ jobs: id: skip-workflow run: | skip='false' - if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then + if [[ "${{github.event.pull_request}}" == "" ]]; then skip='true' fi echo Skip ${skip} diff --git a/.github/workflows/cluster_endtoend_12.yml b/.github/workflows/cluster_endtoend_12.yml index b862bbb7f95..f42164543c1 100644 --- a/.github/workflows/cluster_endtoend_12.yml +++ b/.github/workflows/cluster_endtoend_12.yml @@ -28,7 +28,7 @@ jobs: id: skip-workflow run: | skip='false' - if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then + if [[ "${{github.event.pull_request}}" == "" ]]; then skip='true' fi echo Skip ${skip} diff --git a/.github/workflows/cluster_endtoend_13.yml b/.github/workflows/cluster_endtoend_13.yml index 299149277bd..439f71a5e34 100644 --- a/.github/workflows/cluster_endtoend_13.yml +++ b/.github/workflows/cluster_endtoend_13.yml @@ -28,7 +28,7 @@ jobs: id: skip-workflow run: | skip='false' - if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then + if [[ "${{github.event.pull_request}}" == "" ]]; then skip='true' fi echo Skip ${skip} diff --git a/.github/workflows/cluster_endtoend_15.yml b/.github/workflows/cluster_endtoend_15.yml index 1b15c8402c0..6b6f3883695 100644 --- a/.github/workflows/cluster_endtoend_15.yml +++ b/.github/workflows/cluster_endtoend_15.yml @@ -28,7 +28,7 @@ jobs: id: skip-workflow run: | skip='false' - if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! 
"${{github.ref}}" =~ "refs/tags/.*" ]]; then + if [[ "${{github.event.pull_request}}" == "" ]]; then skip='true' fi echo Skip ${skip} diff --git a/.github/workflows/cluster_endtoend_18.yml b/.github/workflows/cluster_endtoend_18.yml index d9b758e57a4..bf60d38029f 100644 --- a/.github/workflows/cluster_endtoend_18.yml +++ b/.github/workflows/cluster_endtoend_18.yml @@ -28,7 +28,7 @@ jobs: id: skip-workflow run: | skip='false' - if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then + if [[ "${{github.event.pull_request}}" == "" ]]; then skip='true' fi echo Skip ${skip} diff --git a/.github/workflows/cluster_endtoend_21.yml b/.github/workflows/cluster_endtoend_21.yml index 006d3c16975..bd29e54de27 100644 --- a/.github/workflows/cluster_endtoend_21.yml +++ b/.github/workflows/cluster_endtoend_21.yml @@ -28,7 +28,7 @@ jobs: id: skip-workflow run: | skip='false' - if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then + if [[ "${{github.event.pull_request}}" == "" ]]; then skip='true' fi echo Skip ${skip} diff --git a/.github/workflows/cluster_endtoend_22.yml b/.github/workflows/cluster_endtoend_22.yml index f81ed17aabd..8ae0562e51e 100644 --- a/.github/workflows/cluster_endtoend_22.yml +++ b/.github/workflows/cluster_endtoend_22.yml @@ -28,7 +28,7 @@ jobs: id: skip-workflow run: | skip='false' - if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then + if [[ "${{github.event.pull_request}}" == "" ]]; then skip='true' fi echo Skip ${skip} diff --git a/.github/workflows/cluster_endtoend_backup_pitr.yml b/.github/workflows/cluster_endtoend_backup_pitr.yml index c43b8155cbe..00d2430c0cd 100644 --- a/.github/workflows/cluster_endtoend_backup_pitr.yml +++ b/.github/workflows/cluster_endtoend_backup_pitr.yml @@ -28,7 +28,7 @@ jobs: id: skip-workflow run: | skip='false' - if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then + if [[ "${{github.event.pull_request}}" == "" ]]; then skip='true' fi echo Skip ${skip} diff --git a/.github/workflows/cluster_endtoend_backup_pitr_mysql57.yml b/.github/workflows/cluster_endtoend_backup_pitr_mysql57.yml index edd2ef21e89..0b7f1b54d19 100644 --- a/.github/workflows/cluster_endtoend_backup_pitr_mysql57.yml +++ b/.github/workflows/cluster_endtoend_backup_pitr_mysql57.yml @@ -28,7 +28,7 @@ jobs: id: skip-workflow run: | skip='false' - if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! 
"${{github.ref}}" =~ "refs/tags/.*" ]]; then + if [[ "${{github.event.pull_request}}" == "" ]]; then skip='true' fi echo Skip ${skip} diff --git a/.github/workflows/cluster_endtoend_ers_prs_newfeatures_heavy.yml b/.github/workflows/cluster_endtoend_ers_prs_newfeatures_heavy.yml index 7f321326c29..d1939f65038 100644 --- a/.github/workflows/cluster_endtoend_ers_prs_newfeatures_heavy.yml +++ b/.github/workflows/cluster_endtoend_ers_prs_newfeatures_heavy.yml @@ -28,7 +28,7 @@ jobs: id: skip-workflow run: | skip='false' - if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then + if [[ "${{github.event.pull_request}}" == "" ]]; then skip='true' fi echo Skip ${skip} diff --git a/.github/workflows/cluster_endtoend_mysql80.yml b/.github/workflows/cluster_endtoend_mysql80.yml index fa287e8e903..a97b0cb8ddd 100644 --- a/.github/workflows/cluster_endtoend_mysql80.yml +++ b/.github/workflows/cluster_endtoend_mysql80.yml @@ -28,7 +28,7 @@ jobs: id: skip-workflow run: | skip='false' - if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then + if [[ "${{github.event.pull_request}}" == "" ]]; then skip='true' fi echo Skip ${skip} diff --git a/.github/workflows/cluster_endtoend_mysql_server_vault.yml b/.github/workflows/cluster_endtoend_mysql_server_vault.yml index 2aeba642982..bbf3b0a63aa 100644 --- a/.github/workflows/cluster_endtoend_mysql_server_vault.yml +++ b/.github/workflows/cluster_endtoend_mysql_server_vault.yml @@ -28,7 +28,7 @@ jobs: id: skip-workflow run: | skip='false' - if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then + if [[ "${{github.event.pull_request}}" == "" ]]; then skip='true' fi echo Skip ${skip} diff --git a/.github/workflows/cluster_endtoend_onlineddl_ghost.yml b/.github/workflows/cluster_endtoend_onlineddl_ghost.yml index 6015ae6b3a2..07f14f4ff42 100644 --- a/.github/workflows/cluster_endtoend_onlineddl_ghost.yml +++ b/.github/workflows/cluster_endtoend_onlineddl_ghost.yml @@ -28,7 +28,7 @@ jobs: id: skip-workflow run: | skip='false' - if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then + if [[ "${{github.event.pull_request}}" == "" ]]; then skip='true' fi echo Skip ${skip} diff --git a/.github/workflows/cluster_endtoend_onlineddl_ghost_mysql57.yml b/.github/workflows/cluster_endtoend_onlineddl_ghost_mysql57.yml index 2d5eec0be27..97cf802a7ca 100644 --- a/.github/workflows/cluster_endtoend_onlineddl_ghost_mysql57.yml +++ b/.github/workflows/cluster_endtoend_onlineddl_ghost_mysql57.yml @@ -28,7 +28,7 @@ jobs: id: skip-workflow run: | skip='false' - if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! 
"${{github.ref}}" =~ "refs/tags/.*" ]]; then + if [[ "${{github.event.pull_request}}" == "" ]]; then skip='true' fi echo Skip ${skip} diff --git a/.github/workflows/cluster_endtoend_onlineddl_revert.yml b/.github/workflows/cluster_endtoend_onlineddl_revert.yml index 75ed6b3593f..a7c001368c3 100644 --- a/.github/workflows/cluster_endtoend_onlineddl_revert.yml +++ b/.github/workflows/cluster_endtoend_onlineddl_revert.yml @@ -28,7 +28,7 @@ jobs: id: skip-workflow run: | skip='false' - if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then + if [[ "${{github.event.pull_request}}" == "" ]]; then skip='true' fi echo Skip ${skip} diff --git a/.github/workflows/cluster_endtoend_onlineddl_revert_mysql57.yml b/.github/workflows/cluster_endtoend_onlineddl_revert_mysql57.yml index 5b1a33558a9..0fc9ce3822d 100644 --- a/.github/workflows/cluster_endtoend_onlineddl_revert_mysql57.yml +++ b/.github/workflows/cluster_endtoend_onlineddl_revert_mysql57.yml @@ -28,7 +28,7 @@ jobs: id: skip-workflow run: | skip='false' - if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then + if [[ "${{github.event.pull_request}}" == "" ]]; then skip='true' fi echo Skip ${skip} diff --git a/.github/workflows/cluster_endtoend_onlineddl_scheduler.yml b/.github/workflows/cluster_endtoend_onlineddl_scheduler.yml index 51ce089a38e..4e2ddb2d81f 100644 --- a/.github/workflows/cluster_endtoend_onlineddl_scheduler.yml +++ b/.github/workflows/cluster_endtoend_onlineddl_scheduler.yml @@ -28,7 +28,7 @@ jobs: id: skip-workflow run: | skip='false' - if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then + if [[ "${{github.event.pull_request}}" == "" ]]; then skip='true' fi echo Skip ${skip} diff --git a/.github/workflows/cluster_endtoend_onlineddl_scheduler_mysql57.yml b/.github/workflows/cluster_endtoend_onlineddl_scheduler_mysql57.yml index 07c18acb307..40eab103743 100644 --- a/.github/workflows/cluster_endtoend_onlineddl_scheduler_mysql57.yml +++ b/.github/workflows/cluster_endtoend_onlineddl_scheduler_mysql57.yml @@ -28,7 +28,7 @@ jobs: id: skip-workflow run: | skip='false' - if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then + if [[ "${{github.event.pull_request}}" == "" ]]; then skip='true' fi echo Skip ${skip} diff --git a/.github/workflows/cluster_endtoend_onlineddl_vrepl.yml b/.github/workflows/cluster_endtoend_onlineddl_vrepl.yml index 20b0ef3fc9e..1de54753975 100644 --- a/.github/workflows/cluster_endtoend_onlineddl_vrepl.yml +++ b/.github/workflows/cluster_endtoend_onlineddl_vrepl.yml @@ -28,7 +28,7 @@ jobs: id: skip-workflow run: | skip='false' - if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! 
"${{github.ref}}" =~ "refs/tags/.*" ]]; then + if [[ "${{github.event.pull_request}}" == "" ]]; then skip='true' fi echo Skip ${skip} diff --git a/.github/workflows/cluster_endtoend_onlineddl_vrepl_mysql57.yml b/.github/workflows/cluster_endtoend_onlineddl_vrepl_mysql57.yml index 545aca2d7ee..651624bec95 100644 --- a/.github/workflows/cluster_endtoend_onlineddl_vrepl_mysql57.yml +++ b/.github/workflows/cluster_endtoend_onlineddl_vrepl_mysql57.yml @@ -28,7 +28,7 @@ jobs: id: skip-workflow run: | skip='false' - if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then + if [[ "${{github.event.pull_request}}" == "" ]]; then skip='true' fi echo Skip ${skip} diff --git a/.github/workflows/cluster_endtoend_onlineddl_vrepl_stress.yml b/.github/workflows/cluster_endtoend_onlineddl_vrepl_stress.yml index 118ad834d4f..d1f7925efd0 100644 --- a/.github/workflows/cluster_endtoend_onlineddl_vrepl_stress.yml +++ b/.github/workflows/cluster_endtoend_onlineddl_vrepl_stress.yml @@ -28,7 +28,7 @@ jobs: id: skip-workflow run: | skip='false' - if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then + if [[ "${{github.event.pull_request}}" == "" ]]; then skip='true' fi echo Skip ${skip} diff --git a/.github/workflows/cluster_endtoend_onlineddl_vrepl_stress_mysql57.yml b/.github/workflows/cluster_endtoend_onlineddl_vrepl_stress_mysql57.yml index a590374e309..61c355f741d 100644 --- a/.github/workflows/cluster_endtoend_onlineddl_vrepl_stress_mysql57.yml +++ b/.github/workflows/cluster_endtoend_onlineddl_vrepl_stress_mysql57.yml @@ -28,7 +28,7 @@ jobs: id: skip-workflow run: | skip='false' - if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then + if [[ "${{github.event.pull_request}}" == "" ]]; then skip='true' fi echo Skip ${skip} diff --git a/.github/workflows/cluster_endtoend_onlineddl_vrepl_stress_suite.yml b/.github/workflows/cluster_endtoend_onlineddl_vrepl_stress_suite.yml index 011d0b638c6..182199acb57 100644 --- a/.github/workflows/cluster_endtoend_onlineddl_vrepl_stress_suite.yml +++ b/.github/workflows/cluster_endtoend_onlineddl_vrepl_stress_suite.yml @@ -28,7 +28,7 @@ jobs: id: skip-workflow run: | skip='false' - if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then + if [[ "${{github.event.pull_request}}" == "" ]]; then skip='true' fi echo Skip ${skip} diff --git a/.github/workflows/cluster_endtoend_onlineddl_vrepl_stress_suite_mysql57.yml b/.github/workflows/cluster_endtoend_onlineddl_vrepl_stress_suite_mysql57.yml index a6f93183b44..cbb80171614 100644 --- a/.github/workflows/cluster_endtoend_onlineddl_vrepl_stress_suite_mysql57.yml +++ b/.github/workflows/cluster_endtoend_onlineddl_vrepl_stress_suite_mysql57.yml @@ -28,7 +28,7 @@ jobs: id: skip-workflow run: | skip='false' - if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! 
"${{github.ref}}" =~ "refs/tags/.*" ]]; then + if [[ "${{github.event.pull_request}}" == "" ]]; then skip='true' fi echo Skip ${skip} diff --git a/.github/workflows/cluster_endtoend_onlineddl_vrepl_suite.yml b/.github/workflows/cluster_endtoend_onlineddl_vrepl_suite.yml index cfa389fffc1..2038fcd5ea0 100644 --- a/.github/workflows/cluster_endtoend_onlineddl_vrepl_suite.yml +++ b/.github/workflows/cluster_endtoend_onlineddl_vrepl_suite.yml @@ -28,7 +28,7 @@ jobs: id: skip-workflow run: | skip='false' - if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then + if [[ "${{github.event.pull_request}}" == "" ]]; then skip='true' fi echo Skip ${skip} diff --git a/.github/workflows/cluster_endtoend_onlineddl_vrepl_suite_mysql57.yml b/.github/workflows/cluster_endtoend_onlineddl_vrepl_suite_mysql57.yml index 45bfe9b24e8..8d7aa8dcda0 100644 --- a/.github/workflows/cluster_endtoend_onlineddl_vrepl_suite_mysql57.yml +++ b/.github/workflows/cluster_endtoend_onlineddl_vrepl_suite_mysql57.yml @@ -28,7 +28,7 @@ jobs: id: skip-workflow run: | skip='false' - if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then + if [[ "${{github.event.pull_request}}" == "" ]]; then skip='true' fi echo Skip ${skip} diff --git a/.github/workflows/cluster_endtoend_schemadiff_vrepl.yml b/.github/workflows/cluster_endtoend_schemadiff_vrepl.yml index 193c29661de..19d32a10069 100644 --- a/.github/workflows/cluster_endtoend_schemadiff_vrepl.yml +++ b/.github/workflows/cluster_endtoend_schemadiff_vrepl.yml @@ -28,7 +28,7 @@ jobs: id: skip-workflow run: | skip='false' - if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then + if [[ "${{github.event.pull_request}}" == "" ]]; then skip='true' fi echo Skip ${skip} diff --git a/.github/workflows/cluster_endtoend_schemadiff_vrepl_mysql57.yml b/.github/workflows/cluster_endtoend_schemadiff_vrepl_mysql57.yml index e2506666b6b..3f8ef17a5fc 100644 --- a/.github/workflows/cluster_endtoend_schemadiff_vrepl_mysql57.yml +++ b/.github/workflows/cluster_endtoend_schemadiff_vrepl_mysql57.yml @@ -28,7 +28,7 @@ jobs: id: skip-workflow run: | skip='false' - if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then + if [[ "${{github.event.pull_request}}" == "" ]]; then skip='true' fi echo Skip ${skip} diff --git a/.github/workflows/cluster_endtoend_tabletmanager_consul.yml b/.github/workflows/cluster_endtoend_tabletmanager_consul.yml index 8cf80a94d99..83611d16882 100644 --- a/.github/workflows/cluster_endtoend_tabletmanager_consul.yml +++ b/.github/workflows/cluster_endtoend_tabletmanager_consul.yml @@ -28,7 +28,7 @@ jobs: id: skip-workflow run: | skip='false' - if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! 
"${{github.ref}}" =~ "refs/tags/.*" ]]; then + if [[ "${{github.event.pull_request}}" == "" ]]; then skip='true' fi echo Skip ${skip} diff --git a/.github/workflows/cluster_endtoend_tabletmanager_tablegc.yml b/.github/workflows/cluster_endtoend_tabletmanager_tablegc.yml index ab66b26a953..5ee84c155c2 100644 --- a/.github/workflows/cluster_endtoend_tabletmanager_tablegc.yml +++ b/.github/workflows/cluster_endtoend_tabletmanager_tablegc.yml @@ -28,7 +28,7 @@ jobs: id: skip-workflow run: | skip='false' - if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then + if [[ "${{github.event.pull_request}}" == "" ]]; then skip='true' fi echo Skip ${skip} diff --git a/.github/workflows/cluster_endtoend_tabletmanager_tablegc_mysql57.yml b/.github/workflows/cluster_endtoend_tabletmanager_tablegc_mysql57.yml index 0bed61f1e53..4af0d6b0999 100644 --- a/.github/workflows/cluster_endtoend_tabletmanager_tablegc_mysql57.yml +++ b/.github/workflows/cluster_endtoend_tabletmanager_tablegc_mysql57.yml @@ -28,7 +28,7 @@ jobs: id: skip-workflow run: | skip='false' - if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then + if [[ "${{github.event.pull_request}}" == "" ]]; then skip='true' fi echo Skip ${skip} diff --git a/.github/workflows/cluster_endtoend_tabletmanager_throttler.yml b/.github/workflows/cluster_endtoend_tabletmanager_throttler.yml index 7a5423dfbb3..5141625b724 100644 --- a/.github/workflows/cluster_endtoend_tabletmanager_throttler.yml +++ b/.github/workflows/cluster_endtoend_tabletmanager_throttler.yml @@ -28,7 +28,7 @@ jobs: id: skip-workflow run: | skip='false' - if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then + if [[ "${{github.event.pull_request}}" == "" ]]; then skip='true' fi echo Skip ${skip} diff --git a/.github/workflows/cluster_endtoend_tabletmanager_throttler_custom_config.yml b/.github/workflows/cluster_endtoend_tabletmanager_throttler_custom_config.yml index ee27029f7f2..ec5c88b36e9 100644 --- a/.github/workflows/cluster_endtoend_tabletmanager_throttler_custom_config.yml +++ b/.github/workflows/cluster_endtoend_tabletmanager_throttler_custom_config.yml @@ -28,7 +28,7 @@ jobs: id: skip-workflow run: | skip='false' - if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then + if [[ "${{github.event.pull_request}}" == "" ]]; then skip='true' fi echo Skip ${skip} diff --git a/.github/workflows/cluster_endtoend_tabletmanager_throttler_topo.yml b/.github/workflows/cluster_endtoend_tabletmanager_throttler_topo.yml index 932ef23822c..858e364ac6c 100644 --- a/.github/workflows/cluster_endtoend_tabletmanager_throttler_topo.yml +++ b/.github/workflows/cluster_endtoend_tabletmanager_throttler_topo.yml @@ -28,7 +28,7 @@ jobs: id: skip-workflow run: | skip='false' - if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! 
"${{github.ref}}" =~ "refs/tags/.*" ]]; then + if [[ "${{github.event.pull_request}}" == "" ]]; then skip='true' fi echo Skip ${skip} diff --git a/.github/workflows/cluster_endtoend_topo_connection_cache.yml b/.github/workflows/cluster_endtoend_topo_connection_cache.yml index f28fce1fe27..9f07edbffe8 100644 --- a/.github/workflows/cluster_endtoend_topo_connection_cache.yml +++ b/.github/workflows/cluster_endtoend_topo_connection_cache.yml @@ -28,7 +28,7 @@ jobs: id: skip-workflow run: | skip='false' - if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then + if [[ "${{github.event.pull_request}}" == "" ]]; then skip='true' fi echo Skip ${skip} diff --git a/.github/workflows/cluster_endtoend_vreplication_across_db_versions.yml b/.github/workflows/cluster_endtoend_vreplication_across_db_versions.yml index b05564728c4..0a9e6acd9b3 100644 --- a/.github/workflows/cluster_endtoend_vreplication_across_db_versions.yml +++ b/.github/workflows/cluster_endtoend_vreplication_across_db_versions.yml @@ -28,7 +28,7 @@ jobs: id: skip-workflow run: | skip='false' - if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then + if [[ "${{github.event.pull_request}}" == "" ]]; then skip='true' fi echo Skip ${skip} diff --git a/.github/workflows/cluster_endtoend_vreplication_basic.yml b/.github/workflows/cluster_endtoend_vreplication_basic.yml index ed42d01fbc0..dd9ed497b9b 100644 --- a/.github/workflows/cluster_endtoend_vreplication_basic.yml +++ b/.github/workflows/cluster_endtoend_vreplication_basic.yml @@ -28,7 +28,7 @@ jobs: id: skip-workflow run: | skip='false' - if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then + if [[ "${{github.event.pull_request}}" == "" ]]; then skip='true' fi echo Skip ${skip} diff --git a/.github/workflows/cluster_endtoend_vreplication_cellalias.yml b/.github/workflows/cluster_endtoend_vreplication_cellalias.yml index a7b0b92b09c..01387c4b99c 100644 --- a/.github/workflows/cluster_endtoend_vreplication_cellalias.yml +++ b/.github/workflows/cluster_endtoend_vreplication_cellalias.yml @@ -28,7 +28,7 @@ jobs: id: skip-workflow run: | skip='false' - if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then + if [[ "${{github.event.pull_request}}" == "" ]]; then skip='true' fi echo Skip ${skip} diff --git a/.github/workflows/cluster_endtoend_vreplication_migrate_vdiff2_convert_tz.yml b/.github/workflows/cluster_endtoend_vreplication_migrate_vdiff2_convert_tz.yml index 8cbec9b1ff6..ec6cdd10e81 100644 --- a/.github/workflows/cluster_endtoend_vreplication_migrate_vdiff2_convert_tz.yml +++ b/.github/workflows/cluster_endtoend_vreplication_migrate_vdiff2_convert_tz.yml @@ -28,7 +28,7 @@ jobs: id: skip-workflow run: | skip='false' - if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! 
"${{github.ref}}" =~ "refs/tags/.*" ]]; then + if [[ "${{github.event.pull_request}}" == "" ]]; then skip='true' fi echo Skip ${skip} diff --git a/.github/workflows/cluster_endtoend_vreplication_multicell.yml b/.github/workflows/cluster_endtoend_vreplication_multicell.yml index 66e7ec8c39a..9a3fe1f829a 100644 --- a/.github/workflows/cluster_endtoend_vreplication_multicell.yml +++ b/.github/workflows/cluster_endtoend_vreplication_multicell.yml @@ -28,7 +28,7 @@ jobs: id: skip-workflow run: | skip='false' - if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then + if [[ "${{github.event.pull_request}}" == "" ]]; then skip='true' fi echo Skip ${skip} diff --git a/.github/workflows/cluster_endtoend_vreplication_v2.yml b/.github/workflows/cluster_endtoend_vreplication_v2.yml index f45e7bde223..9f5cc1b2e49 100644 --- a/.github/workflows/cluster_endtoend_vreplication_v2.yml +++ b/.github/workflows/cluster_endtoend_vreplication_v2.yml @@ -28,7 +28,7 @@ jobs: id: skip-workflow run: | skip='false' - if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then + if [[ "${{github.event.pull_request}}" == "" ]]; then skip='true' fi echo Skip ${skip} diff --git a/.github/workflows/cluster_endtoend_vstream_failover.yml b/.github/workflows/cluster_endtoend_vstream_failover.yml index b83d80d0e80..81f1552d63f 100644 --- a/.github/workflows/cluster_endtoend_vstream_failover.yml +++ b/.github/workflows/cluster_endtoend_vstream_failover.yml @@ -28,7 +28,7 @@ jobs: id: skip-workflow run: | skip='false' - if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then + if [[ "${{github.event.pull_request}}" == "" ]]; then skip='true' fi echo Skip ${skip} diff --git a/.github/workflows/cluster_endtoend_vstream_stoponreshard_false.yml b/.github/workflows/cluster_endtoend_vstream_stoponreshard_false.yml index dd386481847..65f5fb69bbc 100644 --- a/.github/workflows/cluster_endtoend_vstream_stoponreshard_false.yml +++ b/.github/workflows/cluster_endtoend_vstream_stoponreshard_false.yml @@ -28,7 +28,7 @@ jobs: id: skip-workflow run: | skip='false' - if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then + if [[ "${{github.event.pull_request}}" == "" ]]; then skip='true' fi echo Skip ${skip} diff --git a/.github/workflows/cluster_endtoend_vstream_stoponreshard_true.yml b/.github/workflows/cluster_endtoend_vstream_stoponreshard_true.yml index e37ccbcc792..963d405bc11 100644 --- a/.github/workflows/cluster_endtoend_vstream_stoponreshard_true.yml +++ b/.github/workflows/cluster_endtoend_vstream_stoponreshard_true.yml @@ -28,7 +28,7 @@ jobs: id: skip-workflow run: | skip='false' - if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! 
"${{github.ref}}" =~ "refs/tags/.*" ]]; then + if [[ "${{github.event.pull_request}}" == "" ]]; then skip='true' fi echo Skip ${skip} diff --git a/.github/workflows/cluster_endtoend_vstream_with_keyspaces_to_watch.yml b/.github/workflows/cluster_endtoend_vstream_with_keyspaces_to_watch.yml index 46183b60a87..8d49f722eec 100644 --- a/.github/workflows/cluster_endtoend_vstream_with_keyspaces_to_watch.yml +++ b/.github/workflows/cluster_endtoend_vstream_with_keyspaces_to_watch.yml @@ -28,7 +28,7 @@ jobs: id: skip-workflow run: | skip='false' - if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then + if [[ "${{github.event.pull_request}}" == "" ]]; then skip='true' fi echo Skip ${skip} diff --git a/.github/workflows/cluster_endtoend_vtbackup.yml b/.github/workflows/cluster_endtoend_vtbackup.yml index 73c55ca8690..86a36beee17 100644 --- a/.github/workflows/cluster_endtoend_vtbackup.yml +++ b/.github/workflows/cluster_endtoend_vtbackup.yml @@ -28,7 +28,7 @@ jobs: id: skip-workflow run: | skip='false' - if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then + if [[ "${{github.event.pull_request}}" == "" ]]; then skip='true' fi echo Skip ${skip} diff --git a/.github/workflows/cluster_endtoend_vtctlbackup_sharded_clustertest_heavy.yml b/.github/workflows/cluster_endtoend_vtctlbackup_sharded_clustertest_heavy.yml index 6bae4723c9a..5a82f16d5fd 100644 --- a/.github/workflows/cluster_endtoend_vtctlbackup_sharded_clustertest_heavy.yml +++ b/.github/workflows/cluster_endtoend_vtctlbackup_sharded_clustertest_heavy.yml @@ -28,7 +28,7 @@ jobs: id: skip-workflow run: | skip='false' - if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then + if [[ "${{github.event.pull_request}}" == "" ]]; then skip='true' fi echo Skip ${skip} diff --git a/.github/workflows/cluster_endtoend_vtgate_concurrentdml.yml b/.github/workflows/cluster_endtoend_vtgate_concurrentdml.yml index 0c10dc02cf0..e753c462f30 100644 --- a/.github/workflows/cluster_endtoend_vtgate_concurrentdml.yml +++ b/.github/workflows/cluster_endtoend_vtgate_concurrentdml.yml @@ -28,7 +28,7 @@ jobs: id: skip-workflow run: | skip='false' - if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then + if [[ "${{github.event.pull_request}}" == "" ]]; then skip='true' fi echo Skip ${skip} diff --git a/.github/workflows/cluster_endtoend_vtgate_gen4.yml b/.github/workflows/cluster_endtoend_vtgate_gen4.yml index c82cc2d6ce3..d8a70b872fb 100644 --- a/.github/workflows/cluster_endtoend_vtgate_gen4.yml +++ b/.github/workflows/cluster_endtoend_vtgate_gen4.yml @@ -28,7 +28,7 @@ jobs: id: skip-workflow run: | skip='false' - if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! 
"${{github.ref}}" =~ "refs/tags/.*" ]]; then + if [[ "${{github.event.pull_request}}" == "" ]]; then skip='true' fi echo Skip ${skip} diff --git a/.github/workflows/cluster_endtoend_vtgate_general_heavy.yml b/.github/workflows/cluster_endtoend_vtgate_general_heavy.yml index 6a5c226db69..89f21163b43 100644 --- a/.github/workflows/cluster_endtoend_vtgate_general_heavy.yml +++ b/.github/workflows/cluster_endtoend_vtgate_general_heavy.yml @@ -28,7 +28,7 @@ jobs: id: skip-workflow run: | skip='false' - if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then + if [[ "${{github.event.pull_request}}" == "" ]]; then skip='true' fi echo Skip ${skip} diff --git a/.github/workflows/cluster_endtoend_vtgate_godriver.yml b/.github/workflows/cluster_endtoend_vtgate_godriver.yml index adfa9b62548..a6975fd7f51 100644 --- a/.github/workflows/cluster_endtoend_vtgate_godriver.yml +++ b/.github/workflows/cluster_endtoend_vtgate_godriver.yml @@ -28,7 +28,7 @@ jobs: id: skip-workflow run: | skip='false' - if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then + if [[ "${{github.event.pull_request}}" == "" ]]; then skip='true' fi echo Skip ${skip} diff --git a/.github/workflows/cluster_endtoend_vtgate_partial_keyspace.yml b/.github/workflows/cluster_endtoend_vtgate_partial_keyspace.yml index 14726741f94..4334b0984f1 100644 --- a/.github/workflows/cluster_endtoend_vtgate_partial_keyspace.yml +++ b/.github/workflows/cluster_endtoend_vtgate_partial_keyspace.yml @@ -28,7 +28,7 @@ jobs: id: skip-workflow run: | skip='false' - if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then + if [[ "${{github.event.pull_request}}" == "" ]]; then skip='true' fi echo Skip ${skip} diff --git a/.github/workflows/cluster_endtoend_vtgate_queries.yml b/.github/workflows/cluster_endtoend_vtgate_queries.yml index 0e5ebaa579a..dfb5cd34526 100644 --- a/.github/workflows/cluster_endtoend_vtgate_queries.yml +++ b/.github/workflows/cluster_endtoend_vtgate_queries.yml @@ -28,7 +28,7 @@ jobs: id: skip-workflow run: | skip='false' - if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then + if [[ "${{github.event.pull_request}}" == "" ]]; then skip='true' fi echo Skip ${skip} diff --git a/.github/workflows/cluster_endtoend_vtgate_readafterwrite.yml b/.github/workflows/cluster_endtoend_vtgate_readafterwrite.yml index 9566366e047..673ae2ada48 100644 --- a/.github/workflows/cluster_endtoend_vtgate_readafterwrite.yml +++ b/.github/workflows/cluster_endtoend_vtgate_readafterwrite.yml @@ -28,7 +28,7 @@ jobs: id: skip-workflow run: | skip='false' - if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! 
"${{github.ref}}" =~ "refs/tags/.*" ]]; then + if [[ "${{github.event.pull_request}}" == "" ]]; then skip='true' fi echo Skip ${skip} diff --git a/.github/workflows/cluster_endtoend_vtgate_reservedconn.yml b/.github/workflows/cluster_endtoend_vtgate_reservedconn.yml index ffa94407c31..ce9449bb350 100644 --- a/.github/workflows/cluster_endtoend_vtgate_reservedconn.yml +++ b/.github/workflows/cluster_endtoend_vtgate_reservedconn.yml @@ -28,7 +28,7 @@ jobs: id: skip-workflow run: | skip='false' - if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then + if [[ "${{github.event.pull_request}}" == "" ]]; then skip='true' fi echo Skip ${skip} diff --git a/.github/workflows/cluster_endtoend_vtgate_schema.yml b/.github/workflows/cluster_endtoend_vtgate_schema.yml index a8f36987c7f..5c429a82db2 100644 --- a/.github/workflows/cluster_endtoend_vtgate_schema.yml +++ b/.github/workflows/cluster_endtoend_vtgate_schema.yml @@ -28,7 +28,7 @@ jobs: id: skip-workflow run: | skip='false' - if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then + if [[ "${{github.event.pull_request}}" == "" ]]; then skip='true' fi echo Skip ${skip} diff --git a/.github/workflows/cluster_endtoend_vtgate_schema_tracker.yml b/.github/workflows/cluster_endtoend_vtgate_schema_tracker.yml index 4bd4ee6a587..7050a7fc10f 100644 --- a/.github/workflows/cluster_endtoend_vtgate_schema_tracker.yml +++ b/.github/workflows/cluster_endtoend_vtgate_schema_tracker.yml @@ -28,7 +28,7 @@ jobs: id: skip-workflow run: | skip='false' - if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then + if [[ "${{github.event.pull_request}}" == "" ]]; then skip='true' fi echo Skip ${skip} diff --git a/.github/workflows/cluster_endtoend_vtgate_tablet_healthcheck_cache.yml b/.github/workflows/cluster_endtoend_vtgate_tablet_healthcheck_cache.yml index f6ee055f844..21a3deb21cd 100644 --- a/.github/workflows/cluster_endtoend_vtgate_tablet_healthcheck_cache.yml +++ b/.github/workflows/cluster_endtoend_vtgate_tablet_healthcheck_cache.yml @@ -28,7 +28,7 @@ jobs: id: skip-workflow run: | skip='false' - if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then + if [[ "${{github.event.pull_request}}" == "" ]]; then skip='true' fi echo Skip ${skip} diff --git a/.github/workflows/cluster_endtoend_vtgate_topo.yml b/.github/workflows/cluster_endtoend_vtgate_topo.yml index 9c6aa2487fa..41c80dca5c5 100644 --- a/.github/workflows/cluster_endtoend_vtgate_topo.yml +++ b/.github/workflows/cluster_endtoend_vtgate_topo.yml @@ -28,7 +28,7 @@ jobs: id: skip-workflow run: | skip='false' - if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! 
"${{github.ref}}" =~ "refs/tags/.*" ]]; then + if [[ "${{github.event.pull_request}}" == "" ]]; then skip='true' fi echo Skip ${skip} diff --git a/.github/workflows/cluster_endtoend_vtgate_topo_consul.yml b/.github/workflows/cluster_endtoend_vtgate_topo_consul.yml index 90bcc73fa55..de0a9b22683 100644 --- a/.github/workflows/cluster_endtoend_vtgate_topo_consul.yml +++ b/.github/workflows/cluster_endtoend_vtgate_topo_consul.yml @@ -28,7 +28,7 @@ jobs: id: skip-workflow run: | skip='false' - if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then + if [[ "${{github.event.pull_request}}" == "" ]]; then skip='true' fi echo Skip ${skip} diff --git a/.github/workflows/cluster_endtoend_vtgate_topo_etcd.yml b/.github/workflows/cluster_endtoend_vtgate_topo_etcd.yml index 44d5007442f..ba9cfc5013e 100644 --- a/.github/workflows/cluster_endtoend_vtgate_topo_etcd.yml +++ b/.github/workflows/cluster_endtoend_vtgate_topo_etcd.yml @@ -28,7 +28,7 @@ jobs: id: skip-workflow run: | skip='false' - if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then + if [[ "${{github.event.pull_request}}" == "" ]]; then skip='true' fi echo Skip ${skip} diff --git a/.github/workflows/cluster_endtoend_vtgate_transaction.yml b/.github/workflows/cluster_endtoend_vtgate_transaction.yml index 9adaf739548..503638b9932 100644 --- a/.github/workflows/cluster_endtoend_vtgate_transaction.yml +++ b/.github/workflows/cluster_endtoend_vtgate_transaction.yml @@ -28,7 +28,7 @@ jobs: id: skip-workflow run: | skip='false' - if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then + if [[ "${{github.event.pull_request}}" == "" ]]; then skip='true' fi echo Skip ${skip} diff --git a/.github/workflows/cluster_endtoend_vtgate_unsharded.yml b/.github/workflows/cluster_endtoend_vtgate_unsharded.yml index 1f88f4f1f70..dda0fbcc4b8 100644 --- a/.github/workflows/cluster_endtoend_vtgate_unsharded.yml +++ b/.github/workflows/cluster_endtoend_vtgate_unsharded.yml @@ -28,7 +28,7 @@ jobs: id: skip-workflow run: | skip='false' - if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then + if [[ "${{github.event.pull_request}}" == "" ]]; then skip='true' fi echo Skip ${skip} diff --git a/.github/workflows/cluster_endtoend_vtgate_vindex_heavy.yml b/.github/workflows/cluster_endtoend_vtgate_vindex_heavy.yml index bed5263c7f2..ec8d584283d 100644 --- a/.github/workflows/cluster_endtoend_vtgate_vindex_heavy.yml +++ b/.github/workflows/cluster_endtoend_vtgate_vindex_heavy.yml @@ -28,7 +28,7 @@ jobs: id: skip-workflow run: | skip='false' - if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! 
"${{github.ref}}" =~ "refs/tags/.*" ]]; then + if [[ "${{github.event.pull_request}}" == "" ]]; then skip='true' fi echo Skip ${skip} diff --git a/.github/workflows/cluster_endtoend_vtgate_vschema.yml b/.github/workflows/cluster_endtoend_vtgate_vschema.yml index 604ff1ffedf..77960461fee 100644 --- a/.github/workflows/cluster_endtoend_vtgate_vschema.yml +++ b/.github/workflows/cluster_endtoend_vtgate_vschema.yml @@ -28,7 +28,7 @@ jobs: id: skip-workflow run: | skip='false' - if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then + if [[ "${{github.event.pull_request}}" == "" ]]; then skip='true' fi echo Skip ${skip} diff --git a/.github/workflows/cluster_endtoend_vtorc.yml b/.github/workflows/cluster_endtoend_vtorc.yml index e38947d7faa..0affbccc8d4 100644 --- a/.github/workflows/cluster_endtoend_vtorc.yml +++ b/.github/workflows/cluster_endtoend_vtorc.yml @@ -28,7 +28,7 @@ jobs: id: skip-workflow run: | skip='false' - if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then + if [[ "${{github.event.pull_request}}" == "" ]]; then skip='true' fi echo Skip ${skip} diff --git a/.github/workflows/cluster_endtoend_vtorc_mysql57.yml b/.github/workflows/cluster_endtoend_vtorc_mysql57.yml index 9a50df9098c..8f22f9e93d6 100644 --- a/.github/workflows/cluster_endtoend_vtorc_mysql57.yml +++ b/.github/workflows/cluster_endtoend_vtorc_mysql57.yml @@ -28,7 +28,7 @@ jobs: id: skip-workflow run: | skip='false' - if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then + if [[ "${{github.event.pull_request}}" == "" ]]; then skip='true' fi echo Skip ${skip} diff --git a/.github/workflows/cluster_endtoend_vttablet_prscomplex.yml b/.github/workflows/cluster_endtoend_vttablet_prscomplex.yml index ab2503ed50a..01f96e8ca51 100644 --- a/.github/workflows/cluster_endtoend_vttablet_prscomplex.yml +++ b/.github/workflows/cluster_endtoend_vttablet_prscomplex.yml @@ -28,7 +28,7 @@ jobs: id: skip-workflow run: | skip='false' - if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then + if [[ "${{github.event.pull_request}}" == "" ]]; then skip='true' fi echo Skip ${skip} diff --git a/.github/workflows/cluster_endtoend_xb_backup.yml b/.github/workflows/cluster_endtoend_xb_backup.yml index 928a1f8e575..09467fdfe8f 100644 --- a/.github/workflows/cluster_endtoend_xb_backup.yml +++ b/.github/workflows/cluster_endtoend_xb_backup.yml @@ -28,7 +28,7 @@ jobs: id: skip-workflow run: | skip='false' - if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! 
"${{github.ref}}" =~ "refs/tags/.*" ]]; then + if [[ "${{github.event.pull_request}}" == "" ]]; then skip='true' fi echo Skip ${skip} diff --git a/.github/workflows/cluster_endtoend_xb_backup_mysql57.yml b/.github/workflows/cluster_endtoend_xb_backup_mysql57.yml index 999928f526d..38228fff292 100644 --- a/.github/workflows/cluster_endtoend_xb_backup_mysql57.yml +++ b/.github/workflows/cluster_endtoend_xb_backup_mysql57.yml @@ -32,7 +32,7 @@ jobs: id: skip-workflow run: | skip='false' - if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then + if [[ "${{github.event.pull_request}}" == "" ]]; then skip='true' fi echo Skip ${skip} diff --git a/.github/workflows/cluster_endtoend_xb_recovery.yml b/.github/workflows/cluster_endtoend_xb_recovery.yml index 2d70c1779d9..9869419ef66 100644 --- a/.github/workflows/cluster_endtoend_xb_recovery.yml +++ b/.github/workflows/cluster_endtoend_xb_recovery.yml @@ -28,7 +28,7 @@ jobs: id: skip-workflow run: | skip='false' - if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then + if [[ "${{github.event.pull_request}}" == "" ]]; then skip='true' fi echo Skip ${skip} diff --git a/.github/workflows/cluster_endtoend_xb_recovery_mysql57.yml b/.github/workflows/cluster_endtoend_xb_recovery_mysql57.yml index 4674023ad13..024d2d90fe1 100644 --- a/.github/workflows/cluster_endtoend_xb_recovery_mysql57.yml +++ b/.github/workflows/cluster_endtoend_xb_recovery_mysql57.yml @@ -32,7 +32,7 @@ jobs: id: skip-workflow run: | skip='false' - if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then + if [[ "${{github.event.pull_request}}" == "" ]]; then skip='true' fi echo Skip ${skip} diff --git a/.github/workflows/docker_test_cluster_10.yml b/.github/workflows/docker_test_cluster_10.yml index 59f6c33f5ae..a95e8accf1e 100644 --- a/.github/workflows/docker_test_cluster_10.yml +++ b/.github/workflows/docker_test_cluster_10.yml @@ -18,7 +18,7 @@ jobs: id: skip-workflow run: | skip='false' - if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then + if [[ "${{github.event.pull_request}}" == "" ]]; then skip='true' fi echo Skip $skip diff --git a/.github/workflows/docker_test_cluster_25.yml b/.github/workflows/docker_test_cluster_25.yml index f9627b1a167..bd4c1326cd7 100644 --- a/.github/workflows/docker_test_cluster_25.yml +++ b/.github/workflows/docker_test_cluster_25.yml @@ -18,7 +18,7 @@ jobs: id: skip-workflow run: | skip='false' - if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! 
"${{github.ref}}" =~ "refs/tags/.*" ]]; then + if [[ "${{github.event.pull_request}}" == "" ]]; then skip='true' fi echo Skip ${skip} diff --git a/.github/workflows/e2e_race.yml b/.github/workflows/e2e_race.yml index 7fa14bb6e1a..89dacb57c27 100644 --- a/.github/workflows/e2e_race.yml +++ b/.github/workflows/e2e_race.yml @@ -17,7 +17,7 @@ jobs: id: skip-workflow run: | skip='false' - if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then + if [[ "${{github.event.pull_request}}" == "" ]]; then skip='true' fi echo Skip ${skip} @@ -68,7 +68,7 @@ jobs: echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config* sudo apt-get update - + # Install everything else we need, and configure sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils sudo service mysql stop diff --git a/.github/workflows/endtoend.yml b/.github/workflows/endtoend.yml index 808698e4041..8202aa14a7c 100644 --- a/.github/workflows/endtoend.yml +++ b/.github/workflows/endtoend.yml @@ -17,7 +17,7 @@ jobs: id: skip-workflow run: | skip='false' - if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then + if [[ "${{github.event.pull_request}}" == "" ]]; then skip='true' fi echo Skip ${skip} diff --git a/.github/workflows/local_example.yml b/.github/workflows/local_example.yml index 526f5d0248c..7d31f8af399 100644 --- a/.github/workflows/local_example.yml +++ b/.github/workflows/local_example.yml @@ -22,7 +22,7 @@ jobs: id: skip-workflow run: | skip='false' - if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then + if [[ "${{github.event.pull_request}}" == "" ]]; then skip='true' fi echo Skip ${skip} @@ -75,7 +75,7 @@ jobs: echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config* sudo apt-get update - + # Install everything else we need, and configure sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata sudo service mysql stop diff --git a/.github/workflows/region_example.yml b/.github/workflows/region_example.yml index 829bf3aeb4c..b0e34a76751 100644 --- a/.github/workflows/region_example.yml +++ b/.github/workflows/region_example.yml @@ -22,7 +22,7 @@ jobs: id: skip-workflow run: | skip='false' - if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! 
"${{github.ref}}" =~ "refs/tags/.*" ]]; then + if [[ "${{github.event.pull_request}}" == "" ]]; then skip='true' fi echo Skip ${skip} diff --git a/.github/workflows/static_checks_etc.yml b/.github/workflows/static_checks_etc.yml index 6529167ecf6..16886f4f932 100644 --- a/.github/workflows/static_checks_etc.yml +++ b/.github/workflows/static_checks_etc.yml @@ -21,11 +21,11 @@ jobs: id: skip-workflow run: | skip='false' - if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then + if [[ "${{github.event.pull_request}}" == "" ]]; then skip='true' fi echo Skip ${skip} - echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT - name: Checkout code if: steps.skip-workflow.outputs.skip-workflow == 'false' diff --git a/.github/workflows/unit_race.yml b/.github/workflows/unit_race.yml index 43838d742c0..b92f3d712dc 100644 --- a/.github/workflows/unit_race.yml +++ b/.github/workflows/unit_race.yml @@ -21,7 +21,7 @@ jobs: id: skip-workflow run: | skip='false' - if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then + if [[ "${{github.event.pull_request}}" == "" ]]; then skip='true' fi echo Skip ${skip} diff --git a/.github/workflows/unit_test_mysql57.yml b/.github/workflows/unit_test_mysql57.yml index 805f0d9b610..1af886f0a08 100644 --- a/.github/workflows/unit_test_mysql57.yml +++ b/.github/workflows/unit_test_mysql57.yml @@ -27,7 +27,7 @@ jobs: id: skip-workflow run: | skip='false' - if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then + if [[ "${{github.event.pull_request}}" == "" ]]; then skip='true' fi echo Skip ${skip} @@ -115,7 +115,7 @@ jobs: go mod download go install golang.org/x/tools/cmd/goimports@latest - + # install JUnit report formatter go install github.com/vitessio/go-junit-report@HEAD diff --git a/.github/workflows/unit_test_mysql80.yml b/.github/workflows/unit_test_mysql80.yml index def7ae069d7..fdf8b1c6851 100644 --- a/.github/workflows/unit_test_mysql80.yml +++ b/.github/workflows/unit_test_mysql80.yml @@ -27,7 +27,7 @@ jobs: id: skip-workflow run: | skip='false' - if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! 
"${{github.ref}}" =~ "refs/tags/.*" ]]; then + if [[ "${{github.event.pull_request}}" == "" ]]; then skip='true' fi echo Skip ${skip} @@ -112,7 +112,7 @@ jobs: go mod download go install golang.org/x/tools/cmd/goimports@latest - + # install JUnit report formatter go install github.com/vitessio/go-junit-report@HEAD diff --git a/.github/workflows/upgrade_downgrade_test_backups_e2e.yml b/.github/workflows/upgrade_downgrade_test_backups_e2e.yml index 8867f90634f..b1ec5aead4c 100644 --- a/.github/workflows/upgrade_downgrade_test_backups_e2e.yml +++ b/.github/workflows/upgrade_downgrade_test_backups_e2e.yml @@ -24,7 +24,7 @@ jobs: - name: Set output with latest release branch id: output-previous-release-ref run: | - previous_release_ref=$(./tools/get_previous_release.sh ${{github.base_ref}} ${{github.ref}}) + previous_release_ref=$(./tools/get_previous_release_shopify.sh) echo $previous_release_ref echo "previous_release_ref=${previous_release_ref}" >> $GITHUB_OUTPUT @@ -48,7 +48,7 @@ jobs: id: skip-workflow run: | skip='false' - if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then + if [[ "${{github.event.pull_request}}" == "" ]]; then skip='true' fi echo Skip ${skip} diff --git a/.github/workflows/upgrade_downgrade_test_backups_e2e_next_release.yml b/.github/workflows/upgrade_downgrade_test_backups_e2e_next_release.yml index cf6e4b1998b..475f525a33b 100644 --- a/.github/workflows/upgrade_downgrade_test_backups_e2e_next_release.yml +++ b/.github/workflows/upgrade_downgrade_test_backups_e2e_next_release.yml @@ -24,7 +24,7 @@ jobs: - name: Set output with latest release branch id: output-next-release-ref run: | - next_release_ref=$(./tools/get_next_release.sh ${{github.base_ref}} ${{github.ref}}) + next_release_ref=$(./tools/get_next_release_shopify.sh) echo $next_release_ref echo "next_release_ref=${next_release_ref}" >> $GITHUB_OUTPUT @@ -48,7 +48,7 @@ jobs: id: skip-workflow run: | skip='false' - if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then + if [[ "${{github.event.pull_request}}" == "" ]]; then skip='true' fi if [[ "${{needs.get_next_release.outputs.next_release}}" == "" ]]; then diff --git a/.github/workflows/upgrade_downgrade_test_backups_manual.yml b/.github/workflows/upgrade_downgrade_test_backups_manual.yml index a54cac5c0e0..42b2d272b36 100644 --- a/.github/workflows/upgrade_downgrade_test_backups_manual.yml +++ b/.github/workflows/upgrade_downgrade_test_backups_manual.yml @@ -24,7 +24,7 @@ jobs: - name: Set output with latest release branch id: output-previous-release-ref run: | - previous_release_ref=$(./tools/get_previous_release.sh ${{github.base_ref}} ${{github.ref}}) + previous_release_ref=$(./tools/get_previous_release_shopify.sh) echo $previous_release_ref echo "previous_release_ref=${previous_release_ref}" >> $GITHUB_OUTPUT @@ -49,7 +49,7 @@ jobs: id: skip-workflow run: | skip='false' - if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! 
"${{github.ref}}" =~ "refs/tags/.*" ]]; then + if [[ "${{github.event.pull_request}}" == "" ]]; then skip='true' fi echo Skip ${skip} diff --git a/.github/workflows/upgrade_downgrade_test_backups_manual_next_release.yml b/.github/workflows/upgrade_downgrade_test_backups_manual_next_release.yml index 60b77f3b685..72a7087bbaa 100644 --- a/.github/workflows/upgrade_downgrade_test_backups_manual_next_release.yml +++ b/.github/workflows/upgrade_downgrade_test_backups_manual_next_release.yml @@ -24,7 +24,7 @@ jobs: - name: Set output with latest release branch id: output-next-release-ref run: | - next_release_ref=$(./tools/get_next_release.sh ${{github.base_ref}} ${{github.ref}}) + next_release_ref=$(./tools/get_next_release_shopify.sh) echo $next_release_ref echo "next_release_ref=${next_release_ref}" >> $GITHUB_OUTPUT @@ -49,7 +49,7 @@ jobs: id: skip-workflow run: | skip='false' - if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then + if [[ "${{github.event.pull_request}}" == "" ]]; then skip='true' fi if [[ "${{needs.get_next_release.outputs.next_release}}" == "" ]]; then diff --git a/.github/workflows/upgrade_downgrade_test_query_serving_queries.yml b/.github/workflows/upgrade_downgrade_test_query_serving_queries.yml index 365f901e335..dbd0cec99f2 100644 --- a/.github/workflows/upgrade_downgrade_test_query_serving_queries.yml +++ b/.github/workflows/upgrade_downgrade_test_query_serving_queries.yml @@ -27,7 +27,7 @@ jobs: - name: Set output with latest release branch id: output-previous-release-ref run: | - previous_release_ref=$(./tools/get_previous_release.sh ${{github.base_ref}} ${{github.ref}}) + previous_release_ref=$(./tools/get_previous_release_shopify.sh) echo $previous_release_ref echo "previous_release_ref=${previous_release_ref}" >> $GITHUB_OUTPUT @@ -50,7 +50,7 @@ jobs: id: skip-workflow run: | skip='false' - if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then + if [[ "${{github.event.pull_request}}" == "" ]]; then skip='true' fi echo Skip ${skip} diff --git a/.github/workflows/upgrade_downgrade_test_query_serving_queries_next_release.yml b/.github/workflows/upgrade_downgrade_test_query_serving_queries_next_release.yml index c9145d95404..40213223acb 100644 --- a/.github/workflows/upgrade_downgrade_test_query_serving_queries_next_release.yml +++ b/.github/workflows/upgrade_downgrade_test_query_serving_queries_next_release.yml @@ -27,7 +27,7 @@ jobs: - name: Set output with latest release branch id: output-next-release-ref run: | - next_release_ref=$(./tools/get_next_release.sh ${{github.base_ref}} ${{github.ref}}) + next_release_ref=$(./tools/get_next_release_shopify.sh) echo $next_release_ref echo "next_release_ref=${next_release_ref}" >> $GITHUB_OUTPUT @@ -50,7 +50,7 @@ jobs: id: skip-workflow run: | skip='false' - if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! 
"${{github.ref}}" =~ "refs/tags/.*" ]]; then + if [[ "${{github.event.pull_request}}" == "" ]]; then skip='true' fi if [[ "${{needs.get_next_release.outputs.next_release}}" == "" ]]; then diff --git a/.github/workflows/upgrade_downgrade_test_query_serving_schema.yml b/.github/workflows/upgrade_downgrade_test_query_serving_schema.yml index 37285228a9c..cc7eae03faa 100644 --- a/.github/workflows/upgrade_downgrade_test_query_serving_schema.yml +++ b/.github/workflows/upgrade_downgrade_test_query_serving_schema.yml @@ -27,7 +27,7 @@ jobs: - name: Set output with latest release branch id: output-previous-release-ref run: | - previous_release_ref=$(./tools/get_previous_release.sh ${{github.base_ref}} ${{github.ref}}) + previous_release_ref=$(./tools/get_previous_release_shopify.sh) echo $previous_release_ref echo "previous_release_ref=${previous_release_ref}" >> $GITHUB_OUTPUT @@ -50,7 +50,7 @@ jobs: id: skip-workflow run: | skip='false' - if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then + if [[ "${{github.event.pull_request}}" == "" ]]; then skip='true' fi echo Skip ${skip} diff --git a/.github/workflows/upgrade_downgrade_test_query_serving_schema_next_release.yml b/.github/workflows/upgrade_downgrade_test_query_serving_schema_next_release.yml index a57b5b6ac68..36da0419671 100644 --- a/.github/workflows/upgrade_downgrade_test_query_serving_schema_next_release.yml +++ b/.github/workflows/upgrade_downgrade_test_query_serving_schema_next_release.yml @@ -27,7 +27,7 @@ jobs: - name: Set output with latest release branch id: output-next-release-ref run: | - next_release_ref=$(./tools/get_next_release.sh ${{github.base_ref}} ${{github.ref}}) + next_release_ref=$(./tools/get_next_release_shopify.sh) echo $next_release_ref echo "next_release_ref=${next_release_ref}" >> $GITHUB_OUTPUT @@ -50,7 +50,7 @@ jobs: id: skip-workflow run: | skip='false' - if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then + if [[ "${{github.event.pull_request}}" == "" ]]; then skip='true' fi if [[ "${{needs.get_next_release.outputs.next_release}}" == "" ]]; then diff --git a/.github/workflows/upgrade_downgrade_test_reparent_new_vtctl.yml b/.github/workflows/upgrade_downgrade_test_reparent_new_vtctl.yml index c9e8983348e..aeb73c2b7b3 100644 --- a/.github/workflows/upgrade_downgrade_test_reparent_new_vtctl.yml +++ b/.github/workflows/upgrade_downgrade_test_reparent_new_vtctl.yml @@ -27,7 +27,7 @@ jobs: - name: Set output with latest release branch id: output-next-release-ref run: | - next_release_ref=$(./tools/get_next_release.sh ${{github.base_ref}} ${{github.ref}}) + next_release_ref=$(./tools/get_next_release_shopify.sh) echo $next_release_ref echo "next_release_ref=${next_release_ref}" >> $GITHUB_OUTPUT @@ -50,7 +50,7 @@ jobs: id: skip-workflow run: | skip='false' - if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! 
"${{github.ref}}" =~ "refs/tags/.*" ]]; then + if [[ "${{github.event.pull_request}}" == "" ]]; then skip='true' fi if [[ "${{needs.get_next_release.outputs.next_release}}" == "" ]]; then diff --git a/.github/workflows/upgrade_downgrade_test_reparent_new_vttablet.yml b/.github/workflows/upgrade_downgrade_test_reparent_new_vttablet.yml index 22864627473..9e61d6a9296 100644 --- a/.github/workflows/upgrade_downgrade_test_reparent_new_vttablet.yml +++ b/.github/workflows/upgrade_downgrade_test_reparent_new_vttablet.yml @@ -27,7 +27,7 @@ jobs: - name: Set output with latest release branch id: output-next-release-ref run: | - next_release_ref=$(./tools/get_next_release.sh ${{github.base_ref}} ${{github.ref}}) + next_release_ref=$(./tools/get_next_release_shopify.sh) echo $next_release_ref echo "next_release_ref=${next_release_ref}" >> $GITHUB_OUTPUT @@ -50,7 +50,7 @@ jobs: id: skip-workflow run: | skip='false' - if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then + if [[ "${{github.event.pull_request}}" == "" ]]; then skip='true' fi if [[ "${{needs.get_next_release.outputs.next_release}}" == "" ]]; then diff --git a/.github/workflows/upgrade_downgrade_test_reparent_old_vtctl.yml b/.github/workflows/upgrade_downgrade_test_reparent_old_vtctl.yml index 247b0b65bcf..41a76337f9d 100644 --- a/.github/workflows/upgrade_downgrade_test_reparent_old_vtctl.yml +++ b/.github/workflows/upgrade_downgrade_test_reparent_old_vtctl.yml @@ -27,7 +27,7 @@ jobs: - name: Set output with latest release branch id: output-previous-release-ref run: | - previous_release_ref=$(./tools/get_previous_release.sh ${{github.base_ref}} ${{github.ref}}) + previous_release_ref=$(./tools/get_previous_release_shopify.sh) echo $previous_release_ref echo "previous_release_ref=${previous_release_ref}" >> $GITHUB_OUTPUT @@ -50,7 +50,7 @@ jobs: id: skip-workflow run: | skip='false' - if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then + if [[ "${{github.event.pull_request}}" == "" ]]; then skip='true' fi echo Skip ${skip} diff --git a/.github/workflows/upgrade_downgrade_test_reparent_old_vttablet.yml b/.github/workflows/upgrade_downgrade_test_reparent_old_vttablet.yml index e6dfdae6dc9..c96062d09fc 100644 --- a/.github/workflows/upgrade_downgrade_test_reparent_old_vttablet.yml +++ b/.github/workflows/upgrade_downgrade_test_reparent_old_vttablet.yml @@ -27,7 +27,7 @@ jobs: - name: Set output with latest release branch id: output-previous-release-ref run: | - previous_release_ref=$(./tools/get_previous_release.sh ${{github.base_ref}} ${{github.ref}}) + previous_release_ref=$(./tools/get_previous_release_shopify.sh) echo $previous_release_ref echo "previous_release_ref=${previous_release_ref}" >> $GITHUB_OUTPUT @@ -50,7 +50,7 @@ jobs: id: skip-workflow run: | skip='false' - if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! 
"${{github.ref}}" =~ "refs/tags/.*" ]]; then + if [[ "${{github.event.pull_request}}" == "" ]]; then skip='true' fi echo Skip ${skip} diff --git a/.github/workflows/vtadmin_web_build.yml b/.github/workflows/vtadmin_web_build.yml index c0b668641ea..3ea75e32c62 100644 --- a/.github/workflows/vtadmin_web_build.yml +++ b/.github/workflows/vtadmin_web_build.yml @@ -1,6 +1,6 @@ name: vtadmin-web build -# In specifying the 'paths' property, we need to include the path to this workflow .yml file. +# In specifying the 'paths' property, we need to include the path to this workflow .yml file. # See https://github.community/t/trigger-a-workflow-on-change-to-the-yml-file-itself/17792/4) on: push: @@ -27,7 +27,7 @@ jobs: id: skip-workflow run: | skip='false' - if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then + if [[ "${{github.event.pull_request}}" == "" ]]; then skip='true' fi echo Skip ${skip} @@ -51,6 +51,6 @@ jobs: run: cd ./web/vtadmin && npm run build # Cancel pending and in-progress runs of this workflow if a newer ref is pushed to CI. - concurrency: + concurrency: group: ${{ github.workflow }}-${{ github.ref }} cancel-in-progress: true diff --git a/.github/workflows/vtadmin_web_lint.yml b/.github/workflows/vtadmin_web_lint.yml index 9b7e9a68847..ffcc31e51b6 100644 --- a/.github/workflows/vtadmin_web_lint.yml +++ b/.github/workflows/vtadmin_web_lint.yml @@ -1,6 +1,6 @@ name: vtadmin-web linting + formatting -# In specifying the 'paths' property, we need to include the path to this workflow .yml file. +# In specifying the 'paths' property, we need to include the path to this workflow .yml file. # See https://github.community/t/trigger-a-workflow-on-change-to-the-yml-file-itself/17792/4) on: push: @@ -27,7 +27,7 @@ jobs: id: skip-workflow run: | skip='false' - if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then + if [[ "${{github.event.pull_request}}" == "" ]]; then skip='true' fi echo Skip ${skip} @@ -47,13 +47,13 @@ jobs: run: cd ./web/vtadmin && npm ci # Using "if: always()" means each step will run, even if a previous - # step fails. This is nice because, for example, we want stylelint and - # prettier to run even if eslint fails. + # step fails. This is nice because, for example, we want stylelint and + # prettier to run even if eslint fails. # # An undesirable secondary effect of this is these steps # will run even if the install, etc. steps fail, which is... weird. # A nice enhancement is to parallelize these steps into jobs, with the - # trade-off of more complexity around sharing npm install artifacts. + # trade-off of more complexity around sharing npm install artifacts. - name: Run eslint if: steps.skip-workflow.outputs.skip-workflow == 'false' && always() run: cd ./web/vtadmin && npm run lint:eslint @@ -61,12 +61,12 @@ jobs: - name: Run stylelint if: steps.skip-workflow.outputs.skip-workflow == 'false' && always() run: cd ./web/vtadmin && npm run lint:stylelint -- -f verbose - + - name: Run prettier if: steps.skip-workflow.outputs.skip-workflow == 'false' && always() run: cd ./web/vtadmin && npm run lint:prettier # Cancel pending and in-progress runs of this workflow if a newer ref is pushed to CI. 
-concurrency: 
+concurrency:
   group: ${{ github.workflow }}-${{ github.ref }}
   cancel-in-progress: true
diff --git a/.github/workflows/vtadmin_web_unit_tests.yml b/.github/workflows/vtadmin_web_unit_tests.yml
index bfc4a5d15ff..36b5bc505e4 100644
--- a/.github/workflows/vtadmin_web_unit_tests.yml
+++ b/.github/workflows/vtadmin_web_unit_tests.yml
@@ -1,6 +1,6 @@
 name: vtadmin-web unit tests
 
-# In specifying the 'paths' property, we need to include the path to this workflow .yml file. 
+# In specifying the 'paths' property, we need to include the path to this workflow .yml file.
 # See https://github.community/t/trigger-a-workflow-on-change-to-the-yml-file-itself/17792/4)
 on:
   push:
@@ -27,7 +27,7 @@ jobs:
         id: skip-workflow
         run: |
           skip='false'
-          if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then
+          if [[ "${{github.event.pull_request}}" == "" ]]; then
             skip='true'
           fi
           echo Skip ${skip}
@@ -49,8 +49,8 @@ jobs:
       - name: Run unit tests
         if: steps.skip-workflow.outputs.skip-workflow == 'false'
         run: cd ./web/vtadmin && CI=true npm run test
-      
+
 # Cancel pending and in-progress runs of this workflow if a newer ref is pushed to CI.
-concurrency: 
+concurrency:
   group: ${{ github.workflow }}-${{ github.ref }}
   cancel-in-progress: true
diff --git a/test/templates/cluster_endtoend_test.tpl b/test/templates/cluster_endtoend_test.tpl
index fedd7a30620..e31e544eaf0 100644
--- a/test/templates/cluster_endtoend_test.tpl
+++ b/test/templates/cluster_endtoend_test.tpl
@@ -26,7 +26,7 @@ jobs:
         id: skip-workflow
         run: |
           skip='false'
-          if [[ "{{"${{github.event.pull_request}}"}}" == "" ]] && [[ "{{"${{github.ref}}"}}" != "refs/heads/main" ]] && [[ ! "{{"${{github.ref}}"}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "{{"${{github.ref}}"}}" =~ "refs/tags/.*" ]]; then
+          if [[ "{{"${{github.event.pull_request}}"}}" == "" ]]; then
             skip='true'
           fi
           echo Skip ${skip}
diff --git a/test/templates/cluster_endtoend_test_docker.tpl b/test/templates/cluster_endtoend_test_docker.tpl
index c48c1060bd7..d2acdcc43b0 100644
--- a/test/templates/cluster_endtoend_test_docker.tpl
+++ b/test/templates/cluster_endtoend_test_docker.tpl
@@ -18,7 +18,7 @@ jobs:
         id: skip-workflow
         run: |
           skip='false'
-          if [[ "{{"${{github.event.pull_request}}"}}" == "" ]] && [[ "{{"${{github.ref}}"}}" != "refs/heads/main" ]] && [[ ! "{{"${{github.ref}}"}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "{{"${{github.ref}}"}}" =~ "refs/tags/.*" ]]; then
+          if [[ "{{"${{github.event.pull_request}}"}}" == "" ]]; then
             skip='true'
           fi
           echo Skip ${skip}
diff --git a/test/templates/cluster_endtoend_test_mysql57.tpl b/test/templates/cluster_endtoend_test_mysql57.tpl
index 3575ce418cb..63b6c6e79d2 100644
--- a/test/templates/cluster_endtoend_test_mysql57.tpl
+++ b/test/templates/cluster_endtoend_test_mysql57.tpl
@@ -31,7 +31,7 @@ jobs:
         id: skip-workflow
         run: |
           skip='false'
-          if [[ "{{"${{github.event.pull_request}}"}}" == "" ]] && [[ "{{"${{github.ref}}"}}" != "refs/heads/main" ]] && [[ ! "{{"${{github.ref}}"}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "{{"${{github.ref}}"}}" =~ "refs/tags/.*" ]]; then
+          if [[ "{{"${{github.event.pull_request}}"}}" == "" ]]; then
             skip='true'
           fi
           echo Skip ${skip}
diff --git a/test/templates/cluster_endtoend_test_self_hosted.tpl b/test/templates/cluster_endtoend_test_self_hosted.tpl
index 8095d5ff0b7..0d245c67117 100644
--- a/test/templates/cluster_endtoend_test_self_hosted.tpl
+++ b/test/templates/cluster_endtoend_test_self_hosted.tpl
@@ -21,7 +21,7 @@ jobs:
         id: skip-workflow
         run: |
           skip='false'
-          if [[ "{{"${{github.event.pull_request}}"}}" == "" ]] && [[ "{{"${{github.ref}}"}}" != "refs/heads/main" ]] && [[ ! "{{"${{github.ref}}"}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "{{"${{github.ref}}"}}" =~ "refs/tags/.*" ]]; then
+          if [[ "{{"${{github.event.pull_request}}"}}" == "" ]]; then
             skip='true'
           fi
           echo Skip ${skip}
diff --git a/test/templates/unit_test.tpl b/test/templates/unit_test.tpl
index 83df6d8c033..9c683324fef 100644
--- a/test/templates/unit_test.tpl
+++ b/test/templates/unit_test.tpl
@@ -25,7 +25,7 @@ jobs:
         id: skip-workflow
         run: |
           skip='false'
-          if [[ "{{"${{github.event.pull_request}}"}}" == "" ]] && [[ "{{"${{github.ref}}"}}" != "refs/heads/main" ]] && [[ ! "{{"${{github.ref}}"}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "{{"${{github.ref}}"}}" =~ "refs/tags/.*" ]]; then
+          if [[ "{{"${{github.event.pull_request}}"}}" == "" ]]; then
             skip='true'
           fi
           echo Skip ${skip}
@@ -129,7 +129,7 @@ jobs:
 
           go mod download
           go install golang.org/x/tools/cmd/goimports@latest
-          
+
           # install JUnit report formatter
           go install github.com/vitessio/go-junit-report@HEAD
diff --git a/test/templates/unit_test_self_hosted.tpl b/test/templates/unit_test_self_hosted.tpl
index aa415458426..0c0cbf2acca 100644
--- a/test/templates/unit_test_self_hosted.tpl
+++ b/test/templates/unit_test_self_hosted.tpl
@@ -20,7 +20,7 @@ jobs:
         id: skip-workflow
         run: |
           skip='false'
-          if [[ "{{"${{github.event.pull_request}}"}}" == "" ]] && [[ "{{"${{github.ref}}"}}" != "refs/heads/main" ]] && [[ ! "{{"${{github.ref}}"}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "{{"${{github.ref}}"}}" =~ "refs/tags/.*" ]]; then
+          if [[ "{{"${{github.event.pull_request}}"}}" == "" ]]; then
             skip='true'
           fi
           echo Skip ${skip}
diff --git a/tools/get_next_release_shopify.sh b/tools/get_next_release_shopify.sh
new file mode 100755
index 00000000000..38cc78784a0
--- /dev/null
+++ b/tools/get_next_release_shopify.sh
@@ -0,0 +1,38 @@
+#!/bin/bash
+
+# Copyright 2022 The Vitess Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Determine the next major release to test against. Takes no arguments.
+git fetch --tags origin
+
+target_release=""
+
+latest_major_release=$(git show-ref --tags | grep -E 'refs/tags/v[0-9]*\.[0-9]*\.[0-9]*$' | sed 's/[a-z0-9]* refs\/tags\/v//' | awk -v FS=. '{print $1}' | sort -Vr | head -n1)
+major_release=$(cat ./go/vt/servenv/version.go | grep versionName | awk -v 'FS= ' '{print $4}' | tr -d '"' | sed 's/\..*//')
+target_major_release=$((major_release+1))
+
+# Try to get the latest shopify release
+target_release_number=$(git show-ref | grep -E 'refs/remotes/origin/v[0-9]*\.[0-9]*\.[0-9]*-shopify-[0-9]*$' | sed 's/[a-z0-9]* refs\/remotes\/origin\/v//' | awk -v FS=. -v RELEASE="$target_major_release" '{if ($1 == RELEASE) print; }' | sort -Vr | head -n1)
+if [ -z "$target_release_number" ]; then
+  # Fallback to upstream release if shopify release doesn't exist
+  target_release_number=$(git show-ref --tags | grep -E 'refs/tags/v[0-9]*\.[0-9]*\.[0-9]*$' | sed 's/[a-z0-9]* refs\/tags\/v//' | awk -v FS=. -v RELEASE="$target_major_release" '{if ($1 == RELEASE) print; }' | sort -Vr | head -n1)
+fi
+target_release="v$target_release_number"
+
+if [ "$major_release" == "$latest_major_release" ]; then
+  target_release="main"
+fi
+
+echo "$target_release"
diff --git a/tools/get_previous_release_shopify.sh b/tools/get_previous_release_shopify.sh
new file mode 100755
index 00000000000..74ad558648b
--- /dev/null
+++ b/tools/get_previous_release_shopify.sh
@@ -0,0 +1,33 @@
+#!/bin/bash
+
+# Copyright 2022 The Vitess Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This script determines the previous major release to run the
+# upgrade/downgrade tests against, preferring the latest Shopify-tagged
+# release for that major version when one exists.
+
+# Takes no arguments.
+
+target_release=""
+
+major_release=$(cat ./go/vt/servenv/version.go | grep versionName | awk -v 'FS= ' '{print $4}' | tr -d '"' | sed 's/\..*//')
+target_major_release=$((major_release-1))
+target_release_number=$(git show-ref | grep -E 'refs/remotes/origin/v[0-9]*\.[0-9]*\.[0-9]*-shopify-[0-9]*$' | sed 's/[a-z0-9]* refs\/remotes\/origin\/v//' | awk -v FS=. -v RELEASE="$target_major_release" '{if ($1 == RELEASE) print; }' | sort -Vr | head -n1)
+if [ ! -z "$target_release_number" ]
+then
+  target_release="v$target_release_number"
+fi
+
+echo "$target_release"
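
A quick way to sanity-check the two helper scripts above is to run them by hand from the root of a checkout, the same way the workflows invoke them: with no arguments. The sketch below is illustrative only and is not part of the patch; the file name smoke_release_targets.sh is hypothetical, and it assumes the Shopify fork is configured as the "origin" remote so the v*-shopify-* refs the scripts grep for are visible.

#!/bin/bash
# smoke_release_targets.sh -- hypothetical local check, not included in this patch.
set -euo pipefail

# The helpers inspect tags and remote refs, so fetch them first, mirroring
# the `git fetch --tags origin` done in get_next_release_shopify.sh.
git fetch --tags origin

# Same extraction of the current major version that both helpers perform.
current_major=$(grep versionName ./go/vt/servenv/version.go | awk '{print $4}' | tr -d '"' | sed 's/\..*//')

next=$(./tools/get_next_release_shopify.sh)
prev=$(./tools/get_previous_release_shopify.sh)

echo "current major release: ${current_major}"
echo "next target ref:       ${next:-<none>}"
echo "previous target ref:   ${prev:-<none>}"

An empty "previous" result is expected when no release exists for the prior major version; the upgrade/downgrade workflows handle that case by setting skip='true' when the release output is empty.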