diff --git a/.github/workflows/cluster_endtoend_21.yml b/.github/workflows/cluster_endtoend_21.yml index 5d703099a3d..e6bccee0004 100644 --- a/.github/workflows/cluster_endtoend_21.yml +++ b/.github/workflows/cluster_endtoend_21.yml @@ -121,6 +121,13 @@ jobs: # install JUnit report formatter go install github.com/vitessio/go-junit-report@HEAD + - name: Install Minio + if: steps.skip-workflow.outputs.skip-workflow == 'false' + run: | + wget https://dl.min.io/server/minio/release/linux-amd64/minio + chmod +x minio + mv minio /usr/local/bin + - name: Setup launchable dependencies if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' run: | diff --git a/.github/workflows/cluster_endtoend_vtgate_plantests.yml b/.github/workflows/cluster_endtoend_vtgate_plantests.yml new file mode 100644 index 00000000000..93ed6a55f05 --- /dev/null +++ b/.github/workflows/cluster_endtoend_vtgate_plantests.yml @@ -0,0 +1,166 @@ +# DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows" + +name: Cluster (vtgate_plantests) +on: [push, pull_request] +concurrency: + group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (vtgate_plantests)') + cancel-in-progress: true + +permissions: read-all + +env: + LAUNCHABLE_ORGANIZATION: "vitess" + LAUNCHABLE_WORKSPACE: "vitess-app" + GITHUB_PR_HEAD_SHA: "${{ github.event.pull_request.head.sha }}" + +jobs: + build: + name: Run endtoend tests on Cluster (vtgate_plantests) + runs-on: ubuntu-24.04 + + steps: + - name: Skip CI + run: | + if [[ "${{contains( github.event.pull_request.labels.*.name, 'Skip CI')}}" == "true" ]]; then + echo "skipping CI due to the 'Skip CI' label" + exit 1 + fi + + - name: Check if workflow needs to be skipped + id: skip-workflow + run: | + skip='false' + if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! 
"${{github.ref}}" =~ "refs/tags/.*" ]]; then + skip='true' + fi + echo Skip ${skip} + echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + + PR_DATA=$(curl -s\ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + + - name: Check out code + if: steps.skip-workflow.outputs.skip-workflow == 'false' + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + + - name: Check for changes in relevant files + if: steps.skip-workflow.outputs.skip-workflow == 'false' + uses: dorny/paths-filter@ebc4d7e9ebcb0b1eb21480bb8f43113e996ac77a # v3.0.1 + id: changes + with: + token: '' + filters: | + end_to_end: + - 'go/**/*.go' + - 'go/vt/sidecardb/**/*.sql' + - 'go/test/endtoend/onlineddl/vrepl_suite/**' + - 'test.go' + - 'Makefile' + - 'build.env' + - 'go.sum' + - 'go.mod' + - 'proto/*.proto' + - 'tools/**' + - 'config/**' + - 'bootstrap.sh' + - '.github/workflows/cluster_endtoend_vtgate_plantests.yml' + + - name: Set up Go + if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2 + with: + go-version-file: go.mod + + - name: Set up python + if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1 + + - name: Tune the OS + if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + run: | + # Limit local port range to not use ports that overlap with server side + # ports that we listen on. + sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535" + # Increase the asynchronous non-blocking I/O. More information at https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_use_native_aio + echo "fs.aio-max-nr = 1048576" | sudo tee -a /etc/sysctl.conf + sudo sysctl -p /etc/sysctl.conf + + - name: Get dependencies + if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + run: | + + # Get key to latest MySQL repo + sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C + # Setup MySQL 8.0 + wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.33-1_all.deb + echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections + sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config* + sudo apt-get -qq update + + # We have to install this old version of libaio1 in case we end up testing with MySQL 5.7. See also: + # https://bugs.launchpad.net/ubuntu/+source/libaio/+bug/2067501 + curl -L -O http://mirrors.kernel.org/ubuntu/pool/main/liba/libaio/libaio1_0.3.112-13build1_amd64.deb + sudo dpkg -i libaio1_0.3.112-13build1_amd64.deb + # libtinfo5 is also needed for older MySQL 5.7 builds. 
+ curl -L -O http://mirrors.kernel.org/ubuntu/pool/universe/n/ncurses/libtinfo5_6.3-2ubuntu0.1_amd64.deb + sudo dpkg -i libtinfo5_6.3-2ubuntu0.1_amd64.deb + + # Install everything else we need, and configure + sudo apt-get -qq install -y mysql-server mysql-shell mysql-client make unzip g++ etcd-client etcd-server curl git wget eatmydata xz-utils libncurses6 + + sudo service mysql stop + sudo service etcd stop + sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/ + sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld + go mod download + + # install JUnit report formatter + go install github.com/vitessio/go-junit-report@HEAD + + - name: Setup launchable dependencies + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' + run: | + # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up + pip3 install --user launchable~=1.0 > /dev/null + + # verify that launchable setup is all correct. + launchable verify || true + + # Tell Launchable about the build you are producing and testing + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . + + - name: Run cluster endtoend test + if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + timeout-minutes: 45 + run: | + # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file + # which musn't be more than 107 characters long. + export VTDATAROOT="/tmp/" + source build.env + + set -exo pipefail + + # run the tests however you normally do, then produce a JUnit XML file + eatmydata -- go run test.go -docker=false -follow -shard vtgate_plantests | tee -a output.txt | go-junit-report -set-exit-code > report.xml + + - name: Print test output and Record test result in launchable if PR is not a draft + if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() + run: | + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + fi + + # print test output + cat output.txt + + - name: Test Summary + if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() + uses: test-summary/action@31493c76ec9e7aa675f1585d3ed6f1da69269a86 # v2.4 + with: + paths: "report.xml" + show: "fail" diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index b879ec8c144..de90706da27 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -53,7 +53,7 @@ jobs: # Upload the results as artifacts (optional). Commenting out will disable uploads of run results in SARIF # format to the repository Actions tab. - name: "Upload artifact" - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882 # v4.4.3 with: name: SARIF file path: results.sarif @@ -62,6 +62,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard (optional). 
# Commenting out will disable upload of results to your repo's Code Scanning dashboard - name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@v3 + uses: github/codeql-action/upload-sarif@f09c1c0a94de965c15400f5634aa42fac8fb8f88 # v3.27.5 with: sarif_file: results.sarif diff --git a/.github/workflows/update_golang_version.yml b/.github/workflows/update_golang_version.yml index 09bbf451451..ad5dceeafa5 100644 --- a/.github/workflows/update_golang_version.yml +++ b/.github/workflows/update_golang_version.yml @@ -39,7 +39,7 @@ jobs: if [ ${{ matrix.branch }} == "main" ]; then go run ./go/tools/go-upgrade/go-upgrade.go upgrade --main --allow-major-upgrade - else if [ ${{ matrix.branch }} == "release-21.0" ]; then + elif [ ${{ matrix.branch }} == "release-21.0" ]; then go run ./go/tools/go-upgrade/go-upgrade.go upgrade else go run ./go/tools/go-upgrade/go-upgrade.go upgrade --workflow-update=false diff --git a/Makefile b/Makefile index 385a3238d47..13f7fde28e0 100644 --- a/Makefile +++ b/Makefile @@ -286,7 +286,7 @@ $(PROTO_GO_OUTS): minimaltools install_protoc-gen-go proto/*.proto # This rule builds the bootstrap images for all flavors. DOCKER_IMAGES_FOR_TEST = mysql80 percona80 DOCKER_IMAGES = common $(DOCKER_IMAGES_FOR_TEST) -BOOTSTRAP_VERSION=38 +BOOTSTRAP_VERSION=39 ensure_bootstrap_version: find docker/ -type f -exec sed -i "s/^\(ARG bootstrap_version\)=.*/\1=${BOOTSTRAP_VERSION}/" {} \; sed -i 's/\(^.*flag.String(\"bootstrap-version\",\) *\"[^\"]\+\"/\1 \"${BOOTSTRAP_VERSION}\"/' test.go diff --git a/build.env b/build.env index 5c51b4dc660..58abbf41d1b 100755 --- a/build.env +++ b/build.env @@ -17,7 +17,7 @@ source ./tools/shell_functions.inc go version >/dev/null 2>&1 || fail "Go is not installed or is not in \$PATH. See https://vitess.io/contributing/build-from-source for install instructions." -goversion_min 1.23.3 || echo "Go version reported: `go version`. Version 1.23.3+ recommended. See https://vitess.io/contributing/build-from-source for install instructions." +goversion_min 1.23.4 || echo "Go version reported: `go version`. Version 1.23.4+ recommended. See https://vitess.io/contributing/build-from-source for install instructions." mkdir -p dist mkdir -p bin diff --git a/changelog/22.0/22.0.0/summary.md b/changelog/22.0/22.0.0/summary.md index d21acf48a30..ebc0c485fc1 100644 --- a/changelog/22.0/22.0.0/summary.md +++ b/changelog/22.0/22.0.0/summary.md @@ -3,9 +3,13 @@ ### Table of Contents - **[Major Changes](#major-changes)** + - **[Deprecations and Deletions](#deprecations-and-deletions)** + - [Deprecated VTTablet Flags](#vttablet-flags) - **[RPC Changes](#rpc-changes)** - **[Prefer not promoting a replica that is currently taking a backup](#reparents-prefer-not-backing-up)** - **[VTOrc Config File Changes](#vtorc-config-file-changes)** +- **[Minor Changes](#minor-changes)** + - **[VTTablet Flags](#flags-vttablet)** ## Major Changes @@ -16,6 +20,12 @@ These are the RPC changes made in this release - 1. `GetTransactionInfo` RPC has been added to both `VtctldServer`, and `TabletManagerClient` interface. These RPCs are used to fecilitate the users in reading the state of an unresolved distributed transaction. This can be useful in debugging what went wrong and how to fix the problem. +### Deprecations and Deletions + +#### Deprecated VTTablet Flags + +- `twopc_enable` flag is deprecated. Usage of TwoPC commit will be determined by the `transaction_mode` set on VTGate via flag or session variable. 
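With `twopc_enable` deprecated, the only remaining switch for atomic distributed commits is `transaction_mode`, set either through vtgate's `--transaction_mode` flag or through the session variable of the same name. A minimal sketch of opting a single session into 2PC from a Go client (illustrative only, not part of this diff; the DSN and keyspace name are placeholders):

```go
package main

import (
	"context"
	"database/sql"
	"log"

	_ "github.com/go-sql-driver/mysql"
)

func main() {
	ctx := context.Background()

	// Placeholder DSN: point this at your vtgate's MySQL listener.
	db, err := sql.Open("mysql", "user:pass@tcp(127.0.0.1:15306)/commerce")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Pin one connection so the session variable applies to every statement we run.
	conn, err := db.Conn(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// 2PC is now selected purely by transaction_mode (flag or session variable).
	if _, err := conn.ExecContext(ctx, "set transaction_mode = 'twopc'"); err != nil {
		log.Fatal(err)
	}
	// Multi-shard transactions on this connection now take the 2PC commit path.
}
```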
+ ### Prefer not promoting a replica that is currently taking a backup Emergency reparents now prefer not promoting replicas that are currently taking backups with a backup engine other than @@ -48,3 +58,12 @@ The following fields can be dynamically changed - 13. `change-tablets-with-errant-gtid-to-drained` To upgrade to the newer version of the configuration file, first switch to using the flags in your current deployment before upgrading. Then you can switch to using the configuration file in the newer release. + + +## Minor Changes + +#### VTTablet Flags + +- `twopc_abandon_age` flag now supports values in the time.Duration format (e.g., 1s, 2m, 1h). +While the flag will continue to accept float values (interpreted as seconds) for backward compatibility, +**float inputs are deprecated** and will be removed in a future release. diff --git a/docker/bootstrap/CHANGELOG.md b/docker/bootstrap/CHANGELOG.md index 24ed5261df9..b52783d0c30 100644 --- a/docker/bootstrap/CHANGELOG.md +++ b/docker/bootstrap/CHANGELOG.md @@ -149,4 +149,8 @@ List of changes between bootstrap image versions. ## [38] - 2024-11-10 ### Changes -- Update build to golang 1.23.3 \ No newline at end of file +- Update build to golang 1.23.3 + +## [39] - 2024-12-04 +### Changes +- Update build to golang 1.23.4 \ No newline at end of file diff --git a/docker/bootstrap/Dockerfile.common b/docker/bootstrap/Dockerfile.common index f9654cc22d8..3ace19543f1 100644 --- a/docker/bootstrap/Dockerfile.common +++ b/docker/bootstrap/Dockerfile.common @@ -1,4 +1,4 @@ -FROM --platform=linux/amd64 golang:1.23.3-bullseye +FROM --platform=linux/amd64 golang:1.23.4-bullseye # Install Vitess build dependencies RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \ diff --git a/docker/lite/Dockerfile b/docker/lite/Dockerfile index 59fd9297bbf..fff1414fab6 100644 --- a/docker/lite/Dockerfile +++ b/docker/lite/Dockerfile @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -FROM --platform=linux/amd64 golang:1.23.3-bullseye AS builder +FROM --platform=linux/amd64 golang:1.23.4-bullseye AS builder # Allows docker builds to set the BUILD_NUMBER ARG BUILD_NUMBER diff --git a/docker/lite/Dockerfile.percona80 b/docker/lite/Dockerfile.percona80 index 953a42c9fea..a839fb35514 100644 --- a/docker/lite/Dockerfile.percona80 +++ b/docker/lite/Dockerfile.percona80 @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -FROM --platform=linux/amd64 golang:1.23.3-bullseye AS builder +FROM --platform=linux/amd64 golang:1.23.4-bullseye AS builder # Allows docker builds to set the BUILD_NUMBER ARG BUILD_NUMBER diff --git a/docker/vttestserver/Dockerfile.mysql80 b/docker/vttestserver/Dockerfile.mysql80 index ed5d0b78acb..74b9564ef48 100644 --- a/docker/vttestserver/Dockerfile.mysql80 +++ b/docker/vttestserver/Dockerfile.mysql80 @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-FROM --platform=linux/amd64 golang:1.23.3-bullseye AS builder +FROM --platform=linux/amd64 golang:1.23.4-bullseye AS builder # Allows docker builds to set the BUILD_NUMBER ARG BUILD_NUMBER diff --git a/go.mod b/go.mod index ccc4cc32b3b..6fd800b80ab 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module vitess.io/vitess -go 1.23.3 +go 1.23.4 require ( cloud.google.com/go/storage v1.46.0 @@ -66,13 +66,13 @@ require ( go.etcd.io/etcd/client/pkg/v3 v3.5.17 go.etcd.io/etcd/client/v3 v3.5.17 go.uber.org/mock v0.2.0 - golang.org/x/crypto v0.29.0 // indirect + golang.org/x/crypto v0.31.0 // indirect golang.org/x/mod v0.22.0 // indirect golang.org/x/net v0.31.0 golang.org/x/oauth2 v0.24.0 - golang.org/x/sys v0.27.0 - golang.org/x/term v0.26.0 - golang.org/x/text v0.20.0 // indirect + golang.org/x/sys v0.28.0 + golang.org/x/term v0.27.0 + golang.org/x/text v0.21.0 // indirect golang.org/x/time v0.8.0 golang.org/x/tools v0.27.0 google.golang.org/api v0.205.0 @@ -108,7 +108,7 @@ require ( github.com/xlab/treeprint v1.2.0 go.uber.org/goleak v1.3.0 golang.org/x/exp v0.0.0-20241108190413-2d47ceb2692f - golang.org/x/sync v0.9.0 + golang.org/x/sync v0.10.0 gonum.org/v1/gonum v0.14.0 modernc.org/sqlite v1.33.1 ) diff --git a/go.sum b/go.sum index 167a620da78..3a7c5e07599 100644 --- a/go.sum +++ b/go.sum @@ -624,8 +624,8 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201016220609-9e8e0b390897/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.29.0 h1:L5SG1JTTXupVV3n6sUqMTeWbjAyfPwoda2DLX8J8FrQ= -golang.org/x/crypto v0.29.0/go.mod h1:+F4F4N5hv6v38hfeYwTdx20oUvLLc+QfrE9Ax9HtgRg= +golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U= +golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20241108190413-2d47ceb2692f h1:XdNn9LlyWAhLVp6P/i8QYBW+hlyhrhei9uErw2B5GJo= golang.org/x/exp v0.0.0-20241108190413-2d47ceb2692f/go.mod h1:D5SMRVC3C2/4+F/DB1wZsLRnSNimn2Sp/NPsCrsv8ak= @@ -673,8 +673,8 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.9.0 h1:fEo0HyrW1GIgZdpbhCRO0PkJajUS5H9IFUztCgEo2jQ= -golang.org/x/sync v0.9.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= +golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -712,17 +712,17 @@ golang.org/x/sys v0.0.0-20220627191245-f75cf1eec38b/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys 
v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.27.0 h1:wBqf8DvsY9Y/2P8gAfPDEYNuS30J4lPHJxXSb/nJZ+s= -golang.org/x/sys v0.27.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= +golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.26.0 h1:WEQa6V3Gja/BhNxg540hBip/kkaYtRg3cxg4oXSw4AU= -golang.org/x/term v0.26.0/go.mod h1:Si5m1o57C5nBNQo5z1iq+XDijt21BDBDp2bK0QI8e3E= +golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q= +golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.20.0 h1:gK/Kv2otX8gz+wn7Rmb3vT96ZwuoxnQlY+HlJVj7Qug= -golang.org/x/text v0.20.0/go.mod h1:D4IsuqiFMhST5bX19pQ9ikHC2GsaKyk/oF+pn3ducp4= +golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= +golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= golang.org/x/time v0.8.0 h1:9i3RxcPv3PZnitoVGMPDKZSq1xW1gK1Xy3ArNOGZfEg= golang.org/x/time v0.8.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= diff --git a/go/flags/endtoend/vtcombo.txt b/go/flags/endtoend/vtcombo.txt index c10d045ecbd..7348fcf1753 100644 --- a/go/flags/endtoend/vtcombo.txt +++ b/go/flags/endtoend/vtcombo.txt @@ -395,8 +395,7 @@ Flags: --transaction_limit_per_user float Maximum number of transactions a single user is allowed to use at any time, represented as fraction of -transaction_cap. (default 0.4) --transaction_mode string SINGLE: disallow multi-db transactions, MULTI: allow multi-db transactions with best effort commit, TWOPC: allow multi-db transactions with 2pc commit (default "MULTI") --truncate-error-len int truncate errors sent to client if they are longer than this value (0 means do not truncate) - --twopc_abandon_age float time in seconds. Any unresolved transaction older than this time will be sent to the coordinator to be resolved. - --twopc_enable if the flag is on, 2pc is enabled. Other 2pc flags must be supplied. + --twopc_abandon_age time.Duration Any unresolved transaction older than this time will be sent to the coordinator to be resolved. NOTE: Providing time as seconds (float64) is deprecated. Use time.Duration format (e.g., '1s', '2m', '1h'). 
(default 15m0s) --tx-throttler-config string Synonym to -tx_throttler_config (default "target_replication_lag_sec:2 max_replication_lag_sec:10 initial_rate:100 max_increase:1 emergency_decrease:0.5 min_duration_between_increases_sec:40 max_duration_between_increases_sec:62 min_duration_between_decreases_sec:20 spread_backlog_across_sec:20 age_bad_rate_after_sec:180 bad_rate_increase:0.1 max_rate_approach_threshold:0.9") --tx-throttler-default-priority int Default priority assigned to queries that lack priority information (default 100) --tx-throttler-dry-run If present, the transaction throttler only records metrics about requests received and throttled, but does not actually throttle any requests. diff --git a/go/flags/endtoend/vttablet.txt b/go/flags/endtoend/vttablet.txt index f79db05f327..e4c6fde66af 100644 --- a/go/flags/endtoend/vttablet.txt +++ b/go/flags/endtoend/vttablet.txt @@ -395,8 +395,7 @@ Flags: --transaction_limit_by_subcomponent Include CallerID.subcomponent when considering who the user is for the purpose of transaction limit. --transaction_limit_by_username Include VTGateCallerID.username when considering who the user is for the purpose of transaction limit. (default true) --transaction_limit_per_user float Maximum number of transactions a single user is allowed to use at any time, represented as fraction of -transaction_cap. (default 0.4) - --twopc_abandon_age float time in seconds. Any unresolved transaction older than this time will be sent to the coordinator to be resolved. - --twopc_enable if the flag is on, 2pc is enabled. Other 2pc flags must be supplied. + --twopc_abandon_age time.Duration Any unresolved transaction older than this time will be sent to the coordinator to be resolved. NOTE: Providing time as seconds (float64) is deprecated. Use time.Duration format (e.g., '1s', '2m', '1h'). (default 15m0s) --tx-throttler-config string Synonym to -tx_throttler_config (default "target_replication_lag_sec:2 max_replication_lag_sec:10 initial_rate:100 max_increase:1 emergency_decrease:0.5 min_duration_between_increases_sec:40 max_duration_between_increases_sec:62 min_duration_between_decreases_sec:20 spread_backlog_across_sec:20 age_bad_rate_after_sec:180 bad_rate_increase:0.1 max_rate_approach_threshold:0.9") --tx-throttler-default-priority int Default priority assigned to queries that lack priority information (default 100) --tx-throttler-dry-run If present, the transaction throttler only records metrics about requests received and throttled, but does not actually throttle any requests. diff --git a/go/flagutil/float_to_duration.go b/go/flagutil/float_to_duration.go new file mode 100644 index 00000000000..fe07d42bfaa --- /dev/null +++ b/go/flagutil/float_to_duration.go @@ -0,0 +1,71 @@ +/* +Copyright 2024 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package flagutil + +import ( + "errors" + "strconv" + "time" + + "github.com/spf13/pflag" +) + +// FloatOrDuration is a flag that can be set with either a float64 (interpreted as seconds) or a time.Duration +// The parsed value is stored in the Duration field, and the target pointer is updated with the parsed value +type FloatOrDuration struct { + Target *time.Duration // Pointer to the external duration + Duration time.Duration // Stores the current parsed value +} + +// String returns the current value as a string +func (f *FloatOrDuration) String() string { + return f.Duration.String() +} + +// Set parses the input and updates the duration +func (f *FloatOrDuration) Set(value string) error { + // Try to parse as float64 first (interpreted as seconds) + if floatVal, err := strconv.ParseFloat(value, 64); err == nil { + f.Duration = time.Duration(floatVal * float64(time.Second)) + *f.Target = f.Duration // Update the target pointer + return nil + } + + // Try to parse as time.Duration + if duration, err := time.ParseDuration(value); err == nil { + f.Duration = duration + *f.Target = f.Duration // Update the target pointer + return nil + } + + return errors.New("value must be either a float64 (interpreted as seconds) or a valid time.Duration") +} + +// Type returns the type description +func (f *FloatOrDuration) Type() string { + return "time.Duration" +} + +// FloatDuration defines a flag with the specified name, default value, and usage string and binds it to a time.Duration variable. +func FloatDuration(fs *pflag.FlagSet, p *time.Duration, name string, defaultValue time.Duration, usage string) { + *p = defaultValue + fd := FloatOrDuration{ + Target: p, + Duration: defaultValue, + } + fs.Var(&fd, name, usage) +} diff --git a/go/flagutil/float_to_duration_test.go b/go/flagutil/float_to_duration_test.go new file mode 100644 index 00000000000..50b3a36e94a --- /dev/null +++ b/go/flagutil/float_to_duration_test.go @@ -0,0 +1,104 @@ +/* +Copyright 2024 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package flagutil + +import ( + "testing" + "time" + + "github.com/spf13/pflag" + "github.com/stretchr/testify/assert" +) + +// TestFloatOrDuration_ValidFloat64Input verifies that a float64 input +// (representing seconds) is correctly converted to a time.Duration. +func TestFloatOrDuration_ValidFloat64Input(t *testing.T) { + var duration time.Duration + fs := pflag.NewFlagSet("test", pflag.ContinueOnError) + + FloatDuration(fs, &duration, "test_flag", 10*time.Second, "Test flag") + err := fs.Parse([]string{"--test_flag=2"}) + assert.NoError(t, err) + assert.Equal(t, 2*time.Second, duration) +} + +// TestFloatOrDuration_ValidDurationInput verifies that a valid time.Duration +// input (e.g., "1m30s") is parsed and stored correctly. 
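As a companion to the unit tests in this file, here is a registration sketch showing how a binary might bind a flag such as `--twopc_abandon_age` through `FloatDuration`. It is illustrative only; the actual vttablet call site is not part of this diff, and the default and usage string are taken from the flag help shown above.

```go
package main

import (
	"fmt"
	"time"

	"github.com/spf13/pflag"

	"vitess.io/vitess/go/flagutil"
)

func main() {
	fs := pflag.NewFlagSet("vttablet-sketch", pflag.ExitOnError)

	var abandonAge time.Duration
	// Both "--twopc_abandon_age=3600" (float seconds, deprecated) and
	// "--twopc_abandon_age=1h" (time.Duration) end up in abandonAge.
	flagutil.FloatDuration(fs, &abandonAge, "twopc_abandon_age", 15*time.Minute,
		"Any unresolved transaction older than this time will be sent to the coordinator to be resolved.")

	_ = fs.Parse([]string{"--twopc_abandon_age=1h"})
	fmt.Println(abandonAge) // 1h0m0s
}
```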
+func TestFloatOrDuration_ValidDurationInput(t *testing.T) { + var duration time.Duration + fs := pflag.NewFlagSet("test", pflag.ContinueOnError) + + FloatDuration(fs, &duration, "test_flag", 10*time.Second, "Test flag") + err := fs.Parse([]string{"--test_flag=1m30s"}) + assert.NoError(t, err) + assert.Equal(t, 90*time.Second, duration) +} + +// TestFloatOrDuration_DefaultValue ensures that the default value is correctly +// assigned to the duration when the flag is not provided. +func TestFloatOrDuration_DefaultValue(t *testing.T) { + var duration time.Duration + fs := pflag.NewFlagSet("test", pflag.ContinueOnError) + + defaultValue := 15 * time.Second + FloatDuration(fs, &duration, "test_flag", defaultValue, "Test flag") + err := fs.Parse([]string{}) + assert.NoError(t, err) + assert.Equal(t, defaultValue, duration) +} + +// TestFloatOrDuration_InvalidInput verifies that an invalid input string +// results in an appropriate error. +func TestFloatOrDuration_InvalidInput(t *testing.T) { + var duration time.Duration + fs := pflag.NewFlagSet("test", pflag.ContinueOnError) + + FloatDuration(fs, &duration, "test_flag", 10*time.Second, "Test flag") + err := fs.Parse([]string{"--test_flag=invalid"}) + assert.Error(t, err) + assert.Contains(t, err.Error(), "value must be either a float64 (interpreted as seconds) or a valid time.Duration") +} + +// TestFloatOrDuration_MultipleFlags ensures that multiple FloatDuration flags +// can coexist and maintain independent values. +func TestFloatOrDuration_MultipleFlags(t *testing.T) { + var duration1, duration2 time.Duration + fs := pflag.NewFlagSet("test", pflag.ContinueOnError) + + FloatDuration(fs, &duration1, "flag1", 10*time.Second, "First test flag") + FloatDuration(fs, &duration2, "flag2", 20*time.Second, "Second test flag") + + err := fs.Parse([]string{"--flag1=2.5", "--flag2=1m"}) + assert.NoError(t, err) + assert.Equal(t, 2500*time.Millisecond, duration1) + assert.Equal(t, 1*time.Minute, duration2) +} + +// TestFloatOrDuration_HelpMessage verifies that the help message includes +// the correct flag name, description, and default value. +func TestFloatOrDuration_HelpMessage(t *testing.T) { + var duration time.Duration + fs := pflag.NewFlagSet("test", pflag.ContinueOnError) + + defaultValue := 10 * time.Second + FloatDuration(fs, &duration, "test_flag", defaultValue, "Test flag with default value") + + helpOutput := fs.FlagUsages() + assert.Contains(t, helpOutput, "--test_flag time.Duration") + assert.Contains(t, helpOutput, "Test flag with default value") + assert.Contains(t, helpOutput, "(default 10s)") +} diff --git a/go/mysql/binlog_event_compression.go b/go/mysql/binlog_event_compression.go index 7455218d4b5..37d28431087 100644 --- a/go/mysql/binlog_event_compression.go +++ b/go/mysql/binlog_event_compression.go @@ -98,6 +98,11 @@ type TransactionPayload struct { payload []byte reader io.Reader iterator func() (BinlogEvent, error) + // StreamingContents tells the consumer that we are streaming the + // decompressed payload and they should also stream the events. + // This ensures that neither the producer nor the consumer are + // holding the entire payload's contents in memory. + StreamingContents bool } // IsTransactionPayload returns true if a compressed transaction @@ -292,6 +297,8 @@ func (tp *TransactionPayload) decompress() error { } compressedTrxPayloadsUsingStream.Add(1) tp.reader = streamDecoder + // Signal the consumer to also stream the contents. 
+ tp.StreamingContents = true return nil } diff --git a/go/mysql/binlog_event_mysql56_test.go b/go/mysql/binlog_event_mysql56_test.go index 861d98c6e4f..5844779de63 100644 --- a/go/mysql/binlog_event_mysql56_test.go +++ b/go/mysql/binlog_event_mysql56_test.go @@ -186,9 +186,11 @@ func TestMysql56DecodeTransactionPayload(t *testing.T) { } } if tc.inMemory { + require.False(t, tp.StreamingContents) require.Equal(t, memDecodingCnt+1, compressedTrxPayloadsInMem.Get()) require.Equal(t, tc.want, eventStrs) } else { + require.True(t, tp.StreamingContents) require.Equal(t, streamDecodingCnt+1, compressedTrxPayloadsUsingStream.Get()) require.Len(t, eventStrs, len(tc.want)) totalSize := 0 diff --git a/go/test/endtoend/backup/s3/s3_builtin_test.go b/go/test/endtoend/backup/s3/s3_builtin_test.go new file mode 100644 index 00000000000..9c83e5f8fec --- /dev/null +++ b/go/test/endtoend/backup/s3/s3_builtin_test.go @@ -0,0 +1,434 @@ +/* +Copyright 2024 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package s3 + +import ( + "context" + "io" + "os" + "os/exec" + "path" + "strconv" + "strings" + "testing" + "time" + + "log" + + "github.com/minio/minio-go" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/mysql/fakesqldb" + "vitess.io/vitess/go/mysql/replication" + "vitess.io/vitess/go/vt/logutil" + "vitess.io/vitess/go/vt/mysqlctl" + "vitess.io/vitess/go/vt/mysqlctl/backupstats" + "vitess.io/vitess/go/vt/mysqlctl/blackbox" + "vitess.io/vitess/go/vt/mysqlctl/s3backupstorage" +) + +/* + These tests use Minio to emulate AWS S3. It allows us to run the tests on + GitHub Actions without having the security burden of carrying out AWS secrets + in our GitHub repo. + + Minio is almost a drop-in replacement for AWS S3, if you want to run these + tests against a true AWS S3 Bucket, you can do so by not running the TestMain + and setting the 'AWS_*' environment variables to your own values. + + This package and file are named 'endtoend', but it's more an integration test. + However, we don't want our CI infra to mistake this for a regular unit-test, + hence the rename to 'endtoend'. 
+*/ + +func TestMain(m *testing.M) { + f := func() int { + minioPath, err := exec.LookPath("minio") + if err != nil { + log.Fatalf("minio binary not found: %v", err) + } + + dataDir, err := os.MkdirTemp("", "") + if err != nil { + log.Fatalf("could not create temporary directory: %v", err) + } + err = os.MkdirAll(dataDir, 0755) + if err != nil { + log.Fatalf("failed to create MinIO data directory: %v", err) + } + + cmd := exec.Command(minioPath, "server", dataDir, "--console-address", ":9001") + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + + err = cmd.Start() + if err != nil { + log.Fatalf("failed to start MinIO: %v", err) + } + defer func() { + cmd.Process.Kill() + }() + + // Local MinIO credentials + accessKey := "minioadmin" + secretKey := "minioadmin" + minioEndpoint := "http://localhost:9000" + bucketName := "test-bucket" + region := "us-east-1" + + client, err := minio.New("localhost:9000", accessKey, secretKey, false) + if err != nil { + log.Fatalf("failed to create MinIO client: %v", err) + } + waitForMinio(client) + + err = client.MakeBucket(bucketName, region) + if err != nil { + log.Fatalf("failed to create test bucket: %v", err) + } + + // Same env variables that are used between AWS S3 and Minio + os.Setenv("AWS_ACCESS_KEY_ID", accessKey) + os.Setenv("AWS_SECRET_ACCESS_KEY", secretKey) + os.Setenv("AWS_BUCKET", bucketName) + os.Setenv("AWS_ENDPOINT", minioEndpoint) + os.Setenv("AWS_REGION", region) + + return m.Run() + } + + os.Exit(f()) +} + +func waitForMinio(client *minio.Client) { + for i := 0; i < 60; i++ { + _, err := client.ListBuckets() + if err == nil { + return + } + time.Sleep(1 * time.Second) + } + log.Fatalf("MinIO server did not become ready in time") +} + +func checkEnvForS3(t *testing.T) { + // We never want to skip the tests if we are running on CI. + // We will always run these tests on CI with the TestMain and Minio. + // There should not be a need to skip the tests due to missing ENV vars. 
+ if os.Getenv("GITHUB_ACTIONS") != "" { + return + } + + envRequired := []string{ + "AWS_ACCESS_KEY_ID", + "AWS_SECRET_ACCESS_KEY", + "AWS_BUCKET", + "AWS_ENDPOINT", + "AWS_REGION", + } + + var missing []string + for _, s := range envRequired { + if os.Getenv(s) == "" { + missing = append(missing, s) + } + } + if len(missing) > 0 { + t.Skipf("missing AWS secrets to run this test: please set: %s", strings.Join(missing, ", ")) + } +} + +type backupTestConfig struct { + concurrency int + addFileReturnFn func(s3 *s3backupstorage.S3BackupHandle, ctx context.Context, filename string, filesize int64, firstAdd bool) (io.WriteCloser, error) + checkCleanupError bool + expectedResult mysqlctl.BackupResult + expectedStats blackbox.StatSummary +} + +func runBackupTest(t *testing.T, cfg backupTestConfig) { + checkEnvForS3(t) + s3backupstorage.InitFlag(s3backupstorage.FakeConfig{ + Region: os.Getenv("AWS_REGION"), + Endpoint: os.Getenv("AWS_ENDPOINT"), + Bucket: os.Getenv("AWS_BUCKET"), + ForcePath: true, + }) + + ctx := context.Background() + backupRoot, keyspace, shard, ts := blackbox.SetupCluster(ctx, t, 2, 2) + + be := &mysqlctl.BuiltinBackupEngine{} + + // Configure a tight deadline to force a timeout + oldDeadline := blackbox.SetBuiltinBackupMysqldDeadline(time.Second) + defer blackbox.SetBuiltinBackupMysqldDeadline(oldDeadline) + + fakeStats := backupstats.NewFakeStats() + logger := logutil.NewMemoryLogger() + + bh, err := s3backupstorage.NewFakeS3BackupHandle(ctx, t.Name(), time.Now().Format(mysqlctl.BackupTimestampFormat), logger, fakeStats) + require.NoError(t, err) + t.Cleanup(func() { + err := bh.AbortBackup(ctx) + if cfg.checkCleanupError { + require.NoError(t, err) + } + }) + bh.AddFileReturnF = cfg.addFileReturnFn + + // Spin up a fake daemon to be used in backups. It needs to be allowed to receive: + // "STOP REPLICA", "START REPLICA", in that order. 
+ fakedb := fakesqldb.New(t) + defer fakedb.Close() + mysqld := mysqlctl.NewFakeMysqlDaemon(fakedb) + defer mysqld.Close() + mysqld.ExpectedExecuteSuperQueryList = []string{"STOP REPLICA", "START REPLICA"} + + backupResult, err := be.ExecuteBackup(ctx, mysqlctl.BackupParams{ + Logger: logger, + Mysqld: mysqld, + Cnf: &mysqlctl.Mycnf{ + InnodbDataHomeDir: path.Join(backupRoot, "innodb"), + InnodbLogGroupHomeDir: path.Join(backupRoot, "log"), + DataDir: path.Join(backupRoot, "datadir"), + }, + Concurrency: cfg.concurrency, + HookExtraEnv: map[string]string{}, + TopoServer: ts, + Keyspace: keyspace, + Shard: shard, + Stats: fakeStats, + MysqlShutdownTimeout: blackbox.MysqlShutdownTimeout, + }, bh) + + require.Equal(t, cfg.expectedResult, backupResult) + switch cfg.expectedResult { + case mysqlctl.BackupUsable: + require.NoError(t, err) + case mysqlctl.BackupUnusable, mysqlctl.BackupEmpty: + require.Error(t, err) + } + + ss := blackbox.GetStats(fakeStats) + require.Equal(t, cfg.expectedStats.DestinationCloseStats, ss.DestinationCloseStats) + require.Equal(t, cfg.expectedStats.DestinationOpenStats, ss.DestinationOpenStats) + require.Equal(t, cfg.expectedStats.DestinationWriteStats, ss.DestinationWriteStats) + require.Equal(t, cfg.expectedStats.SourceCloseStats, ss.SourceCloseStats) + require.Equal(t, cfg.expectedStats.SourceOpenStats, ss.SourceOpenStats) + require.Equal(t, cfg.expectedStats.SourceReadStats, ss.SourceReadStats) +} + +func TestExecuteBackupS3FailEachFileOnce(t *testing.T) { + runBackupTest(t, backupTestConfig{ + concurrency: 2, + + // Modify the fake S3 storage to always fail when trying to write a file for the first time + addFileReturnFn: s3backupstorage.FailFirstWrite, + checkCleanupError: true, + expectedResult: mysqlctl.BackupUsable, + + // Even though we have 4 files, we expect '8' for all the values below as we re-do every file once. + expectedStats: blackbox.StatSummary{ + DestinationCloseStats: 8, + DestinationOpenStats: 8, + DestinationWriteStats: 8, + SourceCloseStats: 8, + SourceOpenStats: 8, + SourceReadStats: 8, + }, + }) +} + +func TestExecuteBackupS3FailEachFileTwice(t *testing.T) { + runBackupTest(t, backupTestConfig{ + concurrency: 1, + + // Modify the fake S3 storage to always fail when trying to write a file for the first time + addFileReturnFn: s3backupstorage.FailAllWrites, + + // If the code works as expected by this test, no files will be created on S3 and AbortBackup will + // fail, for this reason, let's not check the error return. + // We still call AbortBackup anyway in the event that the code is not behaving as expected and some + // files were created by mistakes, we delete them. + checkCleanupError: false, + expectedResult: mysqlctl.BackupUnusable, + + // All stats here must be equal to 5, we have four files, we go each of them, they all fail. + // The logic decides to retry each file once, we retry the first failed file, it fails again + // but since it has reached the limit of retries, the backup will fail anyway, thus we don't + // retry the other 3 files. 
+ expectedStats: blackbox.StatSummary{ + DestinationCloseStats: 5, + DestinationOpenStats: 5, + DestinationWriteStats: 5, + SourceCloseStats: 5, + SourceOpenStats: 5, + SourceReadStats: 5, + }, + }) +} + +type restoreTestConfig struct { + readFileReturnFn func(s3 *s3backupstorage.S3BackupHandle, ctx context.Context, filename string, firstRead bool) (io.ReadCloser, error) + expectSuccess bool + expectedStats blackbox.StatSummary +} + +func runRestoreTest(t *testing.T, cfg restoreTestConfig) { + checkEnvForS3(t) + s3backupstorage.InitFlag(s3backupstorage.FakeConfig{ + Region: os.Getenv("AWS_REGION"), + Endpoint: os.Getenv("AWS_ENDPOINT"), + Bucket: os.Getenv("AWS_BUCKET"), + ForcePath: true, + }) + + ctx := context.Background() + backupRoot, keyspace, shard, ts := blackbox.SetupCluster(ctx, t, 2, 2) + + fakeStats := backupstats.NewFakeStats() + logger := logutil.NewMemoryLogger() + + be := &mysqlctl.BuiltinBackupEngine{} + dirName := time.Now().Format(mysqlctl.BackupTimestampFormat) + name := t.Name() + "-" + strconv.Itoa(int(time.Now().Unix())) + bh, err := s3backupstorage.NewFakeS3BackupHandle(ctx, name, dirName, logger, fakeStats) + require.NoError(t, err) + t.Cleanup(func() { + require.NoError(t, bh.AbortBackup(ctx)) + }) + + // Spin up a fake daemon to be used in backups. It needs to be allowed to receive: + // "STOP REPLICA", "START REPLICA", in that order. + fakedb := fakesqldb.New(t) + defer fakedb.Close() + mysqld := mysqlctl.NewFakeMysqlDaemon(fakedb) + defer mysqld.Close() + mysqld.ExpectedExecuteSuperQueryList = []string{"STOP REPLICA", "START REPLICA"} + + backupResult, err := be.ExecuteBackup(ctx, mysqlctl.BackupParams{ + Logger: logutil.NewConsoleLogger(), + Mysqld: mysqld, + Cnf: &mysqlctl.Mycnf{ + InnodbDataHomeDir: path.Join(backupRoot, "innodb"), + InnodbLogGroupHomeDir: path.Join(backupRoot, "log"), + DataDir: path.Join(backupRoot, "datadir"), + }, + Stats: backupstats.NewFakeStats(), + Concurrency: 1, + HookExtraEnv: map[string]string{}, + TopoServer: ts, + Keyspace: keyspace, + Shard: shard, + MysqlShutdownTimeout: blackbox.MysqlShutdownTimeout, + }, bh) + + require.NoError(t, err) + require.Equal(t, mysqlctl.BackupUsable, backupResult) + + // Backup is done, let's move on to the restore now + + restoreBh, err := s3backupstorage.NewFakeS3RestoreHandle(ctx, name, logger, fakeStats) + require.NoError(t, err) + restoreBh.ReadFileReturnF = cfg.readFileReturnFn + + fakedb = fakesqldb.New(t) + defer fakedb.Close() + mysqld = mysqlctl.NewFakeMysqlDaemon(fakedb) + defer mysqld.Close() + mysqld.ExpectedExecuteSuperQueryList = []string{"STOP REPLICA", "START REPLICA"} + + restoreParams := mysqlctl.RestoreParams{ + Cnf: &mysqlctl.Mycnf{ + InnodbDataHomeDir: path.Join(backupRoot, "innodb"), + InnodbLogGroupHomeDir: path.Join(backupRoot, "log"), + DataDir: path.Join(backupRoot, "datadir"), + BinLogPath: path.Join(backupRoot, "binlog"), + RelayLogPath: path.Join(backupRoot, "relaylog"), + RelayLogIndexPath: path.Join(backupRoot, "relaylogindex"), + RelayLogInfoPath: path.Join(backupRoot, "relayloginfo"), + }, + Logger: logger, + Mysqld: mysqld, + Concurrency: 1, + HookExtraEnv: map[string]string{}, + DeleteBeforeRestore: false, + DbName: "test", + Keyspace: "test", + Shard: "-", + StartTime: time.Now(), + RestoreToPos: replication.Position{}, + RestoreToTimestamp: time.Time{}, + DryRun: false, + Stats: fakeStats, + MysqlShutdownTimeout: blackbox.MysqlShutdownTimeout, + } + + // Successful restore. 
+ bm, err := be.ExecuteRestore(ctx, restoreParams, restoreBh) + + if cfg.expectSuccess { + assert.NoError(t, err) + assert.NotNil(t, bm) + } else { + assert.Error(t, err) + } + + ss := blackbox.GetStats(fakeStats) + require.Equal(t, cfg.expectedStats.DestinationCloseStats, ss.DestinationCloseStats) + require.Equal(t, cfg.expectedStats.DestinationOpenStats, ss.DestinationOpenStats) + require.Equal(t, cfg.expectedStats.DestinationWriteStats, ss.DestinationWriteStats) + require.Equal(t, cfg.expectedStats.SourceCloseStats, ss.SourceCloseStats) + require.Equal(t, cfg.expectedStats.SourceOpenStats, ss.SourceOpenStats) + require.Equal(t, cfg.expectedStats.SourceReadStats, ss.SourceReadStats) +} + +func TestExecuteRestoreS3FailEachFileOnce(t *testing.T) { + runRestoreTest(t, restoreTestConfig{ + readFileReturnFn: s3backupstorage.FailFirstRead, + expectSuccess: true, + expectedStats: blackbox.StatSummary{ + DestinationCloseStats: 8, + DestinationOpenStats: 8, + DestinationWriteStats: 4, // 4, because on the first attempt, we fail to read before writing to the filesystem + SourceCloseStats: 8, + SourceOpenStats: 8, + SourceReadStats: 8, + }, + }) +} + +func TestExecuteRestoreS3FailEachFileTwice(t *testing.T) { + runRestoreTest(t, restoreTestConfig{ + readFileReturnFn: s3backupstorage.FailAllReadExpectManifest, + expectSuccess: false, + + // Everything except destination writes must be equal to 5: + // +1 for every file on the first attempt (= 4), and +1 for the first file we try for the second time. + // Since we fail early as soon as a second-attempt-file fails, we won't see a value above 5. + expectedStats: blackbox.StatSummary{ + DestinationCloseStats: 5, + DestinationOpenStats: 5, + DestinationWriteStats: 0, // 0, because on the both attempts, we fail to read before writing to the filesystem + SourceCloseStats: 5, + SourceOpenStats: 5, + SourceReadStats: 5, + }, + }) +} diff --git a/go/test/endtoend/backup/vtbackup/backup_only_test.go b/go/test/endtoend/backup/vtbackup/backup_only_test.go index 4e018986100..c7a09c70d13 100644 --- a/go/test/endtoend/backup/vtbackup/backup_only_test.go +++ b/go/test/endtoend/backup/vtbackup/backup_only_test.go @@ -58,7 +58,6 @@ func TestTabletInitialBackup(t *testing.T) { // - Take a Second Backup // - Bring up a second replica, and restore from the second backup // - list the backups, remove them - defer cluster.PanicHandler(t) waitForReplicationToCatchup([]cluster.Vttablet{*replica1, *replica2}) @@ -102,7 +101,6 @@ func TestTabletBackupOnly(t *testing.T) { // - Take a Second Backup // - Bring up a second replica, and restore from the second backup // - list the backups, remove them - defer cluster.PanicHandler(t) // Reset the tablet object values in order on init tablet in the next step. 
primary.VttabletProcess.ServingStatus = "NOT_SERVING" diff --git a/go/test/endtoend/backup/vtbackup/main_test.go b/go/test/endtoend/backup/vtbackup/main_test.go index 367956c9827..6e1840b2979 100644 --- a/go/test/endtoend/backup/vtbackup/main_test.go +++ b/go/test/endtoend/backup/vtbackup/main_test.go @@ -52,7 +52,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode, err := func() (int, error) { diff --git a/go/test/endtoend/backup/vtctlbackup/backup_utils.go b/go/test/endtoend/backup/vtctlbackup/backup_utils.go index 7c03b776c74..86b2612a044 100644 --- a/go/test/endtoend/backup/vtctlbackup/backup_utils.go +++ b/go/test/endtoend/backup/vtctlbackup/backup_utils.go @@ -405,7 +405,6 @@ func TestBackup(t *testing.T, setupType int, streamMode string, stripes int, cDe }, // } - defer cluster.PanicHandler(t) // setup cluster for the testing code, err := LaunchCluster(setupType, streamMode, stripes, cDetails) require.Nilf(t, err, "setup failed with status code %d", code) @@ -1507,7 +1506,6 @@ func getLastBackup(t *testing.T) string { func TestBackupEngineSelector(t *testing.T) { defer setDefaultCommonArgs() - defer cluster.PanicHandler(t) // launch the custer with xtrabackup as the default engine code, err := LaunchCluster(XtraBackup, "xbstream", 0, &CompressionDetails{CompressorEngineName: "pgzip"}) @@ -1548,7 +1546,6 @@ func TestBackupEngineSelector(t *testing.T) { func TestRestoreAllowedBackupEngines(t *testing.T) { defer setDefaultCommonArgs() - defer cluster.PanicHandler(t) backupMsg := "right after xtrabackup backup" diff --git a/go/test/endtoend/backup/vtctlbackup/pitr_test_framework.go b/go/test/endtoend/backup/vtctlbackup/pitr_test_framework.go index 4c84c3e63bc..1b04dbc7aab 100644 --- a/go/test/endtoend/backup/vtctlbackup/pitr_test_framework.go +++ b/go/test/endtoend/backup/vtctlbackup/pitr_test_framework.go @@ -95,7 +95,6 @@ func waitForReplica(t *testing.T, replicaIndex int) int { // in between, it makes writes to the database, and takes notes: what data was available in what backup. // It then restores each and every one of those backups, in random order, and expects to find the specific data associated with the backup. func ExecTestIncrementalBackupAndRestoreToPos(t *testing.T, tcase *PITRTestCase) { - defer cluster.PanicHandler(t) t.Run(tcase.Name, func(t *testing.T) { // setup cluster for the testing @@ -339,7 +338,6 @@ func ExecTestIncrementalBackupAndRestoreToPos(t *testing.T, tcase *PITRTestCase) // ExecTestIncrementalBackupAndRestoreToPos func ExecTestIncrementalBackupAndRestoreToTimestamp(t *testing.T, tcase *PITRTestCase) { - defer cluster.PanicHandler(t) var lastInsertedRowTimestamp time.Time insertRowOnPrimary := func(t *testing.T, hint string) { @@ -605,7 +603,6 @@ func ExecTestIncrementalBackupAndRestoreToTimestamp(t *testing.T, tcase *PITRTes // Specifically, it's designed to test how incremental backups are taken by interleaved replicas, so that they successfully build on // one another. 
func ExecTestIncrementalBackupOnTwoTablets(t *testing.T, tcase *PITRTestCase) { - defer cluster.PanicHandler(t) t.Run(tcase.Name, func(t *testing.T) { // setup cluster for the testing diff --git a/go/test/endtoend/cellalias/cell_alias_test.go b/go/test/endtoend/cellalias/cell_alias_test.go index 07e8d687f4e..6e8f901a245 100644 --- a/go/test/endtoend/cellalias/cell_alias_test.go +++ b/go/test/endtoend/cellalias/cell_alias_test.go @@ -90,7 +90,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitcode, err := func() (int, error) { @@ -232,7 +231,6 @@ func TestMain(m *testing.M) { } func TestAlias(t *testing.T) { - defer cluster.PanicHandler(t) insertInitialValues(t) defer deleteInitialValues(t) @@ -296,7 +294,6 @@ func TestAlias(t *testing.T) { } func TestAddAliasWhileVtgateUp(t *testing.T) { - defer cluster.PanicHandler(t) insertInitialValues(t) defer deleteInitialValues(t) diff --git a/go/test/endtoend/cluster/cluster_process.go b/go/test/endtoend/cluster/cluster_process.go index b89e007b4f2..2ac46a8d145 100644 --- a/go/test/endtoend/cluster/cluster_process.go +++ b/go/test/endtoend/cluster/cluster_process.go @@ -1043,7 +1043,6 @@ func (cluster *LocalProcessCluster) StreamTabletHealthUntil(ctx context.Context, // Teardown brings down the cluster by invoking teardown for individual processes func (cluster *LocalProcessCluster) Teardown() { - PanicHandler(nil) cluster.mx.Lock() defer cluster.mx.Unlock() if cluster.teardownCompleted { diff --git a/go/test/endtoend/cluster/cluster_util.go b/go/test/endtoend/cluster/cluster_util.go index 061e632dde7..cfc2071a746 100644 --- a/go/test/endtoend/cluster/cluster_util.go +++ b/go/test/endtoend/cluster/cluster_util.go @@ -126,15 +126,6 @@ func VerifyRowsInTablet(t *testing.T, vttablet *Vttablet, ksName string, expecte VerifyRowsInTabletForTable(t, vttablet, ksName, expectedRows, "vt_insert_test") } -// PanicHandler handles the panic in the testcase. -func PanicHandler(t testing.TB) { - err := recover() - if t == nil { - return - } - require.Nilf(t, err, "panic occured in testcase %v", t.Name()) -} - // ListBackups Lists back preset in shard func (cluster LocalProcessCluster) ListBackups(shardKsName string) ([]string, error) { output, err := cluster.VtctldClientProcess.ExecuteCommandWithOutput("GetBackups", shardKsName) diff --git a/go/test/endtoend/cluster/vtgate_process.go b/go/test/endtoend/cluster/vtgate_process.go index c01f7c6e93b..1290156a1cd 100644 --- a/go/test/endtoend/cluster/vtgate_process.go +++ b/go/test/endtoend/cluster/vtgate_process.go @@ -132,6 +132,7 @@ func (vtgate *VtgateProcess) Setup() (err error) { return err } vtgate.proc.Stderr = errFile + vtgate.ErrorLog = errFile.Name() vtgate.proc.Env = append(vtgate.proc.Env, os.Environ()...) 
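The `vtgate.ErrorLog = errFile.Name()` assignment above records the path of vtgate's captured stderr. How that field gets consumed is not shown in this diff; a hypothetical helper along these lines could read it back in a test:

```go
package example

import (
	"os"
	"strings"

	"vitess.io/vitess/go/test/endtoend/cluster"
)

// vtgateLogContains is a hypothetical helper (not part of this diff): it reads
// the stderr file recorded in ErrorLog and reports whether it mentions substr.
func vtgateLogContains(vtgate *cluster.VtgateProcess, substr string) (bool, error) {
	data, err := os.ReadFile(vtgate.ErrorLog)
	if err != nil {
		return false, err
	}
	return strings.Contains(string(data), substr), nil
}
```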
vtgate.proc.Env = append(vtgate.proc.Env, DefaultVttestEnv) diff --git a/go/test/endtoend/clustertest/add_keyspace_test.go b/go/test/endtoend/clustertest/add_keyspace_test.go index edee87d035e..b8422b52eb3 100644 --- a/go/test/endtoend/clustertest/add_keyspace_test.go +++ b/go/test/endtoend/clustertest/add_keyspace_test.go @@ -61,7 +61,6 @@ primary key (id) ) func TestAddKeyspace(t *testing.T) { - defer cluster.PanicHandler(t) if err := clusterInstance.StartKeyspace(*testKeyspace, []string{"-80", "80-"}, 0, false); err != nil { log.Errorf("failed to AddKeyspace %v: %v", *testKeyspace, err) t.Fatal(err) diff --git a/go/test/endtoend/clustertest/etcd_test.go b/go/test/endtoend/clustertest/etcd_test.go index 5239d960c47..f47a002a3cf 100644 --- a/go/test/endtoend/clustertest/etcd_test.go +++ b/go/test/endtoend/clustertest/etcd_test.go @@ -24,12 +24,9 @@ import ( "github.com/stretchr/testify/require" clientv3 "go.etcd.io/etcd/client/v3" - - "vitess.io/vitess/go/test/endtoend/cluster" ) func TestEtcdServer(t *testing.T) { - defer cluster.PanicHandler(t) // Confirm the basic etcd cluster health. etcdHealthURL := fmt.Sprintf("http://%s:%d/health", clusterInstance.Hostname, clusterInstance.TopoPort) diff --git a/go/test/endtoend/clustertest/main_test.go b/go/test/endtoend/clustertest/main_test.go index 35da40a3edb..3fc2524208b 100644 --- a/go/test/endtoend/clustertest/main_test.go +++ b/go/test/endtoend/clustertest/main_test.go @@ -60,7 +60,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { diff --git a/go/test/endtoend/clustertest/vtctld_test.go b/go/test/endtoend/clustertest/vtctld_test.go index c61f7820bb7..18d06cf3299 100644 --- a/go/test/endtoend/clustertest/vtctld_test.go +++ b/go/test/endtoend/clustertest/vtctld_test.go @@ -30,8 +30,6 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - - "vitess.io/vitess/go/test/endtoend/cluster" ) var ( @@ -44,7 +42,6 @@ var ( ) func TestVtctldProcess(t *testing.T) { - defer cluster.PanicHandler(t) url := fmt.Sprintf("http://%s:%d/api/keyspaces/", clusterInstance.Hostname, clusterInstance.VtctldHTTPPort) testURL(t, url, "keyspace url") diff --git a/go/test/endtoend/clustertest/vtgate_test.go b/go/test/endtoend/clustertest/vtgate_test.go index 2f72682a391..264b292f482 100644 --- a/go/test/endtoend/clustertest/vtgate_test.go +++ b/go/test/endtoend/clustertest/vtgate_test.go @@ -32,11 +32,9 @@ import ( "github.com/stretchr/testify/require" "vitess.io/vitess/go/mysql" - "vitess.io/vitess/go/test/endtoend/cluster" ) func TestVtgateProcess(t *testing.T) { - defer cluster.PanicHandler(t) verifyVtgateVariables(t, clusterInstance.VtgateProcess.VerifyURL) ctx := context.Background() conn, err := mysql.Connect(ctx, &vtParams) diff --git a/go/test/endtoend/clustertest/vttablet_test.go b/go/test/endtoend/clustertest/vttablet_test.go index 5e7d5e27182..86fc2a6983c 100644 --- a/go/test/endtoend/clustertest/vttablet_test.go +++ b/go/test/endtoend/clustertest/vttablet_test.go @@ -25,12 +25,9 @@ import ( "testing" "github.com/stretchr/testify/require" - - "vitess.io/vitess/go/test/endtoend/cluster" ) func TestVttabletProcess(t *testing.T) { - defer cluster.PanicHandler(t) firstTabletPort := clusterInstance.Keyspaces[0].Shards[0].Vttablets[0].HTTPPort testURL(t, fmt.Sprintf("http://localhost:%d/debug/vars/", firstTabletPort), "tablet debug var url") resp, err := http.Get(fmt.Sprintf("http://localhost:%d/debug/vars", firstTabletPort)) @@ -48,7 +45,6 @@ func 
TestVttabletProcess(t *testing.T) { } func TestDeleteTablet(t *testing.T) { - defer cluster.PanicHandler(t) primary := clusterInstance.Keyspaces[0].Shards[0].PrimaryTablet() require.NotNil(t, primary) _, err := clusterInstance.VtctldClientProcess.ExecuteCommandWithOutput("DeleteTablets", "--allow-primary", primary.Alias) diff --git a/go/test/endtoend/encryption/encryptedreplication/encrypted_replication_test.go b/go/test/endtoend/encryption/encryptedreplication/encrypted_replication_test.go index 7dea6cf525f..460ae310d7f 100644 --- a/go/test/endtoend/encryption/encryptedreplication/encrypted_replication_test.go +++ b/go/test/endtoend/encryption/encryptedreplication/encrypted_replication_test.go @@ -42,7 +42,6 @@ var ( // This test makes sure that we can use SSL replication with Vitess func TestSecure(t *testing.T) { - defer cluster.PanicHandler(t) testReplicationBase(t, true) testReplicationBase(t, false) } diff --git a/go/test/endtoend/encryption/encryptedtransport/encrypted_transport_test.go b/go/test/endtoend/encryption/encryptedtransport/encrypted_transport_test.go index 1363e07b2cd..86c847125a7 100644 --- a/go/test/endtoend/encryption/encryptedtransport/encrypted_transport_test.go +++ b/go/test/endtoend/encryption/encryptedtransport/encrypted_transport_test.go @@ -102,7 +102,6 @@ var ( ) func TestSecureTransport(t *testing.T) { - defer cluster.PanicHandler(t) flag.Parse() // initialize cluster diff --git a/go/test/endtoend/keyspace/keyspace_test.go b/go/test/endtoend/keyspace/keyspace_test.go index 2a665c66214..f65301b9bb4 100644 --- a/go/test/endtoend/keyspace/keyspace_test.go +++ b/go/test/endtoend/keyspace/keyspace_test.go @@ -81,7 +81,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { @@ -167,7 +166,6 @@ func checkDurabilityPolicy(t *testing.T, durabilityPolicy string) { } func TestGetSrvKeyspaceNames(t *testing.T) { - defer cluster.PanicHandler(t) data, err := clusterForKSTest.VtctldClientProcess.ExecuteCommandWithOutput("GetSrvKeyspaceNames", cell) require.Nil(t, err) @@ -180,7 +178,6 @@ func TestGetSrvKeyspaceNames(t *testing.T) { } func TestGetSrvKeyspacePartitions(t *testing.T) { - defer cluster.PanicHandler(t) shardedSrvKeyspace := getSrvKeyspace(t, cell, keyspaceShardedName) otherShardRefFound := false for _, partition := range shardedSrvKeyspace.Partitions { @@ -209,20 +206,17 @@ func TestGetSrvKeyspacePartitions(t *testing.T) { } func TestShardNames(t *testing.T) { - defer cluster.PanicHandler(t) output, err := clusterForKSTest.VtctldClientProcess.GetSrvKeyspaces(keyspaceShardedName, cell) require.NoError(t, err) require.NotNil(t, output[cell], "no srvkeyspace for cell %s", cell) } func TestGetKeyspace(t *testing.T) { - defer cluster.PanicHandler(t) _, err := clusterForKSTest.VtctldClientProcess.GetKeyspace(keyspaceUnshardedName) require.Nil(t, err) } func TestDeleteKeyspace(t *testing.T) { - defer cluster.PanicHandler(t) _ = clusterForKSTest.VtctldClientProcess.CreateKeyspace("test_delete_keyspace", sidecar.DefaultName) _ = clusterForKSTest.VtctldClientProcess.ExecuteCommand("CreateShard", "test_delete_keyspace/0") _ = clusterForKSTest.InitTablet(&cluster.Vttablet{ @@ -353,7 +347,6 @@ func TestDeleteKeyspace(t *testing.T) { } */ func TestShardCountForAllKeyspaces(t *testing.T) { - defer cluster.PanicHandler(t) testShardCountForKeyspace(t, keyspaceUnshardedName, 1) testShardCountForKeyspace(t, keyspaceShardedName, 2) } @@ -370,7 +363,6 @@ func testShardCountForKeyspace(t *testing.T, keyspace string, count 
int) { } func TestShardNameForAllKeyspaces(t *testing.T) { - defer cluster.PanicHandler(t) testShardNameForKeyspace(t, keyspaceUnshardedName, []string{"test_ks_unsharded"}) testShardNameForKeyspace(t, keyspaceShardedName, []string{"-80", "80-"}) } @@ -389,7 +381,6 @@ func testShardNameForKeyspace(t *testing.T, keyspace string, shardNames []string } func TestKeyspaceToShardName(t *testing.T) { - defer cluster.PanicHandler(t) var id []byte srvKeyspace := getSrvKeyspace(t, cell, keyspaceShardedName) diff --git a/go/test/endtoend/messaging/main_test.go b/go/test/endtoend/messaging/main_test.go index 49477ebe631..c654869316b 100644 --- a/go/test/endtoend/messaging/main_test.go +++ b/go/test/endtoend/messaging/main_test.go @@ -104,7 +104,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitcode, err := func() (int, error) { diff --git a/go/test/endtoend/messaging/message_test.go b/go/test/endtoend/messaging/message_test.go index 7e1190c16bb..e91a8dcc335 100644 --- a/go/test/endtoend/messaging/message_test.go +++ b/go/test/endtoend/messaging/message_test.go @@ -375,7 +375,6 @@ func TestUnsharded(t *testing.T) { // TestReparenting checks the client connection count after reparenting. func TestReparenting(t *testing.T) { - defer cluster.PanicHandler(t) name := "sharded_message" ctx := context.Background() @@ -435,7 +434,6 @@ func TestReparenting(t *testing.T) { // TestConnection validate the connection count and message streaming. func TestConnection(t *testing.T) { - defer cluster.PanicHandler(t) name := "sharded_message" @@ -494,7 +492,6 @@ func TestConnection(t *testing.T) { } func testMessaging(t *testing.T, name, ks string) { - defer cluster.PanicHandler(t) ctx := context.Background() stream, err := VtgateGrpcConn(ctx, clusterInstance) require.Nil(t, err) diff --git a/go/test/endtoend/mysqlctl/mysqlctl_test.go b/go/test/endtoend/mysqlctl/mysqlctl_test.go index f93724fa4a8..070114da420 100644 --- a/go/test/endtoend/mysqlctl/mysqlctl_test.go +++ b/go/test/endtoend/mysqlctl/mysqlctl_test.go @@ -40,7 +40,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { @@ -139,7 +138,6 @@ func initCluster(shardNames []string, totalTabletsRequired int) { } func TestRestart(t *testing.T) { - defer cluster.PanicHandler(t) err := primaryTablet.MysqlctlProcess.Stop() require.NoError(t, err) primaryTablet.MysqlctlProcess.CleanupFiles(primaryTablet.TabletUID) @@ -148,7 +146,6 @@ func TestRestart(t *testing.T) { } func TestAutoDetect(t *testing.T) { - defer cluster.PanicHandler(t) err := clusterInstance.Keyspaces[0].Shards[0].Vttablets[0].VttabletProcess.Setup() require.NoError(t, err) diff --git a/go/test/endtoend/mysqlctld/mysqlctld_test.go b/go/test/endtoend/mysqlctld/mysqlctld_test.go index beb155830e2..432beb0c6d5 100644 --- a/go/test/endtoend/mysqlctld/mysqlctld_test.go +++ b/go/test/endtoend/mysqlctld/mysqlctld_test.go @@ -44,7 +44,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { @@ -141,7 +140,6 @@ func initCluster(shardNames []string, totalTabletsRequired int) error { } func TestRestart(t *testing.T) { - defer cluster.PanicHandler(t) err := primaryTablet.MysqlctldProcess.Stop() require.Nil(t, err) require.Truef(t, primaryTablet.MysqlctldProcess.WaitForMysqlCtldShutdown(), "Mysqlctld has not stopped...") @@ -151,7 +149,6 @@ func TestRestart(t *testing.T) { } func TestAutoDetect(t *testing.T) { - defer cluster.PanicHandler(t) err := 
clusterInstance.Keyspaces[0].Shards[0].Vttablets[0].VttabletProcess.Setup() require.Nil(t, err, "error should be nil") diff --git a/go/test/endtoend/mysqlserver/main_test.go b/go/test/endtoend/mysqlserver/main_test.go index 18b169e33d7..20da69e18e8 100644 --- a/go/test/endtoend/mysqlserver/main_test.go +++ b/go/test/endtoend/mysqlserver/main_test.go @@ -61,7 +61,6 @@ END; ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() // setting grpc max size diff --git a/go/test/endtoend/mysqlserver/mysql_server_test.go b/go/test/endtoend/mysqlserver/mysql_server_test.go index 6b691582c66..ee6e973593b 100644 --- a/go/test/endtoend/mysqlserver/mysql_server_test.go +++ b/go/test/endtoend/mysqlserver/mysql_server_test.go @@ -35,14 +35,12 @@ import ( "vitess.io/vitess/go/mysql/sqlerror" "vitess.io/vitess/go/mysql" - "vitess.io/vitess/go/test/endtoend/cluster" _ "github.com/go-sql-driver/mysql" ) // TestMultiStmt checks that multiStatements=True and multiStatements=False work properly. func TestMultiStatement(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() // connect database with multiStatements=True @@ -70,7 +68,6 @@ func TestMultiStatement(t *testing.T) { // TestLargeComment add large comment in insert stmt and validate the insert process. func TestLargeComment(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() conn, err := mysql.Connect(ctx, &vtParams) @@ -89,7 +86,6 @@ func TestLargeComment(t *testing.T) { // TestInsertLargerThenGrpcLimit insert blob larger then grpc limit and verify the error. func TestInsertLargerThenGrpcLimit(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() @@ -109,7 +105,6 @@ func TestInsertLargerThenGrpcLimit(t *testing.T) { // TestTimeout executes sleep(5) with query_timeout of 1 second, and verifies the error. func TestTimeout(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() conn, err := mysql.Connect(ctx, &vtParams) @@ -125,7 +120,6 @@ func TestTimeout(t *testing.T) { // TestInvalidField tries to fetch invalid column and verifies the error. func TestInvalidField(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() conn, err := mysql.Connect(ctx, &vtParams) @@ -141,7 +135,6 @@ func TestInvalidField(t *testing.T) { // TestWarnings validates the behaviour of SHOW WARNINGS. func TestWarnings(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() conn, err := mysql.Connect(ctx, &vtParams) @@ -183,7 +176,6 @@ func TestWarnings(t *testing.T) { // TestSelectWithUnauthorizedUser verifies that an unauthorized user // is not able to read from the table. 
func TestSelectWithUnauthorizedUser(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() tmpVtParam := vtParams @@ -202,7 +194,6 @@ func TestSelectWithUnauthorizedUser(t *testing.T) { // TestPartitionedTable validates that partitioned tables are recognized by schema engine func TestPartitionedTable(t *testing.T) { - defer cluster.PanicHandler(t) tablet := clusterInstance.Keyspaces[0].Shards[0].PrimaryTablet() diff --git a/go/test/endtoend/onlineddl/flow/onlineddl_flow_test.go b/go/test/endtoend/onlineddl/flow/onlineddl_flow_test.go index 035789e4b87..ee8141860f4 100644 --- a/go/test/endtoend/onlineddl/flow/onlineddl_flow_test.go +++ b/go/test/endtoend/onlineddl/flow/onlineddl_flow_test.go @@ -120,7 +120,6 @@ const ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitcode, err := func() (int, error) { @@ -197,7 +196,6 @@ func TestMain(m *testing.M) { } func TestOnlineDDLFlow(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() require.NotNil(t, clusterInstance) diff --git a/go/test/endtoend/onlineddl/revert/onlineddl_revert_test.go b/go/test/endtoend/onlineddl/revert/onlineddl_revert_test.go index af88806fb26..a13077ef87b 100644 --- a/go/test/endtoend/onlineddl/revert/onlineddl_revert_test.go +++ b/go/test/endtoend/onlineddl/revert/onlineddl_revert_test.go @@ -137,7 +137,6 @@ type revertibleTestCase struct { } func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitcode, err := func() (int, error) { @@ -204,7 +203,6 @@ func TestMain(m *testing.M) { } func TestRevertSchemaChanges(t *testing.T) { - defer cluster.PanicHandler(t) shards = clusterInstance.Keyspaces[0].Shards require.Equal(t, 1, len(shards)) diff --git a/go/test/endtoend/onlineddl/scheduler/onlineddl_scheduler_test.go b/go/test/endtoend/onlineddl/scheduler/onlineddl_scheduler_test.go index 53a1c8137fd..5f6423b2556 100644 --- a/go/test/endtoend/onlineddl/scheduler/onlineddl_scheduler_test.go +++ b/go/test/endtoend/onlineddl/scheduler/onlineddl_scheduler_test.go @@ -239,7 +239,6 @@ func waitForMessage(t *testing.T, uuid string, messageSubstring string) { } func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitcode, err := func() (int, error) { @@ -321,7 +320,6 @@ func TestSchedulerSchemaChanges(t *testing.T) { } func testScheduler(t *testing.T) { - defer cluster.PanicHandler(t) shards = clusterInstance.Keyspaces[0].Shards require.Equal(t, 1, len(shards)) @@ -1593,7 +1591,6 @@ func testScheduler(t *testing.T) { } func testSingleton(t *testing.T) { - defer cluster.PanicHandler(t) shards = clusterInstance.Keyspaces[0].Shards require.Equal(t, 1, len(shards)) @@ -1844,7 +1841,6 @@ DROP TABLE IF EXISTS stress_test }) } func testDeclarative(t *testing.T) { - defer cluster.PanicHandler(t) shards = clusterInstance.Keyspaces[0].Shards require.Equal(t, 1, len(shards)) @@ -2516,7 +2512,6 @@ func testDeclarative(t *testing.T) { } func testForeignKeys(t *testing.T) { - defer cluster.PanicHandler(t) var ( createStatements = []string{ diff --git a/go/test/endtoend/onlineddl/vrepl/onlineddl_vrepl_test.go b/go/test/endtoend/onlineddl/vrepl/onlineddl_vrepl_test.go index 92dfa2b0c4a..a7c38527152 100644 --- a/go/test/endtoend/onlineddl/vrepl/onlineddl_vrepl_test.go +++ b/go/test/endtoend/onlineddl/vrepl/onlineddl_vrepl_test.go @@ -160,7 +160,6 @@ const ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitcode, err := func() (int, error) { @@ -224,7 +223,6 @@ func TestMain(m *testing.M) { } 
func TestVreplSchemaChanges(t *testing.T) { - defer cluster.PanicHandler(t) shards = clusterInstance.Keyspaces[0].Shards require.Equal(t, 2, len(shards)) diff --git a/go/test/endtoend/onlineddl/vrepl_stress/onlineddl_vrepl_mini_stress_test.go b/go/test/endtoend/onlineddl/vrepl_stress/onlineddl_vrepl_mini_stress_test.go index 88c145dc40c..f7b222c175d 100644 --- a/go/test/endtoend/onlineddl/vrepl_stress/onlineddl_vrepl_mini_stress_test.go +++ b/go/test/endtoend/onlineddl/vrepl_stress/onlineddl_vrepl_mini_stress_test.go @@ -159,7 +159,6 @@ func nextOpOrder() int64 { } func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitcode, err := func() (int, error) { @@ -226,7 +225,6 @@ func TestMain(m *testing.M) { } func TestVreplMiniStressSchemaChanges(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() diff --git a/go/test/endtoend/onlineddl/vrepl_stress_suite/onlineddl_vrepl_stress_suite_test.go b/go/test/endtoend/onlineddl/vrepl_stress_suite/onlineddl_vrepl_stress_suite_test.go index 85b3585beb4..1e52db38bd0 100644 --- a/go/test/endtoend/onlineddl/vrepl_stress_suite/onlineddl_vrepl_stress_suite_test.go +++ b/go/test/endtoend/onlineddl/vrepl_stress_suite/onlineddl_vrepl_stress_suite_test.go @@ -407,7 +407,6 @@ func mysqlParams() *mysql.ConnParams { } func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitcode, err := func() (int, error) { @@ -478,7 +477,6 @@ func TestMain(m *testing.M) { } func TestVreplStressSchemaChanges(t *testing.T) { - defer cluster.PanicHandler(t) shards = clusterInstance.Keyspaces[0].Shards require.Equal(t, 1, len(shards)) diff --git a/go/test/endtoend/onlineddl/vrepl_suite/onlineddl_vrepl_suite_test.go b/go/test/endtoend/onlineddl/vrepl_suite/onlineddl_vrepl_suite_test.go index 972421c96da..4a2f7f1a3ce 100644 --- a/go/test/endtoend/onlineddl/vrepl_suite/onlineddl_vrepl_suite_test.go +++ b/go/test/endtoend/onlineddl/vrepl_suite/onlineddl_vrepl_suite_test.go @@ -67,7 +67,6 @@ const ( // Use $VREPL_SUITE_TEST_FILTER environment variable to filter tests by name. func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() testsFilter = os.Getenv(testFilterEnvVar) @@ -133,7 +132,6 @@ func TestMain(m *testing.M) { } func TestVreplSuiteSchemaChanges(t *testing.T) { - defer cluster.PanicHandler(t) shards := clusterInstance.Keyspaces[0].Shards require.Equal(t, 1, len(shards)) diff --git a/go/test/endtoend/preparestmt/main_test.go b/go/test/endtoend/preparestmt/main_test.go index 018e9d266fd..0e067062c94 100644 --- a/go/test/endtoend/preparestmt/main_test.go +++ b/go/test/endtoend/preparestmt/main_test.go @@ -162,7 +162,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitcode, err := func() (int, error) { diff --git a/go/test/endtoend/preparestmt/stmt_methods_test.go b/go/test/endtoend/preparestmt/stmt_methods_test.go index 24fb58bff81..5768c6eec7a 100644 --- a/go/test/endtoend/preparestmt/stmt_methods_test.go +++ b/go/test/endtoend/preparestmt/stmt_methods_test.go @@ -27,20 +27,16 @@ import ( "github.com/icrowley/fake" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - - "vitess.io/vitess/go/test/endtoend/cluster" ) // TestSelect simple select the data without any condition. 
func TestSelect(t *testing.T) { - defer cluster.PanicHandler(t) dbo := Connect(t) defer dbo.Close() selectWhere(t, dbo, "") } func TestSelectDatabase(t *testing.T) { - defer cluster.PanicHandler(t) dbo := Connect(t) defer dbo.Close() prepare, err := dbo.Prepare("select database()") @@ -58,7 +54,6 @@ func TestSelectDatabase(t *testing.T) { // TestInsertUpdateDelete validates all insert, update and // delete method on prepared statements. func TestInsertUpdateDelete(t *testing.T) { - defer cluster.PanicHandler(t) dbo := Connect(t) defer dbo.Close() // prepare insert statement @@ -134,7 +129,6 @@ func testReplica(t *testing.T) { // testcount validates inserted rows count with expected count. func testcount(t *testing.T, dbo *sql.DB, except int) { - defer cluster.PanicHandler(t) r, err := dbo.Query("SELECT count(1) FROM " + tableName) require.Nil(t, err) @@ -148,7 +142,6 @@ func testcount(t *testing.T, dbo *sql.DB, except int) { // TestAutoIncColumns test insertion of row without passing // the value of auto increment columns (here it is id). func TestAutoIncColumns(t *testing.T) { - defer cluster.PanicHandler(t) dbo := Connect(t) defer dbo.Close() // insert a row without id @@ -227,7 +220,6 @@ func reconnectAndTest(t *testing.T) { // TestColumnParameter query database using column // parameter. func TestColumnParameter(t *testing.T) { - defer cluster.PanicHandler(t) dbo := Connect(t) defer dbo.Close() @@ -267,7 +259,6 @@ func TestColumnParameter(t *testing.T) { // TestWrongTableName query database using invalid // tablename and validate error. func TestWrongTableName(t *testing.T) { - defer cluster.PanicHandler(t) dbo := Connect(t) defer dbo.Close() execWithError(t, dbo, []uint16{1146}, "select * from teseting_table;") @@ -319,7 +310,6 @@ func getStringToString(x sql.NullString) string { } func TestSelectDBA(t *testing.T) { - defer cluster.PanicHandler(t) dbo := Connect(t) defer dbo.Close() @@ -381,7 +371,6 @@ func TestSelectDBA(t *testing.T) { } func TestSelectLock(t *testing.T) { - defer cluster.PanicHandler(t) dbo := Connect(t) defer dbo.Close() @@ -417,7 +406,6 @@ func TestSelectLock(t *testing.T) { } func TestShowColumns(t *testing.T) { - defer cluster.PanicHandler(t) dbo := Connect(t) defer dbo.Close() @@ -438,7 +426,6 @@ func TestShowColumns(t *testing.T) { } func TestBinaryColumn(t *testing.T) { - defer cluster.PanicHandler(t) dbo := Connect(t, "interpolateParams=false") defer dbo.Close() diff --git a/go/test/endtoend/recovery/pitr/shardedpitr_test.go b/go/test/endtoend/recovery/pitr/shardedpitr_test.go index f2a76662918..3bb2399737e 100644 --- a/go/test/endtoend/recovery/pitr/shardedpitr_test.go +++ b/go/test/endtoend/recovery/pitr/shardedpitr_test.go @@ -126,7 +126,6 @@ var ( // - asserting that restoring to restoreTime2 (going from 2 shards to 2 shards with past time) is working, it will assert for both shards // - asserting that restoring to restoreTime3 is working, we should get complete data after restoring, as we have in existing shards. 
func TestPITRRecovery(t *testing.T) { - defer cluster.PanicHandler(nil) initializeCluster(t) defer clusterInstance.Teardown() @@ -525,7 +524,6 @@ func launchRecoveryTablet(t *testing.T, tablet *cluster.Vttablet, binlogServer * tablet.MysqlctlProcess = *mysqlctlProcess extraArgs := []string{"--db-credentials-file", dbCredentialFile} tablet.MysqlctlProcess.InitDBFile = initDBFileWithPassword - tablet.VttabletProcess.DbPassword = mysqlPassword tablet.MysqlctlProcess.ExtraArgs = extraArgs err = tablet.MysqlctlProcess.Start() require.NoError(t, err) @@ -545,6 +543,7 @@ func launchRecoveryTablet(t *testing.T, tablet *cluster.Vttablet, binlogServer * clusterInstance.VtTabletExtraArgs, clusterInstance.DefaultCharset) tablet.Alias = tablet.VttabletProcess.TabletPath + tablet.VttabletProcess.DbPassword = mysqlPassword tablet.VttabletProcess.SupportsBackup = true tablet.VttabletProcess.Keyspace = restoreKeyspaceName tablet.VttabletProcess.ExtraArgs = []string{ diff --git a/go/test/endtoend/recovery/unshardedrecovery/recovery.go b/go/test/endtoend/recovery/unshardedrecovery/recovery.go index 1ebb7c2647f..ae6b152271b 100644 --- a/go/test/endtoend/recovery/unshardedrecovery/recovery.go +++ b/go/test/endtoend/recovery/unshardedrecovery/recovery.go @@ -72,7 +72,6 @@ var ( // TestMainImpl creates cluster for unsharded recovery testing. func TestMainImpl(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode, err := func() (int, error) { @@ -201,7 +200,6 @@ func TestMainImpl(m *testing.M) { // // 7. check that vtgate queries work correctly func TestRecoveryImpl(t *testing.T) { - defer cluster.PanicHandler(t) defer tabletsTeardown() verifyInitialReplication(t) diff --git a/go/test/endtoend/reparent/emergencyreparent/ers_test.go b/go/test/endtoend/reparent/emergencyreparent/ers_test.go index 584bccfdfb7..0d2eb8935d2 100644 --- a/go/test/endtoend/reparent/emergencyreparent/ers_test.go +++ b/go/test/endtoend/reparent/emergencyreparent/ers_test.go @@ -31,7 +31,6 @@ import ( ) func TestTrivialERS(t *testing.T) { - defer cluster.PanicHandler(t) clusterInstance := utils.SetupReparentCluster(t, "semi_sync") defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets @@ -56,7 +55,6 @@ func TestTrivialERS(t *testing.T) { } func TestReparentIgnoreReplicas(t *testing.T) { - defer cluster.PanicHandler(t) clusterInstance := utils.SetupReparentCluster(t, "semi_sync") defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets @@ -98,7 +96,6 @@ func TestReparentIgnoreReplicas(t *testing.T) { } func TestReparentDownPrimary(t *testing.T) { - defer cluster.PanicHandler(t) clusterInstance := utils.SetupReparentCluster(t, "semi_sync") defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets @@ -134,7 +131,6 @@ func TestReparentDownPrimary(t *testing.T) { } func TestReparentNoChoiceDownPrimary(t *testing.T) { - defer cluster.PanicHandler(t) clusterInstance := utils.SetupReparentCluster(t, "semi_sync") defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets @@ -170,7 +166,6 @@ func TestReparentNoChoiceDownPrimary(t *testing.T) { func TestSemiSyncSetupCorrectly(t *testing.T) { t.Run("semi-sync enabled", func(t *testing.T) { - defer cluster.PanicHandler(t) clusterInstance := utils.SetupReparentCluster(t, "semi_sync") defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets @@ -198,7 
+193,6 @@ func TestSemiSyncSetupCorrectly(t *testing.T) { }) t.Run("semi-sync disabled", func(t *testing.T) { - defer cluster.PanicHandler(t) clusterInstance := utils.SetupReparentCluster(t, "none") defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets @@ -228,7 +222,6 @@ func TestSemiSyncSetupCorrectly(t *testing.T) { // TestERSPromoteRdonly tests that we never end up promoting a rdonly instance as the primary func TestERSPromoteRdonly(t *testing.T) { - defer cluster.PanicHandler(t) clusterInstance := utils.SetupReparentCluster(t, "semi_sync") defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets @@ -256,7 +249,6 @@ func TestERSPromoteRdonly(t *testing.T) { // TestERSPreventCrossCellPromotion tests that we promote a replica in the same cell as the previous primary if prevent cross cell promotion flag is set func TestERSPreventCrossCellPromotion(t *testing.T) { - defer cluster.PanicHandler(t) clusterInstance := utils.SetupReparentCluster(t, "semi_sync") defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets @@ -279,7 +271,6 @@ func TestERSPreventCrossCellPromotion(t *testing.T) { // TestPullFromRdonly tests that if a rdonly tablet is the most advanced, then our promoted primary should have // caught up to it by pulling transactions from it func TestPullFromRdonly(t *testing.T) { - defer cluster.PanicHandler(t) clusterInstance := utils.SetupReparentCluster(t, "semi_sync") defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets @@ -351,7 +342,6 @@ func TestPullFromRdonly(t *testing.T) { // replicas which do not have any replication status and also succeeds if the io thread // is stopped on the primary elect. func TestNoReplicationStatusAndIOThreadStopped(t *testing.T) { - defer cluster.PanicHandler(t) clusterInstance := utils.SetupReparentCluster(t, "semi_sync") defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets @@ -451,7 +441,6 @@ func TestERSForInitialization(t *testing.T) { } func TestRecoverWithMultipleFailures(t *testing.T) { - defer cluster.PanicHandler(t) clusterInstance := utils.SetupReparentCluster(t, "semi_sync") defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets @@ -479,7 +468,6 @@ func TestRecoverWithMultipleFailures(t *testing.T) { // TestERSFailFast tests that ERS will fail fast if it cannot find any tablet which can be safely promoted instead of promoting // a tablet and hanging while inserting a row in the reparent journal on getting semi-sync ACKs func TestERSFailFast(t *testing.T) { - defer cluster.PanicHandler(t) clusterInstance := utils.SetupReparentCluster(t, "semi_sync") defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets @@ -519,7 +507,6 @@ func TestERSFailFast(t *testing.T) { // TestReplicationStopped checks that ERS ignores the tablets that have sql thread stopped. // If there are more than 1, we also fail. 
func TestReplicationStopped(t *testing.T) { - defer cluster.PanicHandler(t) clusterInstance := utils.SetupReparentCluster(t, "semi_sync") defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets diff --git a/go/test/endtoend/reparent/newfeaturetest/reparent_test.go b/go/test/endtoend/reparent/newfeaturetest/reparent_test.go index b6f34af7294..a041ca04c68 100644 --- a/go/test/endtoend/reparent/newfeaturetest/reparent_test.go +++ b/go/test/endtoend/reparent/newfeaturetest/reparent_test.go @@ -36,7 +36,6 @@ import ( // The test takes down the vttablets of the primary and a rdonly tablet and runs ERS with the // default values of remote_operation_timeout, lock-timeout flags and wait_replicas_timeout subflag. func TestRecoverWithMultipleVttabletFailures(t *testing.T) { - defer cluster.PanicHandler(t) clusterInstance := utils.SetupReparentCluster(t, "semi_sync") defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets @@ -68,7 +67,6 @@ func TestRecoverWithMultipleVttabletFailures(t *testing.T) { // and ERS succeeds. func TestSingleReplicaERS(t *testing.T) { // Set up a cluster with none durability policy - defer cluster.PanicHandler(t) clusterInstance := utils.SetupReparentCluster(t, "none") defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets @@ -104,7 +102,6 @@ func TestSingleReplicaERS(t *testing.T) { // TestTabletRestart tests that a running tablet can be restarted and everything is still fine func TestTabletRestart(t *testing.T) { - defer cluster.PanicHandler(t) clusterInstance := utils.SetupReparentCluster(t, "semi_sync") defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets @@ -117,7 +114,6 @@ func TestTabletRestart(t *testing.T) { // Tests ensures that ChangeTabletType works even when semi-sync plugins are not loaded. func TestChangeTypeWithoutSemiSync(t *testing.T) { - defer cluster.PanicHandler(t) clusterInstance := utils.SetupReparentCluster(t, "none") defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets @@ -163,7 +159,6 @@ func TestChangeTypeWithoutSemiSync(t *testing.T) { // TestERSWithWriteInPromoteReplica tests that ERS doesn't fail even if there is a // write that happens when PromoteReplica is called. 
func TestERSWithWriteInPromoteReplica(t *testing.T) { - defer cluster.PanicHandler(t) clusterInstance := utils.SetupReparentCluster(t, "semi_sync") defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets @@ -181,7 +176,6 @@ func TestERSWithWriteInPromoteReplica(t *testing.T) { } func TestBufferingWithMultipleDisruptions(t *testing.T) { - defer cluster.PanicHandler(t) clusterInstance := utils.SetupShardedReparentCluster(t, "semi_sync") defer utils.TeardownCluster(clusterInstance) diff --git a/go/test/endtoend/reparent/plannedreparent/reparent_range_based_test.go b/go/test/endtoend/reparent/plannedreparent/reparent_range_based_test.go index 2d89893569d..91471b1cebb 100644 --- a/go/test/endtoend/reparent/plannedreparent/reparent_range_based_test.go +++ b/go/test/endtoend/reparent/plannedreparent/reparent_range_based_test.go @@ -28,7 +28,6 @@ import ( ) func TestReparentGracefulRangeBased(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() utils.ShardName = "0000000000000000-ffffffffffffffff" diff --git a/go/test/endtoend/reparent/plannedreparent/reparent_test.go b/go/test/endtoend/reparent/plannedreparent/reparent_test.go index d3907b0bc5b..94e37d715f4 100644 --- a/go/test/endtoend/reparent/plannedreparent/reparent_test.go +++ b/go/test/endtoend/reparent/plannedreparent/reparent_test.go @@ -36,7 +36,6 @@ import ( ) func TestPrimaryToSpareStateChangeImpossible(t *testing.T) { - defer cluster.PanicHandler(t) clusterInstance := utils.SetupReparentCluster(t, "semi_sync") defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets @@ -48,7 +47,6 @@ func TestPrimaryToSpareStateChangeImpossible(t *testing.T) { } func TestReparentCrossCell(t *testing.T) { - defer cluster.PanicHandler(t) clusterInstance := utils.SetupReparentCluster(t, "semi_sync") defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets @@ -62,7 +60,6 @@ func TestReparentCrossCell(t *testing.T) { } func TestReparentGraceful(t *testing.T) { - defer cluster.PanicHandler(t) clusterInstance := utils.SetupReparentCluster(t, "semi_sync") defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets @@ -85,7 +82,6 @@ func TestReparentGraceful(t *testing.T) { // TestPRSWithDrainedLaggingTablet tests that PRS succeeds even if we have a lagging drained tablet func TestPRSWithDrainedLaggingTablet(t *testing.T) { - defer cluster.PanicHandler(t) clusterInstance := utils.SetupReparentCluster(t, "semi_sync") defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets @@ -112,7 +108,6 @@ func TestPRSWithDrainedLaggingTablet(t *testing.T) { } func TestReparentReplicaOffline(t *testing.T) { - defer cluster.PanicHandler(t) clusterInstance := utils.SetupReparentCluster(t, "semi_sync") defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets @@ -130,7 +125,6 @@ func TestReparentReplicaOffline(t *testing.T) { } func TestReparentAvoid(t *testing.T) { - defer cluster.PanicHandler(t) clusterInstance := utils.SetupReparentCluster(t, "semi_sync") defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets @@ -178,14 +172,12 @@ func TestReparentAvoid(t *testing.T) { } func TestReparentFromOutside(t *testing.T) { - defer cluster.PanicHandler(t) clusterInstance := utils.SetupReparentCluster(t, "semi_sync") defer 
utils.TeardownCluster(clusterInstance) reparentFromOutside(t, clusterInstance, false) } func TestReparentFromOutsideWithNoPrimary(t *testing.T) { - defer cluster.PanicHandler(t) clusterInstance := utils.SetupReparentCluster(t, "semi_sync") defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets @@ -285,7 +277,6 @@ func reparentFromOutside(t *testing.T, clusterInstance *cluster.LocalProcessClus } func TestReparentWithDownReplica(t *testing.T) { - defer cluster.PanicHandler(t) clusterInstance := utils.SetupReparentCluster(t, "semi_sync") defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets @@ -332,7 +323,6 @@ func TestReparentWithDownReplica(t *testing.T) { } func TestChangeTypeSemiSync(t *testing.T) { - defer cluster.PanicHandler(t) clusterInstance := utils.SetupReparentCluster(t, "semi_sync") defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets @@ -399,7 +389,6 @@ func TestChangeTypeSemiSync(t *testing.T) { // 1. When PRS is run with the cross_cell durability policy setup, then the semi-sync settings on all the tablets are as expected // 2. Bringing up a new vttablet should have its replication and semi-sync setup correctly without any manual intervention func TestCrossCellDurability(t *testing.T) { - defer cluster.PanicHandler(t) clusterInstance := utils.SetupReparentCluster(t, "cross_cell") defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets @@ -439,7 +428,6 @@ func TestCrossCellDurability(t *testing.T) { // TestFullStatus tests that the RPC FullStatus works as intended. func TestFullStatus(t *testing.T) { - defer cluster.PanicHandler(t) clusterInstance := utils.SetupReparentCluster(t, "semi_sync") defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets diff --git a/go/test/endtoend/reparent/prscomplex/main_test.go b/go/test/endtoend/reparent/prscomplex/main_test.go index 88e3d6c09fa..c2dafb8589f 100644 --- a/go/test/endtoend/reparent/prscomplex/main_test.go +++ b/go/test/endtoend/reparent/prscomplex/main_test.go @@ -44,7 +44,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { diff --git a/go/test/endtoend/reparent/prssettingspool/main_test.go b/go/test/endtoend/reparent/prssettingspool/main_test.go index 4364836841b..c6b59fd6372 100644 --- a/go/test/endtoend/reparent/prssettingspool/main_test.go +++ b/go/test/endtoend/reparent/prssettingspool/main_test.go @@ -43,7 +43,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { diff --git a/go/test/endtoend/reparent/semisync/semi_sync_test.go b/go/test/endtoend/reparent/semisync/semi_sync_test.go index 07cf4a7abc8..df9bf192e65 100644 --- a/go/test/endtoend/reparent/semisync/semi_sync_test.go +++ b/go/test/endtoend/reparent/semisync/semi_sync_test.go @@ -33,7 +33,6 @@ func TestSemiSyncUpgradeDowngrade(t *testing.T) { if ver != 21 { t.Skip("We only want to run this test for v21 release") } - defer cluster.PanicHandler(t) clusterInstance := utils.SetupReparentCluster(t, "semi_sync") defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets diff --git a/go/test/endtoend/schemadiff/vrepl/schemadiff_vrepl_suite_test.go b/go/test/endtoend/schemadiff/vrepl/schemadiff_vrepl_suite_test.go index b4ecb367e8b..c850e22945c 100644 --- 
a/go/test/endtoend/schemadiff/vrepl/schemadiff_vrepl_suite_test.go +++ b/go/test/endtoend/schemadiff/vrepl/schemadiff_vrepl_suite_test.go @@ -68,7 +68,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitcode, err := func() (int, error) { @@ -132,7 +131,6 @@ func TestMain(m *testing.M) { } func TestSchemadiffSchemaChanges(t *testing.T) { - defer cluster.PanicHandler(t) shards := clusterInstance.Keyspaces[0].Shards require.Equal(t, 1, len(shards)) @@ -275,7 +273,6 @@ func testSingle(t *testing.T, testName string) { } // func TestRandomSchemaChanges(t *testing.T) { -// defer cluster.PanicHandler(t) // hints := &schemadiff.DiffHints{AutoIncrementStrategy: schemadiff.AutoIncrementIgnore} // // count := 20 diff --git a/go/test/endtoend/sharded/sharded_keyspace_test.go b/go/test/endtoend/sharded/sharded_keyspace_test.go index 192355fa6ef..3e5f2b3add7 100644 --- a/go/test/endtoend/sharded/sharded_keyspace_test.go +++ b/go/test/endtoend/sharded/sharded_keyspace_test.go @@ -73,7 +73,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitcode, err := func() (int, error) { @@ -102,7 +101,6 @@ func TestMain(m *testing.M) { } func TestShardedKeyspace(t *testing.T) { - defer cluster.PanicHandler(t) shard1 := clusterInstance.Keyspaces[0].Shards[0] shard2 := clusterInstance.Keyspaces[0].Shards[1] diff --git a/go/test/endtoend/stress/stress_test.go b/go/test/endtoend/stress/stress_test.go index 30a5ee69c1a..1bf716274d4 100644 --- a/go/test/endtoend/stress/stress_test.go +++ b/go/test/endtoend/stress/stress_test.go @@ -42,7 +42,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { @@ -84,7 +83,6 @@ func TestMain(m *testing.M) { // The stressor is started on its own goroutine while the end-to-end test // is executed on the same cluster. 
func TestSimpleStressTest(t *testing.T) { - defer cluster.PanicHandler(t) cfg := stress.DefaultConfig cfg.ConnParams = &vtParams diff --git a/go/test/endtoend/tabletgateway/buffer/buffer_test_helpers.go b/go/test/endtoend/tabletgateway/buffer/buffer_test_helpers.go index ca4fe5f6094..920e2193453 100644 --- a/go/test/endtoend/tabletgateway/buffer/buffer_test_helpers.go +++ b/go/test/endtoend/tabletgateway/buffer/buffer_test_helpers.go @@ -272,7 +272,6 @@ type BufferingTest struct { } func (bt *BufferingTest) Test(t *testing.T) { - defer cluster.PanicHandler(t) clusterInstance, exitCode := bt.createCluster() if exitCode != 0 { t.Fatal("failed to start cluster") diff --git a/go/test/endtoend/tabletgateway/main_test.go b/go/test/endtoend/tabletgateway/main_test.go index 354be6969d3..cf179c49845 100644 --- a/go/test/endtoend/tabletgateway/main_test.go +++ b/go/test/endtoend/tabletgateway/main_test.go @@ -61,7 +61,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { diff --git a/go/test/endtoend/tabletgateway/vtgate_test.go b/go/test/endtoend/tabletgateway/vtgate_test.go index 1f4f8758e16..d9c87fdc7f3 100644 --- a/go/test/endtoend/tabletgateway/vtgate_test.go +++ b/go/test/endtoend/tabletgateway/vtgate_test.go @@ -40,7 +40,6 @@ import ( ) func TestVtgateHealthCheck(t *testing.T) { - defer cluster.PanicHandler(t) // Healthcheck interval on tablet is set to 1s, so sleep for 2s time.Sleep(2 * time.Second) verifyVtgateVariables(t, clusterInstance.VtgateProcess.VerifyURL) @@ -54,7 +53,6 @@ func TestVtgateHealthCheck(t *testing.T) { } func TestVtgateReplicationStatusCheck(t *testing.T) { - defer cluster.PanicHandler(t) // Healthcheck interval on tablet is set to 1s, so sleep for 2s time.Sleep(2 * time.Second) verifyVtgateVariables(t, clusterInstance.VtgateProcess.VerifyURL) @@ -104,7 +102,6 @@ func TestVtgateReplicationStatusCheck(t *testing.T) { } func TestVtgateReplicationStatusCheckWithTabletTypeChange(t *testing.T) { - defer cluster.PanicHandler(t) // Healthcheck interval on tablet is set to 1s, so sleep for 2s time.Sleep(2 * time.Second) verifyVtgateVariables(t, clusterInstance.VtgateProcess.VerifyURL) @@ -180,7 +177,6 @@ func retryNTimes(t *testing.T, maxRetries int, f func() bool) { func TestReplicaTransactions(t *testing.T) { // TODO(deepthi): this test seems to depend on previous test. Fix tearDown so that tests are independent - defer cluster.PanicHandler(t) // Healthcheck interval on tablet is set to 1s, so sleep for 2s time.Sleep(2 * time.Second) ctx := context.Background() @@ -287,7 +283,6 @@ func TestReplicaTransactions(t *testing.T) { // TestStreamingRPCStuck tests that StreamExecute calls don't get stuck on the vttablets if a client stop reading from a stream. 
func TestStreamingRPCStuck(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() vtConn, err := mysql.Connect(ctx, &vtParams) require.NoError(t, err) diff --git a/go/test/endtoend/tabletmanager/commands_test.go b/go/test/endtoend/tabletmanager/commands_test.go index d5d946a164c..67127ab740f 100644 --- a/go/test/endtoend/tabletmanager/commands_test.go +++ b/go/test/endtoend/tabletmanager/commands_test.go @@ -29,7 +29,6 @@ import ( "vitess.io/vitess/go/json2" "vitess.io/vitess/go/mysql" - "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/test/endtoend/utils" vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" @@ -44,7 +43,6 @@ var ( // TabletCommands tests the basic tablet commands func TestTabletCommands(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() conn, err := mysql.Connect(ctx, &primaryTabletParams) @@ -185,7 +183,6 @@ func assertExecuteMultiFetch(t *testing.T, qr string) { func TestHook(t *testing.T) { // test a regular program works - defer cluster.PanicHandler(t) runHookAndAssert(t, []string{ "ExecuteHook", primaryTablet.Alias, "test.sh", "--", "--flag1", "--param1=hello"}, 0, false, "") @@ -226,7 +223,6 @@ func runHookAndAssert(t *testing.T, params []string, expectedStatus int64, expec func TestShardReplicationFix(t *testing.T) { // make sure the replica is in the replication graph, 2 nodes: 1 primary, 1 replica - defer cluster.PanicHandler(t) result, err := clusterInstance.VtctldClientProcess.GetShardReplication(keyspaceName, shardName, cell) require.Nil(t, err, "error should be Nil") require.NotNil(t, result[cell], "result should not be Nil") @@ -250,7 +246,6 @@ func TestShardReplicationFix(t *testing.T) { } func TestGetSchema(t *testing.T) { - defer cluster.PanicHandler(t) res, err := clusterInstance.VtctldClientProcess.ExecuteCommandWithOutput("GetSchema", "--include-views", "--tables", "t1,v1", diff --git a/go/test/endtoend/tabletmanager/custom_rule_topo_test.go b/go/test/endtoend/tabletmanager/custom_rule_topo_test.go index 0c6e056af36..e692bf94de4 100644 --- a/go/test/endtoend/tabletmanager/custom_rule_topo_test.go +++ b/go/test/endtoend/tabletmanager/custom_rule_topo_test.go @@ -33,7 +33,6 @@ import ( func TestTopoCustomRule(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() conn, err := mysql.Connect(ctx, &primaryTabletParams) require.NoError(t, err) diff --git a/go/test/endtoend/tabletmanager/lock_unlock_test.go b/go/test/endtoend/tabletmanager/lock_unlock_test.go index 79286438698..f636f52c353 100644 --- a/go/test/endtoend/tabletmanager/lock_unlock_test.go +++ b/go/test/endtoend/tabletmanager/lock_unlock_test.go @@ -30,12 +30,10 @@ import ( "github.com/stretchr/testify/assert" "vitess.io/vitess/go/mysql" - "vitess.io/vitess/go/test/endtoend/cluster" ) // TestLockAndUnlock tests the lock ability by locking a replica and asserting it does not see changes func TestLockAndUnlock(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() conn, err := mysql.Connect(ctx, &primaryTabletParams) @@ -76,7 +74,6 @@ func TestLockAndUnlock(t *testing.T) { // TestStartReplicationUntilAfter tests by writing three rows, noting the gtid after each, and then replaying them one by one func TestStartReplicationUntilAfter(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() conn, err := mysql.Connect(ctx, &primaryTabletParams) @@ -130,7 +127,6 @@ func TestStartReplicationUntilAfter(t *testing.T) { // TestLockAndTimeout tests that the lock times out and updates can 
be seen after timeout func TestLockAndTimeout(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() primaryConn, err := mysql.Connect(ctx, &primaryTabletParams) diff --git a/go/test/endtoend/tabletmanager/main_test.go b/go/test/endtoend/tabletmanager/main_test.go index 3c2eb68df5e..b613f061522 100644 --- a/go/test/endtoend/tabletmanager/main_test.go +++ b/go/test/endtoend/tabletmanager/main_test.go @@ -79,7 +79,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { @@ -105,7 +104,6 @@ func TestMain(m *testing.M) { "--heartbeat_enable", "--health_check_interval", tabletHealthcheckRefreshInterval.String(), "--unhealthy_threshold", tabletUnhealthyThreshold.String(), - "--twopc_enable", "--twopc_abandon_age", "200", } diff --git a/go/test/endtoend/tabletmanager/primary/tablet_test.go b/go/test/endtoend/tabletmanager/primary/tablet_test.go index 297e5540fac..aaff0ff00a0 100644 --- a/go/test/endtoend/tabletmanager/primary/tablet_test.go +++ b/go/test/endtoend/tabletmanager/primary/tablet_test.go @@ -69,7 +69,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { @@ -116,7 +115,6 @@ func TestMain(m *testing.M) { } func TestRepeatedInitShardPrimary(t *testing.T) { - defer cluster.PanicHandler(t) // Test that using InitShardPrimary can go back and forth between 2 hosts. // Make replica tablet as primary @@ -155,7 +153,6 @@ func TestRepeatedInitShardPrimary(t *testing.T) { } func TestPrimaryRestartSetsPTSTimestamp(t *testing.T) { - defer cluster.PanicHandler(t) // Test that PTS timestamp is set when we restart the PRIMARY vttablet. // PTS = PrimaryTermStart. // See StreamHealthResponse.primary_term_start_timestamp for details. 
diff --git a/go/test/endtoend/tabletmanager/qps_test.go b/go/test/endtoend/tabletmanager/qps_test.go index 0611feada12..0ce41f04a63 100644 --- a/go/test/endtoend/tabletmanager/qps_test.go +++ b/go/test/endtoend/tabletmanager/qps_test.go @@ -24,12 +24,10 @@ import ( "github.com/stretchr/testify/require" "vitess.io/vitess/go/mysql" - "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/test/endtoend/utils" ) func TestQPS(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() vtParams := mysql.ConnParams{ diff --git a/go/test/endtoend/tabletmanager/replication_manager/tablet_test.go b/go/test/endtoend/tabletmanager/replication_manager/tablet_test.go index df8c1f26c4e..75c6e8d4cc8 100644 --- a/go/test/endtoend/tabletmanager/replication_manager/tablet_test.go +++ b/go/test/endtoend/tabletmanager/replication_manager/tablet_test.go @@ -73,7 +73,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { diff --git a/go/test/endtoend/tabletmanager/tablegc/tablegc_test.go b/go/test/endtoend/tabletmanager/tablegc/tablegc_test.go index 685c361cef7..b1abec3a6b7 100644 --- a/go/test/endtoend/tabletmanager/tablegc/tablegc_test.go +++ b/go/test/endtoend/tabletmanager/tablegc/tablegc_test.go @@ -83,7 +83,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { diff --git a/go/test/endtoend/tabletmanager/tablet_health_test.go b/go/test/endtoend/tabletmanager/tablet_health_test.go index bf3747fde29..061528682d2 100644 --- a/go/test/endtoend/tabletmanager/tablet_health_test.go +++ b/go/test/endtoend/tabletmanager/tablet_health_test.go @@ -39,7 +39,6 @@ import ( // TabletReshuffle test if a vttablet can be pointed at an existing mysql func TestTabletReshuffle(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() conn, err := mysql.Connect(ctx, &primaryTabletParams) @@ -92,7 +91,6 @@ func TestTabletReshuffle(t *testing.T) { func TestHealthCheck(t *testing.T) { // Add one replica that starts not initialized - defer cluster.PanicHandler(t) ctx := context.Background() clusterInstance.DisableVTOrcRecoveries(t) defer clusterInstance.EnableVTOrcRecoveries(t) @@ -200,7 +198,6 @@ func TestHealthCheck(t *testing.T) { // TestHealthCheckSchemaChangeSignal tests the tables and views, which report their schemas have changed in the output of a StreamHealth. 
func TestHealthCheckSchemaChangeSignal(t *testing.T) { // Add one replica that starts not initialized - defer cluster.PanicHandler(t) ctx := context.Background() vtParams := clusterInstance.GetVTParams(keyspaceName) @@ -381,7 +378,6 @@ func TestHealthCheckDrainedStateDoesNotShutdownQueryService(t *testing.T) { // - the query service won't be shutdown // Wait if tablet is not in service state - defer cluster.PanicHandler(t) clusterInstance.DisableVTOrcRecoveries(t) defer clusterInstance.EnableVTOrcRecoveries(t) err := rdonlyTablet.VttabletProcess.WaitForTabletStatus("SERVING") diff --git a/go/test/endtoend/tabletmanager/tablet_security_policy_test.go b/go/test/endtoend/tabletmanager/tablet_security_policy_test.go index b3b11405abb..90397a737ef 100644 --- a/go/test/endtoend/tabletmanager/tablet_security_policy_test.go +++ b/go/test/endtoend/tabletmanager/tablet_security_policy_test.go @@ -29,7 +29,6 @@ import ( ) func TestFallbackSecurityPolicy(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() mTablet := clusterInstance.NewVttabletInstance("replica", 0, "") @@ -84,7 +83,6 @@ func assertAllowedURLTest(t *testing.T, url string) { } func TestDenyAllSecurityPolicy(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() mTablet := clusterInstance.NewVttabletInstance("replica", 0, "") @@ -116,7 +114,6 @@ func TestDenyAllSecurityPolicy(t *testing.T) { } func TestReadOnlySecurityPolicy(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() mTablet := clusterInstance.NewVttabletInstance("replica", 0, "") diff --git a/go/test/endtoend/tabletmanager/tablet_test.go b/go/test/endtoend/tabletmanager/tablet_test.go index 1d8e897a4d2..2ded055230f 100644 --- a/go/test/endtoend/tabletmanager/tablet_test.go +++ b/go/test/endtoend/tabletmanager/tablet_test.go @@ -31,7 +31,6 @@ import ( // TestEnsureDB tests that vttablet creates the db as needed func TestEnsureDB(t *testing.T) { - defer cluster.PanicHandler(t) // Create new tablet tablet := clusterInstance.NewVttabletInstance("replica", 0, "") @@ -67,7 +66,6 @@ func TestEnsureDB(t *testing.T) { // TestResetReplicationParameters tests that the RPC ResetReplicationParameters works as intended. 
func TestResetReplicationParameters(t *testing.T) { - defer cluster.PanicHandler(t) // Create new tablet tablet := clusterInstance.NewVttabletInstance("replica", 0, "") diff --git a/go/test/endtoend/tabletmanager/throttler_topo/throttler_test.go b/go/test/endtoend/tabletmanager/throttler_topo/throttler_test.go index d5a1053d77d..226238a46c6 100644 --- a/go/test/endtoend/tabletmanager/throttler_topo/throttler_test.go +++ b/go/test/endtoend/tabletmanager/throttler_topo/throttler_test.go @@ -100,7 +100,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { @@ -268,7 +267,6 @@ func vtgateExec(t *testing.T, query string, expectError string) *sqltypes.Result } func TestInitialThrottler(t *testing.T) { - defer cluster.PanicHandler(t) t.Run("validating OK response from disabled throttler", func(t *testing.T) { waitForThrottleCheckStatus(t, primaryTablet, tabletmanagerdatapb.CheckThrottlerResponseCode_OK) @@ -425,7 +423,6 @@ func TestInitialThrottler(t *testing.T) { } func TestThrottleViaApplySchema(t *testing.T) { - defer cluster.PanicHandler(t) t.Run("throttling via ApplySchema", func(t *testing.T) { vtctlParams := &cluster.ApplySchemaParams{DDLStrategy: "online"} _, err := clusterInstance.VtctldClientProcess.ApplySchemaWithOutput( @@ -463,12 +460,11 @@ func TestThrottleViaApplySchema(t *testing.T) { require.NotNil(t, keyspace.Keyspace.ThrottlerConfig.ThrottledApps) // ThrottledApps will actually be empty at this point, but more specifically we want to see that "online-ddl" is not there. appRule, ok := keyspace.Keyspace.ThrottlerConfig.ThrottledApps[throttlerapp.OnlineDDLName.String()] - assert.True(t, ok, "app rule: %v", appRule) + assert.False(t, ok, "app rule: %v", appRule) }) } func TestThrottlerAfterMetricsCollected(t *testing.T) { - defer cluster.PanicHandler(t) // By this time metrics will have been collected. We expect no lag, and something like: // {"StatusCode":200,"Value":0.282278,"Threshold":1,"Message":""} @@ -497,7 +493,6 @@ func TestThrottlerAfterMetricsCollected(t *testing.T) { } func TestLag(t *testing.T) { - defer cluster.PanicHandler(t) // Temporarily disable VTOrc recoveries because we want to // STOP replication specifically in order to increase the // lag and we DO NOT want VTOrc to try and fix this. @@ -636,7 +631,6 @@ func TestLag(t *testing.T) { } func TestNoReplicas(t *testing.T) { - defer cluster.PanicHandler(t) t.Run("changing replica to RDONLY", func(t *testing.T) { err := clusterInstance.VtctldClientProcess.ExecuteCommand("ChangeTabletType", replicaTablet.Alias, "RDONLY") assert.NoError(t, err) @@ -654,7 +648,6 @@ func TestNoReplicas(t *testing.T) { } func TestCustomQuery(t *testing.T) { - defer cluster.PanicHandler(t) t.Run("enabling throttler with custom query and threshold", func(t *testing.T) { req := &vtctldatapb.UpdateThrottlerConfigRequest{Enable: true, Threshold: customThreshold, CustomQuery: customQuery} @@ -722,7 +715,6 @@ func TestCustomQuery(t *testing.T) { } func TestRestoreDefaultQuery(t *testing.T) { - defer cluster.PanicHandler(t) // Validate going back from custom-query to default-query (replication lag) still works. 
t.Run("enabling throttler with default query and threshold", func(t *testing.T) { diff --git a/go/test/endtoend/topoconncache/main_test.go b/go/test/endtoend/topoconncache/main_test.go index 26eb3918a0b..074bf875165 100644 --- a/go/test/endtoend/topoconncache/main_test.go +++ b/go/test/endtoend/topoconncache/main_test.go @@ -97,7 +97,6 @@ Topology: We create a keyspace with two shards , having 3 tablets each. Primarie to 'zone1' and replicas/rdonly belongs to cell2. */ func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitcode, err := func() (int, error) { diff --git a/go/test/endtoend/topoconncache/topo_conn_cache_test.go b/go/test/endtoend/topoconncache/topo_conn_cache_test.go index 082ecc5717f..f676af318cd 100644 --- a/go/test/endtoend/topoconncache/topo_conn_cache_test.go +++ b/go/test/endtoend/topoconncache/topo_conn_cache_test.go @@ -37,7 +37,6 @@ import ( 4. 'ListAllTablets' should return all the new tablets. */ func TestVtctldListAllTablets(t *testing.T) { - defer cluster.PanicHandler(t) url := fmt.Sprintf("http://%s:%d/api/keyspaces/", clusterInstance.Hostname, clusterInstance.VtctldHTTPPort) testURL(t, url, "keyspace url") diff --git a/go/test/endtoend/topotest/consul/main_test.go b/go/test/endtoend/topotest/consul/main_test.go index 0f6fa6ce554..c6d48f44930 100644 --- a/go/test/endtoend/topotest/consul/main_test.go +++ b/go/test/endtoend/topotest/consul/main_test.go @@ -63,7 +63,6 @@ CREATE TABLE t1 ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { @@ -99,7 +98,6 @@ func TestMain(m *testing.M) { } func TestTopoRestart(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() vtParams := mysql.ConnParams{ Host: "localhost", diff --git a/go/test/endtoend/topotest/etcd2/main_test.go b/go/test/endtoend/topotest/etcd2/main_test.go index 67b0dbbc8f7..ee2b542109b 100644 --- a/go/test/endtoend/topotest/etcd2/main_test.go +++ b/go/test/endtoend/topotest/etcd2/main_test.go @@ -64,7 +64,6 @@ CREATE TABLE t1 ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { @@ -99,7 +98,6 @@ func TestMain(m *testing.M) { } func TestTopoDownServingQuery(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() vtParams := mysql.ConnParams{ Host: "localhost", diff --git a/go/test/endtoend/topotest/zk2/main_test.go b/go/test/endtoend/topotest/zk2/main_test.go index 48636331747..c6569519a3d 100644 --- a/go/test/endtoend/topotest/zk2/main_test.go +++ b/go/test/endtoend/topotest/zk2/main_test.go @@ -63,7 +63,6 @@ CREATE TABLE t1 ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { @@ -99,7 +98,6 @@ func TestMain(m *testing.M) { } func TestTopoDownServingQuery(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() vtParams := mysql.ConnParams{ Host: "localhost", diff --git a/go/test/endtoend/transaction/restart/main_test.go b/go/test/endtoend/transaction/restart/main_test.go index 01185b5fa59..caa3111ad49 100644 --- a/go/test/endtoend/transaction/restart/main_test.go +++ b/go/test/endtoend/transaction/restart/main_test.go @@ -41,7 +41,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { diff --git a/go/test/endtoend/transaction/rollback/txn_rollback_shutdown_test.go b/go/test/endtoend/transaction/rollback/txn_rollback_shutdown_test.go index 2dff9f7b95f..c7bef098c05 100644 --- 
a/go/test/endtoend/transaction/rollback/txn_rollback_shutdown_test.go +++ b/go/test/endtoend/transaction/rollback/txn_rollback_shutdown_test.go @@ -48,7 +48,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { @@ -87,7 +86,6 @@ func TestMain(m *testing.M) { } func TestTransactionRollBackWhenShutDown(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() conn, err := mysql.Connect(ctx, &vtParams) require.NoError(t, err) @@ -122,7 +120,6 @@ func TestTransactionRollBackWhenShutDown(t *testing.T) { } func TestErrorInAutocommitSession(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() conn, err := mysql.Connect(ctx, &vtParams) require.NoError(t, err) diff --git a/go/test/endtoend/transaction/single/main_test.go b/go/test/endtoend/transaction/single/main_test.go index ec2dbd6378a..1eab3cea276 100644 --- a/go/test/endtoend/transaction/single/main_test.go +++ b/go/test/endtoend/transaction/single/main_test.go @@ -46,7 +46,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { diff --git a/go/test/endtoend/transaction/twopc/fuzz/main_test.go b/go/test/endtoend/transaction/twopc/fuzz/main_test.go index 15574c8d072..4d168fbdde0 100644 --- a/go/test/endtoend/transaction/twopc/fuzz/main_test.go +++ b/go/test/endtoend/transaction/twopc/fuzz/main_test.go @@ -48,7 +48,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitcode := func() int { @@ -70,7 +69,6 @@ func TestMain(m *testing.M) { "--tablet_refresh_interval", "2s", ) clusterInstance.VtTabletExtraArgs = append(clusterInstance.VtTabletExtraArgs, - "--twopc_enable", "--twopc_abandon_age", "1", "--migration_check_interval", "2s", ) @@ -122,7 +120,6 @@ func start(t *testing.T) (*mysql.Conn, func()) { } func cleanup(t *testing.T) { - cluster.PanicHandler(t) utils.ClearOutTable(t, vtParams, "twopc_fuzzer_insert") utils.ClearOutTable(t, vtParams, "twopc_fuzzer_update") diff --git a/go/test/endtoend/transaction/twopc/main_test.go b/go/test/endtoend/transaction/twopc/main_test.go index ad25b3ea116..58fe45547a5 100644 --- a/go/test/endtoend/transaction/twopc/main_test.go +++ b/go/test/endtoend/transaction/twopc/main_test.go @@ -61,7 +61,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitcode := func() int { @@ -82,7 +81,6 @@ func TestMain(m *testing.M) { "--grpc_use_effective_callerid", ) clusterInstance.VtTabletExtraArgs = append(clusterInstance.VtTabletExtraArgs, - "--twopc_enable", "--twopc_abandon_age", "1", "--queryserver-config-transaction-cap", "3", "--queryserver-config-transaction-timeout", "400s", @@ -140,7 +138,6 @@ func start(t *testing.T) (*mysql.Conn, func()) { } func cleanup(t *testing.T) { - cluster.PanicHandler(t) twopcutil.ClearOutTable(t, vtParams, "twopc_user") twopcutil.ClearOutTable(t, vtParams, "twopc_t1") twopcutil.ClearOutTable(t, vtParams, "twopc_lookup") @@ -168,7 +165,6 @@ func startWithMySQL(t *testing.T) (utils.MySQLCompare, func()) { return mcmp, func() { deleteAll() mcmp.Close() - cluster.PanicHandler(t) } } diff --git a/go/test/endtoend/transaction/twopc/metric/main_test.go b/go/test/endtoend/transaction/twopc/metric/main_test.go index 73cc380a900..61a43017ef9 100644 --- a/go/test/endtoend/transaction/twopc/metric/main_test.go +++ b/go/test/endtoend/transaction/twopc/metric/main_test.go @@ -48,7 +48,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) 
flag.Parse() exitcode := func() int { @@ -69,7 +68,6 @@ func TestMain(m *testing.M) { "--grpc_use_effective_callerid", ) clusterInstance.VtTabletExtraArgs = append(clusterInstance.VtTabletExtraArgs, - "--twopc_enable", "--twopc_abandon_age", "1", "--queryserver-config-transaction-cap", "100", ) @@ -111,7 +109,6 @@ func start(t *testing.T) (*mysql.Conn, func()) { } func cleanup(t *testing.T) { - cluster.PanicHandler(t) twopcutil.ClearOutTable(t, vtParams, "twopc_user") twopcutil.ClearOutTable(t, vtParams, "twopc_t1") } diff --git a/go/test/endtoend/transaction/twopc/stress/main_test.go b/go/test/endtoend/transaction/twopc/stress/main_test.go index 76cd05df50a..4da4f86bdff 100644 --- a/go/test/endtoend/transaction/twopc/stress/main_test.go +++ b/go/test/endtoend/transaction/twopc/stress/main_test.go @@ -48,7 +48,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitcode := func() int { @@ -70,7 +69,6 @@ func TestMain(m *testing.M) { "--tablet_refresh_interval", "2s", ) clusterInstance.VtTabletExtraArgs = append(clusterInstance.VtTabletExtraArgs, - "--twopc_enable", "--twopc_abandon_age", "1", "--migration_check_interval", "2s", "--onterm_timeout", "1s", @@ -124,7 +122,6 @@ func start(t *testing.T) (*mysql.Conn, func()) { } func cleanup(t *testing.T) { - cluster.PanicHandler(t) utils.ClearOutTable(t, vtParams, "twopc_t1") utils.ClearOutTable(t, vtParams, "twopc_settings") } diff --git a/go/test/endtoend/transaction/twopc/twopc_test.go b/go/test/endtoend/transaction/twopc/twopc_test.go index 1481f679af3..aca3aa306de 100644 --- a/go/test/endtoend/transaction/twopc/twopc_test.go +++ b/go/test/endtoend/transaction/twopc/twopc_test.go @@ -1349,24 +1349,15 @@ func TestSemiSyncRequiredWithTwoPC(t *testing.T) { // cleanup all the old data. conn, closer := start(t) defer closer() - - out, err := clusterInstance.VtctldClientProcess.ExecuteCommandWithOutput("SetKeyspaceDurabilityPolicy", keyspaceName, "--durability-policy=none") - require.NoError(t, err, out) defer func() { - clusterInstance.VtctldClientProcess.ExecuteCommandWithOutput("SetKeyspaceDurabilityPolicy", keyspaceName, "--durability-policy=semi_sync") - for _, shard := range clusterInstance.Keyspaces[0].Shards { - clusterInstance.VtctldClientProcess.PlannedReparentShard(keyspaceName, shard.Name, shard.Vttablets[0].Alias) - } + reparentAllShards(t, clusterInstance, 0) }() - // After changing the durability policy for the given keyspace to none, we run PRS. - shard := clusterInstance.Keyspaces[0].Shards[2] - newPrimary := shard.Vttablets[1] - _, err = clusterInstance.VtctldClientProcess.ExecuteCommandWithOutput( - "PlannedReparentShard", - fmt.Sprintf("%s/%s", keyspaceName, shard.Name), - "--new-primary", newPrimary.Alias) - require.NoError(t, err) + reparentAllShards(t, clusterInstance, 0) + out, err := clusterInstance.VtctldClientProcess.ExecuteCommandWithOutput("SetKeyspaceDurabilityPolicy", keyspaceName, "--durability-policy=none") + require.NoError(t, err, out) + // After changing the durability policy for the given keyspace to none, we run PRS to ensure the changes have taken effect. + reparentAllShards(t, clusterInstance, 1) // A new distributed transaction should fail. 
utils.Exec(t, conn, "begin") @@ -1376,6 +1367,26 @@ func TestSemiSyncRequiredWithTwoPC(t *testing.T) { _, err = utils.ExecAllowError(t, conn, "commit") require.Error(t, err) require.ErrorContains(t, err, "two-pc is enabled, but semi-sync is not") + + _, err = clusterInstance.VtctldClientProcess.ExecuteCommandWithOutput("SetKeyspaceDurabilityPolicy", keyspaceName, "--durability-policy=semi_sync") + require.NoError(t, err) + reparentAllShards(t, clusterInstance, 0) + + // Transaction should now succeed. + utils.Exec(t, conn, "begin") + utils.Exec(t, conn, "insert into twopc_t1(id, col) values(4, 4)") + utils.Exec(t, conn, "insert into twopc_t1(id, col) values(6, 4)") + utils.Exec(t, conn, "insert into twopc_t1(id, col) values(9, 4)") + _, err = utils.ExecAllowError(t, conn, "commit") + require.NoError(t, err) +} + +// reparentAllShards reparents all the shards to the given tablet index for that shard. +func reparentAllShards(t *testing.T, clusterInstance *cluster.LocalProcessCluster, idx int) { + for _, shard := range clusterInstance.Keyspaces[0].Shards { + err := clusterInstance.VtctldClientProcess.PlannedReparentShard(keyspaceName, shard.Name, shard.Vttablets[idx].Alias) + require.NoError(t, err) + } } // TestReadTransactionStatus tests that read transaction state rpc works as expected. diff --git a/go/test/endtoend/transaction/tx_test.go b/go/test/endtoend/transaction/tx_test.go index 753dcfb46bd..89531952b13 100644 --- a/go/test/endtoend/transaction/tx_test.go +++ b/go/test/endtoend/transaction/tx_test.go @@ -46,7 +46,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitcode, err := func() (int, error) { @@ -57,7 +56,6 @@ func TestMain(m *testing.M) { clusterInstance.VtgateGrpcPort = clusterInstance.GetAndReservePort() // Set extra tablet args for twopc clusterInstance.VtTabletExtraArgs = []string{ - "--twopc_enable", "--twopc_abandon_age", "3600", } @@ -95,7 +93,6 @@ func TestMain(m *testing.M) { // TestTransactionModes tests transactions using twopc mode func TestTransactionModes(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() conn, err := mysql.Connect(ctx, &vtParams) @@ -141,7 +138,6 @@ func TestTransactionModes(t *testing.T) { // TestTransactionIsolation tests transaction isolation level. func TestTransactionIsolation(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() conn, err := mysql.Connect(ctx, &vtParams) @@ -248,6 +244,5 @@ func start(t *testing.T) func() { return func() { deleteAll() - cluster.PanicHandler(t) } } diff --git a/go/test/endtoend/utils/cmp.go b/go/test/endtoend/utils/cmp.go index 3b47e1f68dc..7d94c181abd 100644 --- a/go/test/endtoend/utils/cmp.go +++ b/go/test/endtoend/utils/cmp.go @@ -215,6 +215,18 @@ func (mcmp *MySQLCompare) Exec(query string) *sqltypes.Result { return vtQr } +// ExecAssert is the same as Exec, but it only does assertions, it won't FailNow +func (mcmp *MySQLCompare) ExecAssert(query string) *sqltypes.Result { + mcmp.t.Helper() + vtQr, err := mcmp.VtConn.ExecuteFetch(query, 1000, true) + assert.NoError(mcmp.t, err, "[Vitess Error] for query: "+query) + + mysqlQr, err := mcmp.MySQLConn.ExecuteFetch(query, 1000, true) + assert.NoError(mcmp.t, err, "[MySQL Error] for query: "+query) + compareVitessAndMySQLResults(mcmp.t, query, mcmp.VtConn, vtQr, mysqlQr, CompareOptions{}) + return vtQr +} + // ExecNoCompare executes the query on vitess and mysql but does not compare the result with each other. 
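The ExecAssert helper added to MySQLCompare above behaves like Exec except on failure: it reports errors and result mismatches with assert (the test keeps running) rather than require (which stops the test immediately). A minimal sketch of the intended difference, assuming it runs in one of the test packages in this diff that already provide start(t); the test name, table, and statements are illustrative only:

    func TestExecAssertSketch(t *testing.T) {
        mcmp, closer := start(t)
        defer closer()

        // Exec aborts the test on the first error or Vitess/MySQL result mismatch.
        mcmp.Exec("insert into t1(id1, id2) values (1, 1)")

        // ExecAssert records a failure but continues, so later statements still run
        // (useful when follow-up queries provide additional diagnostic output).
        mcmp.ExecAssert("select id1, id2 from t1 order by id1")
    }
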
func (mcmp *MySQLCompare) ExecNoCompare(query string) (*sqltypes.Result, *sqltypes.Result) { mcmp.t.Helper() diff --git a/go/test/endtoend/utils/mysql_test.go b/go/test/endtoend/utils/mysql_test.go index 41b74583f69..4d3d992e879 100644 --- a/go/test/endtoend/utils/mysql_test.go +++ b/go/test/endtoend/utils/mysql_test.go @@ -50,7 +50,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) exitCode := func() int { clusterInstance = cluster.NewCluster(cell, "localhost") diff --git a/go/test/endtoend/utils/mysqlvsvitess/main_test.go b/go/test/endtoend/utils/mysqlvsvitess/main_test.go index 8f162fae41d..f064afb895d 100644 --- a/go/test/endtoend/utils/mysqlvsvitess/main_test.go +++ b/go/test/endtoend/utils/mysqlvsvitess/main_test.go @@ -64,7 +64,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) exitCode := func() int { clusterInstance = cluster.NewCluster(cell, "localhost") diff --git a/go/test/endtoend/utils/utils.go b/go/test/endtoend/utils/utils.go index 35404981164..baa82821306 100644 --- a/go/test/endtoend/utils/utils.go +++ b/go/test/endtoend/utils/utils.go @@ -32,6 +32,7 @@ import ( "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/test/endtoend/cluster" + "vitess.io/vitess/go/vt/vtgate/engine" ) // AssertContains ensures the given query result contains the expected results. @@ -160,6 +161,19 @@ func Exec(t testing.TB, conn *mysql.Conn, query string) *sqltypes.Result { return qr } +// ExecTrace executes the given query with trace using the given connection. The trace result is returned. +// The test fails if the query produces an error. +func ExecTrace(t testing.TB, conn *mysql.Conn, query string) engine.PrimitiveDescription { + t.Helper() + qr, err := conn.ExecuteFetch(fmt.Sprintf("vexplain trace %s", query), 10000, false) + require.NoError(t, err, "for query: "+query) + + // Extract the trace result and format it with indentation for pretty printing + pd, err := engine.PrimitiveDescriptionFromString(qr.Rows[0][0].ToString()) + require.NoError(t, err) + return pd +} + // ExecMulti executes the given (potential multi) queries using the given connection. 
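ExecTrace, added to the endtoend utils above, prefixes the query with `vexplain trace`, runs it on the given connection, and decodes the single JSON cell of the result into an engine.PrimitiveDescription, so tests can assert on the plan that actually executed. A short sketch of a caller, mirroring what verifyTestExpectations in the new plan_tests package further below does; the specific query is illustrative:

    pd := utils.ExecTrace(t, conn, "select id1 from t1 where id1 = 4")

    // The trace is a tree of PrimitiveDescriptions; walk it and assert on the
    // recorded runtime stats, e.g. that a Join's left input produced at least one row.
    engine.WalkPrimitiveDescription(pd, func(d engine.PrimitiveDescription) {
        if d.OperatorType == "Join" {
            require.NotZero(t, d.Inputs[0].RowsReceived[0])
        }
    })
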
// The test fails if any of the queries produces an error func ExecMulti(t testing.TB, conn *mysql.Conn, query string) error { diff --git a/go/test/endtoend/vault/vault_test.go b/go/test/endtoend/vault/vault_test.go index f8e19c07a0c..aab68159ca3 100644 --- a/go/test/endtoend/vault/vault_test.go +++ b/go/test/endtoend/vault/vault_test.go @@ -99,7 +99,6 @@ var ( ) func TestVaultAuth(t *testing.T) { - defer cluster.PanicHandler(nil) // Instantiate Vitess Cluster objects and start topo initializeClusterEarly(t) diff --git a/go/test/endtoend/versionupgrade/upgrade_test.go b/go/test/endtoend/versionupgrade/upgrade_test.go index 181b5dfc9ad..48e552c3a7c 100644 --- a/go/test/endtoend/versionupgrade/upgrade_test.go +++ b/go/test/endtoend/versionupgrade/upgrade_test.go @@ -72,7 +72,6 @@ var ( // TestMain is the main entry point func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitcode, err := func() (int, error) { @@ -131,12 +130,10 @@ func TestMain(m *testing.M) { } func TestShards(t *testing.T) { - defer cluster.PanicHandler(t) assert.Equal(t, 2, len(clusterInstance.Keyspaces[0].Shards)) } func TestDeploySchema(t *testing.T) { - defer cluster.PanicHandler(t) if clusterInstance.ReusingVTDATAROOT { // we assume data is already deployed @@ -163,7 +160,6 @@ func TestDeploySchema(t *testing.T) { } func TestTablesExist(t *testing.T) { - defer cluster.PanicHandler(t) checkTables(t, "", totalTableCount) } diff --git a/go/test/endtoend/vreplication/lookupindex_helper_test.go b/go/test/endtoend/vreplication/lookupindex_helper_test.go new file mode 100644 index 00000000000..864a5e0f7fc --- /dev/null +++ b/go/test/endtoend/vreplication/lookupindex_helper_test.go @@ -0,0 +1,113 @@ +/* +Copyright 2024 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package vreplication + +import ( + "fmt" + "strings" + "testing" + + "github.com/stretchr/testify/require" + "github.com/tidwall/gjson" + + "vitess.io/vitess/go/vt/sqlparser" + + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" +) + +type lookupIndex struct { + typ string + name string + tableKeyspace string + table string + columns []string + ownerTable string + ownerTableKeyspace string + ignoreNulls bool + + t *testing.T +} + +func (li *lookupIndex) String() string { + return li.typ + " " + li.name + " on " + li.tableKeyspace + "." + li.table + " (" + li.columns[0] + ")" +} + +func (li *lookupIndex) create() { + cols := strings.Join(li.columns, ",") + args := []string{ + "LookupVindex", + "--name", li.name, + "--table-keyspace=" + li.ownerTableKeyspace, + "create", + "--keyspace=" + li.tableKeyspace, + "--type=" + li.typ, + "--table-owner=" + li.ownerTable, + "--table-owner-columns=" + cols, + "--tablet-types=PRIMARY", + } + if li.ignoreNulls { + args = append(args, "--ignore-nulls") + } + + err := vc.VtctldClient.ExecuteCommand(args...) 
+ require.NoError(li.t, err, "error executing LookupVindex create: %v", err) + waitForWorkflowState(li.t, vc, fmt.Sprintf("%s.%s", li.ownerTableKeyspace, li.name), binlogdatapb.VReplicationWorkflowState_Running.String()) + li.expectWriteOnly(true) +} + +func (li *lookupIndex) cancel() { + panic("not implemented") +} + +func (li *lookupIndex) externalize() { + args := []string{ + "LookupVindex", + "--name", li.name, + "--table-keyspace=" + li.ownerTableKeyspace, + "externalize", + "--keyspace=" + li.tableKeyspace, + } + err := vc.VtctldClient.ExecuteCommand(args...) + require.NoError(li.t, err, "error executing LookupVindex externalize: %v", err) + li.expectWriteOnly(false) +} + +func (li *lookupIndex) show() error { + return nil +} + +func (li *lookupIndex) expectWriteOnly(expected bool) { + vschema, err := vc.VtctldClient.ExecuteCommandWithOutput("GetVSchema", li.ownerTableKeyspace) + require.NoError(li.t, err, "error executing GetVSchema: %v", err) + vdx := gjson.Get(vschema, fmt.Sprintf("vindexes.%s", li.name)) + require.NotNil(li.t, vdx, "lookup vindex %s not found", li.name) + want := "" + if expected { + want = "true" + } + require.Equal(li.t, want, vdx.Get("params.write_only").String(), "expected write_only parameter to be %s", want) +} + +func getNumRowsInQuery(t *testing.T, query string) int { + stmt, err := sqlparser.NewTestParser().Parse(query) + require.NoError(t, err) + insertStmt, ok := stmt.(*sqlparser.Insert) + require.True(t, ok) + rows, ok := insertStmt.Rows.(sqlparser.Values) + require.True(t, ok) + return len(rows) +} diff --git a/go/test/endtoend/vreplication/lookupindex_test.go b/go/test/endtoend/vreplication/lookupindex_test.go new file mode 100644 index 00000000000..348a0ee5906 --- /dev/null +++ b/go/test/endtoend/vreplication/lookupindex_test.go @@ -0,0 +1,204 @@ +/* +Copyright 2024 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
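The lookupIndex helper above drives a LookupVindex through its vtctldclient lifecycle: create issues `LookupVindex ... create`, waits for the backfill workflow to reach Running, and verifies the vindex is still write_only; externalize issues `LookupVindex ... externalize` and verifies write_only has been cleared. A compact sketch of that lifecycle as the test file below exercises it (field values taken from its first test case; the surrounding cluster setup is assumed):

    li := &lookupIndex{
        typ:                "consistent_lookup",
        name:               "t1_c2_lookup",
        tableKeyspace:      lookupClusterSpec.keyspaceName,
        table:              "t1",
        columns:            []string{"c2"},
        ownerTable:         "t1",
        ownerTableKeyspace: lookupClusterSpec.keyspaceName,
        ignoreNulls:        true,
        t:                  t,
    }
    li.create()      // vtctldclient LookupVindex create: starts the backfill workflow, vindex stays write_only
    li.externalize() // vtctldclient LookupVindex externalize: vindex starts serving reads, write_only is removed
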
+*/ + +package vreplication + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/test/endtoend/cluster" + vttablet "vitess.io/vitess/go/vt/vttablet/common" +) + +type TestClusterSpec struct { + keyspaceName string + vschema string + schema string +} + +var lookupClusterSpec = TestClusterSpec{ + keyspaceName: "lookup", + vschema: ` +{ + "sharded": true, + "vindexes": { + "reverse_bits": { + "type": "reverse_bits" + } + }, + "tables": { + "t1": { + "column_vindexes": [ + { + "column": "c1", + "name": "reverse_bits" + } + ] + } + } +} +`, + schema: ` +create table t1( + c1 int, + c2 int, + val varchar(128), + primary key(c1) +); +`, +} + +func setupLookupIndexKeyspace(t *testing.T) map[string]*cluster.VttabletProcess { + tablets := make(map[string]*cluster.VttabletProcess) + if _, err := vc.AddKeyspace(t, []*Cell{vc.Cells["zone1"]}, lookupClusterSpec.keyspaceName, "-80,80-", + lookupClusterSpec.vschema, lookupClusterSpec.schema, defaultReplicas, defaultRdonly, 200, nil); err != nil { + require.NoError(t, err) + } + defaultCell := vc.Cells[vc.CellNames[0]] + ks := vc.Cells[defaultCell.Name].Keyspaces[lookupClusterSpec.keyspaceName] + targetTab1 = ks.Shards["-80"].Tablets["zone1-200"].Vttablet + targetTab2 = ks.Shards["80-"].Tablets["zone1-300"].Vttablet + tablets["-80"] = targetTab1 + tablets["80-"] = targetTab2 + return tablets +} + +type lookupTestCase struct { + name string + li *lookupIndex + initQuery string + runningQuery string + postExternalizeQuery string + cleanupQuery string +} + +func TestLookupIndex(t *testing.T) { + setSidecarDBName("_vt") + origDefaultReplicas := defaultReplicas + origDefaultRdonly := defaultRdonly + defer func() { + defaultReplicas = origDefaultReplicas + defaultRdonly = origDefaultRdonly + }() + defaultReplicas = 1 + defaultRdonly = 0 + vc = setupMinimalCluster(t) + defer vc.TearDown() + vttablet.InitVReplicationConfigDefaults() + + _ = setupLookupIndexKeyspace(t) + + initQuery := "insert into t1 (c1, c2, val) values (1, 1, 'val1'), (2, 2, 'val2'), (3, 3, 'val3')" + runningQuery := "insert into t1 (c1, c2, val) values (4, 4, 'val4'), (5, 5, 'val5'), (6, 6, 'val6')" + postExternalizeQuery := "insert into t1 (c1, c2, val) values (7, 7, 'val7'), (8, 8, 'val8'), (9, 9, 'val9')" + cleanupQuery := "delete from t1" + + testCases := []lookupTestCase{ + { + name: "non-unique lookup index, one column", + li: &lookupIndex{ + typ: "consistent_lookup", + name: "t1_c2_lookup", + tableKeyspace: lookupClusterSpec.keyspaceName, + table: "t1", + columns: []string{"c2"}, + ownerTable: "t1", + ownerTableKeyspace: lookupClusterSpec.keyspaceName, + ignoreNulls: true, + t: t, + }, + }, + { + name: "lookup index, two columns", + li: &lookupIndex{ + typ: "lookup", + name: "t1_c2_val_lookup", + tableKeyspace: lookupClusterSpec.keyspaceName, + table: "t1", + columns: []string{"c2", "val"}, + ownerTable: "t1", + ownerTableKeyspace: lookupClusterSpec.keyspaceName, + ignoreNulls: true, + t: t, + }, + }, + { + name: "unique lookup index, one column", + li: &lookupIndex{ + typ: "lookup_unique", + name: "t1_c2_unique_lookup", + tableKeyspace: lookupClusterSpec.keyspaceName, + table: "t1", + columns: []string{"c2"}, + ownerTable: "t1", + ownerTableKeyspace: lookupClusterSpec.keyspaceName, + ignoreNulls: true, + t: t, + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + tc.initQuery = initQuery + tc.runningQuery = runningQuery + tc.postExternalizeQuery = postExternalizeQuery + tc.cleanupQuery = cleanupQuery + 
testLookupVindex(t, &tc) + }) + } +} + +func testLookupVindex(t *testing.T, tc *lookupTestCase) { + vtgateConn, cancel := getVTGateConn() + defer cancel() + var totalRows int + li := tc.li + + t.Run("init data", func(t *testing.T) { + totalRows += getNumRowsInQuery(t, tc.initQuery) + _, err := vtgateConn.ExecuteFetch(tc.initQuery, 1000, false) + require.NoError(t, err) + }) + + t.Run("create", func(t *testing.T) { + tc.li.create() + + lks := li.tableKeyspace + vindexName := li.name + waitForRowCount(t, vtgateConn, lks, vindexName, totalRows) + totalRows += getNumRowsInQuery(t, tc.runningQuery) + _, err := vtgateConn.ExecuteFetch(tc.runningQuery, 1000, false) + require.NoError(t, err) + waitForRowCount(t, vtgateConn, tc.li.ownerTableKeyspace, li.name, totalRows) + }) + + t.Run("externalize", func(t *testing.T) { + tc.li.externalize() + totalRows += getNumRowsInQuery(t, tc.postExternalizeQuery) + _, err := vtgateConn.ExecuteFetch(tc.postExternalizeQuery, 1000, false) + require.NoError(t, err) + waitForRowCount(t, vtgateConn, tc.li.ownerTableKeyspace, li.name, totalRows) + }) + + t.Run("cleanup", func(t *testing.T) { + _, err := vtgateConn.ExecuteFetch(tc.cleanupQuery, 1000, false) + require.NoError(t, err) + waitForRowCount(t, vtgateConn, tc.li.ownerTableKeyspace, li.name, 0) + }) +} diff --git a/go/test/endtoend/vtadmin/main_test.go b/go/test/endtoend/vtadmin/main_test.go index fd4ecbdb7fe..9233cd5b0aa 100644 --- a/go/test/endtoend/vtadmin/main_test.go +++ b/go/test/endtoend/vtadmin/main_test.go @@ -51,7 +51,6 @@ create table u_b ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { @@ -92,7 +91,6 @@ func TestMain(m *testing.M) { // TestVtadminAPIs tests the vtadmin APIs. func TestVtadminAPIs(t *testing.T) { - defer cluster.PanicHandler(t) // Test the vtadmin APIs t.Run("keyspaces api", func(t *testing.T) { diff --git a/go/test/endtoend/vtgate/concurrentdml/main_test.go b/go/test/endtoend/vtgate/concurrentdml/main_test.go index 6ee5619b742..734962b0d33 100644 --- a/go/test/endtoend/vtgate/concurrentdml/main_test.go +++ b/go/test/endtoend/vtgate/concurrentdml/main_test.go @@ -66,7 +66,6 @@ INSERT INTO t1_seq (id, next_id, cache) values(0, 1, 1000); ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { @@ -108,7 +107,6 @@ func TestMain(m *testing.M) { } func TestInsertIgnoreOnLookupUniqueVindex(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() vtParams := mysql.ConnParams{ Host: "localhost", @@ -137,7 +135,6 @@ func TestInsertIgnoreOnLookupUniqueVindex(t *testing.T) { func TestOpenTxBlocksInSerial(t *testing.T) { t.Skip("Update and Insert in same transaction does not work with the unique consistent lookup having same value.") - defer cluster.PanicHandler(t) ctx := context.Background() vtParams := mysql.ConnParams{ Host: "localhost", @@ -169,7 +166,6 @@ func TestOpenTxBlocksInSerial(t *testing.T) { func TestOpenTxBlocksInConcurrent(t *testing.T) { t.Skip("Update and Insert in same transaction does not work with the unique consistent lookup having same value.") - defer cluster.PanicHandler(t) ctx := context.Background() vtParams := mysql.ConnParams{ Host: "localhost", @@ -207,7 +203,6 @@ func TestOpenTxBlocksInConcurrent(t *testing.T) { } func TestUpdateLookupUniqueVindex(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() vtParams := mysql.ConnParams{ Host: "localhost", diff --git a/go/test/endtoend/vtgate/connectiondrain/main_test.go 
b/go/test/endtoend/vtgate/connectiondrain/main_test.go index 6dae9b72be9..6257baf8e40 100644 --- a/go/test/endtoend/vtgate/connectiondrain/main_test.go +++ b/go/test/endtoend/vtgate/connectiondrain/main_test.go @@ -40,7 +40,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() os.Exit(m.Run()) } @@ -87,7 +86,6 @@ func start(t *testing.T, vtParams mysql.ConnParams) (*mysql.Conn, func()) { return vtConn, func() { deleteAll() vtConn.Close() - cluster.PanicHandler(t) } } diff --git a/go/test/endtoend/vtgate/consolidator/main_test.go b/go/test/endtoend/vtgate/consolidator/main_test.go index 021db7e513e..0d5eae3eca9 100644 --- a/go/test/endtoend/vtgate/consolidator/main_test.go +++ b/go/test/endtoend/vtgate/consolidator/main_test.go @@ -65,7 +65,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { diff --git a/go/test/endtoend/vtgate/createdb_plugin/main_test.go b/go/test/endtoend/vtgate/createdb_plugin/main_test.go index 5bfec3890b5..e2925bf928d 100644 --- a/go/test/endtoend/vtgate/createdb_plugin/main_test.go +++ b/go/test/endtoend/vtgate/createdb_plugin/main_test.go @@ -43,7 +43,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { @@ -81,7 +80,6 @@ func TestMain(m *testing.M) { } func TestDBDDLPlugin(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() vtParams := mysql.ConnParams{ Host: "localhost", diff --git a/go/test/endtoend/vtgate/errors_as_warnings/main_test.go b/go/test/endtoend/vtgate/errors_as_warnings/main_test.go index 71f4a2353f7..374a92f395f 100644 --- a/go/test/endtoend/vtgate/errors_as_warnings/main_test.go +++ b/go/test/endtoend/vtgate/errors_as_warnings/main_test.go @@ -66,7 +66,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { diff --git a/go/test/endtoend/vtgate/foreignkey/main_test.go b/go/test/endtoend/vtgate/foreignkey/main_test.go index b4d610785b5..fe418c1c0ea 100644 --- a/go/test/endtoend/vtgate/foreignkey/main_test.go +++ b/go/test/endtoend/vtgate/foreignkey/main_test.go @@ -98,7 +98,6 @@ type fkReference struct { } func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { @@ -197,7 +196,6 @@ func start(t *testing.T) (utils.MySQLCompare, func()) { return mcmp, func() { deleteAll() mcmp.Close() - cluster.PanicHandler(t) } } diff --git a/go/test/endtoend/vtgate/foreignkey/stress/fk_stress_test.go b/go/test/endtoend/vtgate/foreignkey/stress/fk_stress_test.go index 34114152e4e..42498ad80f0 100644 --- a/go/test/endtoend/vtgate/foreignkey/stress/fk_stress_test.go +++ b/go/test/endtoend/vtgate/foreignkey/stress/fk_stress_test.go @@ -335,7 +335,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitcode, err := func() (int, error) { @@ -653,7 +652,6 @@ func ExecuteFKTest(t *testing.T, tcase *testCase) { } func TestStressFK(t *testing.T) { - defer cluster.PanicHandler(t) t.Run("validate replication health", func(t *testing.T) { validateReplicationIsHealthy(t, replicaNoFK) diff --git a/go/test/endtoend/vtgate/gen4/column_name_test.go b/go/test/endtoend/vtgate/gen4/column_name_test.go index d23c03c9f6b..0f5a83a5092 100644 --- a/go/test/endtoend/vtgate/gen4/column_name_test.go +++ b/go/test/endtoend/vtgate/gen4/column_name_test.go @@ -26,11 +26,9 @@ import ( "github.com/stretchr/testify/require" "vitess.io/vitess/go/mysql" - 
"vitess.io/vitess/go/test/endtoend/cluster" ) func TestColumnNames(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() conn, err := mysql.Connect(ctx, &vtParams) diff --git a/go/test/endtoend/vtgate/gen4/main_test.go b/go/test/endtoend/vtgate/gen4/main_test.go index 4c94e8e2ec8..e8280b3aa06 100644 --- a/go/test/endtoend/vtgate/gen4/main_test.go +++ b/go/test/endtoend/vtgate/gen4/main_test.go @@ -64,7 +64,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { @@ -152,6 +151,5 @@ func start(t *testing.T) (utils.MySQLCompare, func()) { return mcmp, func() { deleteAll() mcmp.Close() - cluster.PanicHandler(t) } } diff --git a/go/test/endtoend/vtgate/gen4/system_schema_test.go b/go/test/endtoend/vtgate/gen4/system_schema_test.go index fc4983935e9..d01d4d972a1 100644 --- a/go/test/endtoend/vtgate/gen4/system_schema_test.go +++ b/go/test/endtoend/vtgate/gen4/system_schema_test.go @@ -28,11 +28,9 @@ import ( "github.com/stretchr/testify/require" "vitess.io/vitess/go/mysql" - "vitess.io/vitess/go/test/endtoend/cluster" ) func TestDbNameOverride(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() conn, err := mysql.Connect(ctx, &vtParams) require.Nil(t, err) @@ -55,7 +53,6 @@ func TestDbNameOverride(t *testing.T) { } func TestInformationSchemaQuery(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() conn, err := mysql.Connect(ctx, &vtParams) require.NoError(t, err) @@ -90,7 +87,6 @@ func assertSingleRowIsReturned(t *testing.T, conn *mysql.Conn, predicate string, } func TestInformationSchemaWithSubquery(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() conn, err := mysql.Connect(ctx, &vtParams) require.NoError(t, err) @@ -101,7 +97,6 @@ func TestInformationSchemaWithSubquery(t *testing.T) { } func TestInformationSchemaQueryGetsRoutedToTheRightTableAndKeyspace(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() conn, err := mysql.Connect(ctx, &vtParams) require.NoError(t, err) @@ -113,7 +108,6 @@ func TestInformationSchemaQueryGetsRoutedToTheRightTableAndKeyspace(t *testing.T } func TestFKConstraintUsingInformationSchema(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() conn, err := mysql.Connect(ctx, &vtParams) require.NoError(t, err) @@ -131,7 +125,6 @@ func TestFKConstraintUsingInformationSchema(t *testing.T) { } func TestConnectWithSystemSchema(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() for _, dbname := range []string{"information_schema", "mysql", "performance_schema", "sys"} { connParams := vtParams @@ -144,7 +137,6 @@ func TestConnectWithSystemSchema(t *testing.T) { } func TestUseSystemSchema(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() conn, err := mysql.Connect(ctx, &vtParams) require.NoError(t, err) @@ -156,7 +148,6 @@ func TestUseSystemSchema(t *testing.T) { } func TestSystemSchemaQueryWithoutQualifier(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() conn, err := mysql.Connect(ctx, &vtParams) require.NoError(t, err) @@ -191,7 +182,6 @@ func TestSystemSchemaQueryWithoutQualifier(t *testing.T) { } func TestMultipleSchemaPredicates(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() conn, err := mysql.Connect(ctx, &vtParams) require.NoError(t, err) @@ -217,7 +207,6 @@ func TestMultipleSchemaPredicates(t *testing.T) { } func TestQuerySystemTables(t *testing.T) { - defer 
cluster.PanicHandler(t) ctx := context.Background() conn, err := mysql.Connect(ctx, &vtParams) require.NoError(t, err) diff --git a/go/test/endtoend/vtgate/godriver/main_test.go b/go/test/endtoend/vtgate/godriver/main_test.go index 587c189d2ea..91605394cf1 100644 --- a/go/test/endtoend/vtgate/godriver/main_test.go +++ b/go/test/endtoend/vtgate/godriver/main_test.go @@ -86,7 +86,6 @@ create table my_message( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { @@ -125,7 +124,6 @@ func TestMain(m *testing.M) { } func TestStreamMessaging(t *testing.T) { - defer cluster.PanicHandler(t) cnf := vitessdriver.Configuration{ Protocol: "grpc", diff --git a/go/test/endtoend/vtgate/grpc_api/main_test.go b/go/test/endtoend/vtgate/grpc_api/main_test.go index 3c8605f79a0..87d30f4ce26 100644 --- a/go/test/endtoend/vtgate/grpc_api/main_test.go +++ b/go/test/endtoend/vtgate/grpc_api/main_test.go @@ -75,7 +75,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitcode := func() int { diff --git a/go/test/endtoend/vtgate/keyspace_watches/keyspace_watch_test.go b/go/test/endtoend/vtgate/keyspace_watches/keyspace_watch_test.go index 4971d03060b..1eb31663577 100644 --- a/go/test/endtoend/vtgate/keyspace_watches/keyspace_watch_test.go +++ b/go/test/endtoend/vtgate/keyspace_watches/keyspace_watch_test.go @@ -117,7 +117,6 @@ func createCluster(extraVTGateArgs []string) (*cluster.LocalProcessCluster, int) } func TestRoutingWithKeyspacesToWatch(t *testing.T) { - defer cluster.PanicHandler(t) clusterInstance, exitCode := createCluster(nil) defer clusterInstance.Teardown() @@ -141,7 +140,6 @@ func TestRoutingWithKeyspacesToWatch(t *testing.T) { } func TestVSchemaDDLWithKeyspacesToWatch(t *testing.T) { - defer cluster.PanicHandler(t) extraVTGateArgs := []string{ "--vschema_ddl_authorized_users", "%", diff --git a/go/test/endtoend/vtgate/main_test.go b/go/test/endtoend/vtgate/main_test.go index b276508f269..6c43a70632b 100644 --- a/go/test/endtoend/vtgate/main_test.go +++ b/go/test/endtoend/vtgate/main_test.go @@ -54,7 +54,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { clusterInstance = cluster.NewCluster(Cell, "localhost") @@ -122,6 +121,5 @@ func start(t *testing.T) (*mysql.Conn, func()) { return conn, func() { deleteAll() conn.Close() - cluster.PanicHandler(t) } } diff --git a/go/test/endtoend/vtgate/misc_test.go b/go/test/endtoend/vtgate/misc_test.go index f3804a2a45f..bbcb338fa50 100644 --- a/go/test/endtoend/vtgate/misc_test.go +++ b/go/test/endtoend/vtgate/misc_test.go @@ -28,7 +28,6 @@ import ( "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/mysql/sqlerror" - "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/test/endtoend/utils" ) @@ -783,7 +782,6 @@ func TestJoinWithMergedRouteWithPredicate(t *testing.T) { func TestRowCountExceed(t *testing.T) { conn, _ := start(t) defer func() { - cluster.PanicHandler(t) // needs special delete logic as it exceeds row count. 
for i := 50; i <= 300; i += 50 { utils.Exec(t, conn, fmt.Sprintf("delete from t1 where id1 < %d", i)) diff --git a/go/test/endtoend/vtgate/mysql80/main_test.go b/go/test/endtoend/vtgate/mysql80/main_test.go index 4f5897d1f59..b970fb66b12 100644 --- a/go/test/endtoend/vtgate/mysql80/main_test.go +++ b/go/test/endtoend/vtgate/mysql80/main_test.go @@ -35,7 +35,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { diff --git a/go/test/endtoend/vtgate/mysql80/misc_test.go b/go/test/endtoend/vtgate/mysql80/misc_test.go index b29eb13ecdc..5132bf87aba 100644 --- a/go/test/endtoend/vtgate/mysql80/misc_test.go +++ b/go/test/endtoend/vtgate/mysql80/misc_test.go @@ -23,15 +23,12 @@ import ( "vitess.io/vitess/go/test/endtoend/utils" - "vitess.io/vitess/go/test/endtoend/cluster" - "github.com/stretchr/testify/require" "vitess.io/vitess/go/mysql" ) func TestFunctionInDefault(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() conn, err := mysql.Connect(ctx, &vtParams) require.NoError(t, err) @@ -223,7 +220,6 @@ func BenchmarkReservedConnWhenSettingSysVar(b *testing.B) { } func TestJsonFunctions(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() conn, err := mysql.Connect(ctx, &vtParams) require.NoError(t, err) diff --git a/go/test/endtoend/vtgate/partialfailure/main_test.go b/go/test/endtoend/vtgate/partialfailure/main_test.go index 9e39e7b5dd5..d5b6a639c68 100644 --- a/go/test/endtoend/vtgate/partialfailure/main_test.go +++ b/go/test/endtoend/vtgate/partialfailure/main_test.go @@ -45,7 +45,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { diff --git a/go/test/endtoend/vtgate/plan_tests/main_test.go b/go/test/endtoend/vtgate/plan_tests/main_test.go new file mode 100644 index 00000000000..d3915af0c8d --- /dev/null +++ b/go/test/endtoend/vtgate/plan_tests/main_test.go @@ -0,0 +1,230 @@ +/* +Copyright 2024 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package plan_tests + +import ( + "encoding/json" + "fmt" + "os" + "testing" + + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/test/endtoend/cluster" + "vitess.io/vitess/go/test/endtoend/utils" + "vitess.io/vitess/go/vt/vtgate/engine" + "vitess.io/vitess/go/vt/vtgate/planbuilder" +) + +var ( + clusterInstance *cluster.LocalProcessCluster + vtParams mysql.ConnParams + mysqlParams mysql.ConnParams + uks = "main" + sks = "user" + cell = "plantests" +) + +func TestMain(m *testing.M) { + vschema := readFile("vschemas/schema.json") + userVs := extractUserKS(vschema) + mainVs := extractMainKS(vschema) + sSQL := readFile("schemas/user.sql") + uSQL := readFile("schemas/main.sql") + + exitCode := func() int { + clusterInstance = cluster.NewCluster(cell, "localhost") + defer clusterInstance.Teardown() + + // Start topo server + err := clusterInstance.StartTopo() + if err != nil { + fmt.Println(err.Error()) + return 1 + } + + // Start unsharded keyspace + uKeyspace := &cluster.Keyspace{ + Name: uks, + SchemaSQL: uSQL, + VSchema: mainVs, + } + err = clusterInstance.StartUnshardedKeyspace(*uKeyspace, 0, false) + if err != nil { + fmt.Println(err.Error()) + return 1 + } + + // Start sharded keyspace + skeyspace := &cluster.Keyspace{ + Name: sks, + SchemaSQL: sSQL, + VSchema: userVs, + } + err = clusterInstance.StartKeyspace(*skeyspace, []string{"-80", "80-"}, 0, false) + if err != nil { + fmt.Println(err.Error()) + return 1 + } + + // TODO: (@GuptaManan100/@systay): Also run the tests with normalizer on. + clusterInstance.VtGateExtraArgs = append(clusterInstance.VtGateExtraArgs, + "--normalize_queries=false", + "--schema_change_signal=false", + ) + + // Start vtgate + err = clusterInstance.StartVtgate() + if err != nil { + fmt.Println(err.Error()) + return 1 + } + + vtParams = clusterInstance.GetVTParams(sks) + + // create mysql instance and connection parameters + conn, closer, err := utils.NewMySQL(clusterInstance, sks, sSQL, uSQL) + if err != nil { + fmt.Println(err.Error()) + return 1 + } + defer closer() + mysqlParams = conn + + return m.Run() + }() + os.Exit(exitCode) +} + +func readFile(filename string) string { + schema, err := os.ReadFile(locateFile(filename)) + if err != nil { + fmt.Println(err) + os.Exit(1) + } + return string(schema) +} + +func start(t *testing.T) (utils.MySQLCompare, func()) { + mcmp, err := utils.NewMySQLCompare(t, vtParams, mysqlParams) + require.NoError(t, err) + return mcmp, func() { + mcmp.Close() + } +} + +func readJSONTests(filename string) []planbuilder.PlanTest { + var output []planbuilder.PlanTest + file, err := os.Open(locateFile(filename)) + if err != nil { + panic(err) + } + defer file.Close() + dec := json.NewDecoder(file) + err = dec.Decode(&output) + if err != nil { + panic(err) + } + return output +} + +func locateFile(name string) string { + return "../../../../vt/vtgate/planbuilder/testdata/" + name +} + +// verifyTestExpectations verifies the expectations of the test. +func verifyTestExpectations(t *testing.T, pd engine.PrimitiveDescription, test planbuilder.PlanTest) { + // 1. Verify that the Join primitive sees atleast 1 row on the left side. + engine.WalkPrimitiveDescription(pd, func(description engine.PrimitiveDescription) { + if description.OperatorType == "Join" { + require.NotZero(t, description.Inputs[0].RowsReceived[0]) + } + }) + + // 2. Verify that the plan description matches the expected plan description. 
+ planBytes, err := test.Plan.MarshalJSON() + require.NoError(t, err) + mp := make(map[string]any) + err = json.Unmarshal(planBytes, &mp) + require.NoError(t, err) + pdExpected, err := engine.PrimitiveDescriptionFromMap(mp["Instructions"].(map[string]any)) + require.NoError(t, err) + require.Empty(t, pdExpected.Equals(pd), "Expected: %v\nGot: %v", string(planBytes), pd) +} + +func extractUserKS(jsonString string) string { + var result map[string]any + if err := json.Unmarshal([]byte(jsonString), &result); err != nil { + panic(err.Error()) + } + + keyspaces, ok := result["keyspaces"].(map[string]any) + if !ok { + panic("Keyspaces not found") + } + + user, ok := keyspaces["user"].(map[string]any) + if !ok { + panic("User keyspace not found") + } + + tables, ok := user["tables"].(map[string]any) + if !ok { + panic("Tables not found") + } + + userTbl, ok := tables["user"].(map[string]any) + if !ok { + panic("User table not found") + } + + delete(userTbl, "auto_increment") // TODO: we should have an unsharded keyspace where this could live + + // Marshal the inner part back to JSON string + userJson, err := json.Marshal(user) + if err != nil { + panic(err.Error()) + } + + return string(userJson) +} + +func extractMainKS(jsonString string) string { + var result map[string]any + if err := json.Unmarshal([]byte(jsonString), &result); err != nil { + panic(err.Error()) + } + + keyspaces, ok := result["keyspaces"].(map[string]any) + if !ok { + panic("Keyspaces not found") + } + + main, ok := keyspaces["main"].(map[string]any) + if !ok { + panic("main keyspace not found") + } + + // Marshal the inner part back to JSON string + mainJson, err := json.Marshal(main) + if err != nil { + panic(err.Error()) + } + + return string(mainJson) +} diff --git a/go/test/endtoend/vtgate/plan_tests/plan_e2e_test.go b/go/test/endtoend/vtgate/plan_tests/plan_e2e_test.go new file mode 100644 index 00000000000..1594e9b392c --- /dev/null +++ b/go/test/endtoend/vtgate/plan_tests/plan_e2e_test.go @@ -0,0 +1,42 @@ +/* +Copyright 2024 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package plan_tests + +import ( + "testing" + + "vitess.io/vitess/go/test/endtoend/utils" +) + +func TestSelectCases(t *testing.T) { + mcmp, closer := start(t) + defer closer() + tests := readJSONTests("select_cases.json") + for _, test := range tests { + mcmp.Run(test.Comment, func(mcmp *utils.MySQLCompare) { + if test.SkipE2E { + mcmp.AsT().Skip(test.Query) + } + mcmp.Exec(test.Query) + pd := utils.ExecTrace(mcmp.AsT(), mcmp.VtConn, test.Query) + verifyTestExpectations(mcmp.AsT(), pd, test) + if mcmp.VtConn.IsClosed() { + mcmp.AsT().Fatal("vtgate connection is closed") + } + }) + } +} diff --git a/go/test/endtoend/vtgate/prefixfanout/main_test.go b/go/test/endtoend/vtgate/prefixfanout/main_test.go index 928808fd48e..a96ee4ce7f5 100644 --- a/go/test/endtoend/vtgate/prefixfanout/main_test.go +++ b/go/test/endtoend/vtgate/prefixfanout/main_test.go @@ -109,7 +109,6 @@ PRIMARY KEY (c1) ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { @@ -158,7 +157,6 @@ func TestMain(m *testing.M) { } func TestCFCPrefixQueryNoHash(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() vtParams := clusterInstance.GetVTParams(sKs) conn, err := mysql.Connect(ctx, &vtParams) @@ -196,7 +194,6 @@ func TestCFCPrefixQueryNoHash(t *testing.T) { } func TestCFCPrefixQueryWithHash(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() vtParams := clusterInstance.GetVTParams(sKsMD5) @@ -239,7 +236,6 @@ func TestCFCPrefixQueryWithHash(t *testing.T) { } func TestCFCInsert(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() vtParams := clusterInstance.GetVTParams(sKs) diff --git a/go/test/endtoend/vtgate/queries/aggregation/aggregation_test.go b/go/test/endtoend/vtgate/queries/aggregation/aggregation_test.go index d206f58e17c..62d23749cd7 100644 --- a/go/test/endtoend/vtgate/queries/aggregation/aggregation_test.go +++ b/go/test/endtoend/vtgate/queries/aggregation/aggregation_test.go @@ -27,7 +27,6 @@ import ( "github.com/stretchr/testify/require" "vitess.io/vitess/go/sqltypes" - "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/test/endtoend/utils" ) @@ -66,7 +65,6 @@ func start(t *testing.T) (utils.MySQLCompare, func()) { return mcmp, func() { deleteAll() mcmp.Close() - cluster.PanicHandler(t) } } diff --git a/go/test/endtoend/vtgate/queries/aggregation/main_test.go b/go/test/endtoend/vtgate/queries/aggregation/main_test.go index 02013a9b0e2..bd1c1aa3b7d 100644 --- a/go/test/endtoend/vtgate/queries/aggregation/main_test.go +++ b/go/test/endtoend/vtgate/queries/aggregation/main_test.go @@ -44,7 +44,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { diff --git a/go/test/endtoend/vtgate/queries/benchmark/main_test.go b/go/test/endtoend/vtgate/queries/benchmark/main_test.go index 40a215c8007..0410f52993a 100644 --- a/go/test/endtoend/vtgate/queries/benchmark/main_test.go +++ b/go/test/endtoend/vtgate/queries/benchmark/main_test.go @@ -65,7 +65,6 @@ var shards4 = []string{ } func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { @@ -169,6 +168,5 @@ func start(b *testing.B) (*mysql.Conn, func()) { return conn, func() { deleteAll() conn.Close() - cluster.PanicHandler(b) } } diff --git a/go/test/endtoend/vtgate/queries/derived/derived_test.go b/go/test/endtoend/vtgate/queries/derived/derived_test.go index cb106564b2f..fe467f31c20 100644 --- 
a/go/test/endtoend/vtgate/queries/derived/derived_test.go +++ b/go/test/endtoend/vtgate/queries/derived/derived_test.go @@ -21,7 +21,6 @@ import ( "github.com/stretchr/testify/require" - "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/test/endtoend/utils" ) @@ -44,7 +43,6 @@ func start(t *testing.T) (utils.MySQLCompare, func()) { return mcmp, func() { deleteAll() mcmp.Close() - cluster.PanicHandler(t) } } diff --git a/go/test/endtoend/vtgate/queries/derived/main_test.go b/go/test/endtoend/vtgate/queries/derived/main_test.go index 3b44811f95c..0bab24a966a 100644 --- a/go/test/endtoend/vtgate/queries/derived/main_test.go +++ b/go/test/endtoend/vtgate/queries/derived/main_test.go @@ -44,7 +44,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { diff --git a/go/test/endtoend/vtgate/queries/dml/main_test.go b/go/test/endtoend/vtgate/queries/dml/main_test.go index 0c4d58aa614..bc72acc1159 100644 --- a/go/test/endtoend/vtgate/queries/dml/main_test.go +++ b/go/test/endtoend/vtgate/queries/dml/main_test.go @@ -66,7 +66,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { @@ -148,6 +147,5 @@ func start(t *testing.T) (utils.MySQLCompare, func()) { return mcmp, func() { deleteAll() mcmp.Close() - cluster.PanicHandler(t) } } diff --git a/go/test/endtoend/vtgate/queries/foundrows/found_rows_test.go b/go/test/endtoend/vtgate/queries/foundrows/found_rows_test.go index f52e2eff532..e50e9210c55 100644 --- a/go/test/endtoend/vtgate/queries/foundrows/found_rows_test.go +++ b/go/test/endtoend/vtgate/queries/foundrows/found_rows_test.go @@ -21,12 +21,10 @@ import ( "github.com/stretchr/testify/require" - "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/test/endtoend/utils" ) func TestFoundRows(t *testing.T) { - defer cluster.PanicHandler(t) mcmp, err := utils.NewMySQLCompare(t, vtParams, mysqlParams) require.NoError(t, err) defer mcmp.Close() diff --git a/go/test/endtoend/vtgate/queries/foundrows/main_test.go b/go/test/endtoend/vtgate/queries/foundrows/main_test.go index 8f992863008..248b6cd9434 100644 --- a/go/test/endtoend/vtgate/queries/foundrows/main_test.go +++ b/go/test/endtoend/vtgate/queries/foundrows/main_test.go @@ -46,7 +46,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { diff --git a/go/test/endtoend/vtgate/queries/informationschema/informationschema_test.go b/go/test/endtoend/vtgate/queries/informationschema/informationschema_test.go index c5568b2db49..e158bd96e33 100644 --- a/go/test/endtoend/vtgate/queries/informationschema/informationschema_test.go +++ b/go/test/endtoend/vtgate/queries/informationschema/informationschema_test.go @@ -25,7 +25,6 @@ import ( "github.com/stretchr/testify/require" "vitess.io/vitess/go/mysql" - "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/test/endtoend/utils" ) @@ -47,7 +46,6 @@ func start(t *testing.T) (utils.MySQLCompare, func()) { return mcmp, func() { deleteAll() mcmp.Close() - cluster.PanicHandler(t) } } @@ -114,7 +112,6 @@ func TestFKConstraintUsingInformationSchema(t *testing.T) { } func TestConnectWithSystemSchema(t *testing.T) { - defer cluster.PanicHandler(t) for _, dbname := range []string{"information_schema", "mysql", "performance_schema", "sys"} { vtConnParams := vtParams vtConnParams.DbName = dbname diff --git a/go/test/endtoend/vtgate/queries/informationschema/main_test.go 
b/go/test/endtoend/vtgate/queries/informationschema/main_test.go index 3696617281e..76d5f44ebae 100644 --- a/go/test/endtoend/vtgate/queries/informationschema/main_test.go +++ b/go/test/endtoend/vtgate/queries/informationschema/main_test.go @@ -52,7 +52,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { diff --git a/go/test/endtoend/vtgate/queries/kill/main_test.go b/go/test/endtoend/vtgate/queries/kill/main_test.go index 99608030246..61ddec43589 100644 --- a/go/test/endtoend/vtgate/queries/kill/main_test.go +++ b/go/test/endtoend/vtgate/queries/kill/main_test.go @@ -50,7 +50,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { diff --git a/go/test/endtoend/vtgate/queries/lookup_queries/main_test.go b/go/test/endtoend/vtgate/queries/lookup_queries/main_test.go index a587f124762..818b834511a 100644 --- a/go/test/endtoend/vtgate/queries/lookup_queries/main_test.go +++ b/go/test/endtoend/vtgate/queries/lookup_queries/main_test.go @@ -49,7 +49,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { @@ -119,7 +118,6 @@ func start(t *testing.T) (utils.MySQLCompare, func()) { return mcmp, func() { deleteAll() mcmp.Close() - cluster.PanicHandler(t) } } diff --git a/go/test/endtoend/vtgate/queries/misc/main_test.go b/go/test/endtoend/vtgate/queries/misc/main_test.go index f20072031a8..ee9be542634 100644 --- a/go/test/endtoend/vtgate/queries/misc/main_test.go +++ b/go/test/endtoend/vtgate/queries/misc/main_test.go @@ -48,7 +48,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { diff --git a/go/test/endtoend/vtgate/queries/misc/misc_test.go b/go/test/endtoend/vtgate/queries/misc/misc_test.go index fd869c4ba5b..8738baf3267 100644 --- a/go/test/endtoend/vtgate/queries/misc/misc_test.go +++ b/go/test/endtoend/vtgate/queries/misc/misc_test.go @@ -30,7 +30,6 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/test/endtoend/utils" ) @@ -50,7 +49,6 @@ func start(t *testing.T) (utils.MySQLCompare, func()) { return mcmp, func() { deleteAll() mcmp.Close() - cluster.PanicHandler(t) } } diff --git a/go/test/endtoend/vtgate/queries/no_scatter/main_test.go b/go/test/endtoend/vtgate/queries/no_scatter/main_test.go index c4b0974c24b..a1478dcd2ac 100644 --- a/go/test/endtoend/vtgate/queries/no_scatter/main_test.go +++ b/go/test/endtoend/vtgate/queries/no_scatter/main_test.go @@ -40,7 +40,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { diff --git a/go/test/endtoend/vtgate/queries/no_scatter/queries_test.go b/go/test/endtoend/vtgate/queries/no_scatter/queries_test.go index 7bf702afc15..b302a0f4dc7 100644 --- a/go/test/endtoend/vtgate/queries/no_scatter/queries_test.go +++ b/go/test/endtoend/vtgate/queries/no_scatter/queries_test.go @@ -23,7 +23,6 @@ import ( "github.com/stretchr/testify/require" "vitess.io/vitess/go/mysql" - "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/test/endtoend/utils" ) @@ -43,7 +42,6 @@ func start(t *testing.T) (*mysql.Conn, func()) { return vtConn, func() { deleteAll() vtConn.Close() - cluster.PanicHandler(t) } } diff --git a/go/test/endtoend/vtgate/queries/normalize/main_test.go b/go/test/endtoend/vtgate/queries/normalize/main_test.go index 8f4d97209dd..8c75d38284d 
100644 --- a/go/test/endtoend/vtgate/queries/normalize/main_test.go +++ b/go/test/endtoend/vtgate/queries/normalize/main_test.go @@ -39,7 +39,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { diff --git a/go/test/endtoend/vtgate/queries/orderby/main_test.go b/go/test/endtoend/vtgate/queries/orderby/main_test.go index 9f18377ee3f..353745722b7 100644 --- a/go/test/endtoend/vtgate/queries/orderby/main_test.go +++ b/go/test/endtoend/vtgate/queries/orderby/main_test.go @@ -44,7 +44,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { diff --git a/go/test/endtoend/vtgate/queries/orderby/orderby_test.go b/go/test/endtoend/vtgate/queries/orderby/orderby_test.go index c36b52a4e6a..716f01fb5c7 100644 --- a/go/test/endtoend/vtgate/queries/orderby/orderby_test.go +++ b/go/test/endtoend/vtgate/queries/orderby/orderby_test.go @@ -22,8 +22,6 @@ import ( "vitess.io/vitess/go/test/endtoend/utils" "github.com/stretchr/testify/require" - - "vitess.io/vitess/go/test/endtoend/cluster" ) func start(t *testing.T) (utils.MySQLCompare, func()) { @@ -44,7 +42,6 @@ func start(t *testing.T) (utils.MySQLCompare, func()) { return mcmp, func() { deleteAll() mcmp.Close() - cluster.PanicHandler(t) } } diff --git a/go/test/endtoend/vtgate/queries/orderby/without_schematracker/main_test.go b/go/test/endtoend/vtgate/queries/orderby/without_schematracker/main_test.go index 00221e9c9f3..373c1327074 100644 --- a/go/test/endtoend/vtgate/queries/orderby/without_schematracker/main_test.go +++ b/go/test/endtoend/vtgate/queries/orderby/without_schematracker/main_test.go @@ -44,7 +44,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { diff --git a/go/test/endtoend/vtgate/queries/orderby/without_schematracker/orderby_test.go b/go/test/endtoend/vtgate/queries/orderby/without_schematracker/orderby_test.go index a20c7ad54c6..956815d2a0d 100644 --- a/go/test/endtoend/vtgate/queries/orderby/without_schematracker/orderby_test.go +++ b/go/test/endtoend/vtgate/queries/orderby/without_schematracker/orderby_test.go @@ -22,8 +22,6 @@ import ( "vitess.io/vitess/go/test/endtoend/utils" "github.com/stretchr/testify/require" - - "vitess.io/vitess/go/test/endtoend/cluster" ) func start(t *testing.T) (utils.MySQLCompare, func()) { @@ -44,7 +42,6 @@ func start(t *testing.T) (utils.MySQLCompare, func()) { return mcmp, func() { deleteAll() mcmp.Close() - cluster.PanicHandler(t) } } diff --git a/go/test/endtoend/vtgate/queries/random/main_test.go b/go/test/endtoend/vtgate/queries/random/main_test.go index e3256f60796..85c8840924d 100644 --- a/go/test/endtoend/vtgate/queries/random/main_test.go +++ b/go/test/endtoend/vtgate/queries/random/main_test.go @@ -44,7 +44,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { diff --git a/go/test/endtoend/vtgate/queries/random/random_test.go b/go/test/endtoend/vtgate/queries/random/random_test.go index 2d210ee7f99..20c7934d91f 100644 --- a/go/test/endtoend/vtgate/queries/random/random_test.go +++ b/go/test/endtoend/vtgate/queries/random/random_test.go @@ -27,7 +27,6 @@ import ( "github.com/stretchr/testify/require" - "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/test/endtoend/utils" ) @@ -61,7 +60,6 @@ func start(t *testing.T) (utils.MySQLCompare, func()) { return mcmp, func() { deleteAll() mcmp.Close() - cluster.PanicHandler(t) } } diff --git 
a/go/test/endtoend/vtgate/queries/reference/main_test.go b/go/test/endtoend/vtgate/queries/reference/main_test.go index c350038bf6e..03ee429e4c0 100644 --- a/go/test/endtoend/vtgate/queries/reference/main_test.go +++ b/go/test/endtoend/vtgate/queries/reference/main_test.go @@ -53,7 +53,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { diff --git a/go/test/endtoend/vtgate/queries/reference/reference_test.go b/go/test/endtoend/vtgate/queries/reference/reference_test.go index 66d46dfaf15..ce942833729 100644 --- a/go/test/endtoend/vtgate/queries/reference/reference_test.go +++ b/go/test/endtoend/vtgate/queries/reference/reference_test.go @@ -24,8 +24,6 @@ import ( "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/test/endtoend/utils" - - "vitess.io/vitess/go/test/endtoend/cluster" ) func start(t *testing.T) (*mysql.Conn, func()) { @@ -35,7 +33,6 @@ func start(t *testing.T) (*mysql.Conn, func()) { return vtConn, func() { vtConn.Close() - cluster.PanicHandler(t) } } diff --git a/go/test/endtoend/vtgate/queries/subquery/main_test.go b/go/test/endtoend/vtgate/queries/subquery/main_test.go index 9eaf3b4caa0..bc8580bd38d 100644 --- a/go/test/endtoend/vtgate/queries/subquery/main_test.go +++ b/go/test/endtoend/vtgate/queries/subquery/main_test.go @@ -44,7 +44,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { diff --git a/go/test/endtoend/vtgate/queries/subquery/subquery_test.go b/go/test/endtoend/vtgate/queries/subquery/subquery_test.go index 4298bbe80fc..135b86195a5 100644 --- a/go/test/endtoend/vtgate/queries/subquery/subquery_test.go +++ b/go/test/endtoend/vtgate/queries/subquery/subquery_test.go @@ -25,7 +25,6 @@ import ( "vitess.io/vitess/go/sqltypes" - "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/test/endtoend/utils" ) @@ -47,7 +46,6 @@ func start(t *testing.T) (utils.MySQLCompare, func()) { return mcmp, func() { deleteAll() mcmp.Close() - cluster.PanicHandler(t) } } diff --git a/go/test/endtoend/vtgate/queries/timeout/main_test.go b/go/test/endtoend/vtgate/queries/timeout/main_test.go index 06e8a786469..81fcfb26095 100644 --- a/go/test/endtoend/vtgate/queries/timeout/main_test.go +++ b/go/test/endtoend/vtgate/queries/timeout/main_test.go @@ -48,7 +48,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { diff --git a/go/test/endtoend/vtgate/queries/timeout/timeout_test.go b/go/test/endtoend/vtgate/queries/timeout/timeout_test.go index 565c3c07a4f..d1d718add25 100644 --- a/go/test/endtoend/vtgate/queries/timeout/timeout_test.go +++ b/go/test/endtoend/vtgate/queries/timeout/timeout_test.go @@ -25,7 +25,6 @@ import ( "github.com/stretchr/testify/require" "vitess.io/vitess/go/mysql" - "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/test/endtoend/utils" ) @@ -45,7 +44,6 @@ func start(t *testing.T) (utils.MySQLCompare, func()) { return mcmp, func() { deleteAll() mcmp.Close() - cluster.PanicHandler(t) } } diff --git a/go/test/endtoend/vtgate/queries/tpch/main_test.go b/go/test/endtoend/vtgate/queries/tpch/main_test.go index 103adb336ab..403ddd510ce 100644 --- a/go/test/endtoend/vtgate/queries/tpch/main_test.go +++ b/go/test/endtoend/vtgate/queries/tpch/main_test.go @@ -43,7 +43,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { diff --git a/go/test/endtoend/vtgate/queries/tpch/tpch_test.go 
b/go/test/endtoend/vtgate/queries/tpch/tpch_test.go index c4bf71cafa1..efa322a5e1c 100644 --- a/go/test/endtoend/vtgate/queries/tpch/tpch_test.go +++ b/go/test/endtoend/vtgate/queries/tpch/tpch_test.go @@ -21,7 +21,6 @@ import ( "github.com/stretchr/testify/require" - "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/test/endtoend/utils" ) @@ -43,7 +42,6 @@ func start(t *testing.T) (utils.MySQLCompare, func()) { return mcmp, func() { deleteAll() mcmp.Close() - cluster.PanicHandler(t) } } diff --git a/go/test/endtoend/vtgate/queries/union/main_test.go b/go/test/endtoend/vtgate/queries/union/main_test.go index 06ec07a6c2f..a5f45f84156 100644 --- a/go/test/endtoend/vtgate/queries/union/main_test.go +++ b/go/test/endtoend/vtgate/queries/union/main_test.go @@ -43,7 +43,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { diff --git a/go/test/endtoend/vtgate/queries/union/union_test.go b/go/test/endtoend/vtgate/queries/union/union_test.go index 03f98950f44..26371af3c87 100644 --- a/go/test/endtoend/vtgate/queries/union/union_test.go +++ b/go/test/endtoend/vtgate/queries/union/union_test.go @@ -19,7 +19,6 @@ package union import ( "testing" - "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/test/endtoend/utils" "github.com/stretchr/testify/assert" @@ -44,7 +43,6 @@ func start(t *testing.T) (utils.MySQLCompare, func()) { return mcmp, func() { deleteAll() mcmp.Close() - cluster.PanicHandler(t) } } diff --git a/go/test/endtoend/vtgate/queries/vexplain/main_test.go b/go/test/endtoend/vtgate/queries/vexplain/main_test.go index c1c401bc573..96b6a1c41d1 100644 --- a/go/test/endtoend/vtgate/queries/vexplain/main_test.go +++ b/go/test/endtoend/vtgate/queries/vexplain/main_test.go @@ -43,7 +43,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { diff --git a/go/test/endtoend/vtgate/queries/vexplain/vexplain_test.go b/go/test/endtoend/vtgate/queries/vexplain/vexplain_test.go index 45baf7af903..1a8ec2b4c37 100644 --- a/go/test/endtoend/vtgate/queries/vexplain/vexplain_test.go +++ b/go/test/endtoend/vtgate/queries/vexplain/vexplain_test.go @@ -24,7 +24,6 @@ import ( "github.com/stretchr/testify/require" "vitess.io/vitess/go/sqltypes" - "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/test/endtoend/utils" @@ -49,7 +48,6 @@ func start(t *testing.T) (*mysql.Conn, func()) { return vtConn, func() { deleteAll() vtConn.Close() - cluster.PanicHandler(t) } } diff --git a/go/test/endtoend/vtgate/readafterwrite/raw_test.go b/go/test/endtoend/vtgate/readafterwrite/raw_test.go index 0549a9b06b0..ce6db45d24e 100644 --- a/go/test/endtoend/vtgate/readafterwrite/raw_test.go +++ b/go/test/endtoend/vtgate/readafterwrite/raw_test.go @@ -100,7 +100,6 @@ CREATE TABLE test_vdx ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { @@ -143,7 +142,6 @@ func TestMain(m *testing.M) { } func TestRAWSettings(t *testing.T) { - defer cluster.PanicHandler(t) conn, err := mysql.Connect(context.Background(), &vtParams) require.NoError(t, err) defer conn.Close() diff --git a/go/test/endtoend/vtgate/reservedconn/main_test.go b/go/test/endtoend/vtgate/reservedconn/main_test.go index 8c0278604f7..00f569d9eb9 100644 --- a/go/test/endtoend/vtgate/reservedconn/main_test.go +++ b/go/test/endtoend/vtgate/reservedconn/main_test.go @@ -101,7 +101,6 @@ CREATE TABLE test_vdx ( ) func TestMain(m *testing.M) { - 
defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { diff --git a/go/test/endtoend/vtgate/reservedconn/reconnect1/main_test.go b/go/test/endtoend/vtgate/reservedconn/reconnect1/main_test.go index 9a4d7c50dbd..3280d64d433 100644 --- a/go/test/endtoend/vtgate/reservedconn/reconnect1/main_test.go +++ b/go/test/endtoend/vtgate/reservedconn/reconnect1/main_test.go @@ -63,7 +63,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { diff --git a/go/test/endtoend/vtgate/reservedconn/reconnect2/main_test.go b/go/test/endtoend/vtgate/reservedconn/reconnect2/main_test.go index 915d76051a4..81bb6b90ee5 100644 --- a/go/test/endtoend/vtgate/reservedconn/reconnect2/main_test.go +++ b/go/test/endtoend/vtgate/reservedconn/reconnect2/main_test.go @@ -64,7 +64,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { diff --git a/go/test/endtoend/vtgate/reservedconn/reconnect3/main_test.go b/go/test/endtoend/vtgate/reservedconn/reconnect3/main_test.go index 20d255941db..decf2f0dfdd 100644 --- a/go/test/endtoend/vtgate/reservedconn/reconnect3/main_test.go +++ b/go/test/endtoend/vtgate/reservedconn/reconnect3/main_test.go @@ -40,7 +40,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { diff --git a/go/test/endtoend/vtgate/reservedconn/reconnect4/main_test.go b/go/test/endtoend/vtgate/reservedconn/reconnect4/main_test.go index d4a61665a6d..320cbc87172 100644 --- a/go/test/endtoend/vtgate/reservedconn/reconnect4/main_test.go +++ b/go/test/endtoend/vtgate/reservedconn/reconnect4/main_test.go @@ -40,7 +40,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { diff --git a/go/test/endtoend/vtgate/reservedconn/sysvar_test.go b/go/test/endtoend/vtgate/reservedconn/sysvar_test.go index e7e0cfb0259..8bda7dea121 100644 --- a/go/test/endtoend/vtgate/reservedconn/sysvar_test.go +++ b/go/test/endtoend/vtgate/reservedconn/sysvar_test.go @@ -29,11 +29,9 @@ import ( "github.com/stretchr/testify/require" "vitess.io/vitess/go/mysql" - "vitess.io/vitess/go/test/endtoend/cluster" ) func TestSetSysVarSingle(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() type queriesWithExpectations struct { name, expr string diff --git a/go/test/endtoend/vtgate/reservedconn/udv_test.go b/go/test/endtoend/vtgate/reservedconn/udv_test.go index 55f4c54c612..14b65dbcd35 100644 --- a/go/test/endtoend/vtgate/reservedconn/udv_test.go +++ b/go/test/endtoend/vtgate/reservedconn/udv_test.go @@ -31,11 +31,9 @@ import ( "github.com/stretchr/testify/require" "vitess.io/vitess/go/mysql" - "vitess.io/vitess/go/test/endtoend/cluster" ) func TestSetUDV(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() type queriesWithExpectations struct { @@ -123,7 +121,6 @@ func TestSetUDV(t *testing.T) { } func TestMysqlDumpInitialLog(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() conn, err := mysql.Connect(ctx, &vtParams) diff --git a/go/test/endtoend/vtgate/schema/schema_test.go b/go/test/endtoend/vtgate/schema/schema_test.go index 6b2e8ef7e61..4c28e29ca0d 100644 --- a/go/test/endtoend/vtgate/schema/schema_test.go +++ b/go/test/endtoend/vtgate/schema/schema_test.go @@ -55,7 +55,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitcode, err := func() (int, error) { @@ -101,7 +100,6 @@ func TestMain(m 
*testing.M) { } func TestSchemaChange(t *testing.T) { - defer cluster.PanicHandler(t) testWithInitialSchema(t) testWithAlterSchema(t) testWithAlterDatabase(t) diff --git a/go/test/endtoend/vtgate/schematracker/loadkeyspace/schema_load_keyspace_test.go b/go/test/endtoend/vtgate/schematracker/loadkeyspace/schema_load_keyspace_test.go index 9586206221e..ec201487887 100644 --- a/go/test/endtoend/vtgate/schematracker/loadkeyspace/schema_load_keyspace_test.go +++ b/go/test/endtoend/vtgate/schematracker/loadkeyspace/schema_load_keyspace_test.go @@ -57,7 +57,6 @@ var ( ) func TestLoadKeyspaceWithNoTablet(t *testing.T) { - defer cluster.PanicHandler(t) var err error clusterInstance = cluster.NewCluster(cell, hostname) @@ -100,7 +99,6 @@ func TestLoadKeyspaceWithNoTablet(t *testing.T) { } func TestNoInitialKeyspace(t *testing.T) { - defer cluster.PanicHandler(t) var err error clusterInstance = cluster.NewCluster(cell, hostname) diff --git a/go/test/endtoend/vtgate/schematracker/restarttablet/schema_restart_test.go b/go/test/endtoend/vtgate/schematracker/restarttablet/schema_restart_test.go index 3bb4f6dfd9f..1943fefa9d7 100644 --- a/go/test/endtoend/vtgate/schematracker/restarttablet/schema_restart_test.go +++ b/go/test/endtoend/vtgate/schematracker/restarttablet/schema_restart_test.go @@ -64,7 +64,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitcode := func() int { @@ -116,7 +115,6 @@ func TestMain(m *testing.M) { } func TestVSchemaTrackerInit(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() conn, err := mysql.Connect(ctx, &vtParams) require.NoError(t, err) @@ -137,7 +135,6 @@ func TestVSchemaTrackerInit(t *testing.T) { // properly handles primary tablet restarts -- meaning that we maintain // the exact same vschema state as before the restart. 
func TestVSchemaTrackerKeyspaceReInit(t *testing.T) { - defer cluster.PanicHandler(t) primaryTablet := clusterInstance.Keyspaces[0].Shards[0].PrimaryTablet() diff --git a/go/test/endtoend/vtgate/schematracker/sharded/st_sharded_test.go b/go/test/endtoend/vtgate/schematracker/sharded/st_sharded_test.go index 50042f3142a..5f82bd5d71a 100644 --- a/go/test/endtoend/vtgate/schematracker/sharded/st_sharded_test.go +++ b/go/test/endtoend/vtgate/schematracker/sharded/st_sharded_test.go @@ -48,7 +48,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { @@ -161,7 +160,6 @@ func TestNewTable(t *testing.T) { } func TestAmbiguousColumnJoin(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() conn, err := mysql.Connect(ctx, &vtParams) require.NoError(t, err) diff --git a/go/test/endtoend/vtgate/schematracker/sharded_prs/st_sharded_test.go b/go/test/endtoend/vtgate/schematracker/sharded_prs/st_sharded_test.go index 09bd97eb9fe..6fbd3f3d33c 100644 --- a/go/test/endtoend/vtgate/schematracker/sharded_prs/st_sharded_test.go +++ b/go/test/endtoend/vtgate/schematracker/sharded_prs/st_sharded_test.go @@ -122,7 +122,6 @@ create table t8( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { @@ -208,7 +207,6 @@ func TestMain(m *testing.M) { } func TestAddColumn(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() conn, err := mysql.Connect(ctx, &vtParams) require.NoError(t, err) diff --git a/go/test/endtoend/vtgate/schematracker/unsharded/st_unsharded_test.go b/go/test/endtoend/vtgate/schematracker/unsharded/st_unsharded_test.go index 5ecf89a5db7..4256727915d 100644 --- a/go/test/endtoend/vtgate/schematracker/unsharded/st_unsharded_test.go +++ b/go/test/endtoend/vtgate/schematracker/unsharded/st_unsharded_test.go @@ -49,7 +49,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { @@ -112,7 +111,6 @@ func TestMain(m *testing.M) { } func TestNewUnshardedTable(t *testing.T) { - defer cluster.PanicHandler(t) // create a sql connection ctx := context.Background() @@ -182,7 +180,6 @@ func TestNewUnshardedTable(t *testing.T) { // creating two tables having the same name differing only in casing, but other operating systems don't. // More information at https://dev.mysql.com/doc/refman/8.0/en/identifier-case-sensitivity.html#:~:text=Table%20names%20are%20stored%20in,lowercase%20on%20storage%20and%20lookup. 
func TestCaseSensitiveSchemaTracking(t *testing.T) { - defer cluster.PanicHandler(t) // create a sql connection ctx := context.Background() diff --git a/go/test/endtoend/vtgate/sec_vind/main_test.go b/go/test/endtoend/vtgate/sec_vind/main_test.go index 7aa5df76a83..7ec0d5c0682 100644 --- a/go/test/endtoend/vtgate/sec_vind/main_test.go +++ b/go/test/endtoend/vtgate/sec_vind/main_test.go @@ -44,7 +44,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { @@ -101,7 +100,6 @@ func start(t *testing.T) (*mysql.Conn, func()) { return conn, func() { deleteAll() conn.Close() - cluster.PanicHandler(t) } } diff --git a/go/test/endtoend/vtgate/sequence/seq_test.go b/go/test/endtoend/vtgate/sequence/seq_test.go index 1bda37094b2..0fc1c810eb3 100644 --- a/go/test/endtoend/vtgate/sequence/seq_test.go +++ b/go/test/endtoend/vtgate/sequence/seq_test.go @@ -174,7 +174,6 @@ CREATE TABLE allDefaults ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { @@ -216,7 +215,6 @@ func TestMain(m *testing.M) { } func TestSeq(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() vtParams := mysql.ConnParams{ Host: "localhost", @@ -274,7 +272,6 @@ func TestSeq(t *testing.T) { } func TestDotTableSeq(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() vtParams := mysql.ConnParams{ Host: "localhost", @@ -297,7 +294,6 @@ func TestDotTableSeq(t *testing.T) { } func TestInsertAllDefaults(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() vtParams := mysql.ConnParams{ Host: "localhost", diff --git a/go/test/endtoend/vtgate/tablet_healthcheck/reparent_test.go b/go/test/endtoend/vtgate/tablet_healthcheck/reparent_test.go index d6357ce8f2a..77e9a58cf69 100644 --- a/go/test/endtoend/vtgate/tablet_healthcheck/reparent_test.go +++ b/go/test/endtoend/vtgate/tablet_healthcheck/reparent_test.go @@ -89,7 +89,6 @@ create table corder( // TestMain sets up the vitess cluster for any subsequent tests func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { diff --git a/go/test/endtoend/vtgate/tablet_healthcheck_cache/correctness_test.go b/go/test/endtoend/vtgate/tablet_healthcheck_cache/correctness_test.go index 50529d9fdf9..3457a2cab3c 100644 --- a/go/test/endtoend/vtgate/tablet_healthcheck_cache/correctness_test.go +++ b/go/test/endtoend/vtgate/tablet_healthcheck_cache/correctness_test.go @@ -86,7 +86,6 @@ create table corder( // TestMain sets up the vitess cluster for any subsequent tests func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { diff --git a/go/test/endtoend/vtgate/unsharded/main_test.go b/go/test/endtoend/vtgate/unsharded/main_test.go index e1818735ed1..307bb7fcf23 100644 --- a/go/test/endtoend/vtgate/unsharded/main_test.go +++ b/go/test/endtoend/vtgate/unsharded/main_test.go @@ -147,7 +147,6 @@ END; ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { @@ -192,7 +191,6 @@ func TestMain(m *testing.M) { func TestSelectIntoAndLoadFrom(t *testing.T) { // Test is skipped because it requires secure-file-priv variable to be set to not NULL or empty. 
t.Skip() - defer cluster.PanicHandler(t) ctx := context.Background() vtParams := mysql.ConnParams{ Host: "localhost", @@ -227,7 +225,6 @@ func TestSelectIntoAndLoadFrom(t *testing.T) { } func TestEmptyStatement(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() vtParams := mysql.ConnParams{ Host: "localhost", @@ -244,7 +241,6 @@ func TestEmptyStatement(t *testing.T) { } func TestTopoDownServingQuery(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() vtParams := mysql.ConnParams{ Host: "localhost", @@ -264,7 +260,6 @@ func TestTopoDownServingQuery(t *testing.T) { } func TestInsertAllDefaults(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() vtParams := mysql.ConnParams{ Host: "localhost", @@ -279,7 +274,6 @@ func TestInsertAllDefaults(t *testing.T) { } func TestDDLUnsharded(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() vtParams := mysql.ConnParams{ Host: "localhost", @@ -300,7 +294,6 @@ func TestDDLUnsharded(t *testing.T) { } func TestCallProcedure(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() vtParams := mysql.ConnParams{ Host: "localhost", @@ -347,7 +340,6 @@ func TestCallProcedure(t *testing.T) { } func TestTempTable(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() vtParams := mysql.ConnParams{ Host: "localhost", @@ -372,7 +364,6 @@ func TestTempTable(t *testing.T) { } func TestReservedConnDML(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() vtParams := mysql.ConnParams{ Host: "localhost", @@ -395,7 +386,6 @@ func TestReservedConnDML(t *testing.T) { } func TestNumericPrecisionScale(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() vtParams := mysql.ConnParams{ Host: "localhost", diff --git a/go/test/endtoend/vtgate/vindex_bindvars/main_test.go b/go/test/endtoend/vtgate/vindex_bindvars/main_test.go index 3251668e155..84c2c825784 100644 --- a/go/test/endtoend/vtgate/vindex_bindvars/main_test.go +++ b/go/test/endtoend/vtgate/vindex_bindvars/main_test.go @@ -265,7 +265,6 @@ CREATE TABLE thex ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { @@ -304,7 +303,6 @@ func TestMain(m *testing.M) { } func TestVindexHexTypes(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() conn, err := mysql.Connect(ctx, &vtParams) require.Nil(t, err) @@ -326,7 +324,6 @@ func TestVindexHexTypes(t *testing.T) { } func TestVindexBindVarOverlap(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() conn, err := mysql.Connect(ctx, &vtParams) require.Nil(t, err) diff --git a/go/test/endtoend/vtgate/vschema/vschema_test.go b/go/test/endtoend/vtgate/vschema/vschema_test.go index eec54f8f47f..5cd01449b71 100644 --- a/go/test/endtoend/vtgate/vschema/vschema_test.go +++ b/go/test/endtoend/vtgate/vschema/vschema_test.go @@ -18,21 +18,25 @@ package vschema import ( "context" + "encoding/json" "flag" "fmt" "os" + "path" "testing" + "time" - "vitess.io/vitess/go/test/endtoend/utils" - + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/test/endtoend/cluster" + "vitess.io/vitess/go/test/endtoend/utils" ) var ( clusterInstance *cluster.LocalProcessCluster + configFile string vtParams mysql.ConnParams hostname = "localhost" keyspaceName = "ks" @@ -53,7 +57,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() 
exitcode, err := func() (int, error) { @@ -66,7 +69,21 @@ func TestMain(m *testing.M) { } // List of users authorized to execute vschema ddl operations - clusterInstance.VtGateExtraArgs = []string{"--vschema_ddl_authorized_users=%", "--schema_change_signal=false"} + if utils.BinaryIsAtLeastAtVersion(22, "vtgate") { + timeNow := time.Now().Unix() + configFile = path.Join(os.TempDir(), fmt.Sprintf("vtgate-config-%d.json", timeNow)) + err := writeConfig(configFile, map[string]string{ + "vschema_ddl_authorized_users": "%", + }) + if err != nil { + return 1, err + } + defer os.Remove(configFile) + + clusterInstance.VtGateExtraArgs = []string{fmt.Sprintf("--config-file=%s", configFile), "--schema_change_signal=false"} + } else { + clusterInstance.VtGateExtraArgs = []string{"--vschema_ddl_authorized_users=%", "--schema_change_signal=false"} + } // Start keyspace keyspace := &cluster.Keyspace{ @@ -96,8 +113,16 @@ func TestMain(m *testing.M) { } +func writeConfig(path string, cfg map[string]string) error { + file, err := os.Create(path) + if err != nil { + return err + } + defer file.Close() + return json.NewEncoder(file).Encode(cfg) +} + func TestVSchema(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() conn, err := mysql.Connect(ctx, &vtParams) require.NoError(t, err) @@ -138,4 +163,15 @@ func TestVSchema(t *testing.T) { utils.AssertMatches(t, conn, "delete from vt_user", `[]`) + if utils.BinaryIsAtLeastAtVersion(22, "vtgate") { + writeConfig(configFile, map[string]string{ + "vschema_ddl_authorized_users": "", + }) + + require.EventuallyWithT(t, func(t *assert.CollectT) { + _, err = conn.ExecuteFetch("ALTER VSCHEMA DROP TABLE main", 1000, false) + assert.Error(t, err) + assert.ErrorContains(t, err, "is not authorized to perform vschema operations") + }, 5*time.Second, 100*time.Millisecond) + } } diff --git a/go/test/endtoend/vtorc/api/api_test.go b/go/test/endtoend/vtorc/api/api_test.go index 638ea5fa72e..3fe43fa8f8f 100644 --- a/go/test/endtoend/vtorc/api/api_test.go +++ b/go/test/endtoend/vtorc/api/api_test.go @@ -33,7 +33,6 @@ import ( // TestAPIEndpoints tests the various API endpoints that VTOrc offers. func TestAPIEndpoints(t *testing.T) { - defer cluster.PanicHandler(t) utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 2, 1, nil, cluster.VTOrcConfiguration{ PreventCrossCellFailover: true, }, 1, "") diff --git a/go/test/endtoend/vtorc/api/config_test.go b/go/test/endtoend/vtorc/api/config_test.go index 71cc6291be7..821b0f8071e 100644 --- a/go/test/endtoend/vtorc/api/config_test.go +++ b/go/test/endtoend/vtorc/api/config_test.go @@ -30,7 +30,6 @@ import ( // TestDynamicConfigs tests the dyanamic configurations that VTOrc offers. 
func TestDynamicConfigs(t *testing.T) { - defer cluster.PanicHandler(t) utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 2, 1, nil, cluster.VTOrcConfiguration{}, 1, "") vtorc := clusterInfo.ClusterInstance.VTOrcProcesses[0] diff --git a/go/test/endtoend/vtorc/api/main_test.go b/go/test/endtoend/vtorc/api/main_test.go index f89326bc856..cc3e796b293 100644 --- a/go/test/endtoend/vtorc/api/main_test.go +++ b/go/test/endtoend/vtorc/api/main_test.go @@ -21,7 +21,6 @@ import ( "os" "testing" - "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/test/endtoend/vtorc/utils" ) @@ -53,8 +52,6 @@ func TestMain(m *testing.M) { return m.Run(), nil }() - cluster.PanicHandler(nil) - if clusterInfo != nil { // stop vtorc first otherwise its logs get polluted // with instances being unreachable triggering unnecessary operations diff --git a/go/test/endtoend/vtorc/general/main_test.go b/go/test/endtoend/vtorc/general/main_test.go index 6db0792de3a..0cd88cd378c 100644 --- a/go/test/endtoend/vtorc/general/main_test.go +++ b/go/test/endtoend/vtorc/general/main_test.go @@ -21,7 +21,6 @@ import ( "os" "testing" - "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/test/endtoend/vtorc/utils" ) @@ -47,8 +46,6 @@ func TestMain(m *testing.M) { return m.Run(), nil }() - cluster.PanicHandler(nil) - if clusterInfo != nil { // stop vtorc first otherwise its logs get polluted // with instances being unreachable triggering unnecessary operations diff --git a/go/test/endtoend/vtorc/general/vtorc_test.go b/go/test/endtoend/vtorc/general/vtorc_test.go index 9cc931897bf..a4ed71945be 100644 --- a/go/test/endtoend/vtorc/general/vtorc_test.go +++ b/go/test/endtoend/vtorc/general/vtorc_test.go @@ -40,7 +40,6 @@ import ( // verify that with multiple vtorc instances, we still only have 1 PlannedReparentShard call func TestPrimaryElection(t *testing.T) { defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance) - defer cluster.PanicHandler(t) utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 2, 1, nil, cluster.VTOrcConfiguration{ PreventCrossCellFailover: true, }, 2, "") @@ -66,7 +65,6 @@ func TestPrimaryElection(t *testing.T) { // if it has an errant GTID. func TestErrantGTIDOnPreviousPrimary(t *testing.T) { defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance) - defer cluster.PanicHandler(t) utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 3, 0, []string{"--change-tablets-with-errant-gtid-to-drained"}, cluster.VTOrcConfiguration{}, 1, "") keyspace := &clusterInfo.ClusterInstance.Keyspaces[0] shard0 := &keyspace.Shards[0] @@ -126,7 +124,6 @@ func TestErrantGTIDOnPreviousPrimary(t *testing.T) { // verify replication is setup func TestSingleKeyspace(t *testing.T) { defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance) - defer cluster.PanicHandler(t) utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 1, 1, []string{"--clusters_to_watch", "ks"}, cluster.VTOrcConfiguration{ PreventCrossCellFailover: true, }, 1, "") @@ -145,7 +142,6 @@ func TestSingleKeyspace(t *testing.T) { // verify replication is setup func TestKeyspaceShard(t *testing.T) { defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance) - defer cluster.PanicHandler(t) utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 1, 1, []string{"--clusters_to_watch", "ks/0"}, cluster.VTOrcConfiguration{ PreventCrossCellFailover: true, }, 1, "") @@ -167,7 +163,6 @@ func TestKeyspaceShard(t *testing.T) { // 6. disable recoveries and make sure the detected problems are set correctly. 
func TestVTOrcRepairs(t *testing.T) { defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance) - defer cluster.PanicHandler(t) utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 3, 0, []string{"--change-tablets-with-errant-gtid-to-drained"}, cluster.VTOrcConfiguration{ PreventCrossCellFailover: true, }, 1, "") @@ -346,7 +341,6 @@ func TestRepairAfterTER(t *testing.T) { // test fails intermittently on CI, skip until it can be fixed. t.SkipNow() defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance) - defer cluster.PanicHandler(t) utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 2, 0, nil, cluster.VTOrcConfiguration{ PreventCrossCellFailover: true, }, 1, "") @@ -480,7 +474,6 @@ func TestSemiSync(t *testing.T) { // TestVTOrcWithPrs tests that VTOrc works fine even when PRS is called from vtctld func TestVTOrcWithPrs(t *testing.T) { defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance) - defer cluster.PanicHandler(t) utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 4, 0, nil, cluster.VTOrcConfiguration{ PreventCrossCellFailover: true, }, 1, "") @@ -530,7 +523,6 @@ func TestVTOrcWithPrs(t *testing.T) { // TestMultipleDurabilities tests that VTOrc works with 2 keyspaces having 2 different durability policies func TestMultipleDurabilities(t *testing.T) { defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance) - defer cluster.PanicHandler(t) // Setup a normal cluster and start vtorc utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 1, 1, nil, cluster.VTOrcConfiguration{}, 1, "") // Setup a semi-sync cluster @@ -551,7 +543,6 @@ func TestMultipleDurabilities(t *testing.T) { // TestDrainedTablet tests that we don't forget drained tablets and they still show up in the vtorc output. func TestDrainedTablet(t *testing.T) { defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance) - defer cluster.PanicHandler(t) // Setup a normal cluster and start vtorc utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 2, 0, nil, cluster.VTOrcConfiguration{}, 1, "") @@ -639,7 +630,6 @@ func TestDurabilityPolicySetLater(t *testing.T) { func TestFullStatusConnectionPooling(t *testing.T) { defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance) - defer cluster.PanicHandler(t) utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 4, 0, []string{ "--tablet_manager_grpc_concurrency=1", }, cluster.VTOrcConfiguration{ diff --git a/go/test/endtoend/vtorc/primaryfailure/main_test.go b/go/test/endtoend/vtorc/primaryfailure/main_test.go index a3e50bd0cc9..cd03df01bd6 100644 --- a/go/test/endtoend/vtorc/primaryfailure/main_test.go +++ b/go/test/endtoend/vtorc/primaryfailure/main_test.go @@ -21,7 +21,6 @@ import ( "os" "testing" - "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/test/endtoend/vtorc/utils" ) @@ -53,8 +52,6 @@ func TestMain(m *testing.M) { return m.Run(), nil }() - cluster.PanicHandler(nil) - if clusterInfo != nil { // stop vtorc first otherwise its logs get polluted // with instances being unreachable triggering unnecessary operations diff --git a/go/test/endtoend/vtorc/primaryfailure/primary_failure_test.go b/go/test/endtoend/vtorc/primaryfailure/primary_failure_test.go index a46e3789730..9017d35a8c5 100644 --- a/go/test/endtoend/vtorc/primaryfailure/primary_failure_test.go +++ b/go/test/endtoend/vtorc/primaryfailure/primary_failure_test.go @@ -39,7 +39,6 @@ import ( // Also tests that VTOrc can handle multiple failures, if the durability policies allow it func TestDownPrimary(t *testing.T) { defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance) 
- defer cluster.PanicHandler(t) // We specify the --wait-replicas-timeout to a small value because we spawn a cross-cell replica later in the test. // If that replica is more advanced than the same-cell-replica, then we try to promote the cross-cell replica as an intermediate source. // If we don't specify a small value of --wait-replicas-timeout, then we would end up waiting for 30 seconds for the dead-primary to respond, failing this test. @@ -116,7 +115,6 @@ func TestDownPrimary(t *testing.T) { // bring down primary before VTOrc has started, let vtorc repair. func TestDownPrimaryBeforeVTOrc(t *testing.T) { defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance) - defer cluster.PanicHandler(t) utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 2, 1, nil, cluster.VTOrcConfiguration{}, 0, "none") keyspace := &clusterInfo.ClusterInstance.Keyspaces[0] shard0 := &keyspace.Shards[0] @@ -172,7 +170,6 @@ func TestDownPrimaryBeforeVTOrc(t *testing.T) { // delete the primary record and let vtorc repair. func TestDeletedPrimaryTablet(t *testing.T) { defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance) - defer cluster.PanicHandler(t) utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 2, 1, []string{"--remote_operation_timeout=10s"}, cluster.VTOrcConfiguration{}, 1, "none") keyspace := &clusterInfo.ClusterInstance.Keyspaces[0] shard0 := &keyspace.Shards[0] @@ -239,7 +236,6 @@ func TestDeletedPrimaryTablet(t *testing.T) { // that primary is unreachable. This help us save few seconds depending on value of `RemoteOperationTimeout` flag. func TestDeadPrimaryRecoversImmediately(t *testing.T) { defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance) - defer cluster.PanicHandler(t) // We specify the --wait-replicas-timeout to a small value because we spawn a cross-cell replica later in the test. // If that replica is more advanced than the same-cell-replica, then we try to promote the cross-cell replica as an intermediate source. // If we don't specify a small value of --wait-replicas-timeout, then we would end up waiting for 30 seconds for the dead-primary to respond, failing this test. 
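Every hunk in this stretch of the diff removes the same thing: a deferred call to cluster.PanicHandler, plus the now-unused cluster import wherever that call was its only use. The helper itself is not shown in this diff; as a rough sketch, assuming it was a recover-based wrapper that reported the panic through testify, the removed pattern amounted to something like the code below. Go's test runner already turns an uncaught panic into a failed run with a full stack trace, which is why dropping the deferred calls loses no diagnostics.

    // Illustrative sketch only; the real helper lived in
    // vitess.io/vitess/go/test/endtoend/cluster and may differ in detail.
    package cluster

    import (
        "testing"

        "github.com/stretchr/testify/require"
    )

    // PanicHandler recovers a panic raised inside a test (or TestMain, which
    // passed nil) and reports it as an ordinary test failure.
    func PanicHandler(t testing.TB) {
        err := recover()
        if t == nil {
            // Nothing to attach the failure to; let TestMain exit normally.
            return
        }
        require.Nilf(t, err, "panic occurred in testcase %v", t.Name())
    }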
@@ -322,7 +318,6 @@ func TestDeadPrimaryRecoversImmediately(t *testing.T) { // covers part of the test case master-failover-lost-replicas from orchestrator func TestCrossDataCenterFailure(t *testing.T) { defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance) - defer cluster.PanicHandler(t) utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 2, 1, nil, cluster.VTOrcConfiguration{ PreventCrossCellFailover: true, }, 1, "") @@ -368,7 +363,6 @@ func TestCrossDataCenterFailure(t *testing.T) { // In case of no viable candidates, we should error out func TestCrossDataCenterFailureError(t *testing.T) { defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance) - defer cluster.PanicHandler(t) utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 1, 1, nil, cluster.VTOrcConfiguration{ PreventCrossCellFailover: true, }, 1, "") @@ -415,7 +409,6 @@ func TestLostRdonlyOnPrimaryFailure(t *testing.T) { // were detected by vtorc and could be configured to have their sources detached t.Skip() defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance) - defer cluster.PanicHandler(t) utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 2, 2, nil, cluster.VTOrcConfiguration{ PreventCrossCellFailover: true, }, 1, "") @@ -495,7 +488,6 @@ func TestLostRdonlyOnPrimaryFailure(t *testing.T) { // covers the test case master-failover-fail-promotion-lag-minutes-success from orchestrator func TestPromotionLagSuccess(t *testing.T) { defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance) - defer cluster.PanicHandler(t) utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 2, 1, nil, cluster.VTOrcConfiguration{ ReplicationLagQuery: "select 59", FailPrimaryPromotionOnLagMinutes: 1, @@ -545,7 +537,6 @@ func TestPromotionLagFailure(t *testing.T) { // was smaller than the configured value, otherwise it would fail the promotion t.Skip() defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance) - defer cluster.PanicHandler(t) utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 3, 1, nil, cluster.VTOrcConfiguration{ ReplicationLagQuery: "select 61", FailPrimaryPromotionOnLagMinutes: 1, @@ -598,7 +589,6 @@ func TestPromotionLagFailure(t *testing.T) { // That is the replica which should be promoted in case of primary failure func TestDownPrimaryPromotionRule(t *testing.T) { defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance) - defer cluster.PanicHandler(t) utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 2, 1, nil, cluster.VTOrcConfiguration{ LockShardTimeoutSeconds: 5, }, 1, "test") @@ -646,7 +636,6 @@ func TestDownPrimaryPromotionRule(t *testing.T) { // It should also be caught up when it is promoted func TestDownPrimaryPromotionRuleWithLag(t *testing.T) { defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance) - defer cluster.PanicHandler(t) utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 2, 1, nil, cluster.VTOrcConfiguration{ LockShardTimeoutSeconds: 5, }, 1, "test") @@ -726,7 +715,6 @@ func TestDownPrimaryPromotionRuleWithLag(t *testing.T) { // It should also be caught up when it is promoted func TestDownPrimaryPromotionRuleWithLagCrossCenter(t *testing.T) { defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance) - defer cluster.PanicHandler(t) utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 2, 1, nil, cluster.VTOrcConfiguration{ LockShardTimeoutSeconds: 5, PreventCrossCellFailover: true, diff --git a/go/test/fuzzing/oss_fuzz_build.sh b/go/test/fuzzing/oss_fuzz_build.sh index 2cb991c4215..cde31291af1 100755 --- a/go/test/fuzzing/oss_fuzz_build.sh +++ 
b/go/test/fuzzing/oss_fuzz_build.sh @@ -54,10 +54,10 @@ mv api_marshal_fuzzer.go $SRC/vitess/go/test/fuzzing/ compile_go_fuzzer vitess.io/vitess/go/test/fuzzing FuzzAPIMarshal api_marshal_fuzzer # collation fuzzer -mv ./go/mysql/collations/uca_test.go \ - ./go/mysql/collations/uca_test_fuzz.go +mv ./go/mysql/collations/colldata/uca_test.go \ + ./go/mysql/collations/colldata/uca_test_fuzz.go -compile_go_fuzzer vitess.io/vitess/go/mysql/collations FuzzCollations fuzz_collations +compile_go_fuzzer vitess.io/vitess/go/mysql/collations/colldata FuzzCollations fuzz_collations compile_go_fuzzer vitess.io/vitess/go/vt/vtgate/planbuilder FuzzTestBuilder fuzz_test_builder gofuzz diff --git a/go/viperutil/internal/sync/sync.go b/go/viperutil/internal/sync/sync.go index 6bee1a14e72..f69829f734d 100644 --- a/go/viperutil/internal/sync/sync.go +++ b/go/viperutil/internal/sync/sync.go @@ -86,7 +86,7 @@ func (v *Viper) Set(key string, value any) { v.m.Lock() defer v.m.Unlock() - // We must not update v.disk here; explicit calls to Set will supercede all + // We must not update v.disk here; explicit calls to Set will supersede all // future config reloads. v.live.Set(key, value) diff --git a/go/vt/logutil/logger.go b/go/vt/logutil/logger.go index 47c3f124238..46d7c0052da 100644 --- a/go/vt/logutil/logger.go +++ b/go/vt/logutil/logger.go @@ -20,6 +20,7 @@ import ( "fmt" "io" "runtime" + "slices" "strings" "sync" "time" @@ -246,6 +247,12 @@ func (ml *MemoryLogger) Clear() { ml.mu.Unlock() } +func (ml *MemoryLogger) LogEvents() []*logutilpb.Event { + ml.mu.Lock() + defer ml.mu.Unlock() + return slices.Clone(ml.Events) +} + // LoggerWriter is an adapter that implements the io.Writer interface. type LoggerWriter struct { logger Logger diff --git a/go/vt/mysqlctl/azblobbackupstorage/azblob.go b/go/vt/mysqlctl/azblobbackupstorage/azblob.go index 3ba6b187a2f..dbd146495e8 100644 --- a/go/vt/mysqlctl/azblobbackupstorage/azblob.go +++ b/go/vt/mysqlctl/azblobbackupstorage/azblob.go @@ -32,8 +32,9 @@ import ( "github.com/Azure/azure-storage-blob-go/azblob" "github.com/spf13/pflag" + "vitess.io/vitess/go/vt/mysqlctl/errors" + "vitess.io/vitess/go/viperutil" - "vitess.io/vitess/go/vt/concurrency" "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/mysqlctl/backupstorage" "vitess.io/vitess/go/vt/servenv" @@ -203,9 +204,9 @@ type AZBlobBackupHandle struct { name string readOnly bool waitGroup sync.WaitGroup - errors concurrency.AllErrorRecorder ctx context.Context cancel context.CancelFunc + errors.PerFileErrorRecorder } // Directory implements BackupHandle. @@ -218,21 +219,6 @@ func (bh *AZBlobBackupHandle) Name() string { return bh.name } -// RecordError is part of the concurrency.ErrorRecorder interface. -func (bh *AZBlobBackupHandle) RecordError(err error) { - bh.errors.RecordError(err) -} - -// HasErrors is part of the concurrency.ErrorRecorder interface. -func (bh *AZBlobBackupHandle) HasErrors() bool { - return bh.errors.HasErrors() -} - -// Error is part of the concurrency.ErrorRecorder interface. -func (bh *AZBlobBackupHandle) Error() error { - return bh.errors.Error() -} - // AddFile implements BackupHandle. 
func (bh *AZBlobBackupHandle) AddFile(ctx context.Context, filename string, filesize int64) (io.WriteCloser, error) { if bh.readOnly { @@ -263,7 +249,7 @@ func (bh *AZBlobBackupHandle) AddFile(ctx context.Context, filename string, file }) if err != nil { reader.CloseWithError(err) - bh.RecordError(err) + bh.RecordError(filename, err) } }() diff --git a/go/vt/mysqlctl/backupengine.go b/go/vt/mysqlctl/backupengine.go index fb3d0e2d125..eeb14039d01 100644 --- a/go/vt/mysqlctl/backupengine.go +++ b/go/vt/mysqlctl/backupengine.go @@ -272,7 +272,7 @@ func getBackupManifestInto(ctx context.Context, backup backupstorage.BackupHandl if err := json.NewDecoder(file).Decode(outManifest); err != nil { return vterrors.Wrap(err, "can't decode MANIFEST") } - return nil + return backup.Error() } // IncrementalBackupDetails lists some incremental backup specific information diff --git a/go/vt/mysqlctl/backupstorage/interface.go b/go/vt/mysqlctl/backupstorage/interface.go index 92bc71d63aa..4fd37b3163a 100644 --- a/go/vt/mysqlctl/backupstorage/interface.go +++ b/go/vt/mysqlctl/backupstorage/interface.go @@ -25,7 +25,8 @@ import ( "github.com/spf13/pflag" - "vitess.io/vitess/go/vt/concurrency" + "vitess.io/vitess/go/vt/mysqlctl/errors" + "vitess.io/vitess/go/vt/servenv" ) @@ -89,9 +90,9 @@ type BackupHandle interface { // ReadCloser is closed. ReadFile(ctx context.Context, filename string) (io.ReadCloser, error) - // concurrency.ErrorRecorder is embedded here to coordinate reporting and - // handling of errors among all the components involved in taking a backup. - concurrency.ErrorRecorder + // BackupErrorRecorder is embedded here to coordinate reporting and + // handling of errors among all the components involved in taking/restoring a backup. + errors.BackupErrorRecorder } // BackupStorage is the interface to the storage system diff --git a/go/vt/mysqlctl/backup_blackbox_race_test.go b/go/vt/mysqlctl/blackbox/backup_race_test.go similarity index 97% rename from go/vt/mysqlctl/backup_blackbox_race_test.go rename to go/vt/mysqlctl/blackbox/backup_race_test.go index 5414ebc5fa6..fd39dfe4b06 100644 --- a/go/vt/mysqlctl/backup_blackbox_race_test.go +++ b/go/vt/mysqlctl/blackbox/backup_race_test.go @@ -16,8 +16,8 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Package mysqlctl_test is the blackbox tests for package mysqlctl. -package mysqlctl_test +// Package blackbox is the blackbox tests for package mysqlctl. 
+package blackbox import ( "fmt" @@ -75,7 +75,7 @@ func TestExecuteBackupWithFailureOnLastFile(t *testing.T) { require.NoError(t, createBackupFiles(path.Join(dataDir, "test2"), 2, "ibd")) defer os.RemoveAll(backupRoot) - needIt, err := needInnoDBRedoLogSubdir() + needIt, err := NeedInnoDBRedoLogSubdir() require.NoError(t, err) if needIt { fpath := path.Join("log", mysql.DynamicRedoLogSubdir) @@ -144,7 +144,7 @@ func TestExecuteBackupWithFailureOnLastFile(t *testing.T) { TopoServer: ts, Keyspace: keyspace, Shard: shard, - MysqlShutdownTimeout: mysqlShutdownTimeout, + MysqlShutdownTimeout: MysqlShutdownTimeout, }, bh) require.ErrorContains(t, err, "cannot add file: 3") diff --git a/go/vt/mysqlctl/backup_blackbox_test.go b/go/vt/mysqlctl/blackbox/backup_test.go similarity index 57% rename from go/vt/mysqlctl/backup_blackbox_test.go rename to go/vt/mysqlctl/blackbox/backup_test.go index 15244fb8782..b7e35304904 100644 --- a/go/vt/mysqlctl/backup_blackbox_test.go +++ b/go/vt/mysqlctl/blackbox/backup_test.go @@ -14,12 +14,15 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Package mysqlctl_test is the blackbox tests for package mysqlctl. -package mysqlctl_test +// Package blackbox is the blackbox tests for package mysqlctl. +package blackbox import ( + "bytes" "context" + "errors" "fmt" + "io" "os" "path" "strings" @@ -31,7 +34,6 @@ import ( "vitess.io/vitess/go/test/utils" - "vitess.io/vitess/go/mysql/capabilities" "vitess.io/vitess/go/mysql/replication" "vitess.io/vitess/go/sqltypes" @@ -48,40 +50,6 @@ import ( "vitess.io/vitess/go/vt/topo/memorytopo" ) -const mysqlShutdownTimeout = 1 * time.Minute - -func setBuiltinBackupMysqldDeadline(t time.Duration) time.Duration { - old := mysqlctl.BuiltinBackupMysqldTimeout - mysqlctl.BuiltinBackupMysqldTimeout = t - - return old -} - -func createBackupDir(root string, dirs ...string) error { - for _, dir := range dirs { - if err := os.MkdirAll(path.Join(root, dir), 0755); err != nil { - return err - } - } - - return nil -} - -func createBackupFiles(root string, fileCount int, ext string) error { - for i := 0; i < fileCount; i++ { - f, err := os.Create(path.Join(root, fmt.Sprintf("%d.%s", i, ext))) - if err != nil { - return err - } - if _, err := f.Write([]byte("hello, world!")); err != nil { - return err - } - defer f.Close() - } - - return nil -} - func TestExecuteBackup(t *testing.T) { ctx := utils.LeakCheckContext(t) @@ -97,7 +65,7 @@ func TestExecuteBackup(t *testing.T) { require.NoError(t, createBackupFiles(path.Join(dataDir, "test2"), 2, "ibd")) defer os.RemoveAll(backupRoot) - needIt, err := needInnoDBRedoLogSubdir() + needIt, err := NeedInnoDBRedoLogSubdir() require.NoError(t, err) if needIt { fpath := path.Join("log", mysql.DynamicRedoLogSubdir) @@ -133,8 +101,8 @@ func TestExecuteBackup(t *testing.T) { be := &mysqlctl.BuiltinBackupEngine{} // Configure a tight deadline to force a timeout - oldDeadline := setBuiltinBackupMysqldDeadline(time.Second) - defer setBuiltinBackupMysqldDeadline(oldDeadline) + oldDeadline := SetBuiltinBackupMysqldDeadline(time.Second) + defer SetBuiltinBackupMysqldDeadline(oldDeadline) bh := filebackupstorage.NewBackupHandle(nil, "", "", false) @@ -163,7 +131,7 @@ func TestExecuteBackup(t *testing.T) { Keyspace: keyspace, Shard: shard, Stats: fakeStats, - MysqlShutdownTimeout: mysqlShutdownTimeout, + MysqlShutdownTimeout: MysqlShutdownTimeout, }, bh) require.NoError(t, err) @@ -221,7 +189,7 @@ func TestExecuteBackup(t *testing.T) { TopoServer: ts, Keyspace: 
keyspace, Shard: shard, - MysqlShutdownTimeout: mysqlShutdownTimeout, + MysqlShutdownTimeout: MysqlShutdownTimeout, }, bh) assert.Error(t, err) @@ -243,7 +211,7 @@ func TestExecuteBackupWithSafeUpgrade(t *testing.T) { require.NoError(t, createBackupFiles(path.Join(dataDir, "test2"), 2, "ibd")) defer os.RemoveAll(backupRoot) - needIt, err := needInnoDBRedoLogSubdir() + needIt, err := NeedInnoDBRedoLogSubdir() require.NoError(t, err) if needIt { fpath := path.Join("log", mysql.DynamicRedoLogSubdir) @@ -279,8 +247,8 @@ func TestExecuteBackupWithSafeUpgrade(t *testing.T) { be := &mysqlctl.BuiltinBackupEngine{} // Configure a tight deadline to force a timeout - oldDeadline := setBuiltinBackupMysqldDeadline(time.Second) - defer setBuiltinBackupMysqldDeadline(oldDeadline) + oldDeadline := SetBuiltinBackupMysqldDeadline(time.Second) + defer SetBuiltinBackupMysqldDeadline(oldDeadline) bh := filebackupstorage.NewBackupHandle(nil, "", "", false) @@ -310,7 +278,7 @@ func TestExecuteBackupWithSafeUpgrade(t *testing.T) { Shard: shard, Stats: backupstats.NewFakeStats(), UpgradeSafe: true, - MysqlShutdownTimeout: mysqlShutdownTimeout, + MysqlShutdownTimeout: MysqlShutdownTimeout, }, bh) require.NoError(t, err) @@ -336,7 +304,7 @@ func TestExecuteBackupWithCanceledContext(t *testing.T) { require.NoError(t, createBackupFiles(path.Join(dataDir, "test2"), 2, "ibd")) defer os.RemoveAll(backupRoot) - needIt, err := needInnoDBRedoLogSubdir() + needIt, err := NeedInnoDBRedoLogSubdir() require.NoError(t, err) if needIt { fpath := path.Join("log", mysql.DynamicRedoLogSubdir) @@ -397,12 +365,12 @@ func TestExecuteBackupWithCanceledContext(t *testing.T) { TopoServer: ts, Keyspace: keyspace, Shard: shard, - MysqlShutdownTimeout: mysqlShutdownTimeout, + MysqlShutdownTimeout: MysqlShutdownTimeout, }, bh) require.Error(t, err) // all four files will fail - require.ErrorContains(t, err, "context canceled;context canceled;context canceled;context canceled") + require.ErrorContains(t, err, "context canceled; context canceled; context canceled; context canceled") assert.Equal(t, mysqlctl.BackupUnusable, backupResult) } @@ -425,7 +393,7 @@ func TestExecuteRestoreWithTimedOutContext(t *testing.T) { require.NoError(t, createBackupFiles(path.Join(dataDir, "test2"), 2, "ibd")) defer os.RemoveAll(backupRoot) - needIt, err := needInnoDBRedoLogSubdir() + needIt, err := NeedInnoDBRedoLogSubdir() require.NoError(t, err) if needIt { fpath := path.Join("log", mysql.DynamicRedoLogSubdir) @@ -482,7 +450,7 @@ func TestExecuteRestoreWithTimedOutContext(t *testing.T) { TopoServer: ts, Keyspace: keyspace, Shard: shard, - MysqlShutdownTimeout: mysqlShutdownTimeout, + MysqlShutdownTimeout: MysqlShutdownTimeout, }, bh) require.NoError(t, err) @@ -521,7 +489,7 @@ func TestExecuteRestoreWithTimedOutContext(t *testing.T) { RestoreToTimestamp: time.Time{}, DryRun: false, Stats: fakeStats, - MysqlShutdownTimeout: mysqlShutdownTimeout, + MysqlShutdownTimeout: MysqlShutdownTimeout, } // Successful restore. @@ -587,24 +555,374 @@ func TestExecuteRestoreWithTimedOutContext(t *testing.T) { } } -// needInnoDBRedoLogSubdir indicates whether we need to create a redo log subdirectory. -// Starting with MySQL 8.0.30, the InnoDB redo logs are stored in a subdirectory of the -// (/. by default) called "#innodb_redo". 
See: -// -// https://dev.mysql.com/doc/refman/8.0/en/innodb-redo-log.html#innodb-modifying-redo-log-capacity -func needInnoDBRedoLogSubdir() (needIt bool, err error) { - mysqldVersionStr, err := mysqlctl.GetVersionString() - if err != nil { - return needIt, err +type rwCloseFailFirstCall struct { + *bytes.Buffer + firstDone bool +} + +func (w *rwCloseFailFirstCall) Write(p []byte) (n int, err error) { + if w.firstDone { + return w.Buffer.Write(p) } - _, sv, err := mysqlctl.ParseVersionString(mysqldVersionStr) - if err != nil { - return needIt, err + w.firstDone = true + return 0, errors.New("failing first write") +} + +func (w *rwCloseFailFirstCall) Read(p []byte) (n int, err error) { + if w.firstDone { + return w.Buffer.Read(p) } - versionStr := fmt.Sprintf("%d.%d.%d", sv.Major, sv.Minor, sv.Patch) - capableOf := mysql.ServerVersionCapableOf(versionStr) - if capableOf == nil { - return needIt, fmt.Errorf("cannot determine database flavor details for version %s", versionStr) + w.firstDone = true + return 0, errors.New("failing first read") +} + +func (w *rwCloseFailFirstCall) Close() error { + return nil +} + +func newWriteCloseFailFirstWrite(firstWriteDone bool) *rwCloseFailFirstCall { + return &rwCloseFailFirstCall{ + Buffer: bytes.NewBuffer(nil), + firstDone: firstWriteDone, } - return capableOf(capabilities.DynamicRedoLogCapacityFlavorCapability) +} + +func TestExecuteBackupFailToWriteEachFileOnlyOnce(t *testing.T) { + ctx := utils.LeakCheckContext(t) + backupRoot, keyspace, shard, ts := SetupCluster(ctx, t, 2, 2) + + bufferPerFiles := make(map[string]*rwCloseFailFirstCall) + be := &mysqlctl.BuiltinBackupEngine{} + bh := &mysqlctl.FakeBackupHandle{} + bh.AddFileReturnF = func(filename string) mysqlctl.FakeBackupHandleAddFileReturn { + // This mimics what happens with the other BackupHandles where doing AddFile will either truncate or override + // any existing data if the same filename already exists. + _, isRetry := bufferPerFiles[filename] + newBuffer := newWriteCloseFailFirstWrite(isRetry) + bufferPerFiles[filename] = newBuffer + return mysqlctl.FakeBackupHandleAddFileReturn{WriteCloser: newBuffer} + } + + // Spin up a fake daemon to be used in backups. It needs to be allowed to receive: + // "STOP REPLICA", "START REPLICA", in that order. 
+ fakedb := fakesqldb.New(t) + defer fakedb.Close() + mysqld := mysqlctl.NewFakeMysqlDaemon(fakedb) + defer mysqld.Close() + mysqld.ExpectedExecuteSuperQueryList = []string{"STOP REPLICA", "START REPLICA"} + + logger := logutil.NewMemoryLogger() + ctx, cancel := context.WithCancel(ctx) + backupResult, err := be.ExecuteBackup(ctx, mysqlctl.BackupParams{ + Logger: logger, + Mysqld: mysqld, + Cnf: &mysqlctl.Mycnf{ + InnodbDataHomeDir: path.Join(backupRoot, "innodb"), + InnodbLogGroupHomeDir: path.Join(backupRoot, "log"), + DataDir: path.Join(backupRoot, "datadir"), + }, + Stats: backupstats.NewFakeStats(), + Concurrency: 1, + HookExtraEnv: map[string]string{}, + TopoServer: ts, + Keyspace: keyspace, + Shard: shard, + MysqlShutdownTimeout: MysqlShutdownTimeout, + }, bh) + cancel() + + expectedLogs := []string{ + "Backing up file: test1/0.ibd (attempt 1/2)", + "Backing up file: test1/1.ibd (attempt 1/2)", + "Backing up file: test2/0.ibd (attempt 1/2)", + "Backing up file: test2/1.ibd (attempt 1/2)", + + "Backing up file: test1/0.ibd (attempt 2/2)", + "Backing up file: test1/1.ibd (attempt 2/2)", + "Backing up file: test2/0.ibd (attempt 2/2)", + "Backing up file: test2/1.ibd (attempt 2/2)", + + "Backing up file MANIFEST (attempt 1/2)", + "Failed backing up MANIFEST (attempt 1/2)", + "Backing up file MANIFEST (attempt 2/2)", + "Completed backing up MANIFEST (attempt 2/2)", + } + + // Sleep just long enough for everything to complete. + // It's not flaky, the race detector detects a race when there isn't, + // the machine is just too slow to propagate the ctxCancel() to all goroutines. + time.Sleep(2 * time.Second) + AssertLogs(t, expectedLogs, logger) + + require.NoError(t, err) + require.Equal(t, mysqlctl.BackupUsable, backupResult) +} + +func TestExecuteBackupFailToWriteFileTwice(t *testing.T) { + ctx := utils.LeakCheckContext(t) + backupRoot, keyspace, shard, ts := SetupCluster(ctx, t, 1, 1) + + bufferPerFiles := make(map[string]*rwCloseFailFirstCall) + be := &mysqlctl.BuiltinBackupEngine{} + bh := &mysqlctl.FakeBackupHandle{} + bh.AddFileReturnF = func(filename string) mysqlctl.FakeBackupHandleAddFileReturn { + newBuffer := newWriteCloseFailFirstWrite(false) + bufferPerFiles[filename] = newBuffer + + return mysqlctl.FakeBackupHandleAddFileReturn{WriteCloser: newBuffer} + } + + // Spin up a fake daemon to be used in backups. It needs to be allowed to receive: + // "STOP REPLICA", "START REPLICA", in that order. + fakedb := fakesqldb.New(t) + defer fakedb.Close() + mysqld := mysqlctl.NewFakeMysqlDaemon(fakedb) + defer mysqld.Close() + mysqld.ExpectedExecuteSuperQueryList = []string{"STOP REPLICA", "START REPLICA"} + + logger := logutil.NewMemoryLogger() + fakeStats := backupstats.NewFakeStats() + ctx, cancel := context.WithCancel(ctx) + backupResult, err := be.ExecuteBackup(ctx, mysqlctl.BackupParams{ + Logger: logger, + Mysqld: mysqld, + Cnf: &mysqlctl.Mycnf{ + InnodbDataHomeDir: path.Join(backupRoot, "innodb"), + InnodbLogGroupHomeDir: path.Join(backupRoot, "log"), + DataDir: path.Join(backupRoot, "datadir"), + }, + Stats: fakeStats, + Concurrency: 1, + HookExtraEnv: map[string]string{}, + TopoServer: ts, + Keyspace: keyspace, + Shard: shard, + MysqlShutdownTimeout: MysqlShutdownTimeout, + }, bh) + cancel() + + // Sleep just long enough for everything to complete. + // It's not flaky, the race detector detects a race when there isn't, + // the machine is just too slow to propagate the ctxCancel() to all goroutines. 
+ time.Sleep(2 * time.Second) + + expectedLogs := []string{ + "Backing up file: test1/0.ibd (attempt 1/2)", + "Backing up file: test1/0.ibd (attempt 2/2)", + } + AssertLogs(t, expectedLogs, logger) + + ss := GetStats(fakeStats) + require.Equal(t, 2, ss.DestinationCloseStats) + require.Equal(t, 2, ss.DestinationOpenStats) + require.Equal(t, 2, ss.DestinationWriteStats) + require.Equal(t, 2, ss.SourceCloseStats) + require.Equal(t, 2, ss.SourceOpenStats) + require.Equal(t, 2, ss.SourceReadStats) + require.ErrorContains(t, err, "failing first write") + require.Equal(t, mysqlctl.BackupUnusable, backupResult) +} + +func TestExecuteRestoreFailToReadEachFileOnlyOnce(t *testing.T) { + ctx := utils.LeakCheckContext(t) + backupRoot, keyspace, shard, ts := SetupCluster(ctx, t, 2, 2) + + be := &mysqlctl.BuiltinBackupEngine{} + bufferPerFiles := make(map[string]*rwCloseFailFirstCall) + bh := &mysqlctl.FakeBackupHandle{} + bh.AddFileReturnF = func(filename string) mysqlctl.FakeBackupHandleAddFileReturn { + // let's never make it fail for now + newBuffer := newWriteCloseFailFirstWrite(true) + bufferPerFiles[filename] = newBuffer + return mysqlctl.FakeBackupHandleAddFileReturn{WriteCloser: newBuffer} + } + + // Spin up a fake daemon to be used in backups. It needs to be allowed to receive: + // "STOP REPLICA", "START REPLICA", in that order. + fakedb := fakesqldb.New(t) + defer fakedb.Close() + mysqld := mysqlctl.NewFakeMysqlDaemon(fakedb) + defer mysqld.Close() + mysqld.ExpectedExecuteSuperQueryList = []string{"STOP REPLICA", "START REPLICA"} + + backupResult, err := be.ExecuteBackup(ctx, mysqlctl.BackupParams{ + Logger: logutil.NewConsoleLogger(), + Mysqld: mysqld, + Cnf: &mysqlctl.Mycnf{ + InnodbDataHomeDir: path.Join(backupRoot, "innodb"), + InnodbLogGroupHomeDir: path.Join(backupRoot, "log"), + DataDir: path.Join(backupRoot, "datadir"), + }, + Stats: backupstats.NewFakeStats(), + Concurrency: 1, + HookExtraEnv: map[string]string{}, + TopoServer: ts, + Keyspace: keyspace, + Shard: shard, + MysqlShutdownTimeout: MysqlShutdownTimeout, + }, bh) + + require.NoError(t, err) + require.Equal(t, mysqlctl.BackupUsable, backupResult) + + // let's mark each file in the buffer as if it is their first read + for key := range bufferPerFiles { + bufferPerFiles[key].firstDone = false + } + + // Now try to restore the above backup. 
+ fakeBh := &mysqlctl.FakeBackupHandle{} + fakeBh.ReadFileReturnF = func(ctx context.Context, filename string) (io.ReadCloser, error) { + return bufferPerFiles[filename], nil + } + + fakedb = fakesqldb.New(t) + defer fakedb.Close() + mysqld = mysqlctl.NewFakeMysqlDaemon(fakedb) + defer mysqld.Close() + mysqld.ExpectedExecuteSuperQueryList = []string{"STOP REPLICA", "START REPLICA"} + + fakeStats := backupstats.NewFakeStats() + logger := logutil.NewMemoryLogger() + + restoreParams := mysqlctl.RestoreParams{ + Cnf: &mysqlctl.Mycnf{ + InnodbDataHomeDir: path.Join(backupRoot, "innodb"), + InnodbLogGroupHomeDir: path.Join(backupRoot, "log"), + DataDir: path.Join(backupRoot, "datadir"), + BinLogPath: path.Join(backupRoot, "binlog"), + RelayLogPath: path.Join(backupRoot, "relaylog"), + RelayLogIndexPath: path.Join(backupRoot, "relaylogindex"), + RelayLogInfoPath: path.Join(backupRoot, "relayloginfo"), + }, + Logger: logger, + Mysqld: mysqld, + Concurrency: 1, + HookExtraEnv: map[string]string{}, + DeleteBeforeRestore: false, + DbName: "test", + Keyspace: "test", + Shard: "-", + StartTime: time.Now(), + RestoreToPos: replication.Position{}, + RestoreToTimestamp: time.Time{}, + DryRun: false, + Stats: fakeStats, + MysqlShutdownTimeout: MysqlShutdownTimeout, + } + + // Successful restore. + bm, err := be.ExecuteRestore(ctx, restoreParams, fakeBh) + assert.NoError(t, err) + assert.NotNil(t, bm) + + ss := GetStats(fakeStats) + require.Equal(t, 8, ss.DestinationCloseStats) + require.Equal(t, 8, ss.DestinationOpenStats) + require.Equal(t, 4, ss.DestinationWriteStats) + require.Equal(t, 8, ss.SourceCloseStats) + require.Equal(t, 8, ss.SourceOpenStats) + require.Equal(t, 8, ss.SourceReadStats) +} + +func TestExecuteRestoreFailToReadEachFileTwice(t *testing.T) { + ctx := utils.LeakCheckContext(t) + backupRoot, keyspace, shard, ts := SetupCluster(ctx, t, 2, 2) + + be := &mysqlctl.BuiltinBackupEngine{} + bufferPerFiles := make(map[string]*rwCloseFailFirstCall) + bh := &mysqlctl.FakeBackupHandle{} + bh.AddFileReturnF = func(filename string) mysqlctl.FakeBackupHandleAddFileReturn { + // let's never make it fail for now + newBuffer := newWriteCloseFailFirstWrite(true) + bufferPerFiles[filename] = newBuffer + return mysqlctl.FakeBackupHandleAddFileReturn{WriteCloser: newBuffer} + } + + // Spin up a fake daemon to be used in backups. It needs to be allowed to receive: + // "STOP REPLICA", "START REPLICA", in that order. + fakedb := fakesqldb.New(t) + defer fakedb.Close() + mysqld := mysqlctl.NewFakeMysqlDaemon(fakedb) + defer mysqld.Close() + mysqld.ExpectedExecuteSuperQueryList = []string{"STOP REPLICA", "START REPLICA"} + + backupResult, err := be.ExecuteBackup(ctx, mysqlctl.BackupParams{ + Logger: logutil.NewConsoleLogger(), + Mysqld: mysqld, + Cnf: &mysqlctl.Mycnf{ + InnodbDataHomeDir: path.Join(backupRoot, "innodb"), + InnodbLogGroupHomeDir: path.Join(backupRoot, "log"), + DataDir: path.Join(backupRoot, "datadir"), + }, + Stats: backupstats.NewFakeStats(), + Concurrency: 1, + HookExtraEnv: map[string]string{}, + TopoServer: ts, + Keyspace: keyspace, + Shard: shard, + MysqlShutdownTimeout: MysqlShutdownTimeout, + }, bh) + + require.NoError(t, err) + require.Equal(t, mysqlctl.BackupUsable, backupResult) + + // Now try to restore the above backup. 
+	fakeBh := &mysqlctl.FakeBackupHandle{}
+	fakeBh.ReadFileReturnF = func(ctx context.Context, filename string) (io.ReadCloser, error) {
+		// always make it fail, except if it is the MANIFEST file, otherwise we won't start restoring the other files
+		buffer := bufferPerFiles[filename]
+		if filename != "MANIFEST" {
+			buffer.firstDone = false
+		}
+		return buffer, nil
+	}
+
+	fakedb = fakesqldb.New(t)
+	defer fakedb.Close()
+	mysqld = mysqlctl.NewFakeMysqlDaemon(fakedb)
+	defer mysqld.Close()
+	mysqld.ExpectedExecuteSuperQueryList = []string{"STOP REPLICA", "START REPLICA"}
+
+	fakeStats := backupstats.NewFakeStats()
+	logger := logutil.NewMemoryLogger()
+
+	restoreParams := mysqlctl.RestoreParams{
+		Cnf: &mysqlctl.Mycnf{
+			InnodbDataHomeDir:     path.Join(backupRoot, "innodb"),
+			InnodbLogGroupHomeDir: path.Join(backupRoot, "log"),
+			DataDir:               path.Join(backupRoot, "datadir"),
+			BinLogPath:            path.Join(backupRoot, "binlog"),
+			RelayLogPath:          path.Join(backupRoot, "relaylog"),
+			RelayLogIndexPath:     path.Join(backupRoot, "relaylogindex"),
+			RelayLogInfoPath:      path.Join(backupRoot, "relayloginfo"),
+		},
+		Logger:               logger,
+		Mysqld:               mysqld,
+		Concurrency:          1,
+		HookExtraEnv:         map[string]string{},
+		DeleteBeforeRestore:  false,
+		DbName:               "test",
+		Keyspace:             "test",
+		Shard:                "-",
+		StartTime:            time.Now(),
+		RestoreToPos:         replication.Position{},
+		RestoreToTimestamp:   time.Time{},
+		DryRun:               false,
+		Stats:                fakeStats,
+		MysqlShutdownTimeout: MysqlShutdownTimeout,
+	}
+
+	// The restore must fail: every file fails to be read twice, while the engine retries each file only once.
+	bm, err := be.ExecuteRestore(ctx, restoreParams, fakeBh)
+	assert.ErrorContains(t, err, "failing first read")
+	assert.Nil(t, bm)
+
+	ss := GetStats(fakeStats)
+	require.Equal(t, 5, ss.DestinationCloseStats)
+	require.Equal(t, 5, ss.DestinationOpenStats)
+	require.Equal(t, 0, ss.DestinationWriteStats)
+	require.Equal(t, 5, ss.SourceCloseStats)
+	require.Equal(t, 5, ss.SourceOpenStats)
+	require.Equal(t, 5, ss.SourceReadStats)
+}
diff --git a/go/vt/mysqlctl/blackbox/utils.go b/go/vt/mysqlctl/blackbox/utils.go
new file mode 100644
index 00000000000..c7d34ae3cf6
--- /dev/null
+++ b/go/vt/mysqlctl/blackbox/utils.go
@@ -0,0 +1,196 @@
+/*
+Copyright 2024 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package blackbox + +import ( + "context" + "fmt" + "os" + "path" + "slices" + "strconv" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/capabilities" + "vitess.io/vitess/go/vt/logutil" + "vitess.io/vitess/go/vt/mysqlctl" + "vitess.io/vitess/go/vt/mysqlctl/backupstats" + "vitess.io/vitess/go/vt/mysqlctl/filebackupstorage" + logutilpb "vitess.io/vitess/go/vt/proto/logutil" + "vitess.io/vitess/go/vt/proto/topodata" + "vitess.io/vitess/go/vt/proto/vttime" + "vitess.io/vitess/go/vt/topo" + "vitess.io/vitess/go/vt/topo/memorytopo" +) + +type StatSummary struct { + DestinationCloseStats int + DestinationOpenStats int + DestinationWriteStats int + SourceCloseStats int + SourceOpenStats int + SourceReadStats int +} + +func GetStats(stats *backupstats.FakeStats) StatSummary { + var ss StatSummary + + for _, sr := range stats.ScopeReturns { + switch sr.ScopeV[backupstats.ScopeOperation] { + case "Destination:Close": + ss.DestinationCloseStats += len(sr.TimedIncrementCalls) + case "Destination:Open": + ss.DestinationOpenStats += len(sr.TimedIncrementCalls) + case "Destination:Write": + if len(sr.TimedIncrementBytesCalls) > 0 { + ss.DestinationWriteStats++ + } + case "Source:Close": + ss.SourceCloseStats += len(sr.TimedIncrementCalls) + case "Source:Open": + ss.SourceOpenStats += len(sr.TimedIncrementCalls) + case "Source:Read": + if len(sr.TimedIncrementBytesCalls) > 0 { + ss.SourceReadStats++ + } + } + } + return ss +} + +func AssertLogs(t *testing.T, expectedLogs []string, logger *logutil.MemoryLogger) { + for _, log := range expectedLogs { + require.Truef(t, slices.ContainsFunc(logger.LogEvents(), func(event *logutilpb.Event) bool { + return event.GetValue() == log + }), "%s is missing from the logs", log) + } +} + +func SetupCluster(ctx context.Context, t *testing.T, dirs, filesPerDir int) (backupRoot string, keyspace string, shard string, ts *topo.Server) { + // Set up local backup directory + id := fmt.Sprintf("%d", time.Now().UnixNano()) + backupRoot = fmt.Sprintf("testdata/builtinbackup_test_%s", id) + filebackupstorage.FileBackupStorageRoot = backupRoot + require.NoError(t, createBackupDir(backupRoot, "innodb", "log", "datadir")) + dataDir := path.Join(backupRoot, "datadir") + // Add some files under data directory to force backup to execute semaphore acquire inside + // backupFiles() method (https://github.com/vitessio/vitess/blob/main/go/vt/mysqlctl/builtinbackupengine.go#L483). 
+	for dirI := range dirs {
+		dirName := "test" + strconv.Itoa(dirI+1)
+		require.NoError(t, createBackupDir(dataDir, dirName))
+		require.NoError(t, createBackupFiles(path.Join(dataDir, dirName), filesPerDir, "ibd"))
+	}
+	t.Cleanup(func() {
+		require.NoError(t, os.RemoveAll(backupRoot))
+	})
+
+	needIt, err := NeedInnoDBRedoLogSubdir()
+	require.NoError(t, err)
+	if needIt {
+		fpath := path.Join("log", mysql.DynamicRedoLogSubdir)
+		if err := createBackupDir(backupRoot, fpath); err != nil {
+			require.Failf(t, err.Error(), "failed to create directory: %s", fpath)
+		}
+	}
+
+	// Set up topo
+	keyspace, shard = "mykeyspace", "-"
+	ts = memorytopo.NewServer(ctx, "cell1")
+	t.Cleanup(func() {
+		ts.Close()
+	})
+
+	require.NoError(t, ts.CreateKeyspace(ctx, keyspace, &topodata.Keyspace{}))
+	require.NoError(t, ts.CreateShard(ctx, keyspace, shard))
+
+	tablet := topo.NewTablet(100, "cell1", "mykeyspace-00-80-0100")
+	tablet.Keyspace = keyspace
+	tablet.Shard = shard
+
+	require.NoError(t, ts.CreateTablet(ctx, tablet))
+
+	_, err = ts.UpdateShardFields(ctx, keyspace, shard, func(si *topo.ShardInfo) error {
+		si.PrimaryAlias = &topodata.TabletAlias{Uid: 100, Cell: "cell1"}
+
+		now := time.Now()
+		si.PrimaryTermStartTime = &vttime.Time{Seconds: int64(now.Second()), Nanoseconds: int32(now.Nanosecond())}
+
+		return nil
+	})
+	require.NoError(t, err)
+	return backupRoot, keyspace, shard, ts
+}
+
+// NeedInnoDBRedoLogSubdir indicates whether we need to create a redo log subdirectory.
+// Starting with MySQL 8.0.30, the InnoDB redo logs are stored in a subdirectory of the
+// <innodb_log_group_home_dir> (<datadir>/. by default) called "#innodb_redo". See:
+//
+//	https://dev.mysql.com/doc/refman/8.0/en/innodb-redo-log.html#innodb-modifying-redo-log-capacity
+func NeedInnoDBRedoLogSubdir() (needIt bool, err error) {
+	mysqldVersionStr, err := mysqlctl.GetVersionString()
+	if err != nil {
+		return needIt, err
+	}
+	_, sv, err := mysqlctl.ParseVersionString(mysqldVersionStr)
+	if err != nil {
+		return needIt, err
+	}
+	versionStr := fmt.Sprintf("%d.%d.%d", sv.Major, sv.Minor, sv.Patch)
+	capableOf := mysql.ServerVersionCapableOf(versionStr)
+	if capableOf == nil {
+		return needIt, fmt.Errorf("cannot determine database flavor details for version %s", versionStr)
+	}
+	return capableOf(capabilities.DynamicRedoLogCapacityFlavorCapability)
+}
+
+const MysqlShutdownTimeout = 1 * time.Minute
+
+func SetBuiltinBackupMysqldDeadline(t time.Duration) time.Duration {
+	old := mysqlctl.BuiltinBackupMysqldTimeout
+	mysqlctl.BuiltinBackupMysqldTimeout = t
+
+	return old
+}
+
+func createBackupDir(root string, dirs ...string) error {
+	for _, dir := range dirs {
+		if err := os.MkdirAll(path.Join(root, dir), 0755); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func createBackupFiles(root string, fileCount int, ext string) error {
+	for i := 0; i < fileCount; i++ {
+		f, err := os.Create(path.Join(root, fmt.Sprintf("%d.%s", i, ext)))
+		if err != nil {
+			return err
+		}
+		if _, err := f.Write([]byte("hello, world!")); err != nil {
+			return err
+		}
+		defer f.Close()
+	}
+
+	return nil
+}
diff --git a/go/vt/mysqlctl/builtinbackupengine.go b/go/vt/mysqlctl/builtinbackupengine.go
index 5aa759f6f7a..2046a238400 100644
--- a/go/vt/mysqlctl/builtinbackupengine.go
+++ b/go/vt/mysqlctl/builtinbackupengine.go
@@ -29,18 +29,17 @@ import (
 	"os"
 	"path"
 	"path/filepath"
-	"sync"
+	"strconv"
 	"sync/atomic"
 	"time"
 
 	"github.com/spf13/pflag"
-	"golang.org/x/sync/semaphore"
+	"golang.org/x/sync/errgroup"
 
 	"vitess.io/vitess/go/ioutil"
 	"vitess.io/vitess/go/mysql"
"vitess.io/vitess/go/mysql/replication" "vitess.io/vitess/go/protoutil" - "vitess.io/vitess/go/vt/concurrency" "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/logutil" stats "vitess.io/vitess/go/vt/mysqlctl/backupstats" @@ -60,6 +59,8 @@ const ( builtinBackupEngineName = "builtin" AutoIncrementalFromPos = "auto" dataDictionaryFile = "mysql.ibd" + + maxRetriesPerFile = 1 ) var ( @@ -149,6 +150,13 @@ type FileEntry struct { // ParentPath is an optional prefix to the Base path. If empty, it is ignored. Useful // for writing files in a temporary directory ParentPath string + + // RetryCount specifies how many times we retried restoring/backing up this FileEntry. + // If we fail to restore/backup this FileEntry, we will retry up to maxRetriesPerFile times. + // Every time the builtin backup engine retries this file, we increment this field by 1. + // We don't care about adding this information to the MANIFEST and also to not cause any compatibility issue + // we are adding the - json tag to let Go know it can ignore the field. + RetryCount int `json:"-"` } func init() { @@ -585,6 +593,11 @@ func (be *BuiltinBackupEngine) backupFiles( mysqlVersion string, incrDetails *IncrementalBackupDetails, ) (finalErr error) { + // backupFiles always wait for AddFiles to finish its work before returning, unless there has been a + // non-recoverable error in the process, in both cases we can cancel the context safely. + ctx, cancel := context.WithCancel(ctx) + defer cancel() + // Get the files to backup. // We don't care about totalSize because we add each file separately. var fes []FileEntry @@ -599,43 +612,82 @@ func (be *BuiltinBackupEngine) backupFiles( } params.Logger.Infof("found %v files to backup", len(fes)) - // Backup with the provided concurrency. - sema := semaphore.NewWeighted(int64(params.Concurrency)) - wg := sync.WaitGroup{} + // The error here can be ignored safely. Failed FileEntry's are handled in the next 'if' statement. + _ = be.backupFileEntries(ctx, fes, bh, params) + // BackupHandle supports the BackupErrorRecorder interface for tracking errors + // across any goroutines that fan out to take the backup. This means that we + // don't need a local error recorder and can put everything through the bh. + // + // This handles the scenario where bh.AddFile() encounters an error asynchronously, + // which ordinarily would be lost in the context of `be.backupFile`, i.e. if an + // error were encountered + // [here](https://github.com/vitessio/vitess/blob/d26b6c7975b12a87364e471e2e2dfa4e253c2a5b/go/vt/mysqlctl/s3backupstorage/s3.go#L139-L142). + // + // All the errors are grouped per file, if one or more files failed, we back them up + // once more concurrently, if any of the retry fail, we fail-fast by canceling the context + // and return an error. There is no reason to continue processing the other retries, if + // one of them failed. + if files := bh.GetFailedFiles(); len(files) > 0 { + newFEs := make([]FileEntry, len(fes)) + for _, file := range files { + fileNb, err := strconv.Atoi(file) + if err != nil { + return vterrors.Wrapf(err, "failed to retry file '%s'", file) + } + oldFes := fes[fileNb] + newFEs[fileNb] = FileEntry{ + Base: oldFes.Base, + Name: oldFes.Name, + ParentPath: oldFes.ParentPath, + RetryCount: 1, + } + bh.ResetErrorForFile(file) + } + err = be.backupFileEntries(ctx, newFEs, bh, params) + if err != nil { + return err + } + } + + // Backup the MANIFEST file and apply retry logic. 
+ var manifestErr error + for currentRetry := 0; currentRetry <= maxRetriesPerFile; currentRetry++ { + manifestErr = be.backupManifest(ctx, params, bh, backupPosition, purgedPosition, fromPosition, fromBackupName, serverUUID, mysqlVersion, incrDetails, fes, currentRetry) + if manifestErr == nil { + break + } + bh.ResetErrorForFile(backupManifestFileName) + } + if manifestErr != nil { + return manifestErr + } + return nil +} + +// backupFileEntries iterates over a slice of FileEntry, backing them up concurrently up to the defined concurrency limit. +// This function will ignore empty FileEntry, allowing the retry mechanism to send a partially empty slice, to not +// mess up the index of retriable FileEntry. +// This function does not leave any background operation behind itself, all calls to bh.AddFile will be finished or canceled. +func (be *BuiltinBackupEngine) backupFileEntries(ctx context.Context, fes []FileEntry, bh backupstorage.BackupHandle, params BackupParams) error { ctxCancel, cancel := context.WithCancel(ctx) defer func() { - // We may still have operations in flight that require a valid context, such as adding files to S3. - // Unless we encountered an error, we should not cancel the context, this is taken care of later - // in the process. If we encountered an error however, we can safely cancel the context as we should - // no longer work on anything and exit fast. - if finalErr != nil { - cancel() - } + // If we reached this defer in all cases we can cancel the context. + // The only ways to get here are: a panic, an error when ending the backup, a successful backup. + // For all three options, it is safe to cancel the context, there should be no pending operations + // that 1) haven't completed, 2) we care about anymore. + cancel() }() + g := errgroup.Group{} + g.SetLimit(params.Concurrency) for i := range fes { - wg.Add(1) - go func(i int) { - defer wg.Done() + if fes[i].Name == "" { + continue + } + g.Go(func() error { fe := &fes[i] - // Wait until we are ready to go, return if we encounter an error - acqErr := sema.Acquire(ctxCancel, 1) - if acqErr != nil { - log.Errorf("Unable to acquire semaphore needed to backup file: %s, err: %s", fe.Name, acqErr.Error()) - bh.RecordError(acqErr) - cancel() - return - } - defer sema.Release(1) - - // First check if we have any error, if we have, there is no point trying backing up this file. - // We check for errors before checking if the context is canceled on purpose, if there was an - // error, the context would have been canceled already. - if bh.HasErrors() { - params.Logger.Errorf("Failed to restore files due to error: %v", bh.Error()) - return - } + name := fmt.Sprintf("%v", i) // Check for context cancellation explicitly because, the way semaphore code is written, theoretically we might // end up not throwing an error even after cancellation. Please see https://cs.opensource.google/go/x/sync/+/refs/tags/v0.1.0:semaphore/semaphore.go;l=66, @@ -644,83 +696,30 @@ func (be *BuiltinBackupEngine) backupFiles( select { case <-ctxCancel.Done(): log.Errorf("Context canceled or timed out during %q backup", fe.Name) - bh.RecordError(vterrors.Errorf(vtrpc.Code_CANCELED, "context canceled")) - return + bh.RecordError(name, vterrors.Errorf(vtrpc.Code_CANCELED, "context canceled")) + return nil default: } // Backup the individual file. 
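The new backupFileEntries shown in this hunk (which continues below) replaces the semaphore plus WaitGroup pattern with an errgroup whose SetLimit bounds concurrency, and it skips zero-value entries so a sparse retry slice keeps the original file indices. A standalone sketch of that pattern, with illustrative names rather than Vitess code, follows; it relies on Go 1.22+ per-iteration loop variables, as the engine code does.

    package main

    import (
        "fmt"

        "golang.org/x/sync/errgroup"
    )

    // processAll fans out over names with at most `concurrency` goroutines,
    // skipping empty placeholders left in place of already-successful entries.
    func processAll(names []string, concurrency int, process func(idx int, name string) error) error {
        g := errgroup.Group{}
        g.SetLimit(concurrency)
        for i := range names {
            if names[i] == "" {
                continue // placeholder: keep indices stable for retries
            }
            g.Go(func() error {
                return process(i, names[i])
            })
        }
        return g.Wait()
    }

    func main() {
        err := processAll([]string{"0.ibd", "", "1.ibd"}, 2, func(idx int, name string) error {
            fmt.Println("processing", idx, name)
            return nil
        })
        fmt.Println("err:", err)
    }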
- name := fmt.Sprintf("%v", i) - if err := be.backupFile(ctxCancel, params, bh, fe, name); err != nil { - bh.RecordError(err) - cancel() + var errBackupFile error + if errBackupFile = be.backupFile(ctxCancel, params, bh, fe, name); errBackupFile != nil { + bh.RecordError(name, vterrors.Wrapf(errBackupFile, "failed to backup file '%s'", name)) + if fe.RetryCount >= maxRetriesPerFile { + // this is the last attempt, and we have an error, we can cancel everything and fail fast. + cancel() + } } - }(i) + return nil + }) } + _ = g.Wait() - wg.Wait() - - // BackupHandle supports the ErrorRecorder interface for tracking errors - // across any goroutines that fan out to take the backup. This means that we - // don't need a local error recorder and can put everything through the bh. - // - // This handles the scenario where bh.AddFile() encounters an error asynchronously, - // which ordinarily would be lost in the context of `be.backupFile`, i.e. if an - // error were encountered - // [here](https://github.com/vitessio/vitess/blob/d26b6c7975b12a87364e471e2e2dfa4e253c2a5b/go/vt/mysqlctl/s3backupstorage/s3.go#L139-L142). - if bh.HasErrors() { - return bh.Error() - } - - // open the MANIFEST - wc, err := bh.AddFile(ctx, backupManifestFileName, backupstorage.FileSizeUnknown) + err := bh.EndBackup(ctx) if err != nil { - return vterrors.Wrapf(err, "cannot add %v to backup", backupManifestFileName) - } - defer func() { - closeErr := wc.Close() - if finalErr == nil { - finalErr = closeErr - } - }() - - // JSON-encode and write the MANIFEST - bm := &builtinBackupManifest{ - // Common base fields - BackupManifest: BackupManifest{ - BackupName: bh.Name(), - BackupMethod: builtinBackupEngineName, - Position: backupPosition, - PurgedPosition: purgedPosition, - FromPosition: fromPosition, - FromBackup: fromBackupName, - Incremental: !fromPosition.IsZero(), - ServerUUID: serverUUID, - TabletAlias: params.TabletAlias, - Keyspace: params.Keyspace, - Shard: params.Shard, - BackupTime: params.BackupTime.UTC().Format(time.RFC3339), - FinishedTime: time.Now().UTC().Format(time.RFC3339), - MySQLVersion: mysqlVersion, - UpgradeSafe: params.UpgradeSafe, - IncrementalDetails: incrDetails, - }, - - // Builtin-specific fields - FileEntries: fes, - SkipCompress: !backupStorageCompress, - CompressionEngine: CompressionEngineName, - ExternalDecompressor: ManifestExternalDecompressorCmd, - } - data, err := json.MarshalIndent(bm, "", " ") - if err != nil { - return vterrors.Wrapf(err, "cannot JSON encode %v", backupManifestFileName) - } - if _, err := wc.Write([]byte(data)); err != nil { - return vterrors.Wrapf(err, "cannot write %v", backupManifestFileName) + return err } - - return nil + return bh.Error() } type backupPipe struct { @@ -733,6 +732,7 @@ type backupPipe struct { crc32 hash.Hash32 nn int64 done chan struct{} + failed chan struct{} closed int32 } @@ -743,6 +743,7 @@ func newBackupWriter(filename string, writerBufferSize int, maxSize int64, w io. 
filename: filename, maxSize: maxSize, done: make(chan struct{}), + failed: make(chan struct{}), } } @@ -752,10 +753,16 @@ func newBackupReader(filename string, maxSize int64, r io.Reader) *backupPipe { r: r, filename: filename, done: make(chan struct{}), + failed: make(chan struct{}), maxSize: maxSize, } } +func retryToString(retry int) string { + // We convert the retry number to an attempt number, increasing retry by one, so it looks more human friendly + return fmt.Sprintf("(attempt %d/%d)", retry+1, maxRetriesPerFile+1) +} + func (bp *backupPipe) Read(p []byte) (int, error) { nn, err := bp.r.Read(p) _, _ = bp.crc32.Write(p[:nn]) @@ -770,9 +777,17 @@ func (bp *backupPipe) Write(p []byte) (int, error) { return nn, err } -func (bp *backupPipe) Close() error { +func (bp *backupPipe) Close(isDone bool) (err error) { if atomic.CompareAndSwapInt32(&bp.closed, 0, 1) { - close(bp.done) + // If we fail to Flush the writer we must report this backup as a failure. + defer func() { + if isDone && err == nil { + close(bp.done) + return + } + close(bp.failed) + }() + if bp.w != nil { if err := bp.w.Flush(); err != nil { return err @@ -786,28 +801,31 @@ func (bp *backupPipe) HashString() string { return hex.EncodeToString(bp.crc32.Sum(nil)) } -func (bp *backupPipe) ReportProgress(ctx context.Context, period time.Duration, logger logutil.Logger, restore bool) { - messageStr := "restoring " +func (bp *backupPipe) ReportProgress(ctx context.Context, period time.Duration, logger logutil.Logger, restore bool, retryStr string) { + messageStr := "restoring" if !restore { - messageStr = "backing up " + messageStr = "backing up" } tick := time.NewTicker(period) defer tick.Stop() for { select { case <-ctx.Done(): - logger.Infof("Canceled %s of %q file", messageStr, bp.filename) + logger.Infof("Canceled %s of %q file %s", messageStr, bp.filename, retryStr) return case <-bp.done: - logger.Infof("Completed %s %q", messageStr, bp.filename) + logger.Infof("Completed %s %q %s", messageStr, bp.filename, retryStr) + return + case <-bp.failed: + logger.Infof("Failed %s %q %s", messageStr, bp.filename, retryStr) return case <-tick.C: written := float64(atomic.LoadInt64(&bp.nn)) if bp.maxSize == 0 { - logger.Infof("%s %q: %.02fkb", messageStr, bp.filename, written/1024.0) + logger.Infof("%s %q %s: %.02fkb", messageStr, bp.filename, retryStr, written/1024.0) } else { maxSize := float64(bp.maxSize) - logger.Infof("%s %q: %.02f%% (%.02f/%.02fkb)", messageStr, bp.filename, 100.0*written/maxSize, written/1024.0, maxSize/1024.0) + logger.Infof("%s %q %s: %.02f%% (%.02f/%.02fkb)", messageStr, bp.filename, retryStr, 100.0*written/maxSize, written/1024.0, maxSize/1024.0) } } } @@ -815,12 +833,15 @@ func (bp *backupPipe) ReportProgress(ctx context.Context, period time.Duration, // backupFile backs up an individual file. func (be *BuiltinBackupEngine) backupFile(ctx context.Context, params BackupParams, bh backupstorage.BackupHandle, fe *FileEntry, name string) (finalErr error) { - ctx, cancel := context.WithCancel(ctx) - defer func() { - if finalErr != nil { - cancel() - } - }() + // We need another context that does not live outside of this function. + // Reporting progress, compressing and writing are operations that will be + // over by the time we exit this function, they can use this cancelable context. + // However, AddFile is something that may continue in the background even after + // this function exits. In this case, we give it the parent context so the caller + // has more control over when to cancel AddFile. 
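backupPipe now carries both a done and a failed channel, and Close(isDone bool) closes exactly one of them so ReportProgress can tell a completed copy from a failed or canceled one; the hunk continues below. A reduced, self-contained sketch of that signalling, with assumed names rather than the real backupPipe, is:

    package main

    import (
        "context"
        "fmt"
        "sync/atomic"
    )

    type progressPipe struct {
        done   chan struct{}
        failed chan struct{}
        closed int32
    }

    func newProgressPipe() *progressPipe {
        return &progressPipe{done: make(chan struct{}), failed: make(chan struct{})}
    }

    // Close closes exactly one of the two channels, at most once.
    func (p *progressPipe) Close(isDone bool) {
        if atomic.CompareAndSwapInt32(&p.closed, 0, 1) {
            if isDone {
                close(p.done)
                return
            }
            close(p.failed)
        }
    }

    // report selects on cancellation, completion, and failure.
    func (p *progressPipe) report(ctx context.Context, filename string) {
        select {
        case <-ctx.Done():
            fmt.Printf("Canceled backing up %q\n", filename)
        case <-p.done:
            fmt.Printf("Completed backing up %q\n", filename)
        case <-p.failed:
            fmt.Printf("Failed backing up %q\n", filename)
        }
    }

    func main() {
        p := newProgressPipe()
        p.Close(false) // simulate a copy that failed
        p.report(context.Background(), "test1/0.ibd")
    }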
+ cancelableCtx, cancel := context.WithCancel(ctx) + defer cancel() + // Open the source file for reading. openSourceAt := time.Now() source, err := fe.open(params.Cnf, true) @@ -843,11 +864,12 @@ func (be *BuiltinBackupEngine) backupFile(ctx context.Context, params BackupPara return err } + retryStr := retryToString(fe.RetryCount) br := newBackupReader(fe.Name, fi.Size(), timedSource) - go br.ReportProgress(ctx, builtinBackupProgress, params.Logger, false /*restore*/) + go br.ReportProgress(cancelableCtx, builtinBackupProgress, params.Logger, false /*restore*/, retryStr) // Open the destination file for writing, and a buffer. - params.Logger.Infof("Backing up file: %v", fe.Name) + params.Logger.Infof("Backing up file: %v %s", fe.Name, retryStr) openDestAt := time.Now() dest, err := bh.AddFile(ctx, name, fi.Size()) if err != nil { @@ -879,19 +901,20 @@ func (be *BuiltinBackupEngine) backupFile(ctx context.Context, params BackupPara defer func() { // Close the backupPipe to finish writing on destination. - if err := bw.Close(); err != nil { + if err := bw.Close(createAndCopyErr == nil); err != nil { createAndCopyErr = errors.Join(createAndCopyErr, vterrors.Wrapf(err, "cannot flush destination: %v", name)) } - if err := br.Close(); err != nil { + if err := br.Close(createAndCopyErr == nil); err != nil { createAndCopyErr = errors.Join(createAndCopyErr, vterrors.Wrap(err, "failed to close the source reader")) } + }() // Create the gzip compression pipe, if necessary. if backupStorageCompress { var compressor io.WriteCloser if ExternalCompressorCmd != "" { - compressor, err = newExternalCompressor(ctx, ExternalCompressorCmd, writer, params.Logger) + compressor, err = newExternalCompressor(cancelableCtx, ExternalCompressorCmd, writer, params.Logger) } else { compressor, err = newBuiltinCompressor(CompressionEngineName, writer, params.Logger) } @@ -902,13 +925,13 @@ func (be *BuiltinBackupEngine) backupFile(ctx context.Context, params BackupPara compressStats := params.Stats.Scope(stats.Operation("Compressor:Write")) writer = ioutil.NewMeteredWriter(compressor, compressStats.TimedIncrementBytes) - closer := ioutil.NewTimeoutCloser(ctx, compressor, closeTimeout) + closer := ioutil.NewTimeoutCloser(cancelableCtx, compressor, closeTimeout) defer func() { // Close gzip to flush it, after that all data is sent to writer. 
closeCompressorAt := time.Now() - params.Logger.Infof("closing compressor") + params.Logger.Infof("Closing compressor for file: %s %s", fe.Name, retryStr) if cerr := closer.Close(); err != nil { - cerr = vterrors.Wrapf(cerr, "failed to close compressor %v", name) + cerr = vterrors.Wrapf(cerr, "failed to close compressor %v", fe.Name) params.Logger.Error(cerr) createAndCopyErr = errors.Join(createAndCopyErr, cerr) } @@ -938,6 +961,94 @@ func (be *BuiltinBackupEngine) backupFile(ctx context.Context, params BackupPara return nil } +func (be *BuiltinBackupEngine) backupManifest( + ctx context.Context, + params BackupParams, + bh backupstorage.BackupHandle, + backupPosition replication.Position, + purgedPosition replication.Position, + fromPosition replication.Position, + fromBackupName string, + serverUUID string, + mysqlVersion string, + incrDetails *IncrementalBackupDetails, + fes []FileEntry, + currentAttempt int, +) (finalErr error) { + retryStr := retryToString(currentAttempt) + params.Logger.Infof("Backing up file %s %s", backupManifestFileName, retryStr) + defer func() { + state := "Completed" + if finalErr != nil { + state = "Failed" + } + params.Logger.Infof("%s backing up %s %s", state, backupManifestFileName, retryStr) + }() + + // Creating this function allows us to ensure we always close the writer no matter what, + // and in case of success that we close it before calling bh.EndBackup. + addAndWrite := func() (addAndWriteError error) { + // open the MANIFEST + wc, err := bh.AddFile(ctx, backupManifestFileName, backupstorage.FileSizeUnknown) + if err != nil { + return vterrors.Wrapf(err, "cannot add %v to backup %s", backupManifestFileName, retryStr) + } + defer func() { + if err := wc.Close(); err != nil { + addAndWriteError = errors.Join(addAndWriteError, vterrors.Wrapf(err, "cannot close backup: %v", backupManifestFileName)) + } + }() + + // JSON-encode and write the MANIFEST + bm := &builtinBackupManifest{ + // Common base fields + BackupManifest: BackupManifest{ + BackupName: bh.Name(), + BackupMethod: builtinBackupEngineName, + Position: backupPosition, + PurgedPosition: purgedPosition, + FromPosition: fromPosition, + FromBackup: fromBackupName, + Incremental: !fromPosition.IsZero(), + ServerUUID: serverUUID, + TabletAlias: params.TabletAlias, + Keyspace: params.Keyspace, + Shard: params.Shard, + BackupTime: params.BackupTime.UTC().Format(time.RFC3339), + FinishedTime: time.Now().UTC().Format(time.RFC3339), + MySQLVersion: mysqlVersion, + UpgradeSafe: params.UpgradeSafe, + IncrementalDetails: incrDetails, + }, + + // Builtin-specific fields + FileEntries: fes, + SkipCompress: !backupStorageCompress, + CompressionEngine: CompressionEngineName, + ExternalDecompressor: ManifestExternalDecompressorCmd, + } + data, err := json.MarshalIndent(bm, "", " ") + if err != nil { + return vterrors.Wrapf(err, "cannot JSON encode %v %s", backupManifestFileName, retryStr) + } + if _, err := wc.Write(data); err != nil { + return vterrors.Wrapf(err, "cannot write %v %s", backupManifestFileName, retryStr) + } + return nil + } + + err := addAndWrite() + if err != nil { + return err + } + + err = bh.EndBackup(ctx) + if err != nil { + return err + } + return bh.Error() +} + // executeRestoreFullBackup restores the files from a full backup. The underlying mysql database service is expected to be stopped. 
func (be *BuiltinBackupEngine) executeRestoreFullBackup(ctx context.Context, params RestoreParams, bh backupstorage.BackupHandle, bm builtinBackupManifest) error { if err := prepareToRestore(ctx, params.Cnf, params.Mysqld, params.Logger, params.MysqlShutdownTimeout); err != nil { @@ -997,8 +1108,8 @@ func (be *BuiltinBackupEngine) executeRestoreIncrementalBackup(ctx context.Conte // we return the position from which replication should start // otherwise an error is returned func (be *BuiltinBackupEngine) ExecuteRestore(ctx context.Context, params RestoreParams, bh backupstorage.BackupHandle) (*BackupManifest, error) { - var bm builtinBackupManifest - if err := getBackupManifestInto(ctx, bh, &bm); err != nil { + bm, err := be.restoreManifest(ctx, params, bh) + if err != nil { return nil, err } @@ -1007,7 +1118,6 @@ func (be *BuiltinBackupEngine) ExecuteRestore(ctx context.Context, params Restor return nil, err } - var err error if bm.Incremental { err = be.executeRestoreIncrementalBackup(ctx, params, bh, bm) } else { @@ -1020,6 +1130,26 @@ func (be *BuiltinBackupEngine) ExecuteRestore(ctx context.Context, params Restor return &bm.BackupManifest, nil } +func (be *BuiltinBackupEngine) restoreManifest(ctx context.Context, params RestoreParams, bh backupstorage.BackupHandle) (bm builtinBackupManifest, finalErr error) { + var retryCount int + defer func() { + state := "Completed" + if finalErr != nil { + state = "Failed" + } + params.Logger.Infof("%s restoring %s %s", state, backupManifestFileName, retryToString(retryCount)) + }() + + for ; retryCount <= maxRetriesPerFile; retryCount++ { + params.Logger.Infof("Restoring file %s %s", backupManifestFileName, retryToString(retryCount)) + if finalErr = getBackupManifestInto(ctx, bh, &bm); finalErr == nil { + break + } + params.Logger.Infof("Failed restoring %s %s", backupManifestFileName, retryToString(retryCount)) + } + return +} + // restoreFiles will copy all the files from the BackupStorage to the // right place. func (be *BuiltinBackupEngine) restoreFiles(ctx context.Context, params RestoreParams, bh backupstorage.BackupHandle, bm builtinBackupManifest) (createdDir string, err error) { @@ -1040,75 +1170,79 @@ func (be *BuiltinBackupEngine) restoreFiles(ctx context.Context, params RestoreP } } fes := bm.FileEntries - sema := semaphore.NewWeighted(int64(params.Concurrency)) - rec := concurrency.AllErrorRecorder{} - wg := sync.WaitGroup{} - - ctxCancel, cancel := context.WithCancel(ctx) - defer func() { - // We may still have operations in flight that require a valid context, such as adding files to S3. - // Unless we encountered an error, we should not cancel the context. This is taken care of later - // in the process. If we encountered an error however, we can safely cancel the context as we should - // no longer work on anything and exit fast. 
+ _ = be.restoreFileEntries(ctx, fes, bh, bm, params, createdDir) + if files := bh.GetFailedFiles(); len(files) > 0 { + newFEs := make([]FileEntry, len(fes)) + for _, file := range files { + fileNb, err := strconv.Atoi(file) + if err != nil { + return "", vterrors.Wrapf(err, "failed to retry file '%s'", file) + } + oldFes := fes[fileNb] + newFEs[fileNb] = FileEntry{ + Base: oldFes.Base, + Name: oldFes.Name, + ParentPath: oldFes.ParentPath, + Hash: oldFes.Hash, + RetryCount: 1, + } + bh.ResetErrorForFile(file) + } + err = be.restoreFileEntries(ctx, newFEs, bh, bm, params, createdDir) if err != nil { - cancel() + return "", err } - }() + } + return createdDir, nil +} + +func (be *BuiltinBackupEngine) restoreFileEntries(ctx context.Context, fes []FileEntry, bh backupstorage.BackupHandle, bm builtinBackupManifest, params RestoreParams, createdDir string) error { + g, ctx := errgroup.WithContext(ctx) + g.SetLimit(params.Concurrency) for i := range fes { - wg.Add(1) - go func(i int) { - defer wg.Done() + if fes[i].Name == "" { + continue + } + g.Go(func() error { fe := &fes[i] - // Wait until we are ready to go, return if we encounter an error - acqErr := sema.Acquire(ctxCancel, 1) - if acqErr != nil { - log.Errorf("Unable to acquire semaphore needed to restore file: %s, err: %s", fe.Name, acqErr.Error()) - rec.RecordError(acqErr) - cancel() - return - } - defer sema.Release(1) - - // First check if we have any error, if we have, there is no point trying to restore this file. - // We check for errors before checking if the context is canceled on purpose, if there was an - // error, the context would have been canceled already. - if rec.HasErrors() { - params.Logger.Errorf("Failed to restore files due to error: %v", bh.Error()) - return - } - + name := fmt.Sprintf("%v", i) // Check for context cancellation explicitly because, the way semaphore code is written, theoretically we might // end up not throwing an error even after cancellation. Please see https://cs.opensource.google/go/x/sync/+/refs/tags/v0.1.0:semaphore/semaphore.go;l=66, // which suggests that if the context is already done, `Acquire()` may still succeed without blocking. This introduces // unpredictability in my test cases, so in order to avoid that, I am adding this cancellation check. select { - case <-ctxCancel.Done(): + case <-ctx.Done(): log.Errorf("Context canceled or timed out during %q restore", fe.Name) - rec.RecordError(vterrors.Errorf(vtrpc.Code_CANCELED, "context canceled")) - return + bh.RecordError(name, vterrors.Errorf(vtrpc.Code_CANCELED, "context canceled")) + return nil default: } fe.ParentPath = createdDir + // And restore the file. 
- name := fmt.Sprintf("%v", i) - params.Logger.Infof("Copying file %v: %v", name, fe.Name) - err := be.restoreFile(ctxCancel, params, bh, fe, bm, name) - if err != nil { - rec.RecordError(vterrors.Wrapf(err, "can't restore file %v to %v", name, fe.Name)) - cancel() + params.Logger.Infof("Copying file %v: %v %s", name, fe.Name, retryToString(fe.RetryCount)) + if errRestore := be.restoreFile(ctx, params, bh, fe, bm, name); errRestore != nil { + bh.RecordError(name, vterrors.Wrapf(errRestore, "failed to restore file %v to %v", name, fe.Name)) + if fe.RetryCount >= maxRetriesPerFile { + // this is the last attempt, and we have an error, we can return an error, which will let errgroup + // know it can cancel the context + return errRestore + } } - }(i) + return nil + }) } - wg.Wait() - return createdDir, rec.Error() + _ = g.Wait() + return bh.Error() } // restoreFile restores an individual file. func (be *BuiltinBackupEngine) restoreFile(ctx context.Context, params RestoreParams, bh backupstorage.BackupHandle, fe *FileEntry, bm builtinBackupManifest, name string) (finalErr error) { ctx, cancel := context.WithCancel(ctx) defer cancel() + // Open the source file for reading. openSourceAt := time.Now() source, err := bh.ReadFile(ctx, name) @@ -1126,8 +1260,15 @@ func (be *BuiltinBackupEngine) restoreFile(ctx context.Context, params RestorePa params.Stats.Scope(stats.Operation("Source:Close")).TimedIncrement(time.Since(closeSourceAt)) }() - br := newBackupReader(name, 0, timedSource) - go br.ReportProgress(ctx, builtinBackupProgress, params.Logger, true) + // Create the backup/source reader and start reporting progress + retryStr := retryToString(fe.RetryCount) + br := newBackupReader(fe.Name, 0, timedSource) + go br.ReportProgress(ctx, builtinBackupProgress, params.Logger, true, retryStr) + defer func() { + if err := br.Close(finalErr == nil); err != nil { + finalErr = vterrors.Wrap(finalErr, "failed to close the source reader") + } + }() var reader io.Reader = br // Open the destination file for writing. @@ -1213,10 +1354,6 @@ func (be *BuiltinBackupEngine) restoreFile(ctx context.Context, params RestorePa return vterrors.Wrap(err, "failed to flush destination buffer") } - if err := br.Close(); err != nil { - return vterrors.Wrap(err, "failed to close the source reader") - } - return nil } diff --git a/go/vt/mysqlctl/cephbackupstorage/ceph.go b/go/vt/mysqlctl/cephbackupstorage/ceph.go index f8e33dbe641..62330b869f0 100644 --- a/go/vt/mysqlctl/cephbackupstorage/ceph.go +++ b/go/vt/mysqlctl/cephbackupstorage/ceph.go @@ -32,9 +32,10 @@ import ( minio "github.com/minio/minio-go" "github.com/spf13/pflag" - "vitess.io/vitess/go/vt/concurrency" - "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/mysqlctl/backupstorage" + + "vitess.io/vitess/go/vt/log" + errorsbackup "vitess.io/vitess/go/vt/mysqlctl/errors" "vitess.io/vitess/go/vt/servenv" ) @@ -69,23 +70,8 @@ type CephBackupHandle struct { dir string name string readOnly bool - errors concurrency.AllErrorRecorder waitGroup sync.WaitGroup -} - -// RecordError is part of the concurrency.ErrorRecorder interface. -func (bh *CephBackupHandle) RecordError(err error) { - bh.errors.RecordError(err) -} - -// HasErrors is part of the concurrency.ErrorRecorder interface. -func (bh *CephBackupHandle) HasErrors() bool { - return bh.errors.HasErrors() -} - -// Error is part of the concurrency.ErrorRecorder interface. -func (bh *CephBackupHandle) Error() error { - return bh.errors.Error() + errorsbackup.PerFileErrorRecorder } // Directory implements BackupHandle. 
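As the ceph hunk above shows, each storage backend now drops its hand-written RecordError/HasErrors/Error methods and simply embeds errors.PerFileErrorRecorder. A hypothetical backend (myBackupHandle is an assumed name) picks up the whole per-file error API the same way:

    package mybackupstorage

    import (
        "fmt"

        "vitess.io/vitess/go/vt/mysqlctl/errors"
    )

    type myBackupHandle struct {
        dir  string
        name string
        errors.PerFileErrorRecorder // provides RecordError, HasErrors, Error, GetFailedFiles, ResetErrorForFile
    }

    func example() {
        bh := &myBackupHandle{dir: "ks/0", name: "backup-1"}
        bh.RecordError("3", fmt.Errorf("upload failed"))
        fmt.Println(bh.HasErrors())      // true
        fmt.Println(bh.GetFailedFiles()) // [3]
        bh.ResetErrorForFile("3")
        fmt.Println(bh.HasErrors())      // false
    }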
@@ -109,7 +95,7 @@ func (bh *CephBackupHandle) AddFile(ctx context.Context, filename string, filesi
 		defer bh.waitGroup.Done()
 
 		// ceph bucket name is where the backups will go
-		//backup handle dir field contains keyspace/shard value
+		// backup handle dir field contains keyspace/shard value
 		bucket := alterBucketName(bh.dir)
 
 		// Give PutObject() the read end of the pipe.
@@ -120,7 +106,7 @@ func (bh *CephBackupHandle) AddFile(ctx context.Context, filename string, filesi
 			// Signal the writer that an error occurred, in case it's not done writing yet.
 			reader.CloseWithError(err)
 			// In case the error happened after the writer finished, we need to remember it.
-			bh.RecordError(err)
+			bh.RecordError(filename, err)
 		}
 	}()
 	// Give our caller the write end of the pipe.
diff --git a/go/vt/mysqlctl/errors/errors.go b/go/vt/mysqlctl/errors/errors.go
new file mode 100644
index 00000000000..02485901e50
--- /dev/null
+++ b/go/vt/mysqlctl/errors/errors.go
@@ -0,0 +1,106 @@
+/*
+Copyright 2024 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package errors
+
+import (
+	"errors"
+	"strings"
+	"sync"
+)
+
+type BackupErrorRecorder interface {
+	RecordError(string, error)
+	HasErrors() bool
+	Error() error
+	GetFailedFiles() []string
+	ResetErrorForFile(string)
+}
+
+// PerFileErrorRecorder records errors and groups them by filename.
+// This is particularly useful when processing several files at the same time
+// and wanting to know which files failed.
+type PerFileErrorRecorder struct {
+	mu     sync.Mutex
+	errors map[string][]error
+}
+
+// RecordError records a possible error:
+// - does nothing if err is nil
+func (pfer *PerFileErrorRecorder) RecordError(filename string, err error) {
+	if err == nil {
+		return
+	}
+
+	pfer.mu.Lock()
+	defer pfer.mu.Unlock()
+
+	if pfer.errors == nil {
+		pfer.errors = make(map[string][]error, 1)
+	}
+	pfer.errors[filename] = append(pfer.errors[filename], err)
+}
+
+// HasErrors returns true if we ever recorded an error
+func (pfer *PerFileErrorRecorder) HasErrors() bool {
+	pfer.mu.Lock()
+	defer pfer.mu.Unlock()
+	return len(pfer.errors) > 0
+}
+
+// Error returns all the errors that were recorded
+func (pfer *PerFileErrorRecorder) Error() error {
+	pfer.mu.Lock()
+	defer pfer.mu.Unlock()
+	if pfer.errors == nil {
+		return nil
+	}
+
+	var errs []string
+	for _, fileErrs := range pfer.errors {
+		for _, err := range fileErrs {
+			errs = append(errs, err.Error())
+		}
+	}
+	if len(errs) == 0 {
+		return nil
+	}
+	return errors.New(strings.Join(errs, "; "))
+}
+
+// GetFailedFiles returns a slice of filenames, each of which has at least one recorded error.
+func (pfer *PerFileErrorRecorder) GetFailedFiles() []string {
+	pfer.mu.Lock()
+	defer pfer.mu.Unlock()
+	if pfer.errors == nil {
+		return nil
+	}
+	files := make([]string, 0, len(pfer.errors))
+	for filename := range pfer.errors {
+		files = append(files, filename)
+	}
+	return files
+}
+
+// ResetErrorForFile removes all the errors of a given file.
+func (pfer *PerFileErrorRecorder) ResetErrorForFile(filename string) { + pfer.mu.Lock() + defer pfer.mu.Unlock() + if pfer.errors == nil { + return + } + delete(pfer.errors, filename) +} diff --git a/go/vt/mysqlctl/fakebackupstorage.go b/go/vt/mysqlctl/fakebackupstorage.go index 75587191157..582b422cf58 100644 --- a/go/vt/mysqlctl/fakebackupstorage.go +++ b/go/vt/mysqlctl/fakebackupstorage.go @@ -21,20 +21,21 @@ import ( "fmt" "io" - "vitess.io/vitess/go/vt/concurrency" "vitess.io/vitess/go/vt/mysqlctl/backupstorage" + "vitess.io/vitess/go/vt/mysqlctl/errors" ) type FakeBackupHandle struct { Dir string NameV string ReadOnly bool - Errors concurrency.AllErrorRecorder + errors.PerFileErrorRecorder AbortBackupCalls []context.Context AbortBackupReturn error AddFileCalls []FakeBackupHandleAddFileCall AddFileReturn FakeBackupHandleAddFileReturn + AddFileReturnF func(filename string) FakeBackupHandleAddFileReturn EndBackupCalls []context.Context EndBackupReturn error ReadFileCalls []FakeBackupHandleReadFileCall @@ -57,18 +58,6 @@ type FakeBackupHandleReadFileCall struct { Filename string } -func (fbh *FakeBackupHandle) RecordError(err error) { - fbh.Errors.RecordError(err) -} - -func (fbh *FakeBackupHandle) HasErrors() bool { - return fbh.Errors.HasErrors() -} - -func (fbh *FakeBackupHandle) Error() error { - return fbh.Errors.Error() -} - func (fbh *FakeBackupHandle) Directory() string { return fbh.Dir } @@ -79,6 +68,11 @@ func (fbh *FakeBackupHandle) Name() string { func (fbh *FakeBackupHandle) AddFile(ctx context.Context, filename string, filesize int64) (io.WriteCloser, error) { fbh.AddFileCalls = append(fbh.AddFileCalls, FakeBackupHandleAddFileCall{ctx, filename, filesize}) + + if fbh.AddFileReturnF != nil { + r := fbh.AddFileReturnF(filename) + return r.WriteCloser, r.Err + } return fbh.AddFileReturn.WriteCloser, fbh.AddFileReturn.Err } diff --git a/go/vt/mysqlctl/filebackupstorage/file.go b/go/vt/mysqlctl/filebackupstorage/file.go index 99148d9169b..bd73c55e70c 100644 --- a/go/vt/mysqlctl/filebackupstorage/file.go +++ b/go/vt/mysqlctl/filebackupstorage/file.go @@ -27,8 +27,9 @@ import ( "github.com/spf13/pflag" + "vitess.io/vitess/go/vt/mysqlctl/errors" + "vitess.io/vitess/go/ioutil" - "vitess.io/vitess/go/vt/concurrency" stats "vitess.io/vitess/go/vt/mysqlctl/backupstats" "vitess.io/vitess/go/vt/mysqlctl/backupstorage" "vitess.io/vitess/go/vt/servenv" @@ -59,7 +60,7 @@ type FileBackupHandle struct { dir string name string readOnly bool - errors concurrency.AllErrorRecorder + errors.PerFileErrorRecorder } func NewBackupHandle( @@ -79,21 +80,6 @@ func NewBackupHandle( } } -// RecordError is part of the concurrency.ErrorRecorder interface. -func (fbh *FileBackupHandle) RecordError(err error) { - fbh.errors.RecordError(err) -} - -// HasErrors is part of the concurrency.ErrorRecorder interface. -func (fbh *FileBackupHandle) HasErrors() bool { - return fbh.errors.HasErrors() -} - -// Error is part of the concurrency.ErrorRecorder interface. 
-func (fbh *FileBackupHandle) Error() error { - return fbh.errors.Error() -} - // Directory is part of the BackupHandle interface func (fbh *FileBackupHandle) Directory() string { return fbh.dir diff --git a/go/vt/mysqlctl/gcsbackupstorage/gcs.go b/go/vt/mysqlctl/gcsbackupstorage/gcs.go index 814395a225a..adecbb9bbba 100644 --- a/go/vt/mysqlctl/gcsbackupstorage/gcs.go +++ b/go/vt/mysqlctl/gcsbackupstorage/gcs.go @@ -32,8 +32,9 @@ import ( "google.golang.org/api/iterator" "google.golang.org/api/option" + "vitess.io/vitess/go/vt/mysqlctl/errors" + "vitess.io/vitess/go/trace" - "vitess.io/vitess/go/vt/concurrency" "vitess.io/vitess/go/vt/mysqlctl/backupstorage" "vitess.io/vitess/go/vt/servenv" ) @@ -65,22 +66,7 @@ type GCSBackupHandle struct { dir string name string readOnly bool - errors concurrency.AllErrorRecorder -} - -// RecordError is part of the concurrency.ErrorRecorder interface. -func (bh *GCSBackupHandle) RecordError(err error) { - bh.errors.RecordError(err) -} - -// HasErrors is part of the concurrency.ErrorRecorder interface. -func (bh *GCSBackupHandle) HasErrors() bool { - return bh.errors.HasErrors() -} - -// Error is part of the concurrency.ErrorRecorder interface. -func (bh *GCSBackupHandle) Error() error { - return bh.errors.Error() + errors.PerFileErrorRecorder } // Directory implements BackupHandle. diff --git a/go/vt/mysqlctl/mysqlshellbackupengine.go b/go/vt/mysqlctl/mysqlshellbackupengine.go index b7405ce7eaa..681e62d10cd 100644 --- a/go/vt/mysqlctl/mysqlshellbackupengine.go +++ b/go/vt/mysqlctl/mysqlshellbackupengine.go @@ -56,8 +56,6 @@ var ( // disable redo logging and double write buffer mysqlShellSpeedUpRestore = false - mysqlShellBackupBinaryName = "mysqlsh" - // use when checking if we need to create the directory on the local filesystem or not. knownObjectStoreParams = []string{"s3BucketName", "osBucketName", "azureContainerName"} @@ -87,7 +85,9 @@ type MySQLShellBackupManifest struct { } func init() { - BackupRestoreEngineMap[mysqlShellBackupEngineName] = &MySQLShellBackupEngine{} + BackupRestoreEngineMap[mysqlShellBackupEngineName] = &MySQLShellBackupEngine{ + binaryName: "mysqlsh", + } for _, cmd := range []string{"vtcombo", "vttablet", "vtbackup", "vttestserver", "vtctldclient"} { servenv.OnParseFor(cmd, registerMysqlShellBackupEngineFlags) @@ -106,6 +106,7 @@ func registerMysqlShellBackupEngineFlags(fs *pflag.FlagSet) { // MySQLShellBackupEngine encapsulates the logic to implement the restoration // of a mysql-shell based backup. type MySQLShellBackupEngine struct { + binaryName string } const ( @@ -164,41 +165,37 @@ func (be *MySQLShellBackupEngine) ExecuteBackup(ctx context.Context, params Back return BackupUnusable, vterrors.Wrap(err, "failed to fetch position") } - cmd := exec.CommandContext(ctx, mysqlShellBackupBinaryName, args...) + cmd := exec.CommandContext(ctx, be.binaryName, args...) 
params.Logger.Infof("running %s", cmd.String()) - cmdOut, err := cmd.StdoutPipe() - if err != nil { - return BackupUnusable, vterrors.Wrap(err, "cannot create stdout pipe") - } - cmdOriginalErr, err := cmd.StderrPipe() - if err != nil { - return BackupUnusable, vterrors.Wrap(err, "cannot create stderr pipe") - } - if err := cmd.Start(); err != nil { - return BackupUnusable, vterrors.Wrap(err, "can't start mysqlshell") - } + stdoutReader, stdoutWriter := io.Pipe() + stderrReader, stderrWriter := io.Pipe() + lockWaiterReader, lockWaiterWriter := io.Pipe() - pipeReader, pipeWriter := io.Pipe() - cmdErr := io.TeeReader(cmdOriginalErr, pipeWriter) + cmd.Stdout = stdoutWriter + cmd.Stderr = stderrWriter + combinedErr := io.TeeReader(stderrReader, lockWaiterWriter) cmdWg := &sync.WaitGroup{} cmdWg.Add(3) - go releaseReadLock(ctx, pipeReader, params, cmdWg, lockAcquired) - go scanLinesToLogger(mysqlShellBackupEngineName+" stdout", cmdOut, params.Logger, cmdWg.Done) - go scanLinesToLogger(mysqlShellBackupEngineName+" stderr", cmdErr, params.Logger, cmdWg.Done) + go releaseReadLock(ctx, lockWaiterReader, params, cmdWg, lockAcquired) + go scanLinesToLogger(mysqlShellBackupEngineName+" stdout", stdoutReader, params.Logger, cmdWg.Done) + go scanLinesToLogger(mysqlShellBackupEngineName+" stderr", combinedErr, params.Logger, cmdWg.Done) - // Get exit status. - if err := cmd.Wait(); err != nil { - pipeWriter.Close() // make sure we close the writer so the goroutines above will complete. - return BackupUnusable, vterrors.Wrap(err, mysqlShellBackupEngineName+" failed") - } + // we run the command, wait for it to complete and close all pipes so the goroutines can complete on their own. + // after that we can process if an error has happened or not. + err = cmd.Run() - // close the pipeWriter and wait for the goroutines to have read all the logs - pipeWriter.Close() + stdoutWriter.Close() + stderrWriter.Close() + lockWaiterWriter.Close() cmdWg.Wait() + if err != nil { + return BackupUnusable, vterrors.Wrap(err, mysqlShellBackupEngineName+" failed") + } + // open the MANIFEST params.Logger.Infof("Writing backup MANIFEST") mwc, err := bh.AddFile(ctx, backupManifestFileName, backupstorage.FileSizeUnknown) @@ -371,7 +368,7 @@ func (be *MySQLShellBackupEngine) ExecuteRestore(ctx context.Context, params Res if err := cmd.Wait(); err != nil { return nil, vterrors.Wrap(err, mysqlShellBackupEngineName+" failed") } - params.Logger.Infof("%s completed successfully", mysqlShellBackupBinaryName) + params.Logger.Infof("%s completed successfully", be.binaryName) // disable local_infile now that the restore is done. err = params.Mysqld.ExecuteSuperQuery(ctx, "SET GLOBAL LOCAL_INFILE=0") diff --git a/go/vt/mysqlctl/mysqlshellbackupengine_test.go b/go/vt/mysqlctl/mysqlshellbackupengine_test.go index 67f27b5382e..80491ec2afc 100644 --- a/go/vt/mysqlctl/mysqlshellbackupengine_test.go +++ b/go/vt/mysqlctl/mysqlshellbackupengine_test.go @@ -325,13 +325,10 @@ func generateTestFile(t *testing.T, name, contents string) { // during ExecuteBackup(), even if the backup didn't succeed. func TestMySQLShellBackupEngine_ExecuteBackup_ReleaseLock(t *testing.T) { originalLocation := mysqlShellBackupLocation - originalBinary := mysqlShellBackupBinaryName mysqlShellBackupLocation = "logical" - mysqlShellBackupBinaryName = path.Join(t.TempDir(), "test.sh") - defer func() { // restore the original values. + defer func() { // restore the original value. 
mysqlShellBackupLocation = originalLocation - mysqlShellBackupBinaryName = originalBinary }() logger := logutil.NewMemoryLogger() @@ -340,7 +337,6 @@ func TestMySQLShellBackupEngine_ExecuteBackup_ReleaseLock(t *testing.T) { mysql := NewFakeMysqlDaemon(fakedb) defer mysql.Close() - be := &MySQLShellBackupEngine{} params := BackupParams{ TabletAlias: "test", Logger: logger, @@ -351,6 +347,7 @@ func TestMySQLShellBackupEngine_ExecuteBackup_ReleaseLock(t *testing.T) { } t.Run("lock released if we see the mysqlsh lock being acquired", func(t *testing.T) { + be := &MySQLShellBackupEngine{binaryName: path.Join(t.TempDir(), "mysqlsh.sh")} logger.Clear() manifestBuffer := ioutil.NewBytesBufferWriter() bs.StartBackupReturn.BackupHandle = &FakeBackupHandle{ @@ -359,7 +356,8 @@ func TestMySQLShellBackupEngine_ExecuteBackup_ReleaseLock(t *testing.T) { } // this simulates mysql shell completing without any issues. - generateTestFile(t, mysqlShellBackupBinaryName, fmt.Sprintf("#!/bin/bash\n>&2 echo %s", mysqlShellLockMessage)) + generateTestFile(t, be.binaryName, fmt.Sprintf( + "#!/bin/bash\n>&2 echo %s; echo \"backup completed\"; sleep 0.01", mysqlShellLockMessage)) bh, err := bs.StartBackup(context.Background(), t.TempDir(), t.Name()) require.NoError(t, err) @@ -380,7 +378,8 @@ func TestMySQLShellBackupEngine_ExecuteBackup_ReleaseLock(t *testing.T) { "failed to release the global lock after mysqlsh") }) - t.Run("lock released if when we don't see mysqlsh released it", func(t *testing.T) { + t.Run("lock released if we don't see mysqlsh release it", func(t *testing.T) { + be := &MySQLShellBackupEngine{binaryName: path.Join(t.TempDir(), "mysqlsh.sh")} mysql.GlobalReadLock = false // clear lock status. logger.Clear() manifestBuffer := ioutil.NewBytesBufferWriter() @@ -390,7 +389,7 @@ func TestMySQLShellBackupEngine_ExecuteBackup_ReleaseLock(t *testing.T) { } // this simulates mysqlshell completing, but we don't see the message that is released its lock. - generateTestFile(t, mysqlShellBackupBinaryName, "#!/bin/bash\nexit 0") + generateTestFile(t, be.binaryName, "#!/bin/bash\nexit 0") bh, err := bs.StartBackup(context.Background(), t.TempDir(), t.Name()) require.NoError(t, err) @@ -407,6 +406,7 @@ func TestMySQLShellBackupEngine_ExecuteBackup_ReleaseLock(t *testing.T) { }) t.Run("lock released when backup fails", func(t *testing.T) { + be := &MySQLShellBackupEngine{binaryName: path.Join(t.TempDir(), "mysqlsh.sh")} mysql.GlobalReadLock = false // clear lock status. logger.Clear() manifestBuffer := ioutil.NewBytesBufferWriter() @@ -416,7 +416,7 @@ func TestMySQLShellBackupEngine_ExecuteBackup_ReleaseLock(t *testing.T) { } // this simulates the backup process failing. 
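The ExecuteBackup changes earlier in this file swap cmd.StdoutPipe/StderrPipe for explicit io.Pipe wiring, with stderr teed to a second reader that watches for the lock message, and all writers closed only after cmd.Run returns so the scanning goroutines can drain; the failing-backup test case continues after this sketch. A condensed, standalone illustration of that wiring (not the engine code):

    package main

    import (
        "bufio"
        "fmt"
        "io"
        "os/exec"
        "sync"
    )

    func main() {
        stderrReader, stderrWriter := io.Pipe()
        watcherReader, watcherWriter := io.Pipe()

        cmd := exec.Command("sh", "-c", `echo "lock acquired" 1>&2`)
        cmd.Stderr = stderrWriter
        // Everything read from stderr is also copied to the watcher pipe.
        tee := io.TeeReader(stderrReader, watcherWriter)

        var wg sync.WaitGroup
        wg.Add(2)
        go func() { // plain line-by-line logger
            defer wg.Done()
            s := bufio.NewScanner(tee)
            for s.Scan() {
                fmt.Println("stderr:", s.Text())
            }
        }()
        go func() { // watcher looking for a specific message
            defer wg.Done()
            s := bufio.NewScanner(watcherReader)
            for s.Scan() {
                if s.Text() == "lock acquired" {
                    fmt.Println("saw lock message")
                }
            }
        }()

        err := cmd.Run()
        // Close the writers so both scanners hit EOF, then wait for them.
        stderrWriter.Close()
        watcherWriter.Close()
        wg.Wait()
        fmt.Println("run err:", err)
    }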
- generateTestFile(t, mysqlShellBackupBinaryName, "#!/bin/bash\nexit 1") + generateTestFile(t, be.binaryName, "#!/bin/bash\nexit 1") bh, err := bs.StartBackup(context.Background(), t.TempDir(), t.Name()) require.NoError(t, err) diff --git a/go/vt/mysqlctl/s3backupstorage/s3.go b/go/vt/mysqlctl/s3backupstorage/s3.go index b3a8117aafa..97861e83729 100644 --- a/go/vt/mysqlctl/s3backupstorage/s3.go +++ b/go/vt/mysqlctl/s3backupstorage/s3.go @@ -40,6 +40,7 @@ import ( "time" "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/config" "github.com/aws/aws-sdk-go-v2/feature/s3/manager" "github.com/aws/aws-sdk-go-v2/service/s3" @@ -48,7 +49,8 @@ import ( "github.com/aws/smithy-go/middleware" "github.com/spf13/pflag" - "vitess.io/vitess/go/vt/concurrency" + errorsbackup "vitess.io/vitess/go/vt/mysqlctl/errors" + "vitess.io/vitess/go/vt/log" stats "vitess.io/vitess/go/vt/mysqlctl/backupstats" "vitess.io/vitess/go/vt/mysqlctl/backupstorage" @@ -144,8 +146,8 @@ type S3BackupHandle struct { dir string name string readOnly bool - errors concurrency.AllErrorRecorder waitGroup sync.WaitGroup + errorsbackup.PerFileErrorRecorder } // Directory is part of the backupstorage.BackupHandle interface. @@ -158,39 +160,23 @@ func (bh *S3BackupHandle) Name() string { return bh.name } -// RecordError is part of the concurrency.ErrorRecorder interface. -func (bh *S3BackupHandle) RecordError(err error) { - bh.errors.RecordError(err) -} - -// HasErrors is part of the concurrency.ErrorRecorder interface. -func (bh *S3BackupHandle) HasErrors() bool { - return bh.errors.HasErrors() -} - -// Error is part of the concurrency.ErrorRecorder interface. -func (bh *S3BackupHandle) Error() error { - return bh.errors.Error() -} - // AddFile is part of the backupstorage.BackupHandle interface. 
func (bh *S3BackupHandle) AddFile(ctx context.Context, filename string, filesize int64) (io.WriteCloser, error) { if bh.readOnly { return nil, fmt.Errorf("AddFile cannot be called on read-only backup") } - // Calculate s3 upload part size using the source filesize - partSizeBytes := manager.DefaultUploadPartSize - if filesize > 0 { - minimumPartSize := float64(filesize) / float64(manager.MaxUploadParts) - // Round up to ensure large enough partsize - calculatedPartSizeBytes := int64(math.Ceil(minimumPartSize)) - if calculatedPartSizeBytes > partSizeBytes { - partSizeBytes = calculatedPartSizeBytes - } - } + partSizeBytes := calculateUploadPartSize(filesize) reader, writer := io.Pipe() + bh.handleAddFile(ctx, filename, partSizeBytes, reader, func(err error) { + reader.CloseWithError(err) + }) + + return writer, nil +} + +func (bh *S3BackupHandle) handleAddFile(ctx context.Context, filename string, partSizeBytes int64, reader io.Reader, closer func(error)) { bh.waitGroup.Add(1) go func() { @@ -221,12 +207,24 @@ func (bh *S3BackupHandle) AddFile(ctx context.Context, filename string, filesize }) }) if err != nil { - reader.CloseWithError(err) - bh.RecordError(err) + closer(err) + bh.RecordError(filename, err) } }() +} - return writer, nil +func calculateUploadPartSize(filesize int64) int64 { + // Calculate s3 upload part size using the source filesize + partSizeBytes := manager.DefaultUploadPartSize + if filesize > 0 { + minimumPartSize := float64(filesize) / float64(manager.MaxUploadParts) + // Round up to ensure large enough partsize + calculatedPartSizeBytes := int64(math.Ceil(minimumPartSize)) + if calculatedPartSizeBytes > partSizeBytes { + partSizeBytes = calculatedPartSizeBytes + } + } + return partSizeBytes } // EndBackup is part of the backupstorage.BackupHandle interface. @@ -505,13 +503,24 @@ func (bs *S3BackupStorage) client() (*s3.Client, error) { return nil, err } - bs._client = s3.NewFromConfig(cfg, func(o *s3.Options) { - o.UsePathStyle = forcePath - if retryCount >= 0 { - o.RetryMaxAttempts = retryCount - o.Retryer = &ClosedConnectionRetryer{} - } - }, s3.WithEndpointResolverV2(newEndpointResolver())) + options := []func(options *s3.Options){ + func(o *s3.Options) { + o.UsePathStyle = forcePath + if retryCount >= 0 { + o.RetryMaxAttempts = retryCount + o.Retryer = &ClosedConnectionRetryer{ + awsRetryer: retry.NewStandard(func(options *retry.StandardOptions) { + options.MaxAttempts = retryCount + }), + } + } + }, + } + if endpoint != "" { + options = append(options, s3.WithEndpointResolverV2(newEndpointResolver())) + } + + bs._client = s3.NewFromConfig(cfg, options...) if len(bucket) == 0 { return nil, fmt.Errorf("--s3_backup_storage_bucket required") diff --git a/go/vt/mysqlctl/s3backupstorage/s3_mock.go b/go/vt/mysqlctl/s3backupstorage/s3_mock.go new file mode 100644 index 00000000000..f244c4d63b1 --- /dev/null +++ b/go/vt/mysqlctl/s3backupstorage/s3_mock.go @@ -0,0 +1,223 @@ +/* +Copyright 2024 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package s3backupstorage + +import ( + "context" + "errors" + "fmt" + "io" + "sync" + + "vitess.io/vitess/go/vt/logutil" + "vitess.io/vitess/go/vt/mysqlctl/backupstats" + "vitess.io/vitess/go/vt/mysqlctl/backupstorage" +) + +type FakeS3BackupHandle struct { + *S3BackupHandle + + AddFileReturnF func(s3 *S3BackupHandle, ctx context.Context, filename string, filesize int64, firstAdd bool) (io.WriteCloser, error) + ReadFileReturnF func(s3 *S3BackupHandle, ctx context.Context, filename string, firstRead bool) (io.ReadCloser, error) + + mu sync.Mutex + addPerFile map[string]int + readPerFile map[string]int +} + +type FakeConfig struct { + Region string + Endpoint string + Bucket string + ForcePath bool +} + +func InitFlag(cfg FakeConfig) { + region = cfg.Region + endpoint = cfg.Endpoint + bucket = cfg.Bucket + forcePath = cfg.ForcePath +} + +func NewFakeS3BackupHandle(ctx context.Context, dir, name string, logger logutil.Logger, stats backupstats.Stats) (*FakeS3BackupHandle, error) { + s := newS3BackupStorage() + bs := s.WithParams(backupstorage.Params{ + Logger: logger, + Stats: stats, + }) + bh, err := bs.StartBackup(ctx, dir, name) + if err != nil { + return nil, err + } + return &FakeS3BackupHandle{ + S3BackupHandle: bh.(*S3BackupHandle), + addPerFile: make(map[string]int), + readPerFile: make(map[string]int), + }, nil +} + +func NewFakeS3RestoreHandle(ctx context.Context, dir string, logger logutil.Logger, stats backupstats.Stats) (*FakeS3BackupHandle, error) { + s := newS3BackupStorage() + bs := s.WithParams(backupstorage.Params{ + Logger: logger, + Stats: stats, + }) + bhs, err := bs.ListBackups(ctx, dir) + if err != nil { + return nil, err + } + return &FakeS3BackupHandle{ + S3BackupHandle: bhs[0].(*S3BackupHandle), + addPerFile: make(map[string]int), + readPerFile: make(map[string]int), + }, nil +} + +func (fbh *FakeS3BackupHandle) Directory() string { + return fbh.S3BackupHandle.Directory() +} + +func (fbh *FakeS3BackupHandle) Name() string { + return fbh.S3BackupHandle.Name() +} + +func (fbh *FakeS3BackupHandle) AddFile(ctx context.Context, filename string, filesize int64) (io.WriteCloser, error) { + fbh.mu.Lock() + defer func() { + fbh.addPerFile[filename] += 1 + fbh.mu.Unlock() + }() + + if fbh.AddFileReturnF != nil { + return fbh.AddFileReturnF(fbh.S3BackupHandle, ctx, filename, filesize, fbh.addPerFile[filename] == 0) + } + return fbh.S3BackupHandle.AddFile(ctx, filename, filesize) +} + +func (fbh *FakeS3BackupHandle) EndBackup(ctx context.Context) error { + return fbh.S3BackupHandle.EndBackup(ctx) +} + +func (fbh *FakeS3BackupHandle) AbortBackup(ctx context.Context) error { + return fbh.S3BackupHandle.AbortBackup(ctx) +} + +func (fbh *FakeS3BackupHandle) ReadFile(ctx context.Context, filename string) (io.ReadCloser, error) { + fbh.mu.Lock() + defer func() { + fbh.readPerFile[filename] += 1 + fbh.mu.Unlock() + }() + + if fbh.ReadFileReturnF != nil { + return fbh.ReadFileReturnF(fbh.S3BackupHandle, ctx, filename, fbh.readPerFile[filename] == 0) + } + return fbh.S3BackupHandle.ReadFile(ctx, filename) +} + +func (fbh *FakeS3BackupHandle) RecordError(s string, err error) { + fbh.S3BackupHandle.RecordError(s, err) +} + +func (fbh *FakeS3BackupHandle) HasErrors() bool { + return fbh.S3BackupHandle.HasErrors() +} + +func (fbh *FakeS3BackupHandle) Error() error { + return fbh.S3BackupHandle.Error() +} + +func (fbh *FakeS3BackupHandle) GetFailedFiles() []string { + return fbh.S3BackupHandle.GetFailedFiles() +} + +func (fbh *FakeS3BackupHandle) ResetErrorForFile(s string) { + 
fbh.S3BackupHandle.ResetErrorForFile(s) +} + +type failReadPipeReader struct { + *io.PipeReader +} + +func (fwr *failReadPipeReader) Read(p []byte) (n int, err error) { + return 0, errors.New("failing read") +} + +func FailFirstWrite(s3bh *S3BackupHandle, ctx context.Context, filename string, filesize int64, firstAdd bool) (io.WriteCloser, error) { + if s3bh.readOnly { + return nil, fmt.Errorf("AddFile cannot be called on read-only backup") + } + + partSizeBytes := calculateUploadPartSize(filesize) + reader, writer := io.Pipe() + r := io.Reader(reader) + + if firstAdd { + r = &failReadPipeReader{PipeReader: reader} + } + + s3bh.handleAddFile(ctx, filename, partSizeBytes, r, func(err error) { + reader.CloseWithError(err) + }) + return writer, nil +} + +func FailAllWrites(s3bh *S3BackupHandle, ctx context.Context, filename string, filesize int64, _ bool) (io.WriteCloser, error) { + if s3bh.readOnly { + return nil, fmt.Errorf("AddFile cannot be called on read-only backup") + } + + partSizeBytes := calculateUploadPartSize(filesize) + reader, writer := io.Pipe() + r := &failReadPipeReader{PipeReader: reader} + + s3bh.handleAddFile(ctx, filename, partSizeBytes, r, func(err error) { + r.PipeReader.CloseWithError(err) + }) + return writer, nil +} + +type failRead struct{} + +func (fr *failRead) Read(p []byte) (n int, err error) { + return 0, errors.New("failing read") +} + +func (fr *failRead) Close() error { + return nil +} + +func FailFirstRead(s3bh *S3BackupHandle, ctx context.Context, filename string, firstRead bool) (io.ReadCloser, error) { + rc, err := s3bh.ReadFile(ctx, filename) + if err != nil { + return nil, err + } + if firstRead { + return &failRead{}, nil + } + return rc, nil +} + +// FailAllReadExpectManifest is used to fail every attempt at reading a file from S3. +// Only the MANIFEST file is allowed to be read, because otherwise we wouldn't even try to read the normal files. +func FailAllReadExpectManifest(s3bh *S3BackupHandle, ctx context.Context, filename string, _ bool) (io.ReadCloser, error) { + const manifestFileName = "MANIFEST" + if filename == manifestFileName { + return s3bh.ReadFile(ctx, filename) + } + return &failRead{}, nil +} diff --git a/go/vt/proto/replicationdata/replicationdata.pb.go b/go/vt/proto/replicationdata/replicationdata.pb.go index ab307dba9fa..d5462e1ea2b 100644 --- a/go/vt/proto/replicationdata/replicationdata.pb.go +++ b/go/vt/proto/replicationdata/replicationdata.pb.go @@ -371,9 +371,8 @@ type StopReplicationStatus struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Before *Status `protobuf:"bytes,1,opt,name=before,proto3" json:"before,omitempty"` - After *Status `protobuf:"bytes,2,opt,name=after,proto3" json:"after,omitempty"` - BackupRunning bool `protobuf:"varint,3,opt,name=backup_running,json=backupRunning,proto3" json:"backup_running,omitempty"` + Before *Status `protobuf:"bytes,1,opt,name=before,proto3" json:"before,omitempty"` + After *Status `protobuf:"bytes,2,opt,name=after,proto3" json:"after,omitempty"` } func (x *StopReplicationStatus) Reset() { @@ -420,13 +419,6 @@ func (x *StopReplicationStatus) GetAfter() *Status { return nil } -func (x *StopReplicationStatus) GetBackupRunning() bool { - if x != nil { - return x.BackupRunning - } - return false -} - // PrimaryStatus is the replication status for a MySQL primary (returned by 'show binary log status'). 
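A minimal sketch of how the failure hooks introduced in s3_mock.go above might be wired together in a backup test. Everything below that is not defined in this patch is an assumption: the package name, the helper name newFlakyS3Handle, the endpoint/bucket/region values, and the idea that an S3-compatible endpoint is reachable at that address; the logger and stats are passed in by the caller rather than constructed here.

package s3failuretest // hypothetical test package, not part of this change

import (
	"context"
	"testing"

	"vitess.io/vitess/go/vt/logutil"
	"vitess.io/vitess/go/vt/mysqlctl/backupstats"
	"vitess.io/vitess/go/vt/mysqlctl/s3backupstorage"
)

// newFlakyS3Handle is a hypothetical helper: it builds a FakeS3BackupHandle whose
// first write of every file fails and whose reads all fail except for MANIFEST,
// which exercises the per-file error recording and the retry paths added in s3.go.
func newFlakyS3Handle(t *testing.T, logger logutil.Logger, stats backupstats.Stats) *s3backupstorage.FakeS3BackupHandle {
	t.Helper()

	// Point the s3backupstorage flags at an assumed local S3-compatible endpoint.
	s3backupstorage.InitFlag(s3backupstorage.FakeConfig{
		Region:    "us-east-1",          // assumption
		Endpoint:  "http://localhost:9000", // assumption
		Bucket:    "test-bucket",        // assumption
		ForcePath: true,
	})

	fbh, err := s3backupstorage.NewFakeS3BackupHandle(context.Background(), t.TempDir(), t.Name(), logger, stats)
	if err != nil {
		t.Fatal(err)
	}
	// Fail the first AddFile write per file, and fail every read except MANIFEST.
	fbh.AddFileReturnF = s3backupstorage.FailFirstWrite
	fbh.ReadFileReturnF = s3backupstorage.FailAllReadExpectManifest
	return fbh
}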
type PrimaryStatus struct { state protoimpl.MessageState @@ -775,100 +767,98 @@ var file_replicationdata_proto_rawDesc = []byte{ 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x2e, 0x0a, 0x13, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x5f, 0x6e, 0x65, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x11, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x4e, 0x65, - 0x74, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x22, 0x9e, 0x01, 0x0a, 0x15, 0x53, 0x74, 0x6f, - 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, - 0x75, 0x73, 0x12, 0x2f, 0x0a, 0x06, 0x62, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x62, 0x65, 0x66, - 0x6f, 0x72, 0x65, 0x12, 0x2d, 0x0a, 0x05, 0x61, 0x66, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x05, 0x61, 0x66, 0x74, - 0x65, 0x72, 0x12, 0x25, 0x0a, 0x0e, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x5f, 0x72, 0x75, 0x6e, - 0x6e, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x62, 0x61, 0x63, 0x6b, - 0x75, 0x70, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x22, 0x71, 0x0a, 0x0d, 0x50, 0x72, 0x69, - 0x6d, 0x61, 0x72, 0x79, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x6f, - 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x6f, - 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x23, 0x0a, 0x0d, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x70, - 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x66, - 0x69, 0x6c, 0x65, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1f, 0x0a, 0x0b, 0x73, - 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x75, 0x75, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0a, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x55, 0x75, 0x69, 0x64, 0x22, 0xc8, 0x08, 0x0a, - 0x0a, 0x46, 0x75, 0x6c, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x73, - 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, - 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x49, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x65, 0x72, 0x76, - 0x65, 0x72, 0x5f, 0x75, 0x75, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, - 0x65, 0x72, 0x76, 0x65, 0x72, 0x55, 0x75, 0x69, 0x64, 0x12, 0x46, 0x0a, 0x12, 0x72, 0x65, 0x70, - 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x11, - 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, - 0x73, 0x12, 0x45, 0x0a, 0x0e, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x5f, 0x73, 0x74, 0x61, - 0x74, 0x75, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x72, 0x65, 0x70, 0x6c, - 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x50, 0x72, 0x69, 0x6d, - 0x61, 0x72, 0x79, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x0d, 0x70, 0x72, 0x69, 0x6d, 0x61, - 0x72, 0x79, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x67, 0x74, 0x69, 0x64, - 
0x5f, 0x70, 0x75, 0x72, 0x67, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x67, - 0x74, 0x69, 0x64, 0x50, 0x75, 0x72, 0x67, 0x65, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, - 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, - 0x69, 0x6f, 0x6e, 0x12, 0x27, 0x0a, 0x0f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x63, - 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x76, 0x65, - 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x09, - 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x08, 0x72, 0x65, 0x61, 0x64, 0x4f, 0x6e, 0x6c, 0x79, 0x12, 0x1b, 0x0a, 0x09, 0x67, 0x74, 0x69, - 0x64, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x67, 0x74, - 0x69, 0x64, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, - 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x62, - 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x28, 0x0a, 0x10, 0x62, - 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x5f, 0x72, 0x6f, 0x77, 0x5f, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x18, - 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x52, 0x6f, 0x77, - 0x49, 0x6d, 0x61, 0x67, 0x65, 0x12, 0x26, 0x0a, 0x0f, 0x6c, 0x6f, 0x67, 0x5f, 0x62, 0x69, 0x6e, - 0x5f, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, - 0x6c, 0x6f, 0x67, 0x42, 0x69, 0x6e, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x2e, 0x0a, - 0x13, 0x6c, 0x6f, 0x67, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x5f, 0x75, 0x70, 0x64, - 0x61, 0x74, 0x65, 0x73, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x08, 0x52, 0x11, 0x6c, 0x6f, 0x67, 0x52, - 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x73, 0x12, 0x39, 0x0a, + 0x74, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x22, 0x77, 0x0a, 0x15, 0x53, 0x74, 0x6f, 0x70, + 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x12, 0x2f, 0x0a, 0x06, 0x62, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x17, 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x64, + 0x61, 0x74, 0x61, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x62, 0x65, 0x66, 0x6f, + 0x72, 0x65, 0x12, 0x2d, 0x0a, 0x05, 0x61, 0x66, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x17, 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x64, + 0x61, 0x74, 0x61, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x05, 0x61, 0x66, 0x74, 0x65, + 0x72, 0x22, 0x71, 0x0a, 0x0d, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x53, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x23, + 0x0a, 0x0d, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x66, 0x69, 0x6c, 0x65, 0x50, 0x6f, 0x73, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x75, 0x75, + 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, + 0x55, 0x75, 0x69, 0x64, 0x22, 0xc8, 0x08, 0x0a, 0x0a, 0x46, 0x75, 0x6c, 0x6c, 0x53, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x12, 
0x1b, 0x0a, 0x09, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x69, 0x64, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x49, 0x64, + 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x75, 0x75, 0x69, 0x64, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x55, 0x75, 0x69, + 0x64, 0x12, 0x46, 0x0a, 0x12, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, + 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x64, 0x61, 0x74, 0x61, 0x2e, + 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x11, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x45, 0x0a, 0x0e, 0x70, 0x72, 0x69, + 0x6d, 0x61, 0x72, 0x79, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1e, 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x64, + 0x61, 0x74, 0x61, 0x2e, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x53, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x52, 0x0d, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x12, 0x1f, 0x0a, 0x0b, 0x67, 0x74, 0x69, 0x64, 0x5f, 0x70, 0x75, 0x72, 0x67, 0x65, 0x64, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x67, 0x74, 0x69, 0x64, 0x50, 0x75, 0x72, 0x67, 0x65, + 0x64, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x27, 0x0a, 0x0f, 0x76, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x18, 0x07, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6d, + 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6f, 0x6e, 0x6c, + 0x79, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x4f, 0x6e, 0x6c, + 0x79, 0x12, 0x1b, 0x0a, 0x09, 0x67, 0x74, 0x69, 0x64, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x09, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x67, 0x74, 0x69, 0x64, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x23, + 0x0a, 0x0d, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, + 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x46, 0x6f, 0x72, + 0x6d, 0x61, 0x74, 0x12, 0x28, 0x0a, 0x10, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x5f, 0x72, 0x6f, + 0x77, 0x5f, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x62, + 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x52, 0x6f, 0x77, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x12, 0x26, 0x0a, + 0x0f, 0x6c, 0x6f, 0x67, 0x5f, 0x62, 0x69, 0x6e, 0x5f, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, + 0x18, 0x0c, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x6c, 0x6f, 0x67, 0x42, 0x69, 0x6e, 0x45, 0x6e, + 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x2e, 0x0a, 0x13, 0x6c, 0x6f, 0x67, 0x5f, 0x72, 0x65, 0x70, + 0x6c, 0x69, 0x63, 0x61, 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x73, 0x18, 0x0d, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x11, 0x6c, 0x6f, 0x67, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x55, 0x70, + 0x64, 0x61, 0x74, 0x65, 0x73, 0x12, 0x39, 0x0a, 0x19, 0x73, 0x65, 0x6d, 0x69, 0x5f, 0x73, 0x79, + 0x6e, 0x63, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x5f, 0x65, 0x6e, 0x61, 0x62, 0x6c, + 0x65, 0x64, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x08, 0x52, 0x16, 0x73, 0x65, 0x6d, 0x69, 0x53, 0x79, + 0x6e, 0x63, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 
0x79, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, + 0x12, 0x39, 0x0a, 0x19, 0x73, 0x65, 0x6d, 0x69, 0x5f, 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x72, 0x65, + 0x70, 0x6c, 0x69, 0x63, 0x61, 0x5f, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x0f, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x16, 0x73, 0x65, 0x6d, 0x69, 0x53, 0x79, 0x6e, 0x63, 0x52, 0x65, 0x70, + 0x6c, 0x69, 0x63, 0x61, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x37, 0x0a, 0x18, 0x73, + 0x65, 0x6d, 0x69, 0x5f, 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, + 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, 0x73, + 0x65, 0x6d, 0x69, 0x53, 0x79, 0x6e, 0x63, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x53, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x12, 0x37, 0x0a, 0x18, 0x73, 0x65, 0x6d, 0x69, 0x5f, 0x73, 0x79, 0x6e, + 0x63, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, 0x73, 0x65, 0x6d, 0x69, 0x53, 0x79, 0x6e, 0x63, + 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x39, 0x0a, 0x19, 0x73, 0x65, 0x6d, 0x69, 0x5f, 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, - 0x72, 0x79, 0x5f, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x08, + 0x72, 0x79, 0x5f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x12, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x16, 0x73, 0x65, 0x6d, 0x69, 0x53, 0x79, 0x6e, 0x63, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, - 0x79, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x39, 0x0a, 0x19, 0x73, 0x65, 0x6d, 0x69, - 0x5f, 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x5f, 0x65, 0x6e, - 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x08, 0x52, 0x16, 0x73, 0x65, 0x6d, - 0x69, 0x53, 0x79, 0x6e, 0x63, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x45, 0x6e, 0x61, 0x62, - 0x6c, 0x65, 0x64, 0x12, 0x37, 0x0a, 0x18, 0x73, 0x65, 0x6d, 0x69, 0x5f, 0x73, 0x79, 0x6e, 0x63, - 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, - 0x10, 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, 0x73, 0x65, 0x6d, 0x69, 0x53, 0x79, 0x6e, 0x63, 0x50, - 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x37, 0x0a, 0x18, - 0x73, 0x65, 0x6d, 0x69, 0x5f, 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, - 0x61, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, - 0x73, 0x65, 0x6d, 0x69, 0x53, 0x79, 0x6e, 0x63, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x53, - 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x39, 0x0a, 0x19, 0x73, 0x65, 0x6d, 0x69, 0x5f, 0x73, 0x79, - 0x6e, 0x63, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x5f, 0x63, 0x6c, 0x69, 0x65, 0x6e, - 0x74, 0x73, 0x18, 0x12, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x16, 0x73, 0x65, 0x6d, 0x69, 0x53, 0x79, - 0x6e, 0x63, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x73, - 0x12, 0x39, 0x0a, 0x19, 0x73, 0x65, 0x6d, 0x69, 0x5f, 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x70, 0x72, - 0x69, 0x6d, 0x61, 0x72, 0x79, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x13, 0x20, - 0x01, 0x28, 0x04, 0x52, 0x16, 0x73, 0x65, 0x6d, 0x69, 0x53, 0x79, 0x6e, 0x63, 0x50, 0x72, 0x69, - 0x6d, 0x61, 0x72, 0x79, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x45, 0x0a, 0x20, 0x73, - 0x65, 0x6d, 0x69, 0x5f, 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x77, 0x61, 0x69, 0x74, 0x5f, 0x66, 0x6f, - 0x72, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x5f, 0x63, 0x6f, 0x75, 
0x6e, 0x74, 0x18, - 0x14, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x1b, 0x73, 0x65, 0x6d, 0x69, 0x53, 0x79, 0x6e, 0x63, 0x57, - 0x61, 0x69, 0x74, 0x46, 0x6f, 0x72, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x43, 0x6f, 0x75, - 0x6e, 0x74, 0x12, 0x26, 0x0a, 0x0f, 0x73, 0x75, 0x70, 0x65, 0x72, 0x5f, 0x72, 0x65, 0x61, 0x64, - 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x15, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x73, 0x75, 0x70, - 0x65, 0x72, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x6e, 0x6c, 0x79, 0x12, 0x5b, 0x0a, 0x19, 0x72, 0x65, - 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x16, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, - 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x64, 0x61, 0x74, 0x61, 0x2e, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x18, 0x72, - 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x3b, 0x0a, 0x13, 0x53, 0x74, 0x6f, 0x70, 0x52, - 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x12, - 0x0a, 0x0e, 0x49, 0x4f, 0x41, 0x4e, 0x44, 0x53, 0x51, 0x4c, 0x54, 0x48, 0x52, 0x45, 0x41, 0x44, - 0x10, 0x00, 0x12, 0x10, 0x0a, 0x0c, 0x49, 0x4f, 0x54, 0x48, 0x52, 0x45, 0x41, 0x44, 0x4f, 0x4e, - 0x4c, 0x59, 0x10, 0x01, 0x42, 0x2e, 0x5a, 0x2c, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2e, 0x69, - 0x6f, 0x2f, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x76, 0x74, 0x2f, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x64, 0x61, 0x74, 0x61, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x79, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x39, 0x0a, 0x19, 0x73, 0x65, 0x6d, 0x69, + 0x5f, 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x5f, 0x74, 0x69, + 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x13, 0x20, 0x01, 0x28, 0x04, 0x52, 0x16, 0x73, 0x65, 0x6d, + 0x69, 0x53, 0x79, 0x6e, 0x63, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x54, 0x69, 0x6d, 0x65, + 0x6f, 0x75, 0x74, 0x12, 0x45, 0x0a, 0x20, 0x73, 0x65, 0x6d, 0x69, 0x5f, 0x73, 0x79, 0x6e, 0x63, + 0x5f, 0x77, 0x61, 0x69, 0x74, 0x5f, 0x66, 0x6f, 0x72, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, + 0x61, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x14, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x1b, 0x73, + 0x65, 0x6d, 0x69, 0x53, 0x79, 0x6e, 0x63, 0x57, 0x61, 0x69, 0x74, 0x46, 0x6f, 0x72, 0x52, 0x65, + 0x70, 0x6c, 0x69, 0x63, 0x61, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x26, 0x0a, 0x0f, 0x73, 0x75, + 0x70, 0x65, 0x72, 0x5f, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x15, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x0d, 0x73, 0x75, 0x70, 0x65, 0x72, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x6e, + 0x6c, 0x79, 0x12, 0x5b, 0x0a, 0x19, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x16, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x18, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2a, + 0x3b, 0x0a, 0x13, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x4d, 0x6f, 0x64, 
0x65, 0x12, 0x12, 0x0a, 0x0e, 0x49, 0x4f, 0x41, 0x4e, 0x44, 0x53, + 0x51, 0x4c, 0x54, 0x48, 0x52, 0x45, 0x41, 0x44, 0x10, 0x00, 0x12, 0x10, 0x0a, 0x0c, 0x49, 0x4f, + 0x54, 0x48, 0x52, 0x45, 0x41, 0x44, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x01, 0x42, 0x2e, 0x5a, 0x2c, + 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2e, 0x69, 0x6f, 0x2f, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, + 0x2f, 0x67, 0x6f, 0x2f, 0x76, 0x74, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x72, 0x65, 0x70, + 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x64, 0x61, 0x74, 0x61, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/go/vt/proto/replicationdata/replicationdata_vtproto.pb.go b/go/vt/proto/replicationdata/replicationdata_vtproto.pb.go index 2bf3cb7788c..b3d638a1327 100644 --- a/go/vt/proto/replicationdata/replicationdata_vtproto.pb.go +++ b/go/vt/proto/replicationdata/replicationdata_vtproto.pb.go @@ -85,7 +85,6 @@ func (m *StopReplicationStatus) CloneVT() *StopReplicationStatus { r := new(StopReplicationStatus) r.Before = m.Before.CloneVT() r.After = m.After.CloneVT() - r.BackupRunning = m.BackupRunning if len(m.unknownFields) > 0 { r.unknownFields = make([]byte, len(m.unknownFields)) copy(r.unknownFields, m.unknownFields) @@ -446,16 +445,6 @@ func (m *StopReplicationStatus) MarshalToSizedBufferVT(dAtA []byte) (int, error) i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.BackupRunning { - i-- - if m.BackupRunning { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x18 - } if m.After != nil { size, err := m.After.MarshalToSizedBufferVT(dAtA[:i]) if err != nil { @@ -878,9 +867,6 @@ func (m *StopReplicationStatus) SizeVT() (n int) { l = m.After.SizeVT() n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) } - if m.BackupRunning { - n += 2 - } n += len(m.unknownFields) return n } @@ -1799,26 +1785,6 @@ func (m *StopReplicationStatus) UnmarshalVT(dAtA []byte) error { return err } iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field BackupRunning", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return protohelpers.ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.BackupRunning = bool(v != 0) default: iNdEx = preIndex skippy, err := protohelpers.Skip(dAtA[iNdEx:]) diff --git a/go/vt/proto/tabletmanagerdata/tabletmanagerdata.pb.go b/go/vt/proto/tabletmanagerdata/tabletmanagerdata.pb.go index 303a3a3c53b..67eae2395ad 100644 --- a/go/vt/proto/tabletmanagerdata/tabletmanagerdata.pb.go +++ b/go/vt/proto/tabletmanagerdata/tabletmanagerdata.pb.go @@ -3098,8 +3098,7 @@ type ReplicationStatusResponse struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Status *replicationdata.Status `protobuf:"bytes,1,opt,name=status,proto3" json:"status,omitempty"` - BackupRunning bool `protobuf:"varint,2,opt,name=backup_running,json=backupRunning,proto3" json:"backup_running,omitempty"` + Status *replicationdata.Status `protobuf:"bytes,1,opt,name=status,proto3" json:"status,omitempty"` } func (x *ReplicationStatusResponse) Reset() { @@ -3139,13 +3138,6 @@ func (x *ReplicationStatusResponse) GetStatus() *replicationdata.Status { return nil } -func (x *ReplicationStatusResponse) GetBackupRunning() bool { - if x != nil { - return x.BackupRunning - } - return false -} - type PrimaryStatusRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -5084,8 +5076,7 @@ type 
StopReplicationAndGetStatusResponse struct { unknownFields protoimpl.UnknownFields // Status represents the replication status call right before, and right after telling the replica to stop. - Status *replicationdata.StopReplicationStatus `protobuf:"bytes,2,opt,name=status,proto3" json:"status,omitempty"` - BackupRunning bool `protobuf:"varint,3,opt,name=backup_running,json=backupRunning,proto3" json:"backup_running,omitempty"` + Status *replicationdata.StopReplicationStatus `protobuf:"bytes,2,opt,name=status,proto3" json:"status,omitempty"` } func (x *StopReplicationAndGetStatusResponse) Reset() { @@ -5125,13 +5116,6 @@ func (x *StopReplicationAndGetStatusResponse) GetStatus() *replicationdata.StopR return nil } -func (x *StopReplicationAndGetStatusResponse) GetBackupRunning() bool { - if x != nil { - return x.BackupRunning - } - return false -} - type PromoteReplicaRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -8468,351 +8452,242 @@ var file_tabletmanagerdata_proto_rawDesc = []byte{ 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x0b, 0x48, 0x6f, 0x73, 0x74, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x22, 0x1a, 0x0a, 0x18, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x22, 0x73, 0x0a, 0x19, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x73, 0x74, 0x22, 0x4c, 0x0a, 0x19, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2f, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x12, 0x25, 0x0a, 0x0e, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x5f, 0x72, 0x75, 0x6e, 0x6e, 0x69, - 0x6e, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, - 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x22, 0x16, 0x0a, 0x14, 0x50, 0x72, 0x69, 0x6d, 0x61, - 0x72, 0x79, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, - 0x4f, 0x0a, 0x15, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x36, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, - 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x69, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x50, 0x72, 0x69, 0x6d, 0x61, - 0x72, 0x79, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x22, 0x18, 0x0a, 0x16, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x50, 0x6f, 0x73, 0x69, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x35, 0x0a, 0x17, 0x50, 0x72, - 0x69, 0x6d, 0x61, 0x72, 0x79, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, - 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, - 0x6e, 0x22, 0x34, 0x0a, 0x16, 0x57, 0x61, 0x69, 0x74, 0x46, 0x6f, 0x72, 0x50, 0x6f, 0x73, 0x69, - 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x70, - 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 
0x08, 0x70, - 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x19, 0x0a, 0x17, 0x57, 0x61, 0x69, 0x74, 0x46, - 0x6f, 0x72, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x22, 0x18, 0x0a, 0x16, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x19, 0x0a, 0x17, - 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x5e, 0x0a, 0x1d, 0x53, 0x74, 0x6f, 0x70, 0x52, - 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x69, 0x6e, 0x69, 0x6d, 0x75, - 0x6d, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x6f, 0x73, 0x69, - 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x6f, 0x73, 0x69, - 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x21, 0x0a, 0x0c, 0x77, 0x61, 0x69, 0x74, 0x5f, 0x74, 0x69, 0x6d, - 0x65, 0x6f, 0x75, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x77, 0x61, 0x69, 0x74, - 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x22, 0x3c, 0x0a, 0x1e, 0x53, 0x74, 0x6f, 0x70, 0x52, - 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x69, 0x6e, 0x69, 0x6d, 0x75, - 0x6d, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x6f, 0x73, - 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x6f, 0x73, - 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x35, 0x0a, 0x17, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, - 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x65, 0x6d, 0x69, 0x53, 0x79, 0x6e, 0x63, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x08, 0x52, 0x08, 0x73, 0x65, 0x6d, 0x69, 0x53, 0x79, 0x6e, 0x63, 0x22, 0x1a, 0x0a, 0x18, - 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x62, 0x0a, 0x21, 0x53, 0x74, 0x61, 0x72, - 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x55, 0x6e, 0x74, 0x69, - 0x6c, 0x41, 0x66, 0x74, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, - 0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x21, 0x0a, 0x0c, 0x77, 0x61, 0x69, - 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, - 0x0b, 0x77, 0x61, 0x69, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x22, 0x24, 0x0a, 0x22, - 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x55, 0x6e, 0x74, 0x69, 0x6c, 0x41, 0x66, 0x74, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x22, 0x14, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, - 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x2b, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x52, - 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, - 0x14, 0x0a, 0x05, 0x61, 0x64, 0x64, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, - 0x61, 0x64, 0x64, 0x72, 0x73, 0x22, 0x19, 0x0a, 0x17, 0x52, 0x65, 0x73, 0x65, 0x74, 0x52, 0x65, - 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x22, 0x1a, 0x0a, 0x18, 0x52, 0x65, 0x73, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, - 0x74, 0x69, 
0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x2f, 0x0a, 0x17, - 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x78, 0x65, 0x63, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x22, 0x46, 0x0a, - 0x18, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x78, 0x65, - 0x63, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x06, 0x72, 0x65, 0x73, - 0x75, 0x6c, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x71, 0x75, 0x65, 0x72, - 0x79, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x72, - 0x65, 0x73, 0x75, 0x6c, 0x74, 0x22, 0x4b, 0x0a, 0x1d, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x61, 0x69, 0x74, 0x46, 0x6f, 0x72, 0x50, 0x6f, 0x73, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x05, 0x52, 0x02, 0x69, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, - 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, - 0x6f, 0x6e, 0x22, 0x20, 0x0a, 0x1e, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x57, 0x61, 0x69, 0x74, 0x46, 0x6f, 0x72, 0x50, 0x6f, 0x73, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x30, 0x0a, 0x12, 0x49, 0x6e, 0x69, 0x74, 0x50, 0x72, 0x69, 0x6d, - 0x61, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x65, - 0x6d, 0x69, 0x53, 0x79, 0x6e, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x73, 0x65, - 0x6d, 0x69, 0x53, 0x79, 0x6e, 0x63, 0x22, 0x31, 0x0a, 0x13, 0x49, 0x6e, 0x69, 0x74, 0x50, 0x72, - 0x69, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1a, 0x0a, - 0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xd8, 0x01, 0x0a, 0x1e, 0x50, 0x6f, - 0x70, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x4a, 0x6f, - 0x75, 0x72, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x26, 0x0a, 0x0f, - 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x6e, 0x73, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x72, 0x65, 0x61, 0x74, - 0x65, 0x64, 0x4e, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, - 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x61, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x3a, 0x0a, 0x0d, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, - 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, - 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, - 0x69, 0x61, 0x73, 0x52, 0x0c, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x41, 0x6c, 0x69, 0x61, - 0x73, 0x12, 0x31, 0x0a, 0x14, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x5f, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x13, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x73, 0x69, - 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x21, 0x0a, 0x1f, 0x50, 0x6f, 0x70, 0x75, 0x6c, 0x61, 0x74, 0x65, - 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 
0x6e, 0x74, 0x4a, 0x6f, 0x75, 0x72, 0x6e, 0x61, 0x6c, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x20, 0x0a, 0x1e, 0x52, 0x65, 0x61, 0x64, 0x52, - 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x4a, 0x6f, 0x75, 0x72, 0x6e, 0x61, 0x6c, 0x49, 0x6e, - 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x39, 0x0a, 0x1f, 0x52, 0x65, 0x61, - 0x64, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x4a, 0x6f, 0x75, 0x72, 0x6e, 0x61, 0x6c, - 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x16, 0x0a, 0x06, - 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x6c, 0x65, - 0x6e, 0x67, 0x74, 0x68, 0x22, 0xba, 0x01, 0x0a, 0x12, 0x49, 0x6e, 0x69, 0x74, 0x52, 0x65, 0x70, - 0x6c, 0x69, 0x63, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2d, 0x0a, 0x06, 0x70, - 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, - 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, - 0x61, 0x73, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x31, 0x0a, 0x14, 0x72, 0x65, - 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, - 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x26, 0x0a, - 0x0f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x6e, 0x73, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x72, 0x65, 0x61, - 0x74, 0x65, 0x64, 0x4e, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x65, 0x6d, 0x69, 0x53, 0x79, 0x6e, - 0x63, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x73, 0x65, 0x6d, 0x69, 0x53, 0x79, 0x6e, - 0x63, 0x22, 0x15, 0x0a, 0x13, 0x49, 0x6e, 0x69, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x16, 0x0a, 0x14, 0x44, 0x65, 0x6d, 0x6f, - 0x74, 0x65, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x22, 0x64, 0x0a, 0x15, 0x44, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, - 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x45, 0x0a, 0x0e, 0x70, 0x72, 0x69, - 0x6d, 0x61, 0x72, 0x79, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x22, 0x16, 0x0a, 0x14, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x53, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x4f, 0x0a, 0x15, 0x50, 0x72, 0x69, 0x6d, + 0x61, 0x72, 0x79, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x36, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x53, 0x74, 0x61, 0x74, 0x75, - 0x73, 0x52, 0x0d, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02, 0x22, 0x36, 0x0a, 0x18, 0x55, 0x6e, 0x64, 0x6f, 0x44, 0x65, - 0x6d, 0x6f, 0x74, 0x65, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x65, 0x6d, 0x69, 0x53, 0x79, 0x6e, 0x63, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x73, 0x65, 0x6d, 0x69, 0x53, 0x79, 0x6e, 0x63, 0x22, 0x1b, - 0x0a, 0x19, 0x55, 0x6e, 0x64, 0x6f, 0x44, 0x65, 0x6d, 0x6f, 0x74, 
0x65, 0x50, 0x72, 0x69, 0x6d, - 0x61, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x1b, 0x0a, 0x19, 0x52, - 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x57, 0x61, 0x73, 0x50, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, - 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x1c, 0x0a, 0x1a, 0x52, 0x65, 0x70, 0x6c, - 0x69, 0x63, 0x61, 0x57, 0x61, 0x73, 0x50, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x64, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x23, 0x0a, 0x21, 0x52, 0x65, 0x73, 0x65, 0x74, 0x52, - 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, - 0x74, 0x65, 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x24, 0x0a, 0x22, 0x52, - 0x65, 0x73, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, - 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x22, 0x13, 0x0a, 0x11, 0x46, 0x75, 0x6c, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x49, 0x0a, 0x12, 0x46, 0x75, 0x6c, 0x6c, 0x53, 0x74, - 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x33, 0x0a, 0x06, - 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x72, - 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x46, - 0x75, 0x6c, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, - 0x73, 0x22, 0x9c, 0x02, 0x0a, 0x1b, 0x53, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x12, 0x2d, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, - 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, - 0x12, 0x26, 0x0a, 0x0f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, - 0x5f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d, 0x74, 0x69, 0x6d, 0x65, 0x43, - 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x4e, 0x73, 0x12, 0x36, 0x0a, 0x17, 0x66, 0x6f, 0x72, 0x63, - 0x65, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, 0x66, 0x6f, 0x72, 0x63, 0x65, - 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x12, 0x23, 0x0a, 0x0d, 0x77, 0x61, 0x69, 0x74, 0x5f, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, - 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x77, 0x61, 0x69, 0x74, 0x50, 0x6f, 0x73, - 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x65, 0x6d, 0x69, 0x53, 0x79, 0x6e, - 0x63, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x73, 0x65, 0x6d, 0x69, 0x53, 0x79, 0x6e, - 0x63, 0x12, 0x2d, 0x0a, 0x12, 0x68, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x5f, 0x69, - 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x06, 0x20, 0x01, 0x28, 0x01, 0x52, 0x11, 0x68, - 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, - 0x22, 0x1e, 0x0a, 0x1c, 0x53, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x22, 0x4b, 0x0a, 0x1a, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x57, 0x61, 0x73, 0x52, 
0x65, - 0x73, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2d, - 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, - 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, - 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x22, 0x1d, 0x0a, - 0x1b, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x57, 0x61, 0x73, 0x52, 0x65, 0x73, 0x74, 0x61, - 0x72, 0x74, 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x7e, 0x0a, 0x22, - 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x41, - 0x6e, 0x64, 0x47, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x12, 0x58, 0x0a, 0x15, 0x73, 0x74, 0x6f, 0x70, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x69, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0e, 0x32, 0x24, 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x64, - 0x61, 0x74, 0x61, 0x2e, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x4d, 0x6f, 0x64, 0x65, 0x52, 0x13, 0x73, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, - 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x6f, 0x64, 0x65, 0x22, 0x92, 0x01, 0x0a, - 0x23, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x41, 0x6e, 0x64, 0x47, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3e, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, - 0x61, 0x74, 0x75, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x5f, 0x72, - 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x62, 0x61, - 0x63, 0x6b, 0x75, 0x70, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x4a, 0x04, 0x08, 0x01, 0x10, - 0x02, 0x22, 0x33, 0x0a, 0x15, 0x50, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x52, 0x65, 0x70, 0x6c, - 0x69, 0x63, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x65, - 0x6d, 0x69, 0x53, 0x79, 0x6e, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x73, 0x65, - 0x6d, 0x69, 0x53, 0x79, 0x6e, 0x63, 0x22, 0x34, 0x0a, 0x16, 0x50, 0x72, 0x6f, 0x6d, 0x6f, 0x74, - 0x65, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xe7, 0x01, 0x0a, - 0x0d, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, - 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x05, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, - 0x12, 0x23, 0x0a, 0x0d, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, - 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x50, 0x72, - 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x30, 0x0a, 0x14, 0x69, 0x6e, 0x63, 0x72, 0x65, 0x6d, 0x65, - 0x6e, 0x74, 0x61, 
0x6c, 0x5f, 0x66, 0x72, 0x6f, 0x6d, 0x5f, 0x70, 0x6f, 0x73, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x12, 0x69, 0x6e, 0x63, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, - 0x46, 0x72, 0x6f, 0x6d, 0x50, 0x6f, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x75, 0x70, 0x67, 0x72, 0x61, - 0x64, 0x65, 0x5f, 0x73, 0x61, 0x66, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x75, - 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x53, 0x61, 0x66, 0x65, 0x12, 0x28, 0x0a, 0x0d, 0x62, 0x61, - 0x63, 0x6b, 0x75, 0x70, 0x5f, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, - 0x09, 0x48, 0x00, 0x52, 0x0c, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x45, 0x6e, 0x67, 0x69, 0x6e, - 0x65, 0x88, 0x01, 0x01, 0x42, 0x10, 0x0a, 0x0e, 0x5f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x5f, - 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x22, 0x36, 0x0a, 0x0e, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x24, 0x0a, 0x05, 0x65, 0x76, 0x65, 0x6e, - 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x6c, 0x6f, 0x67, 0x75, 0x74, 0x69, - 0x6c, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x05, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x22, 0xfe, - 0x01, 0x0a, 0x18, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x46, 0x72, 0x6f, 0x6d, 0x42, 0x61, - 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2d, 0x0a, 0x0b, 0x62, - 0x61, 0x63, 0x6b, 0x75, 0x70, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x0c, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x52, 0x0a, - 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x24, 0x0a, 0x0e, 0x72, 0x65, - 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x74, 0x6f, 0x5f, 0x70, 0x6f, 0x73, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x54, 0x6f, 0x50, 0x6f, 0x73, - 0x12, 0x17, 0x0a, 0x07, 0x64, 0x72, 0x79, 0x5f, 0x72, 0x75, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x08, 0x52, 0x06, 0x64, 0x72, 0x79, 0x52, 0x75, 0x6e, 0x12, 0x3e, 0x0a, 0x14, 0x72, 0x65, 0x73, - 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x74, 0x6f, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, - 0x70, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, - 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x52, 0x12, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x54, 0x6f, - 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x34, 0x0a, 0x16, 0x61, 0x6c, 0x6c, - 0x6f, 0x77, 0x65, 0x64, 0x5f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x5f, 0x65, 0x6e, 0x67, 0x69, - 0x6e, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x14, 0x61, 0x6c, 0x6c, 0x6f, 0x77, - 0x65, 0x64, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x73, 0x22, - 0x41, 0x0a, 0x19, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x46, 0x72, 0x6f, 0x6d, 0x42, 0x61, - 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x24, 0x0a, 0x05, - 0x65, 0x76, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x6c, 0x6f, - 0x67, 0x75, 0x74, 0x69, 0x6c, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x05, 0x65, 0x76, 0x65, - 0x6e, 0x74, 0x22, 0xee, 0x04, 0x0a, 0x21, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x56, 0x52, 0x65, - 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, - 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x77, 0x6f, 0x72, 0x6b, - 0x66, 0x6c, 0x6f, 0x77, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x77, 0x6f, 0x72, 0x6b, - 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x3d, 0x0a, 
0x0d, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x5f, 0x73, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x62, 0x69, - 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x42, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x53, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x0c, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x53, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x03, 0x20, 0x03, - 0x28, 0x09, 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x12, 0x37, 0x0a, 0x0c, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0e, 0x32, - 0x14, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, - 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, - 0x65, 0x73, 0x12, 0x6c, 0x0a, 0x1b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x73, 0x65, 0x6c, - 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, - 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2c, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, - 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, - 0x65, 0x74, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x65, 0x66, 0x65, - 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x19, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x53, 0x65, 0x6c, - 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, - 0x12, 0x49, 0x0a, 0x0d, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x74, 0x79, 0x70, - 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, - 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0c, 0x77, - 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x54, 0x79, 0x70, 0x65, 0x12, 0x53, 0x0a, 0x11, 0x77, - 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x73, 0x75, 0x62, 0x5f, 0x74, 0x79, 0x70, 0x65, - 0x18, 0x07, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x27, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, - 0x61, 0x74, 0x61, 0x2e, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x75, 0x62, 0x54, 0x79, 0x70, 0x65, 0x52, - 0x0f, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x75, 0x62, 0x54, 0x79, 0x70, 0x65, - 0x12, 0x30, 0x0a, 0x14, 0x64, 0x65, 0x66, 0x65, 0x72, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, - 0x61, 0x72, 0x79, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, - 0x64, 0x65, 0x66, 0x65, 0x72, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x61, 0x72, 0x79, 0x4b, 0x65, - 0x79, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x61, 0x75, 0x74, 0x6f, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, - 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x61, 0x75, 0x74, 0x6f, 0x53, 0x74, 0x61, 0x72, - 0x74, 0x12, 0x26, 0x0a, 0x0f, 0x73, 0x74, 0x6f, 0x70, 0x5f, 0x61, 0x66, 0x74, 0x65, 0x72, 0x5f, - 0x63, 0x6f, 0x70, 0x79, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x73, 0x74, 0x6f, 0x70, - 0x41, 0x66, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x70, 0x79, 0x12, 0x18, 0x0a, 0x07, 0x6f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x22, 0x50, 0x0a, 0x22, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x56, 0x52, 0x65, - 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x6f, 
0x72, 0x6b, 0x66, 0x6c, 0x6f, - 0x77, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x06, 0x72, 0x65, 0x73, - 0x75, 0x6c, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x71, 0x75, 0x65, 0x72, - 0x79, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x72, - 0x65, 0x73, 0x75, 0x6c, 0x74, 0x22, 0xda, 0x01, 0x0a, 0x16, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, - 0x54, 0x61, 0x62, 0x6c, 0x65, 0x44, 0x61, 0x74, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x12, 0x60, 0x0a, 0x0d, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, - 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3b, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, - 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x44, 0x65, 0x6c, 0x65, - 0x74, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x44, 0x61, 0x74, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x45, - 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x46, 0x69, 0x6c, 0x74, 0x65, - 0x72, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x62, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x73, 0x69, 0x7a, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x62, 0x61, 0x74, 0x63, 0x68, 0x53, 0x69, 0x7a, - 0x65, 0x1a, 0x3f, 0x0a, 0x11, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, - 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, - 0x38, 0x01, 0x22, 0x19, 0x0a, 0x17, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x61, 0x62, 0x6c, - 0x65, 0x44, 0x61, 0x74, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x3f, 0x0a, - 0x21, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x22, 0x50, - 0x0a, 0x22, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x51, 0x75, 0x65, - 0x72, 0x79, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, - 0x22, 0x21, 0x0a, 0x1f, 0x48, 0x61, 0x73, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x22, 0x34, 0x0a, 0x20, 0x48, 0x61, 0x73, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x68, 0x61, 0x73, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x08, 0x52, 0x03, 0x68, 0x61, 0x73, 0x22, 0xe0, 0x02, 0x0a, 0x20, 0x52, 0x65, - 0x61, 0x64, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x6f, - 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 
0x1f, - 0x0a, 0x0b, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x01, 0x20, - 0x03, 0x28, 0x05, 0x52, 0x0a, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x49, 0x64, 0x73, 0x12, - 0x2b, 0x0a, 0x11, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x66, - 0x6c, 0x6f, 0x77, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x10, 0x69, 0x6e, 0x63, 0x6c, - 0x75, 0x64, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x12, 0x4c, 0x0a, 0x0e, - 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x73, 0x18, 0x03, - 0x20, 0x03, 0x28, 0x0e, 0x32, 0x25, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, - 0x61, 0x2e, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x6f, - 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x0d, 0x69, 0x6e, 0x63, - 0x6c, 0x75, 0x64, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x65, 0x78, - 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x18, - 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x10, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x57, 0x6f, - 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x12, 0x4c, 0x0a, 0x0e, 0x65, 0x78, 0x63, 0x6c, 0x75, - 0x64, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0e, 0x32, - 0x25, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x52, 0x65, - 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, - 0x77, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x0d, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x53, - 0x74, 0x61, 0x74, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x64, 0x65, - 0x5f, 0x66, 0x72, 0x6f, 0x7a, 0x65, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x65, - 0x78, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x46, 0x72, 0x6f, 0x7a, 0x65, 0x6e, 0x22, 0x76, 0x0a, 0x21, - 0x52, 0x65, 0x61, 0x64, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x51, 0x0a, 0x09, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x18, 0x01, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, - 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x56, 0x52, 0x65, - 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, - 0x77, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x09, 0x77, 0x6f, 0x72, 0x6b, 0x66, - 0x6c, 0x6f, 0x77, 0x73, 0x22, 0x3d, 0x0a, 0x1f, 0x52, 0x65, 0x61, 0x64, 0x56, 0x52, 0x65, 0x70, - 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, - 0x6c, 0x6f, 0x77, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, - 0x6c, 0x6f, 0x77, 0x22, 0xe7, 0x0a, 0x0a, 0x20, 0x52, 0x65, 0x61, 0x64, 0x56, 0x52, 0x65, 0x70, - 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x77, 0x6f, 0x72, 0x6b, - 0x66, 0x6c, 0x6f, 0x77, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x77, 0x6f, 0x72, 0x6b, - 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x09, 
0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x12, 0x37, 0x0a, 0x0c, 0x74, 0x61, + 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0x18, 0x0a, 0x16, 0x50, 0x72, 0x69, + 0x6d, 0x61, 0x72, 0x79, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x22, 0x35, 0x0a, 0x17, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x50, 0x6f, + 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1a, + 0x0a, 0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x34, 0x0a, 0x16, 0x57, 0x61, + 0x69, 0x74, 0x46, 0x6f, 0x72, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x22, 0x19, 0x0a, 0x17, 0x57, 0x61, 0x69, 0x74, 0x46, 0x6f, 0x72, 0x50, 0x6f, 0x73, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x18, 0x0a, 0x16, 0x53, + 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x19, 0x0a, 0x17, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, + 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0x5e, 0x0a, 0x1d, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x4d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x21, 0x0a, + 0x0c, 0x77, 0x61, 0x69, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x03, 0x52, 0x0b, 0x77, 0x61, 0x69, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, + 0x22, 0x3c, 0x0a, 0x1e, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x4d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x35, + 0x0a, 0x17, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x65, 0x6d, + 0x69, 0x53, 0x79, 0x6e, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x73, 0x65, 0x6d, + 0x69, 0x53, 0x79, 0x6e, 0x63, 0x22, 0x1a, 0x0a, 0x18, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, + 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x62, 0x0a, 0x21, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x55, 0x6e, 0x74, 0x69, 0x6c, 0x41, 0x66, 0x74, 0x65, 0x72, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x21, 0x0a, 0x0c, 0x77, 0x61, 0x69, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, + 0x75, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x77, 0x61, 0x69, 0x74, 0x54, 0x69, + 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x22, 0x24, 
0x0a, 0x22, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, + 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x55, 0x6e, 0x74, 0x69, 0x6c, 0x41, 0x66, + 0x74, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x14, 0x0a, 0x12, 0x47, + 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x22, 0x2b, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x61, 0x64, 0x64, 0x72, + 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x61, 0x64, 0x64, 0x72, 0x73, 0x22, 0x19, + 0x0a, 0x17, 0x52, 0x65, 0x73, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x1a, 0x0a, 0x18, 0x52, 0x65, 0x73, + 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x2f, 0x0a, 0x17, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x78, 0x65, 0x63, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x14, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x22, 0x46, 0x0a, 0x18, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x78, 0x65, 0x63, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, + 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x22, 0x4b, + 0x0a, 0x1d, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x61, + 0x69, 0x74, 0x46, 0x6f, 0x72, 0x50, 0x6f, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x02, 0x69, 0x64, 0x12, + 0x1a, 0x0a, 0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x20, 0x0a, 0x1e, 0x56, + 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x61, 0x69, 0x74, 0x46, + 0x6f, 0x72, 0x50, 0x6f, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x30, 0x0a, + 0x12, 0x49, 0x6e, 0x69, 0x74, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x65, 0x6d, 0x69, 0x53, 0x79, 0x6e, 0x63, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x73, 0x65, 0x6d, 0x69, 0x53, 0x79, 0x6e, 0x63, 0x22, + 0x31, 0x0a, 0x13, 0x49, 0x6e, 0x69, 0x74, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x22, 0xd8, 0x01, 0x0a, 0x1e, 0x50, 0x6f, 0x70, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x52, + 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x4a, 0x6f, 0x75, 0x72, 0x6e, 0x61, 0x6c, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x26, 0x0a, 0x0f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x63, 0x72, + 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d, + 0x74, 0x69, 0x6d, 0x65, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x4e, 0x73, 0x12, 0x1f, 0x0a, + 0x0b, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 
0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0a, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x3a, + 0x0a, 0x0d, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0c, 0x70, 0x72, + 0x69, 0x6d, 0x61, 0x72, 0x79, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x31, 0x0a, 0x14, 0x72, 0x65, + 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x21, 0x0a, + 0x1f, 0x50, 0x6f, 0x70, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, + 0x74, 0x4a, 0x6f, 0x75, 0x72, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0x20, 0x0a, 0x1e, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, + 0x4a, 0x6f, 0x75, 0x72, 0x6e, 0x61, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x22, 0x39, 0x0a, 0x1f, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, + 0x6e, 0x74, 0x4a, 0x6f, 0x75, 0x72, 0x6e, 0x61, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x22, 0xba, 0x01, + 0x0a, 0x12, 0x49, 0x6e, 0x69, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x2d, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, + 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x06, 0x70, 0x61, 0x72, + 0x65, 0x6e, 0x74, 0x12, 0x31, 0x0a, 0x14, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x13, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, + 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x26, 0x0a, 0x0f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x63, + 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, + 0x0d, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x4e, 0x73, 0x12, 0x1a, + 0x0a, 0x08, 0x73, 0x65, 0x6d, 0x69, 0x53, 0x79, 0x6e, 0x63, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x08, 0x73, 0x65, 0x6d, 0x69, 0x53, 0x79, 0x6e, 0x63, 0x22, 0x15, 0x0a, 0x13, 0x49, 0x6e, + 0x69, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x16, 0x0a, 0x14, 0x44, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x50, 0x72, 0x69, 0x6d, 0x61, + 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x64, 0x0a, 0x15, 0x44, 0x65, 0x6d, + 0x6f, 0x74, 0x65, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x45, 0x0a, 0x0e, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x5f, 0x73, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x72, 0x65, 0x70, + 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x50, 0x72, 0x69, + 0x6d, 0x61, 0x72, 0x79, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x0d, 0x70, 0x72, 0x69, 
0x6d, + 0x61, 0x72, 0x79, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02, 0x22, + 0x36, 0x0a, 0x18, 0x55, 0x6e, 0x64, 0x6f, 0x44, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x50, 0x72, 0x69, + 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x73, + 0x65, 0x6d, 0x69, 0x53, 0x79, 0x6e, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x73, + 0x65, 0x6d, 0x69, 0x53, 0x79, 0x6e, 0x63, 0x22, 0x1b, 0x0a, 0x19, 0x55, 0x6e, 0x64, 0x6f, 0x44, + 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x1b, 0x0a, 0x19, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x57, + 0x61, 0x73, 0x50, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x22, 0x1c, 0x0a, 0x1a, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x57, 0x61, 0x73, 0x50, + 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x23, 0x0a, 0x21, 0x52, 0x65, 0x73, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x22, 0x24, 0x0a, 0x22, 0x52, 0x65, 0x73, 0x65, 0x74, 0x52, 0x65, 0x70, + 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, + 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x13, 0x0a, 0x11, 0x46, 0x75, + 0x6c, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, + 0x49, 0x0a, 0x12, 0x46, 0x75, 0x6c, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x33, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x46, 0x75, 0x6c, 0x6c, 0x53, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0x9c, 0x02, 0x0a, 0x1b, 0x53, + 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2d, 0x0a, 0x06, 0x70, 0x61, + 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, + 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, + 0x73, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x26, 0x0a, 0x0f, 0x74, 0x69, 0x6d, + 0x65, 0x5f, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x03, 0x52, 0x0d, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x4e, + 0x73, 0x12, 0x36, 0x0a, 0x17, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, + 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x15, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, + 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x23, 0x0a, 0x0d, 0x77, 0x61, 0x69, + 0x74, 0x5f, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0c, 0x77, 0x61, 0x69, 0x74, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1a, + 0x0a, 0x08, 0x73, 0x65, 0x6d, 0x69, 0x53, 0x79, 0x6e, 0x63, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x08, 0x73, 0x65, 0x6d, 0x69, 0x53, 0x79, 0x6e, 0x63, 0x12, 0x2d, 0x0a, 0x12, 0x68, 0x65, + 0x61, 0x72, 0x74, 
0x62, 0x65, 0x61, 0x74, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x01, 0x52, 0x11, 0x68, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, + 0x74, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x22, 0x1e, 0x0a, 0x1c, 0x53, 0x65, 0x74, + 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x4b, 0x0a, 0x1a, 0x52, 0x65, 0x70, + 0x6c, 0x69, 0x63, 0x61, 0x57, 0x61, 0x73, 0x52, 0x65, 0x73, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2d, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, + 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, + 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x06, + 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x22, 0x1d, 0x0a, 0x1b, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, + 0x61, 0x57, 0x61, 0x73, 0x52, 0x65, 0x73, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x7e, 0x0a, 0x22, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, + 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x6e, 0x64, 0x47, 0x65, 0x74, 0x53, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x58, 0x0a, 0x15, 0x73, + 0x74, 0x6f, 0x70, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x72, 0x65, 0x70, + 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x74, 0x6f, + 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x6f, 0x64, 0x65, + 0x52, 0x13, 0x73, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x4d, 0x6f, 0x64, 0x65, 0x22, 0x6b, 0x0a, 0x23, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, + 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x6e, 0x64, 0x47, 0x65, 0x74, 0x53, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3e, 0x0a, 0x06, + 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x72, + 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, + 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x4a, 0x04, 0x08, 0x01, + 0x10, 0x02, 0x22, 0x33, 0x0a, 0x15, 0x50, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x52, 0x65, 0x70, + 0x6c, 0x69, 0x63, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x73, + 0x65, 0x6d, 0x69, 0x53, 0x79, 0x6e, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x73, + 0x65, 0x6d, 0x69, 0x53, 0x79, 0x6e, 0x63, 0x22, 0x34, 0x0a, 0x16, 0x50, 0x72, 0x6f, 0x6d, 0x6f, + 0x74, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xe7, 0x01, + 0x0a, 0x0d, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x20, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x05, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, + 0x79, 0x12, 0x23, 0x0a, 0x0d, 0x61, 0x6c, 
0x6c, 0x6f, 0x77, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, + 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x50, + 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x30, 0x0a, 0x14, 0x69, 0x6e, 0x63, 0x72, 0x65, 0x6d, + 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x5f, 0x66, 0x72, 0x6f, 0x6d, 0x5f, 0x70, 0x6f, 0x73, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x69, 0x6e, 0x63, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x61, + 0x6c, 0x46, 0x72, 0x6f, 0x6d, 0x50, 0x6f, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x75, 0x70, 0x67, 0x72, + 0x61, 0x64, 0x65, 0x5f, 0x73, 0x61, 0x66, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, + 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x53, 0x61, 0x66, 0x65, 0x12, 0x28, 0x0a, 0x0d, 0x62, + 0x61, 0x63, 0x6b, 0x75, 0x70, 0x5f, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x09, 0x48, 0x00, 0x52, 0x0c, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x45, 0x6e, 0x67, 0x69, + 0x6e, 0x65, 0x88, 0x01, 0x01, 0x42, 0x10, 0x0a, 0x0e, 0x5f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, + 0x5f, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x22, 0x36, 0x0a, 0x0e, 0x42, 0x61, 0x63, 0x6b, 0x75, + 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x24, 0x0a, 0x05, 0x65, 0x76, 0x65, + 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x6c, 0x6f, 0x67, 0x75, 0x74, + 0x69, 0x6c, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x05, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x22, + 0xfe, 0x01, 0x0a, 0x18, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x46, 0x72, 0x6f, 0x6d, 0x42, + 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2d, 0x0a, 0x0b, + 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x52, + 0x0a, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x24, 0x0a, 0x0e, 0x72, + 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x74, 0x6f, 0x5f, 0x70, 0x6f, 0x73, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x54, 0x6f, 0x50, 0x6f, + 0x73, 0x12, 0x17, 0x0a, 0x07, 0x64, 0x72, 0x79, 0x5f, 0x72, 0x75, 0x6e, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x06, 0x64, 0x72, 0x79, 0x52, 0x75, 0x6e, 0x12, 0x3e, 0x0a, 0x14, 0x72, 0x65, + 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x74, 0x6f, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, + 0x6d, 0x70, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, + 0x65, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x52, 0x12, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x54, + 0x6f, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x34, 0x0a, 0x16, 0x61, 0x6c, + 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x5f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x5f, 0x65, 0x6e, 0x67, + 0x69, 0x6e, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x14, 0x61, 0x6c, 0x6c, 0x6f, + 0x77, 0x65, 0x64, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x73, + 0x22, 0x41, 0x0a, 0x19, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x46, 0x72, 0x6f, 0x6d, 0x42, + 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x24, 0x0a, + 0x05, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x6c, + 0x6f, 0x67, 0x75, 0x74, 0x69, 0x6c, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x05, 0x65, 0x76, + 0x65, 0x6e, 0x74, 0x22, 0xee, 0x04, 0x0a, 0x21, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x56, 0x52, + 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 
0x6f, 0x72, 0x6b, 0x66, 0x6c, + 0x6f, 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x77, 0x6f, 0x72, + 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x77, 0x6f, 0x72, + 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x3d, 0x0a, 0x0d, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x5f, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x62, + 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x42, 0x69, 0x6e, 0x6c, 0x6f, 0x67, + 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x0c, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x53, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x03, 0x20, + 0x03, 0x28, 0x09, 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x12, 0x37, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x14, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, @@ -8823,475 +8698,580 @@ var file_tabletmanagerdata_proto_rawDesc = []byte{ 0x6c, 0x65, 0x74, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x19, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, - 0x65, 0x12, 0x17, 0x0a, 0x07, 0x64, 0x62, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x06, 0x64, 0x62, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x61, - 0x67, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x61, 0x67, 0x73, 0x12, 0x49, - 0x0a, 0x0d, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, - 0x08, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, - 0x74, 0x61, 0x2e, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, - 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0c, 0x77, 0x6f, 0x72, - 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x54, 0x79, 0x70, 0x65, 0x12, 0x53, 0x0a, 0x11, 0x77, 0x6f, 0x72, - 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x73, 0x75, 0x62, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x09, - 0x20, 0x01, 0x28, 0x0e, 0x32, 0x27, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, - 0x61, 0x2e, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x6f, - 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x75, 0x62, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0f, 0x77, - 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x75, 0x62, 0x54, 0x79, 0x70, 0x65, 0x12, 0x30, - 0x0a, 0x14, 0x64, 0x65, 0x66, 0x65, 0x72, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x61, 0x72, - 0x79, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x64, 0x65, - 0x66, 0x65, 0x72, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x61, 0x72, 0x79, 0x4b, 0x65, 0x79, 0x73, - 0x12, 0x54, 0x0a, 0x07, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x18, 0x0b, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x3a, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, - 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x07, 0x73, - 0x74, 0x72, 0x65, 0x61, 
0x6d, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x12, 0x73, 0x0a, 0x10, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x6f, 0x76, 0x65, 0x72, 0x72, - 0x69, 0x64, 0x65, 0x73, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x48, 0x2e, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, - 0x65, 0x61, 0x64, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, - 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x73, 0x45, - 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x4f, 0x76, 0x65, 0x72, - 0x72, 0x69, 0x64, 0x65, 0x73, 0x1a, 0xc1, 0x04, 0x0a, 0x06, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, - 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x02, 0x69, 0x64, - 0x12, 0x2a, 0x0a, 0x03, 0x62, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, - 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x42, 0x69, 0x6e, 0x6c, 0x6f, - 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x03, 0x62, 0x6c, 0x73, 0x12, 0x10, 0x0a, 0x03, - 0x70, 0x6f, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x70, 0x6f, 0x73, 0x12, 0x19, - 0x0a, 0x08, 0x73, 0x74, 0x6f, 0x70, 0x5f, 0x70, 0x6f, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x07, 0x73, 0x74, 0x6f, 0x70, 0x50, 0x6f, 0x73, 0x12, 0x17, 0x0a, 0x07, 0x6d, 0x61, 0x78, - 0x5f, 0x74, 0x70, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x6d, 0x61, 0x78, 0x54, - 0x70, 0x73, 0x12, 0x2e, 0x0a, 0x13, 0x6d, 0x61, 0x78, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6c, 0x61, 0x67, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, - 0x11, 0x6d, 0x61, 0x78, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4c, - 0x61, 0x67, 0x12, 0x2f, 0x0a, 0x0c, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, - 0x65, 0x64, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, - 0x65, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x52, 0x0b, 0x74, 0x69, 0x6d, 0x65, 0x55, 0x70, 0x64, 0x61, - 0x74, 0x65, 0x64, 0x12, 0x41, 0x0a, 0x15, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x08, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x54, 0x69, 0x6d, 0x65, - 0x52, 0x14, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x69, 0x6d, - 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x3b, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, - 0x09, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x25, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, - 0x74, 0x61, 0x2e, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, - 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x05, 0x73, 0x74, - 0x61, 0x74, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x0a, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x1f, 0x0a, - 0x0b, 0x72, 0x6f, 0x77, 0x73, 0x5f, 0x63, 0x6f, 0x70, 0x69, 0x65, 0x64, 0x18, 0x0b, 0x20, 0x01, - 0x28, 0x03, 0x52, 0x0a, 0x72, 0x6f, 0x77, 0x73, 0x43, 0x6f, 0x70, 0x69, 0x65, 0x64, 0x12, 0x33, - 0x0a, 0x0e, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x68, 
0x65, 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, - 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, - 0x54, 0x69, 0x6d, 0x65, 0x52, 0x0d, 0x74, 0x69, 0x6d, 0x65, 0x48, 0x65, 0x61, 0x72, 0x74, 0x62, - 0x65, 0x61, 0x74, 0x12, 0x33, 0x0a, 0x0e, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x74, 0x68, 0x72, 0x6f, - 0x74, 0x74, 0x6c, 0x65, 0x64, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x74, - 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x52, 0x0d, 0x74, 0x69, 0x6d, 0x65, 0x54, - 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x64, 0x12, 0x2f, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x70, - 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x64, 0x18, - 0x0e, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, - 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x64, 0x1a, 0x42, 0x0a, 0x14, 0x43, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, - 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, - 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x28, 0x0a, - 0x26, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x4d, 0x0a, 0x27, 0x56, 0x61, 0x6c, 0x69, 0x64, - 0x61, 0x74, 0x65, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, - 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x73, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x04, 0x75, 0x73, 0x65, 0x72, 0x12, 0x0e, 0x0a, 0x02, 0x6f, 0x6b, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x08, 0x52, 0x02, 0x6f, 0x6b, 0x22, 0xd7, 0x01, 0x0a, 0x0c, 0x56, 0x44, 0x69, 0x66, 0x66, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, - 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, - 0x61, 0x63, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x12, - 0x16, 0x0a, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1d, 0x0a, 0x0a, 0x61, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x5f, 0x61, 0x72, 0x67, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x61, 0x63, 0x74, - 0x69, 0x6f, 0x6e, 0x41, 0x72, 0x67, 0x12, 0x1d, 0x0a, 0x0a, 0x76, 0x64, 0x69, 0x66, 0x66, 0x5f, - 0x75, 0x75, 0x69, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x76, 0x64, 0x69, 0x66, - 0x66, 0x55, 0x75, 0x69, 0x64, 0x12, 0x39, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, - 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x44, 0x69, 0x66, 0x66, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x22, 0x6a, 0x0a, 0x0d, 0x56, 0x44, 0x69, 0x66, 0x66, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 
0x03, 0x52, 0x02, 0x69, - 0x64, 0x12, 0x2a, 0x0a, 0x06, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x12, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, - 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x12, 0x1d, 0x0a, - 0x0a, 0x76, 0x64, 0x69, 0x66, 0x66, 0x5f, 0x75, 0x75, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x09, 0x76, 0x64, 0x69, 0x66, 0x66, 0x55, 0x75, 0x69, 0x64, 0x22, 0x79, 0x0a, 0x12, - 0x56, 0x44, 0x69, 0x66, 0x66, 0x50, 0x69, 0x63, 0x6b, 0x65, 0x72, 0x4f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x74, 0x79, 0x70, - 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, - 0x54, 0x79, 0x70, 0x65, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, - 0x63, 0x65, 0x6c, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, - 0x5f, 0x63, 0x65, 0x6c, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x74, 0x61, 0x72, - 0x67, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x22, 0xce, 0x01, 0x0a, 0x12, 0x56, 0x44, 0x69, 0x66, - 0x66, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x19, - 0x0a, 0x08, 0x6f, 0x6e, 0x6c, 0x79, 0x5f, 0x70, 0x6b, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, - 0x52, 0x07, 0x6f, 0x6e, 0x6c, 0x79, 0x50, 0x6b, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x64, 0x65, 0x62, - 0x75, 0x67, 0x5f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, - 0x64, 0x65, 0x62, 0x75, 0x67, 0x51, 0x75, 0x65, 0x72, 0x79, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x6f, - 0x72, 0x6d, 0x61, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, - 0x61, 0x74, 0x12, 0x26, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x5f, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, - 0x5f, 0x72, 0x6f, 0x77, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d, 0x6d, 0x61, 0x78, - 0x53, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x52, 0x6f, 0x77, 0x73, 0x12, 0x3c, 0x0a, 0x1b, 0x72, 0x6f, - 0x77, 0x5f, 0x64, 0x69, 0x66, 0x66, 0x5f, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x5f, 0x74, 0x72, - 0x75, 0x6e, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x61, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, - 0x17, 0x72, 0x6f, 0x77, 0x44, 0x69, 0x66, 0x66, 0x43, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x54, 0x72, - 0x75, 0x6e, 0x63, 0x61, 0x74, 0x65, 0x41, 0x74, 0x22, 0x8d, 0x03, 0x0a, 0x10, 0x56, 0x44, 0x69, - 0x66, 0x66, 0x43, 0x6f, 0x72, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x16, 0x0a, - 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x74, - 0x61, 0x62, 0x6c, 0x65, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x61, 0x75, 0x74, 0x6f, 0x5f, 0x72, 0x65, - 0x74, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x61, 0x75, 0x74, 0x6f, 0x52, - 0x65, 0x74, 0x72, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x6d, 0x61, 0x78, 0x5f, 0x72, 0x6f, 0x77, 0x73, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x6d, 0x61, 0x78, 0x52, 0x6f, 0x77, 0x73, 0x12, - 0x1a, 0x0a, 0x08, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x08, 0x52, 0x08, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x12, 0x1d, 0x0a, 0x0a, 0x73, - 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x5f, 0x70, 0x63, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, - 0x09, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x50, 0x63, 0x74, 0x12, 0x27, 0x0a, 0x0f, 0x74, 0x69, - 
0x6d, 0x65, 0x6f, 0x75, 0x74, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x06, 0x20, - 0x01, 0x28, 0x03, 0x52, 0x0e, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x53, 0x65, 0x63, 0x6f, - 0x6e, 0x64, 0x73, 0x12, 0x38, 0x0a, 0x19, 0x6d, 0x61, 0x78, 0x5f, 0x65, 0x78, 0x74, 0x72, 0x61, - 0x5f, 0x72, 0x6f, 0x77, 0x73, 0x5f, 0x74, 0x6f, 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x65, - 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x52, 0x15, 0x6d, 0x61, 0x78, 0x45, 0x78, 0x74, 0x72, 0x61, - 0x52, 0x6f, 0x77, 0x73, 0x54, 0x6f, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x65, 0x12, 0x2c, 0x0a, - 0x12, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x73, 0x74, - 0x61, 0x74, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x75, 0x70, 0x64, 0x61, 0x74, - 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x74, 0x61, 0x74, 0x73, 0x12, 0x28, 0x0a, 0x10, 0x6d, - 0x61, 0x78, 0x5f, 0x64, 0x69, 0x66, 0x66, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, - 0x09, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x6d, 0x61, 0x78, 0x44, 0x69, 0x66, 0x66, 0x53, 0x65, - 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x22, 0x0a, 0x0a, 0x61, 0x75, 0x74, 0x6f, 0x5f, 0x73, 0x74, - 0x61, 0x72, 0x74, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x09, 0x61, 0x75, 0x74, - 0x6f, 0x53, 0x74, 0x61, 0x72, 0x74, 0x88, 0x01, 0x01, 0x42, 0x0d, 0x0a, 0x0b, 0x5f, 0x61, 0x75, - 0x74, 0x6f, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x22, 0xf2, 0x01, 0x0a, 0x0c, 0x56, 0x44, 0x69, - 0x66, 0x66, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x4c, 0x0a, 0x0e, 0x70, 0x69, 0x63, - 0x6b, 0x65, 0x72, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x25, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, - 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x44, 0x69, 0x66, 0x66, 0x50, 0x69, 0x63, 0x6b, 0x65, - 0x72, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x0d, 0x70, 0x69, 0x63, 0x6b, 0x65, 0x72, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x46, 0x0a, 0x0c, 0x63, 0x6f, 0x72, 0x65, 0x5f, - 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, - 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, - 0x61, 0x2e, 0x56, 0x44, 0x69, 0x66, 0x66, 0x43, 0x6f, 0x72, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x52, 0x0b, 0x63, 0x6f, 0x72, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, - 0x4c, 0x0a, 0x0e, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, - 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x44, 0x69, 0x66, - 0x66, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x0d, - 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xed, 0x04, - 0x0a, 0x21, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, + 0x65, 0x12, 0x49, 0x0a, 0x0d, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x74, 0x79, + 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, + 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0c, + 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x54, 0x79, 0x70, 0x65, 0x12, 0x53, 0x0a, 0x11, + 0x77, 0x6f, 0x72, 0x6b, 
0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x73, 0x75, 0x62, 0x5f, 0x74, 0x79, 0x70, + 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x27, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x75, 0x62, 0x54, 0x79, 0x70, 0x65, + 0x52, 0x0f, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x75, 0x62, 0x54, 0x79, 0x70, + 0x65, 0x12, 0x30, 0x0a, 0x14, 0x64, 0x65, 0x66, 0x65, 0x72, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, + 0x64, 0x61, 0x72, 0x79, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x12, 0x64, 0x65, 0x66, 0x65, 0x72, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x61, 0x72, 0x79, 0x4b, + 0x65, 0x79, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x61, 0x75, 0x74, 0x6f, 0x5f, 0x73, 0x74, 0x61, 0x72, + 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x61, 0x75, 0x74, 0x6f, 0x53, 0x74, 0x61, + 0x72, 0x74, 0x12, 0x26, 0x0a, 0x0f, 0x73, 0x74, 0x6f, 0x70, 0x5f, 0x61, 0x66, 0x74, 0x65, 0x72, + 0x5f, 0x63, 0x6f, 0x70, 0x79, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x73, 0x74, 0x6f, + 0x70, 0x41, 0x66, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x70, 0x79, 0x12, 0x18, 0x0a, 0x07, 0x6f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x50, 0x0a, 0x22, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x56, 0x52, + 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, + 0x6f, 0x77, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x06, 0x72, 0x65, + 0x73, 0x75, 0x6c, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x71, 0x75, 0x65, + 0x72, 0x79, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, + 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x22, 0xda, 0x01, 0x0a, 0x16, 0x44, 0x65, 0x6c, 0x65, 0x74, + 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x44, 0x61, 0x74, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x60, 0x0a, 0x0d, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, + 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3b, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, + 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x44, 0x65, 0x6c, + 0x65, 0x74, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x44, 0x61, 0x74, 0x61, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x46, 0x69, 0x6c, 0x74, + 0x65, 0x72, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x62, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x73, 0x69, 0x7a, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x62, 0x61, 0x74, 0x63, 0x68, 0x53, 0x69, + 0x7a, 0x65, 0x1a, 0x3f, 0x0a, 0x11, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x46, 0x69, 0x6c, 0x74, 0x65, + 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, + 0x02, 0x38, 0x01, 0x22, 0x19, 0x0a, 0x17, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x61, 0x62, + 0x6c, 0x65, 0x44, 0x61, 0x74, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x3f, + 0x0a, 0x21, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, 0x6b, 
0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x12, - 0x14, 0x0a, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, - 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x12, 0x37, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, - 0x74, 0x79, 0x70, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x14, 0x2e, 0x74, 0x6f, - 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, - 0x65, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x73, 0x12, 0x71, - 0x0a, 0x1b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x0e, 0x32, 0x2c, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, - 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x53, 0x65, - 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, - 0x65, 0x48, 0x00, 0x52, 0x19, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x53, 0x65, 0x6c, 0x65, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x88, 0x01, - 0x01, 0x12, 0x33, 0x0a, 0x06, 0x6f, 0x6e, 0x5f, 0x64, 0x64, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, - 0x0e, 0x32, 0x17, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4f, - 0x6e, 0x44, 0x44, 0x4c, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x01, 0x52, 0x05, 0x6f, 0x6e, - 0x44, 0x64, 0x6c, 0x88, 0x01, 0x01, 0x12, 0x40, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, - 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x25, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x22, + 0x50, 0x0a, 0x22, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x51, 0x75, + 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, + 0x74, 0x22, 0x21, 0x0a, 0x1f, 0x48, 0x61, 0x73, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x22, 0x34, 0x0a, 0x20, 0x48, 0x61, 0x73, 0x56, 0x52, 0x65, 0x70, 0x6c, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x68, 0x61, 0x73, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x03, 0x68, 0x61, 0x73, 0x22, 0xe0, 0x02, 0x0a, 0x20, 0x52, + 0x65, 0x61, 0x64, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, + 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x1f, 0x0a, 0x0b, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x05, 0x52, 0x0a, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x49, 0x64, 0x73, + 0x12, 0x2b, 0x0a, 0x11, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 
0x77, 0x6f, 0x72, 0x6b, + 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x10, 0x69, 0x6e, 0x63, + 0x6c, 0x75, 0x64, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x12, 0x4c, 0x0a, + 0x0e, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x73, 0x18, + 0x03, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x25, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, - 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x61, 0x74, 0x65, 0x48, 0x02, 0x52, 0x05, - 0x73, 0x74, 0x61, 0x74, 0x65, 0x88, 0x01, 0x01, 0x12, 0x74, 0x0a, 0x10, 0x63, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x5f, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x73, 0x18, 0x08, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x49, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, - 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x56, 0x52, 0x65, + 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x0d, 0x69, 0x6e, + 0x63, 0x6c, 0x75, 0x64, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x65, + 0x78, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, + 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x10, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x57, + 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x12, 0x4c, 0x0a, 0x0e, 0x65, 0x78, 0x63, 0x6c, + 0x75, 0x64, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0e, + 0x32, 0x25, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x52, + 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, + 0x6f, 0x77, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x0d, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x64, 0x65, + 0x53, 0x74, 0x61, 0x74, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x64, + 0x65, 0x5f, 0x66, 0x72, 0x6f, 0x7a, 0x65, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, + 0x65, 0x78, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x46, 0x72, 0x6f, 0x7a, 0x65, 0x6e, 0x22, 0x76, 0x0a, + 0x21, 0x52, 0x65, 0x61, 0x64, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x51, 0x0a, 0x09, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, + 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x56, 0x52, + 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, + 0x6f, 0x77, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x09, 0x77, 0x6f, 0x72, 0x6b, + 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x22, 0x3d, 0x0a, 0x1f, 0x52, 0x65, 0x61, 0x64, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, - 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x4f, - 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0f, 0x63, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x73, 0x1a, 0x42, - 0x0a, 0x14, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, - 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, - 
0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, - 0x38, 0x01, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x73, 0x65, - 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, - 0x63, 0x65, 0x42, 0x09, 0x0a, 0x07, 0x5f, 0x6f, 0x6e, 0x5f, 0x64, 0x64, 0x6c, 0x42, 0x08, 0x0a, - 0x06, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x4a, 0x04, 0x08, 0x07, 0x10, 0x08, 0x22, 0x50, 0x0a, - 0x22, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x51, 0x75, 0x65, 0x72, - 0x79, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x22, - 0xd6, 0x02, 0x0a, 0x22, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x23, 0x0a, 0x0d, 0x61, 0x6c, 0x6c, 0x5f, 0x77, 0x6f, - 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x61, - 0x6c, 0x6c, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x69, - 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, - 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x10, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x57, - 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x65, 0x78, 0x63, 0x6c, - 0x75, 0x64, 0x65, 0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x18, 0x03, 0x20, - 0x03, 0x28, 0x09, 0x52, 0x10, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x57, 0x6f, 0x72, 0x6b, - 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x12, 0x40, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x0e, 0x32, 0x25, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, - 0x61, 0x2e, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x6f, - 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x61, 0x74, 0x65, 0x48, 0x00, 0x52, 0x05, 0x73, - 0x74, 0x61, 0x74, 0x65, 0x88, 0x01, 0x01, 0x12, 0x1d, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, - 0x67, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x48, 0x01, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, - 0x61, 0x67, 0x65, 0x88, 0x01, 0x01, 0x12, 0x28, 0x0a, 0x0d, 0x73, 0x74, 0x6f, 0x70, 0x5f, 0x70, - 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x48, 0x02, 0x52, - 0x0c, 0x73, 0x74, 0x6f, 0x70, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x88, 0x01, 0x01, - 0x42, 0x08, 0x0a, 0x06, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x42, 0x0a, 0x0a, 0x08, 0x5f, 0x6d, - 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, 0x10, 0x0a, 0x0e, 0x5f, 0x73, 0x74, 0x6f, 0x70, 0x5f, - 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x51, 0x0a, 0x23, 0x55, 0x70, 0x64, 0x61, - 0x74, 0x65, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x6f, - 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, - 0x2a, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x12, 0x2e, 0x71, 0x75, 
0x65, 0x72, 0x79, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, - 0x75, 0x6c, 0x74, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x22, 0x2f, 0x0a, 0x15, 0x52, - 0x65, 0x73, 0x65, 0x74, 0x53, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63, 0x65, 0x73, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x18, 0x01, - 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x22, 0x18, 0x0a, 0x16, - 0x52, 0x65, 0x73, 0x65, 0x74, 0x53, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63, 0x65, 0x73, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xdd, 0x01, 0x0a, 0x15, 0x43, 0x68, 0x65, 0x63, 0x6b, - 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x12, 0x19, 0x0a, 0x08, 0x61, 0x70, 0x70, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x07, 0x61, 0x70, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, - 0x63, 0x6f, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x63, 0x6f, 0x70, - 0x65, 0x12, 0x36, 0x0a, 0x17, 0x73, 0x6b, 0x69, 0x70, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x5f, 0x68, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x08, 0x52, 0x15, 0x73, 0x6b, 0x69, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, - 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x73, 0x12, 0x27, 0x0a, 0x10, 0x6f, 0x6b, 0x5f, - 0x69, 0x66, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x65, 0x78, 0x69, 0x73, 0x74, 0x73, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x08, 0x52, 0x0d, 0x6f, 0x6b, 0x49, 0x66, 0x4e, 0x6f, 0x74, 0x45, 0x78, 0x69, 0x73, - 0x74, 0x73, 0x12, 0x32, 0x0a, 0x15, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x5f, 0x6d, 0x65, 0x74, 0x72, - 0x69, 0x63, 0x73, 0x5f, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, - 0x08, 0x52, 0x13, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x45, - 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x22, 0x9f, 0x06, 0x0a, 0x16, 0x43, 0x68, 0x65, 0x63, 0x6b, - 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x63, 0x6f, 0x64, 0x65, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, - 0x64, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x68, 0x72, 0x65, - 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x01, 0x52, 0x09, 0x74, 0x68, 0x72, - 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x18, 0x0a, 0x07, - 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, - 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x72, 0x65, 0x63, 0x65, 0x6e, 0x74, - 0x6c, 0x79, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, - 0x52, 0x0f, 0x72, 0x65, 0x63, 0x65, 0x6e, 0x74, 0x6c, 0x79, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x65, - 0x64, 0x12, 0x50, 0x0a, 0x07, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x18, 0x07, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, - 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x54, 0x68, 0x72, 0x6f, - 0x74, 0x74, 0x6c, 0x65, 0x72, 0x52, 0x65, 0x73, 
0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x4d, 0x65, - 0x74, 0x72, 0x69, 0x63, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x6d, 0x65, 0x74, 0x72, - 0x69, 0x63, 0x73, 0x12, 0x19, 0x0a, 0x08, 0x61, 0x70, 0x70, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, - 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x70, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x18, - 0x0a, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x52, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0e, 0x32, - 0x2d, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, - 0x61, 0x74, 0x61, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, - 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x52, 0x0c, - 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x1a, 0x8b, 0x02, 0x0a, - 0x06, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x73, - 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, - 0x52, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x14, 0x0a, 0x05, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x01, 0x52, 0x09, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, - 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, - 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, - 0x12, 0x14, 0x0a, 0x05, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x05, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x12, 0x52, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2d, 0x2e, - 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, - 0x61, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x72, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x52, 0x0c, 0x72, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x1a, 0x6c, 0x0a, 0x0c, 0x4d, 0x65, - 0x74, 0x72, 0x69, 0x63, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, - 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x46, 0x0a, 0x05, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x74, 0x61, + 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x77, 0x6f, 0x72, 0x6b, + 0x66, 0x6c, 0x6f, 0x77, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x77, 0x6f, 0x72, 0x6b, + 0x66, 0x6c, 0x6f, 0x77, 0x22, 0xe7, 0x0a, 0x0a, 0x20, 0x52, 0x65, 0x61, 0x64, 0x56, 0x52, 0x65, + 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, + 0x77, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x77, 0x6f, 0x72, + 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 
0x08, 0x77, 0x6f, 0x72, + 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x12, 0x37, 0x0a, 0x0c, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, + 0x0e, 0x32, 0x14, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, + 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, + 0x79, 0x70, 0x65, 0x73, 0x12, 0x6c, 0x0a, 0x1b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x73, + 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, + 0x6e, 0x63, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2c, 0x2e, 0x74, 0x61, 0x62, 0x6c, + 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, + 0x62, 0x6c, 0x65, 0x74, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x65, + 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x19, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x53, + 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, + 0x63, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x64, 0x62, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x62, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, + 0x61, 0x67, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x61, 0x67, 0x73, 0x12, + 0x49, 0x0a, 0x0d, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x74, 0x79, 0x70, 0x65, + 0x18, 0x08, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, + 0x61, 0x74, 0x61, 0x2e, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0c, 0x77, 0x6f, + 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x54, 0x79, 0x70, 0x65, 0x12, 0x53, 0x0a, 0x11, 0x77, 0x6f, + 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x73, 0x75, 0x62, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, + 0x09, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x27, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, + 0x74, 0x61, 0x2e, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, + 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x75, 0x62, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0f, + 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x75, 0x62, 0x54, 0x79, 0x70, 0x65, 0x12, + 0x30, 0x0a, 0x14, 0x64, 0x65, 0x66, 0x65, 0x72, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x61, + 0x72, 0x79, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x64, + 0x65, 0x66, 0x65, 0x72, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x61, 0x72, 0x79, 0x4b, 0x65, 0x79, + 0x73, 0x12, 0x54, 0x0a, 0x07, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x18, 0x0b, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, + 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x56, 0x52, 0x65, 0x70, 0x6c, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x07, + 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x12, 0x73, 0x0a, 0x10, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x6f, 0x76, 0x65, 0x72, + 
0x72, 0x69, 0x64, 0x65, 0x73, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x48, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, - 0x43, 0x68, 0x65, 0x63, 0x6b, 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x72, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x52, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x1b, 0x0a, 0x19, 0x47, 0x65, 0x74, 0x54, - 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0xb6, 0x10, 0x0a, 0x1a, 0x47, 0x65, 0x74, 0x54, 0x68, 0x72, - 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, - 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, - 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, - 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, - 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x69, 0x73, 0x5f, - 0x6c, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x69, 0x73, - 0x4c, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x17, 0x0a, 0x07, 0x69, 0x73, 0x5f, 0x6f, 0x70, 0x65, - 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x69, 0x73, 0x4f, 0x70, 0x65, 0x6e, 0x12, - 0x1d, 0x0a, 0x0a, 0x69, 0x73, 0x5f, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x06, 0x20, - 0x01, 0x28, 0x08, 0x52, 0x09, 0x69, 0x73, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x1d, - 0x0a, 0x0a, 0x69, 0x73, 0x5f, 0x64, 0x6f, 0x72, 0x6d, 0x61, 0x6e, 0x74, 0x18, 0x07, 0x20, 0x01, - 0x28, 0x08, 0x52, 0x09, 0x69, 0x73, 0x44, 0x6f, 0x72, 0x6d, 0x61, 0x6e, 0x74, 0x12, 0x28, 0x0a, - 0x10, 0x6c, 0x61, 0x67, 0x5f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x5f, 0x71, 0x75, 0x65, 0x72, - 0x79, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x6c, 0x61, 0x67, 0x4d, 0x65, 0x74, 0x72, - 0x69, 0x63, 0x51, 0x75, 0x65, 0x72, 0x79, 0x12, 0x2e, 0x0a, 0x13, 0x63, 0x75, 0x73, 0x74, 0x6f, - 0x6d, 0x5f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x5f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x09, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x4d, 0x65, 0x74, 0x72, - 0x69, 0x63, 0x51, 0x75, 0x65, 0x72, 0x79, 0x12, 0x2b, 0x0a, 0x11, 0x64, 0x65, 0x66, 0x61, 0x75, - 0x6c, 0x74, 0x5f, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x0a, 0x20, 0x01, - 0x28, 0x01, 0x52, 0x10, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x54, 0x68, 0x72, 0x65, 0x73, - 0x68, 0x6f, 0x6c, 0x64, 0x12, 0x3c, 0x0a, 0x1b, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x5f, 0x6e, - 0x61, 0x6d, 0x65, 0x5f, 0x75, 0x73, 0x65, 0x64, 0x5f, 0x61, 0x73, 0x5f, 0x64, 0x65, 0x66, 0x61, - 0x75, 0x6c, 0x74, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x17, 0x6d, 0x65, 0x74, 0x72, 0x69, - 0x63, 0x4e, 0x61, 0x6d, 0x65, 0x55, 0x73, 0x65, 0x64, 0x41, 0x73, 0x44, 0x65, 0x66, 0x61, 0x75, - 0x6c, 0x74, 0x12, 0x73, 0x0a, 0x12, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x64, - 0x5f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x44, + 0x52, 0x65, 0x61, 0x64, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x57, 0x6f, 0x72, 0x6b, 
0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x73, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x4f, 0x76, 0x65, + 0x72, 0x72, 0x69, 0x64, 0x65, 0x73, 0x1a, 0xc1, 0x04, 0x0a, 0x06, 0x53, 0x74, 0x72, 0x65, 0x61, + 0x6d, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x02, 0x69, + 0x64, 0x12, 0x2a, 0x0a, 0x03, 0x62, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, + 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x42, 0x69, 0x6e, 0x6c, + 0x6f, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x03, 0x62, 0x6c, 0x73, 0x12, 0x10, 0x0a, + 0x03, 0x70, 0x6f, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x70, 0x6f, 0x73, 0x12, + 0x19, 0x0a, 0x08, 0x73, 0x74, 0x6f, 0x70, 0x5f, 0x70, 0x6f, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x07, 0x73, 0x74, 0x6f, 0x70, 0x50, 0x6f, 0x73, 0x12, 0x17, 0x0a, 0x07, 0x6d, 0x61, + 0x78, 0x5f, 0x74, 0x70, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x6d, 0x61, 0x78, + 0x54, 0x70, 0x73, 0x12, 0x2e, 0x0a, 0x13, 0x6d, 0x61, 0x78, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6c, 0x61, 0x67, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, + 0x52, 0x11, 0x6d, 0x61, 0x78, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x4c, 0x61, 0x67, 0x12, 0x2f, 0x0a, 0x0c, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x75, 0x70, 0x64, 0x61, + 0x74, 0x65, 0x64, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x74, 0x74, 0x69, + 0x6d, 0x65, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x52, 0x0b, 0x74, 0x69, 0x6d, 0x65, 0x55, 0x70, 0x64, + 0x61, 0x74, 0x65, 0x64, 0x12, 0x41, 0x0a, 0x15, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x08, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x54, 0x69, 0x6d, + 0x65, 0x52, 0x14, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x69, + 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x3b, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, + 0x18, 0x09, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x25, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, + 0x61, 0x74, 0x61, 0x2e, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x05, 0x73, + 0x74, 0x61, 0x74, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, + 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x1f, + 0x0a, 0x0b, 0x72, 0x6f, 0x77, 0x73, 0x5f, 0x63, 0x6f, 0x70, 0x69, 0x65, 0x64, 0x18, 0x0b, 0x20, + 0x01, 0x28, 0x03, 0x52, 0x0a, 0x72, 0x6f, 0x77, 0x73, 0x43, 0x6f, 0x70, 0x69, 0x65, 0x64, 0x12, + 0x33, 0x0a, 0x0e, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x68, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, + 0x74, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, + 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x52, 0x0d, 0x74, 0x69, 0x6d, 0x65, 0x48, 0x65, 0x61, 0x72, 0x74, + 0x62, 0x65, 0x61, 0x74, 0x12, 0x33, 0x0a, 0x0e, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x74, 0x68, 0x72, + 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x64, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, + 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x52, 0x0d, 0x74, 0x69, 0x6d, 0x65, + 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 
0x64, 0x12, 0x2f, 0x0a, 0x13, 0x63, 0x6f, 0x6d, + 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x64, + 0x18, 0x0e, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, + 0x74, 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x64, 0x1a, 0x42, 0x0a, 0x14, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x73, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x28, + 0x0a, 0x26, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, + 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x4d, 0x0a, 0x27, 0x56, 0x61, 0x6c, 0x69, + 0x64, 0x61, 0x74, 0x65, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x73, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x75, 0x73, 0x65, 0x72, 0x12, 0x0e, 0x0a, 0x02, 0x6f, 0x6b, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x02, 0x6f, 0x6b, 0x22, 0xd7, 0x01, 0x0a, 0x0c, 0x56, 0x44, 0x69, 0x66, + 0x66, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, + 0x12, 0x16, 0x0a, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1d, 0x0a, 0x0a, 0x61, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x61, 0x72, 0x67, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x61, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x72, 0x67, 0x12, 0x1d, 0x0a, 0x0a, 0x76, 0x64, 0x69, 0x66, 0x66, + 0x5f, 0x75, 0x75, 0x69, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x76, 0x64, 0x69, + 0x66, 0x66, 0x55, 0x75, 0x69, 0x64, 0x12, 0x39, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, + 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x44, 0x69, 0x66, + 0x66, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x22, 0x6a, 0x0a, 0x0d, 0x56, 0x44, 0x69, 0x66, 0x66, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x02, + 0x69, 0x64, 0x12, 0x2a, 0x0a, 0x06, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, + 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x12, 0x1d, + 0x0a, 0x0a, 0x76, 0x64, 0x69, 0x66, 0x66, 0x5f, 0x75, 0x75, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x09, 0x76, 0x64, 0x69, 0x66, 0x66, 0x55, 0x75, 0x69, 0x64, 0x22, 0x79, 0x0a, + 0x12, 0x56, 0x44, 0x69, 0x66, 0x66, 0x50, 0x69, 0x63, 0x6b, 0x65, 0x72, 
0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x74, 0x79, + 0x70, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, + 0x74, 0x54, 0x79, 0x70, 0x65, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x5f, 0x63, 0x65, 0x6c, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x61, 0x72, 0x67, 0x65, + 0x74, 0x5f, 0x63, 0x65, 0x6c, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x74, 0x61, + 0x72, 0x67, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x22, 0xce, 0x01, 0x0a, 0x12, 0x56, 0x44, 0x69, + 0x66, 0x66, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, + 0x19, 0x0a, 0x08, 0x6f, 0x6e, 0x6c, 0x79, 0x5f, 0x70, 0x6b, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x07, 0x6f, 0x6e, 0x6c, 0x79, 0x50, 0x6b, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x64, 0x65, + 0x62, 0x75, 0x67, 0x5f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x0a, 0x64, 0x65, 0x62, 0x75, 0x67, 0x51, 0x75, 0x65, 0x72, 0x79, 0x12, 0x16, 0x0a, 0x06, 0x66, + 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x6f, 0x72, + 0x6d, 0x61, 0x74, 0x12, 0x26, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x5f, 0x73, 0x61, 0x6d, 0x70, 0x6c, + 0x65, 0x5f, 0x72, 0x6f, 0x77, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d, 0x6d, 0x61, + 0x78, 0x53, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x52, 0x6f, 0x77, 0x73, 0x12, 0x3c, 0x0a, 0x1b, 0x72, + 0x6f, 0x77, 0x5f, 0x64, 0x69, 0x66, 0x66, 0x5f, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x5f, 0x74, + 0x72, 0x75, 0x6e, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x61, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, + 0x52, 0x17, 0x72, 0x6f, 0x77, 0x44, 0x69, 0x66, 0x66, 0x43, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x54, + 0x72, 0x75, 0x6e, 0x63, 0x61, 0x74, 0x65, 0x41, 0x74, 0x22, 0x8d, 0x03, 0x0a, 0x10, 0x56, 0x44, + 0x69, 0x66, 0x66, 0x43, 0x6f, 0x72, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x16, + 0x0a, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x61, 0x75, 0x74, 0x6f, 0x5f, 0x72, + 0x65, 0x74, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x61, 0x75, 0x74, 0x6f, + 0x52, 0x65, 0x74, 0x72, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x6d, 0x61, 0x78, 0x5f, 0x72, 0x6f, 0x77, + 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x6d, 0x61, 0x78, 0x52, 0x6f, 0x77, 0x73, + 0x12, 0x1a, 0x0a, 0x08, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x08, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x12, 0x1d, 0x0a, 0x0a, + 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x5f, 0x70, 0x63, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, + 0x52, 0x09, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x50, 0x63, 0x74, 0x12, 0x27, 0x0a, 0x0f, 0x74, + 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x06, + 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x53, 0x65, 0x63, + 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x38, 0x0a, 0x19, 0x6d, 0x61, 0x78, 0x5f, 0x65, 0x78, 0x74, 0x72, + 0x61, 0x5f, 0x72, 0x6f, 0x77, 0x73, 0x5f, 0x74, 0x6f, 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x72, + 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x52, 0x15, 0x6d, 0x61, 0x78, 0x45, 0x78, 0x74, 0x72, + 0x61, 0x52, 0x6f, 0x77, 0x73, 0x54, 0x6f, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x65, 0x12, 0x2c, + 
0x0a, 0x12, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x73, + 0x74, 0x61, 0x74, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x75, 0x70, 0x64, 0x61, + 0x74, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x74, 0x61, 0x74, 0x73, 0x12, 0x28, 0x0a, 0x10, + 0x6d, 0x61, 0x78, 0x5f, 0x64, 0x69, 0x66, 0x66, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, + 0x18, 0x09, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x6d, 0x61, 0x78, 0x44, 0x69, 0x66, 0x66, 0x53, + 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x22, 0x0a, 0x0a, 0x61, 0x75, 0x74, 0x6f, 0x5f, 0x73, + 0x74, 0x61, 0x72, 0x74, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x09, 0x61, 0x75, + 0x74, 0x6f, 0x53, 0x74, 0x61, 0x72, 0x74, 0x88, 0x01, 0x01, 0x42, 0x0d, 0x0a, 0x0b, 0x5f, 0x61, + 0x75, 0x74, 0x6f, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x22, 0xf2, 0x01, 0x0a, 0x0c, 0x56, 0x44, + 0x69, 0x66, 0x66, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x4c, 0x0a, 0x0e, 0x70, 0x69, + 0x63, 0x6b, 0x65, 0x72, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, + 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x44, 0x69, 0x66, 0x66, 0x50, 0x69, 0x63, 0x6b, + 0x65, 0x72, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x0d, 0x70, 0x69, 0x63, 0x6b, 0x65, + 0x72, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x46, 0x0a, 0x0c, 0x63, 0x6f, 0x72, 0x65, + 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, - 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x72, 0x53, - 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x41, 0x67, - 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x45, - 0x6e, 0x74, 0x72, 0x79, 0x52, 0x11, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x64, - 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x12, 0x70, 0x0a, 0x11, 0x6d, 0x65, 0x74, 0x72, 0x69, - 0x63, 0x5f, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x73, 0x18, 0x0d, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x43, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, + 0x74, 0x61, 0x2e, 0x56, 0x44, 0x69, 0x66, 0x66, 0x43, 0x6f, 0x72, 0x65, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x52, 0x0b, 0x63, 0x6f, 0x72, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x12, 0x4c, 0x0a, 0x0e, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, + 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x44, 0x69, + 0x66, 0x66, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, + 0x0d, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xed, + 0x04, 0x0a, 0x21, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, + 0x12, 0x14, 0x0a, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, + 0x05, 0x63, 0x65, 0x6c, 
0x6c, 0x73, 0x12, 0x37, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, + 0x5f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x14, 0x2e, 0x74, + 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, + 0x70, 0x65, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x73, 0x12, + 0x71, 0x0a, 0x1b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2c, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, + 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x53, + 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, + 0x63, 0x65, 0x48, 0x00, 0x52, 0x19, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x53, 0x65, 0x6c, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x88, + 0x01, 0x01, 0x12, 0x33, 0x0a, 0x06, 0x6f, 0x6e, 0x5f, 0x64, 0x64, 0x6c, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x0e, 0x32, 0x17, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, + 0x4f, 0x6e, 0x44, 0x44, 0x4c, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x01, 0x52, 0x05, 0x6f, + 0x6e, 0x44, 0x64, 0x6c, 0x88, 0x01, 0x01, 0x12, 0x40, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x25, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, + 0x61, 0x74, 0x61, 0x2e, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x61, 0x74, 0x65, 0x48, 0x02, 0x52, + 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x88, 0x01, 0x01, 0x12, 0x74, 0x0a, 0x10, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x5f, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x73, 0x18, 0x08, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x49, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, + 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x56, 0x52, + 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, + 0x6f, 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0f, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x73, 0x1a, + 0x42, 0x0a, 0x14, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, + 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, + 0x02, 0x38, 0x01, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x73, + 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, + 0x6e, 0x63, 0x65, 0x42, 0x09, 0x0a, 0x07, 0x5f, 0x6f, 0x6e, 0x5f, 0x64, 0x64, 0x6c, 0x42, 0x08, + 0x0a, 0x06, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x4a, 0x04, 0x08, 0x07, 0x10, 0x08, 0x22, 0x50, + 0x0a, 0x22, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x06, 
0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x51, 0x75, 0x65, + 0x72, 0x79, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, + 0x22, 0xd6, 0x02, 0x0a, 0x22, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x56, 0x52, 0x65, 0x70, 0x6c, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x23, 0x0a, 0x0d, 0x61, 0x6c, 0x6c, 0x5f, 0x77, + 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, + 0x61, 0x6c, 0x6c, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x12, 0x2b, 0x0a, 0x11, + 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, + 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x10, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, + 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x65, 0x78, 0x63, + 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x18, 0x03, + 0x20, 0x03, 0x28, 0x09, 0x52, 0x10, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x57, 0x6f, 0x72, + 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x12, 0x40, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x25, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, + 0x74, 0x61, 0x2e, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, + 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x61, 0x74, 0x65, 0x48, 0x00, 0x52, 0x05, + 0x73, 0x74, 0x61, 0x74, 0x65, 0x88, 0x01, 0x01, 0x12, 0x1d, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, + 0x61, 0x67, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x48, 0x01, 0x52, 0x07, 0x6d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x88, 0x01, 0x01, 0x12, 0x28, 0x0a, 0x0d, 0x73, 0x74, 0x6f, 0x70, 0x5f, + 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x48, 0x02, + 0x52, 0x0c, 0x73, 0x74, 0x6f, 0x70, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x88, 0x01, + 0x01, 0x42, 0x08, 0x0a, 0x06, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x42, 0x0a, 0x0a, 0x08, 0x5f, + 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, 0x10, 0x0a, 0x0e, 0x5f, 0x73, 0x74, 0x6f, 0x70, + 0x5f, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x51, 0x0a, 0x23, 0x55, 0x70, 0x64, + 0x61, 0x74, 0x65, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, + 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x2a, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x12, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, + 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x22, 0x2f, 0x0a, 0x15, + 0x52, 0x65, 0x73, 0x65, 0x74, 0x53, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63, 0x65, 0x73, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x22, 0x18, 0x0a, + 0x16, 0x52, 0x65, 0x73, 0x65, 0x74, 0x53, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63, 0x65, 0x73, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xdd, 0x01, 0x0a, 0x15, 0x43, 0x68, 0x65, 0x63, + 0x6b, 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x19, 0x0a, 0x08, 0x61, 0x70, 0x70, 0x5f, 0x6e, 0x61, 0x6d, 
0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x70, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, + 0x73, 0x63, 0x6f, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x63, 0x6f, + 0x70, 0x65, 0x12, 0x36, 0x0a, 0x17, 0x73, 0x6b, 0x69, 0x70, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x5f, 0x68, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x73, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x15, 0x73, 0x6b, 0x69, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x48, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x73, 0x12, 0x27, 0x0a, 0x10, 0x6f, 0x6b, + 0x5f, 0x69, 0x66, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x65, 0x78, 0x69, 0x73, 0x74, 0x73, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x6f, 0x6b, 0x49, 0x66, 0x4e, 0x6f, 0x74, 0x45, 0x78, 0x69, + 0x73, 0x74, 0x73, 0x12, 0x32, 0x0a, 0x15, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x5f, 0x6d, 0x65, 0x74, + 0x72, 0x69, 0x63, 0x73, 0x5f, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x13, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, + 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x22, 0x9f, 0x06, 0x0a, 0x16, 0x43, 0x68, 0x65, 0x63, + 0x6b, 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x63, 0x6f, 0x64, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, + 0x6f, 0x64, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x68, 0x72, + 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x01, 0x52, 0x09, 0x74, 0x68, + 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x18, 0x0a, + 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, + 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x72, 0x65, 0x63, 0x65, 0x6e, + 0x74, 0x6c, 0x79, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x0f, 0x72, 0x65, 0x63, 0x65, 0x6e, 0x74, 0x6c, 0x79, 0x43, 0x68, 0x65, 0x63, 0x6b, + 0x65, 0x64, 0x12, 0x50, 0x0a, 0x07, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x18, 0x07, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, + 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x54, 0x68, 0x72, + 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x4d, + 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x6d, 0x65, 0x74, + 0x72, 0x69, 0x63, 0x73, 0x12, 0x19, 0x0a, 0x08, 0x61, 0x70, 0x70, 0x5f, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x70, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x12, + 0x18, 0x0a, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x52, 0x0a, 0x0d, 0x72, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0e, + 0x32, 0x2d, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, + 
0x6c, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x52, + 0x0c, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x1a, 0x8b, 0x02, + 0x0a, 0x06, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1f, 0x0a, 0x0b, + 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x05, 0x52, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x14, 0x0a, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x01, 0x52, 0x09, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, + 0x64, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x05, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x12, 0x52, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2d, + 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, + 0x74, 0x61, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, + 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x52, 0x0c, 0x72, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x1a, 0x6c, 0x0a, 0x0c, 0x4d, + 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, + 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x46, 0x0a, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x72, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x52, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x1b, 0x0a, 0x19, 0x47, 0x65, 0x74, + 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0xb6, 0x10, 0x0a, 0x1a, 0x47, 0x65, 0x74, 0x54, 0x68, + 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, + 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x69, 0x73, + 0x5f, 0x6c, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x69, + 0x73, 0x4c, 0x65, 0x61, 
0x64, 0x65, 0x72, 0x12, 0x17, 0x0a, 0x07, 0x69, 0x73, 0x5f, 0x6f, 0x70, + 0x65, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x69, 0x73, 0x4f, 0x70, 0x65, 0x6e, + 0x12, 0x1d, 0x0a, 0x0a, 0x69, 0x73, 0x5f, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x06, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x69, 0x73, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, + 0x1d, 0x0a, 0x0a, 0x69, 0x73, 0x5f, 0x64, 0x6f, 0x72, 0x6d, 0x61, 0x6e, 0x74, 0x18, 0x07, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x09, 0x69, 0x73, 0x44, 0x6f, 0x72, 0x6d, 0x61, 0x6e, 0x74, 0x12, 0x28, + 0x0a, 0x10, 0x6c, 0x61, 0x67, 0x5f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x5f, 0x71, 0x75, 0x65, + 0x72, 0x79, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x6c, 0x61, 0x67, 0x4d, 0x65, 0x74, + 0x72, 0x69, 0x63, 0x51, 0x75, 0x65, 0x72, 0x79, 0x12, 0x2e, 0x0a, 0x13, 0x63, 0x75, 0x73, 0x74, + 0x6f, 0x6d, 0x5f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x5f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, + 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x4d, 0x65, 0x74, + 0x72, 0x69, 0x63, 0x51, 0x75, 0x65, 0x72, 0x79, 0x12, 0x2b, 0x0a, 0x11, 0x64, 0x65, 0x66, 0x61, + 0x75, 0x6c, 0x74, 0x5f, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x0a, 0x20, + 0x01, 0x28, 0x01, 0x52, 0x10, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x54, 0x68, 0x72, 0x65, + 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x12, 0x3c, 0x0a, 0x1b, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x5f, + 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x75, 0x73, 0x65, 0x64, 0x5f, 0x61, 0x73, 0x5f, 0x64, 0x65, 0x66, + 0x61, 0x75, 0x6c, 0x74, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x17, 0x6d, 0x65, 0x74, 0x72, + 0x69, 0x63, 0x4e, 0x61, 0x6d, 0x65, 0x55, 0x73, 0x65, 0x64, 0x41, 0x73, 0x44, 0x65, 0x66, 0x61, + 0x75, 0x6c, 0x74, 0x12, 0x73, 0x0a, 0x12, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, + 0x64, 0x5f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x44, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, + 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x72, + 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x41, + 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x11, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, + 0x64, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x12, 0x70, 0x0a, 0x11, 0x6d, 0x65, 0x74, 0x72, + 0x69, 0x63, 0x5f, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x73, 0x18, 0x0d, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x43, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, + 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x68, 0x72, 0x6f, 0x74, + 0x74, 0x6c, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, + 0x6c, 0x64, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x10, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, + 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x73, 0x12, 0x67, 0x0a, 0x0e, 0x6d, 0x65, + 0x74, 0x72, 0x69, 0x63, 0x73, 0x5f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x18, 0x0e, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 
0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, - 0x64, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x10, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x54, - 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x73, 0x12, 0x67, 0x0a, 0x0e, 0x6d, 0x65, 0x74, - 0x72, 0x69, 0x63, 0x73, 0x5f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x18, 0x0e, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x40, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, - 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, - 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x45, 0x6e, - 0x74, 0x72, 0x79, 0x52, 0x0d, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x48, 0x65, 0x61, 0x6c, - 0x74, 0x68, 0x12, 0x67, 0x0a, 0x0e, 0x74, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x64, 0x5f, - 0x61, 0x70, 0x70, 0x73, 0x18, 0x0f, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, - 0x65, 0x74, 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, - 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, - 0x6c, 0x65, 0x64, 0x41, 0x70, 0x70, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0d, 0x74, 0x68, - 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x64, 0x41, 0x70, 0x70, 0x73, 0x12, 0x74, 0x0a, 0x13, 0x61, - 0x70, 0x70, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x64, 0x5f, 0x6d, 0x65, 0x74, 0x72, 0x69, - 0x63, 0x73, 0x18, 0x10, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x44, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, - 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, - 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x41, 0x70, 0x70, 0x43, 0x68, 0x65, 0x63, 0x6b, - 0x65, 0x64, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x11, - 0x61, 0x70, 0x70, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x64, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, - 0x73, 0x12, 0x29, 0x0a, 0x10, 0x72, 0x65, 0x63, 0x65, 0x6e, 0x74, 0x6c, 0x79, 0x5f, 0x63, 0x68, - 0x65, 0x63, 0x6b, 0x65, 0x64, 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x72, 0x65, 0x63, - 0x65, 0x6e, 0x74, 0x6c, 0x79, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x64, 0x12, 0x5e, 0x0a, 0x0b, - 0x72, 0x65, 0x63, 0x65, 0x6e, 0x74, 0x5f, 0x61, 0x70, 0x70, 0x73, 0x18, 0x12, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x3d, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, - 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, - 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x2e, 0x52, 0x65, 0x63, 0x65, 0x6e, 0x74, 0x41, 0x70, 0x70, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, - 0x52, 0x0a, 0x72, 0x65, 0x63, 0x65, 0x6e, 0x74, 0x41, 0x70, 0x70, 0x73, 0x1a, 0x3a, 0x0a, 0x0c, - 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x14, 0x0a, 0x05, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x1a, 0x80, 0x01, 0x0a, 0x16, 
0x41, 0x67, 0x67, - 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x45, 0x6e, - 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x50, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, - 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x68, 0x72, 0x6f, - 0x74, 0x74, 0x6c, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, - 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x43, 0x0a, 0x15, 0x4d, - 0x65, 0x74, 0x72, 0x69, 0x63, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x73, 0x45, - 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, - 0x1a, 0x81, 0x01, 0x0a, 0x0c, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x48, 0x65, 0x61, 0x6c, 0x74, - 0x68, 0x12, 0x34, 0x0a, 0x0f, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, - 0x79, 0x5f, 0x61, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x74, 0x74, - 0x69, 0x6d, 0x65, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x52, 0x0d, 0x6c, 0x61, 0x73, 0x74, 0x48, 0x65, - 0x61, 0x6c, 0x74, 0x68, 0x79, 0x41, 0x74, 0x12, 0x3b, 0x0a, 0x1a, 0x73, 0x65, 0x63, 0x6f, 0x6e, - 0x64, 0x73, 0x5f, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x5f, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x68, 0x65, - 0x61, 0x6c, 0x74, 0x68, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x17, 0x73, 0x65, 0x63, - 0x6f, 0x6e, 0x64, 0x73, 0x53, 0x69, 0x6e, 0x63, 0x65, 0x4c, 0x61, 0x73, 0x74, 0x48, 0x65, 0x61, - 0x6c, 0x74, 0x68, 0x79, 0x1a, 0x7c, 0x0a, 0x12, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x48, - 0x65, 0x61, 0x6c, 0x74, 0x68, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, - 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x50, 0x0a, 0x05, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x74, 0x61, + 0x65, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0d, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x48, 0x65, 0x61, + 0x6c, 0x74, 0x68, 0x12, 0x67, 0x0a, 0x0e, 0x74, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x64, + 0x5f, 0x61, 0x70, 0x70, 0x73, 0x18, 0x0f, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, - 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, - 0x63, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, - 0x38, 0x01, 0x1a, 0x5c, 0x0a, 0x12, 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x64, 0x41, - 0x70, 0x70, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x30, 0x0a, 0x05, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x74, 0x6f, 0x70, 0x6f, - 0x64, 
0x61, 0x74, 0x61, 0x2e, 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x64, 0x41, 0x70, - 0x70, 0x52, 0x75, 0x6c, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, - 0x1a, 0x44, 0x0a, 0x16, 0x41, 0x70, 0x70, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x64, 0x4d, 0x65, - 0x74, 0x72, 0x69, 0x63, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, - 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0xad, 0x01, 0x0a, 0x09, 0x52, 0x65, 0x63, 0x65, 0x6e, - 0x74, 0x41, 0x70, 0x70, 0x12, 0x2b, 0x0a, 0x0a, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x64, 0x5f, - 0x61, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, - 0x65, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x52, 0x09, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x64, 0x41, - 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x63, 0x6f, 0x64, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, - 0x64, 0x65, 0x12, 0x52, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x63, - 0x6f, 0x64, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2d, 0x2e, 0x74, 0x61, 0x62, 0x6c, - 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x68, - 0x65, 0x63, 0x6b, 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x1a, 0x76, 0x0a, 0x0f, 0x52, 0x65, 0x63, 0x65, 0x6e, 0x74, - 0x41, 0x70, 0x70, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x4d, 0x0a, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x37, 0x2e, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, - 0x65, 0x74, 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, - 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x65, 0x63, 0x65, 0x6e, 0x74, - 0x41, 0x70, 0x70, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xaa, - 0x01, 0x0a, 0x11, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x54, 0x61, 0x67, 0x73, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x12, 0x42, 0x0a, 0x04, 0x74, 0x61, 0x67, 0x73, 0x18, 0x01, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, - 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x54, 0x61, 0x67, - 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x54, 0x61, 0x67, 0x73, 0x45, 0x6e, 0x74, - 0x72, 0x79, 0x52, 0x04, 0x74, 0x61, 0x67, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x72, 0x65, 0x70, 0x6c, - 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x72, 0x65, 0x70, 0x6c, 0x61, - 0x63, 0x65, 0x1a, 0x37, 0x0a, 0x09, 0x54, 0x61, 0x67, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, - 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, - 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x92, 0x01, 0x0a, 0x12, - 0x43, 0x68, 0x61, 0x6e, 0x67, 
0x65, 0x54, 0x61, 0x67, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x43, 0x0a, 0x04, 0x74, 0x61, 0x67, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x2f, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, - 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x54, 0x61, 0x67, 0x73, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x54, 0x61, 0x67, 0x73, 0x45, 0x6e, 0x74, 0x72, - 0x79, 0x52, 0x04, 0x74, 0x61, 0x67, 0x73, 0x1a, 0x37, 0x0a, 0x09, 0x54, 0x61, 0x67, 0x73, 0x45, + 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x54, 0x68, 0x72, 0x6f, 0x74, + 0x74, 0x6c, 0x65, 0x64, 0x41, 0x70, 0x70, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0d, 0x74, + 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x64, 0x41, 0x70, 0x70, 0x73, 0x12, 0x74, 0x0a, 0x13, + 0x61, 0x70, 0x70, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x64, 0x5f, 0x6d, 0x65, 0x74, 0x72, + 0x69, 0x63, 0x73, 0x18, 0x10, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x44, 0x2e, 0x74, 0x61, 0x62, 0x6c, + 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, + 0x74, 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x41, 0x70, 0x70, 0x43, 0x68, 0x65, 0x63, + 0x6b, 0x65, 0x64, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, + 0x11, 0x61, 0x70, 0x70, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x64, 0x4d, 0x65, 0x74, 0x72, 0x69, + 0x63, 0x73, 0x12, 0x29, 0x0a, 0x10, 0x72, 0x65, 0x63, 0x65, 0x6e, 0x74, 0x6c, 0x79, 0x5f, 0x63, + 0x68, 0x65, 0x63, 0x6b, 0x65, 0x64, 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x72, 0x65, + 0x63, 0x65, 0x6e, 0x74, 0x6c, 0x79, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x64, 0x12, 0x5e, 0x0a, + 0x0b, 0x72, 0x65, 0x63, 0x65, 0x6e, 0x74, 0x5f, 0x61, 0x70, 0x70, 0x73, 0x18, 0x12, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x3d, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, + 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, + 0x6c, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x2e, 0x52, 0x65, 0x63, 0x65, 0x6e, 0x74, 0x41, 0x70, 0x70, 0x73, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x52, 0x0a, 0x72, 0x65, 0x63, 0x65, 0x6e, 0x74, 0x41, 0x70, 0x70, 0x73, 0x1a, 0x3a, 0x0a, + 0x0c, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x14, 0x0a, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x1a, 0x80, 0x01, 0x0a, 0x16, 0x41, 0x67, + 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, - 0x2a, 0x3e, 0x0a, 0x19, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, - 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x07, 0x0a, - 0x03, 0x41, 0x4e, 0x59, 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x49, 0x4e, 0x4f, 0x52, 0x44, 0x45, - 0x52, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 
0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x03, - 0x2a, 0x83, 0x01, 0x0a, 0x1a, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, - 0x6c, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x12, - 0x0d, 0x0a, 0x09, 0x55, 0x4e, 0x44, 0x45, 0x46, 0x49, 0x4e, 0x45, 0x44, 0x10, 0x00, 0x12, 0x06, - 0x0a, 0x02, 0x4f, 0x4b, 0x10, 0x01, 0x12, 0x16, 0x0a, 0x12, 0x54, 0x48, 0x52, 0x45, 0x53, 0x48, - 0x4f, 0x4c, 0x44, 0x5f, 0x45, 0x58, 0x43, 0x45, 0x45, 0x44, 0x45, 0x44, 0x10, 0x02, 0x12, 0x0e, - 0x0a, 0x0a, 0x41, 0x50, 0x50, 0x5f, 0x44, 0x45, 0x4e, 0x49, 0x45, 0x44, 0x10, 0x03, 0x12, 0x12, - 0x0a, 0x0e, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x5f, 0x4d, 0x45, 0x54, 0x52, 0x49, 0x43, - 0x10, 0x04, 0x12, 0x12, 0x0a, 0x0e, 0x49, 0x4e, 0x54, 0x45, 0x52, 0x4e, 0x41, 0x4c, 0x5f, 0x45, - 0x52, 0x52, 0x4f, 0x52, 0x10, 0x05, 0x42, 0x30, 0x5a, 0x2e, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, - 0x2e, 0x69, 0x6f, 0x2f, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x76, 0x74, - 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, - 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x50, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, + 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x68, 0x72, + 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x52, 0x65, 0x73, 0x75, 0x6c, + 0x74, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x43, 0x0a, 0x15, + 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x73, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, + 0x01, 0x1a, 0x81, 0x01, 0x0a, 0x0c, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x48, 0x65, 0x61, 0x6c, + 0x74, 0x68, 0x12, 0x34, 0x0a, 0x0f, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x68, 0x65, 0x61, 0x6c, 0x74, + 0x68, 0x79, 0x5f, 0x61, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x74, + 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x52, 0x0d, 0x6c, 0x61, 0x73, 0x74, 0x48, + 0x65, 0x61, 0x6c, 0x74, 0x68, 0x79, 0x41, 0x74, 0x12, 0x3b, 0x0a, 0x1a, 0x73, 0x65, 0x63, 0x6f, + 0x6e, 0x64, 0x73, 0x5f, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x5f, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x68, + 0x65, 0x61, 0x6c, 0x74, 0x68, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x17, 0x73, 0x65, + 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x53, 0x69, 0x6e, 0x63, 0x65, 0x4c, 0x61, 0x73, 0x74, 0x48, 0x65, + 0x61, 0x6c, 0x74, 0x68, 0x79, 0x1a, 0x7c, 0x0a, 0x12, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, + 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, + 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x50, 0x0a, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x47, 0x65, 0x74, 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x72, 
0x53, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x4d, 0x65, 0x74, 0x72, + 0x69, 0x63, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, + 0x02, 0x38, 0x01, 0x1a, 0x5c, 0x0a, 0x12, 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x64, + 0x41, 0x70, 0x70, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x30, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x74, 0x6f, 0x70, + 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x64, 0x41, + 0x70, 0x70, 0x52, 0x75, 0x6c, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, + 0x01, 0x1a, 0x44, 0x0a, 0x16, 0x41, 0x70, 0x70, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x64, 0x4d, + 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, + 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0xad, 0x01, 0x0a, 0x09, 0x52, 0x65, 0x63, 0x65, + 0x6e, 0x74, 0x41, 0x70, 0x70, 0x12, 0x2b, 0x0a, 0x0a, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x64, + 0x5f, 0x61, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x74, 0x74, 0x69, + 0x6d, 0x65, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x52, 0x09, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x64, + 0x41, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x63, 0x6f, 0x64, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, + 0x6f, 0x64, 0x65, 0x12, 0x52, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, + 0x63, 0x6f, 0x64, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2d, 0x2e, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, + 0x68, 0x65, 0x63, 0x6b, 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x72, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x1a, 0x76, 0x0a, 0x0f, 0x52, 0x65, 0x63, 0x65, 0x6e, + 0x74, 0x41, 0x70, 0x70, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, + 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x4d, 0x0a, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x37, 0x2e, 0x74, 0x61, + 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, + 0x47, 0x65, 0x74, 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x65, 0x63, 0x65, 0x6e, + 0x74, 0x41, 0x70, 0x70, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, + 0xaa, 0x01, 0x0a, 0x11, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x54, 0x61, 0x67, 0x73, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x42, 0x0a, 0x04, 0x74, 0x61, 0x67, 0x73, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, + 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x54, 0x61, + 0x67, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x54, 0x61, 0x67, 0x73, 0x45, 0x6e, + 0x74, 
0x72, 0x79, 0x52, 0x04, 0x74, 0x61, 0x67, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x72, 0x65, 0x70, + 0x6c, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x72, 0x65, 0x70, 0x6c, + 0x61, 0x63, 0x65, 0x1a, 0x37, 0x0a, 0x09, 0x54, 0x61, 0x67, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, + 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x92, 0x01, 0x0a, + 0x12, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x54, 0x61, 0x67, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x43, 0x0a, 0x04, 0x74, 0x61, 0x67, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x2f, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, + 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x54, 0x61, 0x67, 0x73, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x54, 0x61, 0x67, 0x73, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x52, 0x04, 0x74, 0x61, 0x67, 0x73, 0x1a, 0x37, 0x0a, 0x09, 0x54, 0x61, 0x67, 0x73, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, + 0x01, 0x2a, 0x3e, 0x0a, 0x19, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x53, 0x65, 0x6c, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x07, + 0x0a, 0x03, 0x41, 0x4e, 0x59, 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x49, 0x4e, 0x4f, 0x52, 0x44, + 0x45, 0x52, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, + 0x03, 0x2a, 0x83, 0x01, 0x0a, 0x1a, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x54, 0x68, 0x72, 0x6f, 0x74, + 0x74, 0x6c, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x43, 0x6f, 0x64, 0x65, + 0x12, 0x0d, 0x0a, 0x09, 0x55, 0x4e, 0x44, 0x45, 0x46, 0x49, 0x4e, 0x45, 0x44, 0x10, 0x00, 0x12, + 0x06, 0x0a, 0x02, 0x4f, 0x4b, 0x10, 0x01, 0x12, 0x16, 0x0a, 0x12, 0x54, 0x48, 0x52, 0x45, 0x53, + 0x48, 0x4f, 0x4c, 0x44, 0x5f, 0x45, 0x58, 0x43, 0x45, 0x45, 0x44, 0x45, 0x44, 0x10, 0x02, 0x12, + 0x0e, 0x0a, 0x0a, 0x41, 0x50, 0x50, 0x5f, 0x44, 0x45, 0x4e, 0x49, 0x45, 0x44, 0x10, 0x03, 0x12, + 0x12, 0x0a, 0x0e, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x5f, 0x4d, 0x45, 0x54, 0x52, 0x49, + 0x43, 0x10, 0x04, 0x12, 0x12, 0x0a, 0x0e, 0x49, 0x4e, 0x54, 0x45, 0x52, 0x4e, 0x41, 0x4c, 0x5f, + 0x45, 0x52, 0x52, 0x4f, 0x52, 0x10, 0x05, 0x42, 0x30, 0x5a, 0x2e, 0x76, 0x69, 0x74, 0x65, 0x73, + 0x73, 0x2e, 0x69, 0x6f, 0x2f, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x76, + 0x74, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, + 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, } var ( diff --git a/go/vt/proto/tabletmanagerdata/tabletmanagerdata_vtproto.pb.go b/go/vt/proto/tabletmanagerdata/tabletmanagerdata_vtproto.pb.go index 5e4a664f0f3..9f6b0df851b 100644 --- a/go/vt/proto/tabletmanagerdata/tabletmanagerdata_vtproto.pb.go +++ b/go/vt/proto/tabletmanagerdata/tabletmanagerdata_vtproto.pb.go @@ -1190,7 +1190,6 @@ func (m *ReplicationStatusResponse) CloneVT() *ReplicationStatusResponse { } r := new(ReplicationStatusResponse) r.Status = m.Status.CloneVT() - r.BackupRunning = 
m.BackupRunning if len(m.unknownFields) > 0 { r.unknownFields = make([]byte, len(m.unknownFields)) copy(r.unknownFields, m.unknownFields) @@ -1968,7 +1967,6 @@ func (m *StopReplicationAndGetStatusResponse) CloneVT() *StopReplicationAndGetSt } r := new(StopReplicationAndGetStatusResponse) r.Status = m.Status.CloneVT() - r.BackupRunning = m.BackupRunning if len(m.unknownFields) > 0 { r.unknownFields = make([]byte, len(m.unknownFields)) copy(r.unknownFields, m.unknownFields) @@ -5788,16 +5786,6 @@ func (m *ReplicationStatusResponse) MarshalToSizedBufferVT(dAtA []byte) (int, er i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.BackupRunning { - i-- - if m.BackupRunning { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x10 - } if m.Status != nil { size, err := m.Status.MarshalToSizedBufferVT(dAtA[:i]) if err != nil { @@ -7605,16 +7593,6 @@ func (m *StopReplicationAndGetStatusResponse) MarshalToSizedBufferVT(dAtA []byte i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.BackupRunning { - i-- - if m.BackupRunning { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x18 - } if m.Status != nil { size, err := m.Status.MarshalToSizedBufferVT(dAtA[:i]) if err != nil { @@ -11538,9 +11516,6 @@ func (m *ReplicationStatusResponse) SizeVT() (n int) { l = m.Status.SizeVT() n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) } - if m.BackupRunning { - n += 2 - } n += len(m.unknownFields) return n } @@ -12136,9 +12111,6 @@ func (m *StopReplicationAndGetStatusResponse) SizeVT() (n int) { l = m.Status.SizeVT() n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) } - if m.BackupRunning { - n += 2 - } n += len(m.unknownFields) return n } @@ -19440,26 +19412,6 @@ func (m *ReplicationStatusResponse) UnmarshalVT(dAtA []byte) error { return err } iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field BackupRunning", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return protohelpers.ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.BackupRunning = bool(v != 0) default: iNdEx = preIndex skippy, err := protohelpers.Skip(dAtA[iNdEx:]) @@ -22829,26 +22781,6 @@ func (m *StopReplicationAndGetStatusResponse) UnmarshalVT(dAtA []byte) error { return err } iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field BackupRunning", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return protohelpers.ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.BackupRunning = bool(v != 0) default: iNdEx = preIndex skippy, err := protohelpers.Skip(dAtA[iNdEx:]) diff --git a/go/vt/schemamanager/tablet_executor.go b/go/vt/schemamanager/tablet_executor.go index bd521703723..60e86bf7faf 100644 --- a/go/vt/schemamanager/tablet_executor.go +++ b/go/vt/schemamanager/tablet_executor.go @@ -249,14 +249,12 @@ func (exec *TabletExecutor) executeAlterMigrationThrottle(ctx context.Context, a throttlerConfig.ThrottledApps = make(map[string]*topodatapb.ThrottledAppRule) } if req.ThrottledApp != nil && req.ThrottledApp.Name != "" { - // TODO(shlomi) in v22: replace the following line with the commented out block - throttlerConfig.ThrottledApps[req.ThrottledApp.Name] = req.ThrottledApp - // timeNow := time.Now() 
- // if protoutil.TimeFromProto(req.ThrottledApp.ExpiresAt).After(timeNow) { - // throttlerConfig.ThrottledApps[req.ThrottledApp.Name] = req.ThrottledApp - // } else { - // delete(throttlerConfig.ThrottledApps, req.ThrottledApp.Name) - // } + timeNow := time.Now() + if protoutil.TimeFromProto(req.ThrottledApp.ExpiresAt).After(timeNow) { + throttlerConfig.ThrottledApps[req.ThrottledApp.Name] = req.ThrottledApp + } else { + delete(throttlerConfig.ThrottledApps, req.ThrottledApp.Name) + } } return throttlerConfig diff --git a/go/vt/vtctl/grpcvtctldserver/server.go b/go/vt/vtctl/grpcvtctldserver/server.go index 3bfce2204a2..c3dc22d21b4 100644 --- a/go/vt/vtctl/grpcvtctldserver/server.go +++ b/go/vt/vtctl/grpcvtctldserver/server.go @@ -2098,14 +2098,12 @@ func (s *VtctldServer) UpdateThrottlerConfig(ctx context.Context, req *vtctldata throttlerConfig.CheckAsCheckSelf = false } if req.ThrottledApp != nil && req.ThrottledApp.Name != "" { - // TODO(shlomi) in v22: replace the following line with the commented out block - throttlerConfig.ThrottledApps[req.ThrottledApp.Name] = req.ThrottledApp - // timeNow := time.Now() - // if protoutil.TimeFromProto(req.ThrottledApp.ExpiresAt).After(timeNow) { - // throttlerConfig.ThrottledApps[req.ThrottledApp.Name] = req.ThrottledApp - // } else { - // delete(throttlerConfig.ThrottledApps, req.ThrottledApp.Name) - // } + timeNow := time.Now() + if protoutil.TimeFromProto(req.ThrottledApp.ExpiresAt).After(timeNow) { + throttlerConfig.ThrottledApps[req.ThrottledApp.Name] = req.ThrottledApp + } else { + delete(throttlerConfig.ThrottledApps, req.ThrottledApp.Name) + } } return throttlerConfig } diff --git a/go/vt/vtctl/reparentutil/replication.go b/go/vt/vtctl/reparentutil/replication.go index e7919361a09..17dbaeae015 100644 --- a/go/vt/vtctl/reparentutil/replication.go +++ b/go/vt/vtctl/reparentutil/replication.go @@ -216,9 +216,6 @@ func stopReplicationAndBuildStatusMaps( logger.Infof("getting replication position from %v", alias) stopReplicationStatus, err := tmc.StopReplicationAndGetStatus(groupCtx, tabletInfo.Tablet, replicationdatapb.StopReplicationMode_IOTHREADONLY) - m.Lock() - res.tabletsBackupState[alias] = stopReplicationStatus.GetBackupRunning() - m.Unlock() if err != nil { sqlErr, isSQLErr := sqlerror.NewSQLErrorFromError(err).(*sqlerror.SQLError) if isSQLErr && sqlErr != nil && sqlErr.Number() == sqlerror.ERNotReplica { @@ -242,6 +239,20 @@ func stopReplicationAndBuildStatusMaps( err = vterrors.Wrapf(err, "error when getting replication status for alias %v: %v", alias, err) } } else { + isTakingBackup := false + + // Prefer the most up-to-date information regarding whether the tablet is taking a backup from the After + // replication status, but fall back to the Before status if After is nil. 
+ if stopReplicationStatus.After != nil { + isTakingBackup = stopReplicationStatus.After.BackupRunning + } else if stopReplicationStatus.Before != nil { + isTakingBackup = stopReplicationStatus.Before.BackupRunning + } + + m.Lock() + res.tabletsBackupState[alias] = isTakingBackup + m.Unlock() + var sqlThreadRunning bool // Check if the sql thread was running for the tablet sqlThreadRunning, err = SQLThreadWasRunning(stopReplicationStatus) diff --git a/go/vt/vtctl/reparentutil/replication_test.go b/go/vt/vtctl/reparentutil/replication_test.go index 4a449b1189c..1b36186efb8 100644 --- a/go/vt/vtctl/reparentutil/replication_test.go +++ b/go/vt/vtctl/reparentutil/replication_test.go @@ -283,6 +283,7 @@ func Test_stopReplicationAndBuildStatusMaps(t *testing.T) { waitForAllTablets bool expectedStatusMap map[string]*replicationdatapb.StopReplicationStatus expectedPrimaryStatusMap map[string]*replicationdatapb.PrimaryStatus + expectedTakingBackup map[string]bool expectedTabletsReachable []*topodatapb.Tablet shouldErr bool }{ @@ -339,6 +340,7 @@ func Test_stopReplicationAndBuildStatusMaps(t *testing.T) { After: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-9"}, }, }, + expectedTakingBackup: map[string]bool{"zone1-0000000100": false, "zone1-0000000101": false}, expectedPrimaryStatusMap: map[string]*replicationdatapb.PrimaryStatus{}, expectedTabletsReachable: []*topodatapb.Tablet{{ Type: topodatapb.TabletType_REPLICA, @@ -407,6 +409,7 @@ func Test_stopReplicationAndBuildStatusMaps(t *testing.T) { After: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-9"}, }, }, + expectedTakingBackup: map[string]bool{"zone1-0000000100": false, "zone1-0000000101": false}, expectedPrimaryStatusMap: map[string]*replicationdatapb.PrimaryStatus{}, expectedTabletsReachable: []*topodatapb.Tablet{{ Type: topodatapb.TabletType_REPLICA, @@ -491,6 +494,7 @@ func Test_stopReplicationAndBuildStatusMaps(t *testing.T) { After: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-9"}, }, }, + expectedTakingBackup: map[string]bool{"zone1-0000000100": false, "zone1-0000000101": false}, expectedPrimaryStatusMap: map[string]*replicationdatapb.PrimaryStatus{}, expectedTabletsReachable: []*topodatapb.Tablet{{ Type: topodatapb.TabletType_REPLICA, @@ -585,6 +589,7 @@ func Test_stopReplicationAndBuildStatusMaps(t *testing.T) { After: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-9"}, }, }, + expectedTakingBackup: map[string]bool{"zone1-0000000100": false, "zone1-0000000101": false}, expectedPrimaryStatusMap: map[string]*replicationdatapb.PrimaryStatus{}, expectedTabletsReachable: []*topodatapb.Tablet{{ Type: topodatapb.TabletType_REPLICA, @@ -678,6 +683,7 @@ func Test_stopReplicationAndBuildStatusMaps(t *testing.T) { After: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-9"}, }, }, + expectedTakingBackup: map[string]bool{"zone1-0000000100": false, "zone1-0000000101": false}, expectedPrimaryStatusMap: map[string]*replicationdatapb.PrimaryStatus{}, expectedTabletsReachable: []*topodatapb.Tablet{{ Type: topodatapb.TabletType_REPLICA, @@ -750,6 +756,7 @@ func Test_stopReplicationAndBuildStatusMaps(t *testing.T) { Uid: 101, }, }}, + expectedTakingBackup: map[string]bool{"zone1-0000000101": false}, expectedPrimaryStatusMap: map[string]*replicationdatapb.PrimaryStatus{}, shouldErr: false, }, @@ -811,6 +818,7 @@ func Test_stopReplicationAndBuildStatusMaps(t *testing.T) { 
After: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-9"}, }, }, + expectedTakingBackup: map[string]bool{"zone1-0000000101": false}, expectedPrimaryStatusMap: map[string]*replicationdatapb.PrimaryStatus{ "zone1-0000000100": { Position: "primary-position-100", @@ -885,6 +893,7 @@ func Test_stopReplicationAndBuildStatusMaps(t *testing.T) { After: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-9"}, }, }, + expectedTakingBackup: map[string]bool{"zone1-0000000101": false}, expectedPrimaryStatusMap: map[string]*replicationdatapb.PrimaryStatus{}, // zone1-0000000100 fails to demote, so does not appear expectedTabletsReachable: []*topodatapb.Tablet{{ Type: topodatapb.TabletType_REPLICA, @@ -1008,6 +1017,7 @@ func Test_stopReplicationAndBuildStatusMaps(t *testing.T) { Uid: 101, }, }}, + expectedTakingBackup: map[string]bool{"zone1-0000000101": false}, expectedPrimaryStatusMap: map[string]*replicationdatapb.PrimaryStatus{}, shouldErr: false, }, @@ -1057,6 +1067,7 @@ func Test_stopReplicationAndBuildStatusMaps(t *testing.T) { After: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-9"}, }, }, + expectedTakingBackup: map[string]bool{"zone1-0000000101": false}, expectedPrimaryStatusMap: map[string]*replicationdatapb.PrimaryStatus{}, expectedTabletsReachable: []*topodatapb.Tablet{{ Type: topodatapb.TabletType_REPLICA, @@ -1252,8 +1263,78 @@ func Test_stopReplicationAndBuildStatusMaps(t *testing.T) { }, }}, stopReplicasTimeout: time.Minute, + expectedTakingBackup: map[string]bool{"zone1-0000000100": false, "zone1-0000000101": false, "zone1-0000000102": false}, expectedPrimaryStatusMap: map[string]*replicationdatapb.PrimaryStatus{}, shouldErr: false, + }, { + name: "Handle nil replication status After. 
No segfaulting when determining backup status, and fall back to Before status", + durability: "none", + tmc: &stopReplicationAndBuildStatusMapsTestTMClient{ + stopReplicationAndGetStatusResults: map[string]*struct { + StopStatus *replicationdatapb.StopReplicationStatus + Err error + }{ + "zone1-0000000100": { + StopStatus: &replicationdatapb.StopReplicationStatus{ + Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429100:1-5", IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning), BackupRunning: true}, + After: nil, + }, + }, + "zone1-0000000101": { + StopStatus: &replicationdatapb.StopReplicationStatus{ + Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-5", IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning), BackupRunning: true}, + After: nil, + }, + }, + }, + }, + tabletMap: map[string]*topo.TabletInfo{ + "zone1-0000000100": { + Tablet: &topodatapb.Tablet{ + Type: topodatapb.TabletType_REPLICA, + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, + }, + "zone1-0000000101": { + Tablet: &topodatapb.Tablet{ + Type: topodatapb.TabletType_REPLICA, + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 101, + }, + }, + }, + }, + ignoredTablets: sets.New[string](), + expectedStatusMap: map[string]*replicationdatapb.StopReplicationStatus{ + "zone1-0000000100": { + Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429100:1-5", IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning), BackupRunning: true}, + After: nil, + }, + "zone1-0000000101": { + Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-5", IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning), BackupRunning: true}, + After: nil, + }, + }, + expectedTakingBackup: map[string]bool{"zone1-0000000100": true, "zone1-0000000101": true}, + expectedPrimaryStatusMap: map[string]*replicationdatapb.PrimaryStatus{}, + expectedTabletsReachable: []*topodatapb.Tablet{{ + Type: topodatapb.TabletType_REPLICA, + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, { + Type: topodatapb.TabletType_REPLICA, + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 101, + }, + }}, + shouldErr: false, }, } @@ -1279,6 +1360,7 @@ func Test_stopReplicationAndBuildStatusMaps(t *testing.T) { for idx, tablet := range res.reachableTablets { assert.True(t, topoproto.IsTabletInList(tablet, tt.expectedTabletsReachable), "TabletsReached[%d] not found - %s", idx, topoproto.TabletAliasString(tablet.Alias)) } + assert.Equal(t, tt.expectedTakingBackup, res.tabletsBackupState) }) } } diff --git a/go/vt/vtctl/workflow/framework_test.go b/go/vt/vtctl/workflow/framework_test.go index 249ff07cf41..a2c1b2ef8e3 100644 --- a/go/vt/vtctl/workflow/framework_test.go +++ b/go/vt/vtctl/workflow/framework_test.go @@ -271,6 +271,7 @@ type testTMClient struct { vrQueries map[int][]*queryResult createVReplicationWorkflowRequests map[uint32]*createVReplicationWorkflowRequestResponse readVReplicationWorkflowRequests map[uint32]*readVReplicationWorkflowRequestResponse + updateVReplicationWorklowsRequests map[uint32]*tabletmanagerdatapb.UpdateVReplicationWorkflowsRequest applySchemaRequests map[uint32]*applySchemaRequestResponse primaryPositions map[uint32]string vdiffRequests 
map[uint32]*vdiffRequestResponse @@ -294,6 +295,7 @@ func newTestTMClient(env *testEnv) *testTMClient { vrQueries: make(map[int][]*queryResult), createVReplicationWorkflowRequests: make(map[uint32]*createVReplicationWorkflowRequestResponse), readVReplicationWorkflowRequests: make(map[uint32]*readVReplicationWorkflowRequestResponse), + updateVReplicationWorklowsRequests: make(map[uint32]*tabletmanagerdatapb.UpdateVReplicationWorkflowsRequest), applySchemaRequests: make(map[uint32]*applySchemaRequestResponse), readVReplicationWorkflowsResponses: make(map[string][]*tabletmanagerdatapb.ReadVReplicationWorkflowsResponse), primaryPositions: make(map[uint32]string), @@ -496,6 +498,11 @@ func (tmc *testTMClient) ExecuteFetchAsAllPrivs(ctx context.Context, tablet *top return nil, nil } +func (tmc *testTMClient) ExecuteFetchAsApp(ctx context.Context, tablet *topodatapb.Tablet, usePool bool, req *tabletmanagerdatapb.ExecuteFetchAsAppRequest) (*querypb.QueryResult, error) { + // Reuse VReplicationExec. + return tmc.VReplicationExec(ctx, tablet, string(req.Query)) +} + func (tmc *testTMClient) expectApplySchemaRequest(tabletID uint32, req *applySchemaRequestResponse) { tmc.mu.Lock() defer tmc.mu.Unlock() @@ -617,6 +624,10 @@ func (tmc *testTMClient) HasVReplicationWorkflows(ctx context.Context, tablet *t }, nil } +func (tmc *testTMClient) ResetSequences(ctx context.Context, tablet *topodatapb.Tablet, tables []string) error { + return nil +} + func (tmc *testTMClient) ReadVReplicationWorkflows(ctx context.Context, tablet *topodatapb.Tablet, req *tabletmanagerdatapb.ReadVReplicationWorkflowsRequest) (*tabletmanagerdatapb.ReadVReplicationWorkflowsResponse, error) { tmc.mu.Lock() defer tmc.mu.Unlock() @@ -677,6 +688,19 @@ func (tmc *testTMClient) UpdateVReplicationWorkflow(ctx context.Context, tablet }, nil } +func (tmc *testTMClient) UpdateVReplicationWorkflows(ctx context.Context, tablet *topodatapb.Tablet, req *tabletmanagerdatapb.UpdateVReplicationWorkflowsRequest) (*tabletmanagerdatapb.UpdateVReplicationWorkflowsResponse, error) { + tmc.mu.Lock() + defer tmc.mu.Unlock() + if expect := tmc.updateVReplicationWorklowsRequests[tablet.Alias.Uid]; expect != nil { + if !proto.Equal(expect, req) { + return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "unexpected ReadVReplicationWorkflow request on tablet %s: got %+v, want %+v", + topoproto.TabletAliasString(tablet.Alias), req, expect) + } + } + delete(tmc.updateVReplicationWorklowsRequests, tablet.Alias.Uid) + return nil, nil +} + func (tmc *testTMClient) ValidateVReplicationPermissions(ctx context.Context, tablet *topodatapb.Tablet, req *tabletmanagerdatapb.ValidateVReplicationPermissionsRequest) (*tabletmanagerdatapb.ValidateVReplicationPermissionsResponse, error) { return &tabletmanagerdatapb.ValidateVReplicationPermissionsResponse{ User: "vt_filtered", @@ -736,6 +760,12 @@ func (tmc *testTMClient) AddVReplicationWorkflowsResponse(key string, resp *tabl tmc.readVReplicationWorkflowsResponses[key] = append(tmc.readVReplicationWorkflowsResponses[key], resp) } +func (tmc *testTMClient) AddUpdateVReplicationRequests(tabletUID uint32, req *tabletmanagerdatapb.UpdateVReplicationWorkflowsRequest) { + tmc.mu.Lock() + defer tmc.mu.Unlock() + tmc.updateVReplicationWorklowsRequests[tabletUID] = req +} + func (tmc *testTMClient) getVReplicationWorkflowsResponse(key string) *tabletmanagerdatapb.ReadVReplicationWorkflowsResponse { if len(tmc.readVReplicationWorkflowsResponses) == 0 { return nil diff --git a/go/vt/vtctl/workflow/materializer_test.go 
b/go/vt/vtctl/workflow/materializer_test.go index 746c5fe2bae..a583a101186 100644 --- a/go/vt/vtctl/workflow/materializer_test.go +++ b/go/vt/vtctl/workflow/materializer_test.go @@ -2588,7 +2588,7 @@ func TestCreateLookupVindexFailures(t *testing.T) { err: "unique vindex 'from' should have only one column", }, { - description: "non-unique lookup should have more than one column", + description: "non-unique lookup can have only one column", input: &vschemapb.Keyspace{ Vindexes: map[string]*vschemapb.Vindex{ "v": { @@ -2601,7 +2601,7 @@ func TestCreateLookupVindexFailures(t *testing.T) { }, }, }, - err: "non-unique vindex 'from' should have more than one column", + err: "", }, { description: "vindex not found", diff --git a/go/vt/vtctl/workflow/resharder_test.go b/go/vt/vtctl/workflow/resharder_test.go index 6353f36db9f..6fe1afb0c70 100644 --- a/go/vt/vtctl/workflow/resharder_test.go +++ b/go/vt/vtctl/workflow/resharder_test.go @@ -22,14 +22,20 @@ import ( "testing" "time" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "golang.org/x/exp/maps" + "vitess.io/vitess/go/ptr" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/topoproto" + "vitess.io/vitess/go/vt/vtgate/vindexes" + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata" topodatapb "vitess.io/vitess/go/vt/proto/topodata" + vschemapb "vitess.io/vitess/go/vt/proto/vschema" vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" ) @@ -65,6 +71,8 @@ func TestReshardCreate(t *testing.T) { sourceKeyspace, targetKeyspace *testKeyspace preFunc func(env *testEnv) want *vtctldatapb.WorkflowStatusResponse + updateVReplicationRequest *tabletmanagerdatapb.UpdateVReplicationWorkflowsRequest + autoStart bool wantErr string }{ { @@ -77,6 +85,11 @@ func TestReshardCreate(t *testing.T) { KeyspaceName: targetKeyspaceName, ShardNames: []string{"-80", "80-"}, }, + autoStart: true, + updateVReplicationRequest: &tabletmanagerdatapb.UpdateVReplicationWorkflowsRequest{ + AllWorkflows: true, + State: ptr.Of(binlogdatapb.VReplicationWorkflowState_Running), + }, want: &vtctldatapb.WorkflowStatusResponse{ ShardStreams: map[string]*vtctldatapb.WorkflowStatusResponse_ShardStreams{ "targetks/-80": { @@ -137,6 +150,7 @@ func TestReshardCreate(t *testing.T) { SourceShards: tc.sourceKeyspace.ShardNames, TargetShards: tc.targetKeyspace.ShardNames, Cells: []string{env.cell}, + AutoStart: tc.autoStart, } for i := range tc.sourceKeyspace.ShardNames { @@ -172,6 +186,9 @@ func TestReshardCreate(t *testing.T) { "select vrepl_id, table_name, lastpk from _vt.copy_state where vrepl_id in (1) and id in (select max(id) from _vt.copy_state where vrepl_id in (1) group by vrepl_id, table_name)", &sqltypes.Result{}, ) + if tc.updateVReplicationRequest != nil { + env.tmc.AddUpdateVReplicationRequests(uint32(tabletUID), tc.updateVReplicationRequest) + } } if tc.preFunc != nil { @@ -187,6 +204,300 @@ func TestReshardCreate(t *testing.T) { if tc.want != nil { require.Equal(t, tc.want, res) } + + // Expect updateVReplicationWorklowsRequests to be empty, + // if AutoStart is enabled. This is because we delete the specific + // key from the map in the testTMC, once updateVReplicationWorklows() + // with the expected request is called. 
+ if tc.autoStart { + assert.Len(t, env.tmc.updateVReplicationWorklowsRequests, 0) + } + }) + } +} + +func TestReadRefStreams(t *testing.T) { + ctx := context.Background() + + sourceKeyspace := &testKeyspace{ + KeyspaceName: "sourceKeyspace", + ShardNames: []string{"-"}, + } + targetKeyspace := &testKeyspace{ + KeyspaceName: "targetKeyspace", + ShardNames: []string{"-"}, + } + + env := newTestEnv(t, ctx, defaultCellName, sourceKeyspace, targetKeyspace) + defer env.close() + + s1, err := env.ts.UpdateShardFields(ctx, targetKeyspace.KeyspaceName, "-", func(si *topo.ShardInfo) error { + return nil + }) + require.NoError(t, err) + + sourceTablet, ok := env.tablets[sourceKeyspace.KeyspaceName][100] + require.True(t, ok) + + env.tmc.schema = map[string]*tabletmanagerdatapb.SchemaDefinition{ + "t1": {}, + } + + rules := make([]*binlogdatapb.Rule, len(env.tmc.schema)) + for i, table := range maps.Keys(env.tmc.schema) { + rules[i] = &binlogdatapb.Rule{ + Match: table, + Filter: fmt.Sprintf("select * from %s", table), + } + } + + refKey := fmt.Sprintf("wf:%s:-", sourceKeyspace.KeyspaceName) + + testCases := []struct { + name string + addVReplicationWorkflowsResponse *tabletmanagerdatapb.ReadVReplicationWorkflowsResponse + preRefStreams map[string]*refStream + wantRefStreamKeys []string + wantErr bool + errContains string + }{ + { + name: "error for unnamed workflow", + addVReplicationWorkflowsResponse: &tabletmanagerdatapb.ReadVReplicationWorkflowsResponse{ + Workflows: []*tabletmanagerdatapb.ReadVReplicationWorkflowResponse{ + { + Workflow: "", + WorkflowType: binlogdatapb.VReplicationWorkflowType_Reshard, + }, + }, + }, + wantErr: true, + }, + { + name: "populate ref streams", + addVReplicationWorkflowsResponse: &tabletmanagerdatapb.ReadVReplicationWorkflowsResponse{ + Workflows: []*tabletmanagerdatapb.ReadVReplicationWorkflowResponse{ + { + Workflow: "wf", + WorkflowType: binlogdatapb.VReplicationWorkflowType_Reshard, + Streams: []*tabletmanagerdatapb.ReadVReplicationWorkflowResponse_Stream{ + { + + Bls: &binlogdatapb.BinlogSource{ + Keyspace: sourceKeyspace.KeyspaceName, + Shard: "-", + Tables: maps.Keys(env.tmc.schema), + Filter: &binlogdatapb.Filter{ + Rules: rules, + }, + }, + }, + }, + }, + }, + }, + wantRefStreamKeys: []string{refKey}, + }, + { + name: "mismatched streams with empty map", + preRefStreams: map[string]*refStream{}, + addVReplicationWorkflowsResponse: &tabletmanagerdatapb.ReadVReplicationWorkflowsResponse{ + Workflows: []*tabletmanagerdatapb.ReadVReplicationWorkflowResponse{ + { + Workflow: "wf", + WorkflowType: binlogdatapb.VReplicationWorkflowType_Reshard, + Streams: []*tabletmanagerdatapb.ReadVReplicationWorkflowResponse_Stream{ + { + + Bls: &binlogdatapb.BinlogSource{ + Keyspace: sourceKeyspace.KeyspaceName, + Shard: "-", + Tables: maps.Keys(env.tmc.schema), + Filter: &binlogdatapb.Filter{ + Rules: rules, + }, + }, + }, + }, + }, + }, + }, + wantErr: true, + errContains: "mismatch", + }, + { + name: "mismatched streams", + preRefStreams: map[string]*refStream{ + refKey: nil, + "nonexisting": nil, + }, + addVReplicationWorkflowsResponse: &tabletmanagerdatapb.ReadVReplicationWorkflowsResponse{ + Workflows: []*tabletmanagerdatapb.ReadVReplicationWorkflowResponse{ + { + Workflow: "wf", + WorkflowType: binlogdatapb.VReplicationWorkflowType_Reshard, + Streams: []*tabletmanagerdatapb.ReadVReplicationWorkflowResponse_Stream{ + { + + Bls: &binlogdatapb.BinlogSource{ + Keyspace: sourceKeyspace.KeyspaceName, + Shard: "-", + Tables: maps.Keys(env.tmc.schema), + Filter: 
&binlogdatapb.Filter{ + Rules: rules, + }, + }, + }, + }, + }, + }, + }, + wantErr: true, + errContains: "mismatch", + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + rs := &resharder{ + s: env.ws, + keyspace: targetKeyspace.KeyspaceName, + sourceShards: []*topo.ShardInfo{s1}, + sourcePrimaries: map[string]*topo.TabletInfo{ + "-": { + Tablet: sourceTablet, + }, + }, + workflow: "wf", + vschema: &vschemapb.Keyspace{ + Tables: map[string]*vschemapb.Table{ + "t1": { + Type: vindexes.TypeReference, + }, + }, + }, + refStreams: tc.preRefStreams, + } + + workflowKey := env.tmc.GetWorkflowKey(sourceKeyspace.KeyspaceName, "-") + + env.tmc.AddVReplicationWorkflowsResponse(workflowKey, tc.addVReplicationWorkflowsResponse) + + err := rs.readRefStreams(ctx) + if !tc.wantErr { + assert.NoError(t, err) + for _, rk := range tc.wantRefStreamKeys { + assert.Contains(t, rs.refStreams, rk) + } + return + } + + assert.Error(t, err) + assert.ErrorContains(t, err, tc.errContains) + }) + } +} + +func TestBlsIsReference(t *testing.T) { + testCases := []struct { + name string + bls *binlogdatapb.BinlogSource + tables map[string]*vschemapb.Table + expected bool + wantErr bool + errContains string + }{ + { + name: "all references", + bls: &binlogdatapb.BinlogSource{ + Filter: &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{ + {Match: "ref_table1"}, + {Match: "ref_table2"}, + }, + }, + }, + tables: map[string]*vschemapb.Table{ + "ref_table1": {Type: vindexes.TypeReference}, + "ref_table2": {Type: vindexes.TypeReference}, + }, + expected: true, + }, + { + name: "all sharded", + bls: &binlogdatapb.BinlogSource{ + Filter: &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{ + {Match: "sharded_table1"}, + {Match: "sharded_table2"}, + }, + }, + }, + tables: map[string]*vschemapb.Table{ + "sharded_table1": {Type: vindexes.TypeTable}, + "sharded_table2": {Type: vindexes.TypeTable}, + }, + expected: false, + }, + { + name: "mixed reference and sharded tables", + bls: &binlogdatapb.BinlogSource{ + Filter: &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{ + {Match: "ref_table"}, + {Match: "sharded_table"}, + }, + }, + }, + tables: map[string]*vschemapb.Table{ + "ref_table": {Type: vindexes.TypeReference}, + "sharded_table": {Type: vindexes.TypeTable}, + }, + wantErr: true, + }, + { + name: "rule table not found in vschema", + bls: &binlogdatapb.BinlogSource{ + Filter: &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{ + {Match: "unknown_table"}, + }, + }, + }, + tables: map[string]*vschemapb.Table{}, + wantErr: true, + errContains: "unknown_table", + }, + { + name: "internal operation table ignored", + bls: &binlogdatapb.BinlogSource{ + Filter: &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{ + {Match: "_vt_hld_6ace8bcef73211ea87e9f875a4d24e90_20200915120410_"}, + }, + }, + }, + tables: map[string]*vschemapb.Table{}, + expected: false, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + rs := &resharder{ + vschema: &vschemapb.Keyspace{ + Tables: tc.tables, + }, + } + + result, err := rs.blsIsReference(tc.bls) + + if tc.wantErr { + assert.ErrorContains(t, err, tc.errContains) + } else { + assert.NoError(t, err) + assert.Equal(t, tc.expected, result) + } }) } } diff --git a/go/vt/vtctl/workflow/server.go b/go/vt/vtctl/workflow/server.go index 7c49de58c9b..baea602b7a4 100644 --- a/go/vt/vtctl/workflow/server.go +++ b/go/vt/vtctl/workflow/server.go @@ -18,7 +18,6 @@ package workflow import ( "context" - "encoding/json" "errors" "fmt" "math" @@ -41,11 
+40,9 @@ import ( "vitess.io/vitess/go/constants/sidecar" "vitess.io/vitess/go/protoutil" "vitess.io/vitess/go/ptr" - "vitess.io/vitess/go/sets" "vitess.io/vitess/go/sqlescape" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/trace" - "vitess.io/vitess/go/vt/binlog/binlogplayer" "vitess.io/vitess/go/vt/concurrency" "vitess.io/vitess/go/vt/discovery" "vitess.io/vitess/go/vt/key" @@ -58,7 +55,6 @@ import ( "vitess.io/vitess/go/vt/topo/topoproto" "vitess.io/vitess/go/vt/topotools" "vitess.io/vitess/go/vt/vtctl/schematools" - "vitess.io/vitess/go/vt/vtctl/workflow/common" "vitess.io/vitess/go/vt/vtctl/workflow/vexec" "vitess.io/vitess/go/vt/vtenv" "vitess.io/vitess/go/vt/vterrors" @@ -406,546 +402,28 @@ func (s *Server) GetWorkflows(ctx context.Context, req *vtctldatapb.GetWorkflows span.Annotate("include_logs", req.IncludeLogs) span.Annotate("shards", req.Shards) - readReq := &tabletmanagerdatapb.ReadVReplicationWorkflowsRequest{} - if req.Workflow != "" { - readReq.IncludeWorkflows = []string{req.Workflow} + w := &workflowFetcher{ + ts: s.ts, + tmc: s.tmc, + parser: s.SQLParser(), + logger: s.Logger(), } - if req.ActiveOnly { - readReq.ExcludeStates = []binlogdatapb.VReplicationWorkflowState{binlogdatapb.VReplicationWorkflowState_Stopped} - } - - // Guards access to the maps used throughout. - m := sync.Mutex{} - shards, err := common.GetShards(ctx, s.ts, req.Keyspace, req.Shards) + workflowsByShard, err := w.fetchWorkflowsByShard(ctx, req) if err != nil { return nil, err } - results := make(map[*topo.TabletInfo]*tabletmanagerdatapb.ReadVReplicationWorkflowsResponse, len(shards)) - readWorkflowsEg, readWorkflowsCtx := errgroup.WithContext(ctx) - for _, shard := range shards { - readWorkflowsEg.Go(func() error { - si, err := s.ts.GetShard(readWorkflowsCtx, req.Keyspace, shard) - if err != nil { - return err - } - if si.PrimaryAlias == nil { - return fmt.Errorf("%w %s/%s", vexec.ErrNoShardPrimary, req.Keyspace, shard) - } - primary, err := s.ts.GetTablet(readWorkflowsCtx, si.PrimaryAlias) - if err != nil { - return err - } - if primary == nil { - return fmt.Errorf("%w %s/%s: tablet %v not found", vexec.ErrNoShardPrimary, req.Keyspace, shard, topoproto.TabletAliasString(si.PrimaryAlias)) - } - // Clone the request so that we can set the correct DB name for tablet. 
- req := readReq.CloneVT() - wres, err := s.tmc.ReadVReplicationWorkflows(readWorkflowsCtx, primary.Tablet, req) - if err != nil { - return err - } - m.Lock() - defer m.Unlock() - results[primary] = wres - return nil - }) - } - if readWorkflowsEg.Wait() != nil { - return nil, err - } - - copyStatesByShardStreamId := make(map[string][]*vtctldatapb.Workflow_Stream_CopyState, len(results)) - - fetchCopyStates := func(ctx context.Context, tablet *topo.TabletInfo, streamIds []int32) error { - span, ctx := trace.NewSpan(ctx, "workflow.Server.fetchCopyStates") - defer span.Finish() - - span.Annotate("keyspace", req.Keyspace) - span.Annotate("shard", tablet.Shard) - span.Annotate("tablet_alias", tablet.AliasString()) - - copyStates, err := s.getWorkflowCopyStates(ctx, tablet, streamIds) - if err != nil { - return err - } - m.Lock() - defer m.Unlock() - - for _, copyState := range copyStates { - shardStreamId := fmt.Sprintf("%s/%d", tablet.Shard, copyState.StreamId) - copyStatesByShardStreamId[shardStreamId] = append( - copyStatesByShardStreamId[shardStreamId], - copyState, - ) - } - - return nil - } - - fetchCopyStatesEg, fetchCopyStatesCtx := errgroup.WithContext(ctx) - for tablet, result := range results { - tablet := tablet // loop closure - - streamIds := make([]int32, 0, len(result.Workflows)) - for _, wf := range result.Workflows { - for _, stream := range wf.Streams { - streamIds = append(streamIds, stream.Id) - } - } - - if len(streamIds) == 0 { - continue - } - - fetchCopyStatesEg.Go(func() error { - return fetchCopyStates(fetchCopyStatesCtx, tablet, streamIds) - }) - } - - if err := fetchCopyStatesEg.Wait(); err != nil { + copyStatesByShardStreamId, err := w.fetchCopyStatesByShardStream(ctx, workflowsByShard) + if err != nil { return nil, err } - workflowsMap := make(map[string]*vtctldatapb.Workflow, len(results)) - sourceKeyspaceByWorkflow := make(map[string]string, len(results)) - sourceShardsByWorkflow := make(map[string]sets.Set[string], len(results)) - targetKeyspaceByWorkflow := make(map[string]string, len(results)) - targetShardsByWorkflow := make(map[string]sets.Set[string], len(results)) - maxVReplicationLagByWorkflow := make(map[string]float64, len(results)) - maxVReplicationTransactionLagByWorkflow := make(map[string]float64, len(results)) - - // We guarantee the following invariants when this function is called for a - // given workflow: - // - workflow.Name != "" (more precisely, ".Name is set 'properly'") - // - workflowsMap[workflow.Name] == workflow - // - sourceShardsByWorkflow[workflow.Name] != nil - // - targetShardsByWorkflow[workflow.Name] != nil - // - workflow.ShardStatuses != nil - scanWorkflow := func(ctx context.Context, workflow *vtctldatapb.Workflow, res *tabletmanagerdatapb.ReadVReplicationWorkflowResponse, tablet *topo.TabletInfo) error { - // This is not called concurrently, but we still protect the maps to ensure - // that we're concurrency-safe in the face of future changes (e.g. where other - // things are running concurrently with this which also access these maps). - m.Lock() - defer m.Unlock() - for _, rstream := range res.Streams { - // The value in the pos column can be compressed and thus not - // have a valid GTID consisting of valid UTF-8 characters so we - // have to decode it so that it's properly decompressed first - // when needed. 
- pos := rstream.Pos - if pos != "" { - mpos, err := binlogplayer.DecodePosition(pos) - if err != nil { - return err - } - pos = mpos.String() - } - - cells := strings.Split(res.Cells, ",") - for i := range cells { - cells[i] = strings.TrimSpace(cells[i]) - } - options := res.Options - if options != "" { - if err := json.Unmarshal([]byte(options), &workflow.Options); err != nil { - return err - } - } - stream := &vtctldatapb.Workflow_Stream{ - Id: int64(rstream.Id), - Shard: tablet.Shard, - Tablet: tablet.Alias, - BinlogSource: rstream.Bls, - Position: pos, - StopPosition: rstream.StopPos, - State: rstream.State.String(), - DbName: tablet.DbName(), - TabletTypes: res.TabletTypes, - TabletSelectionPreference: res.TabletSelectionPreference, - Cells: cells, - TransactionTimestamp: rstream.TransactionTimestamp, - TimeUpdated: rstream.TimeUpdated, - Message: rstream.Message, - Tags: strings.Split(res.Tags, ","), - RowsCopied: rstream.RowsCopied, - ThrottlerStatus: &vtctldatapb.Workflow_Stream_ThrottlerStatus{ - ComponentThrottled: rstream.ComponentThrottled, - TimeThrottled: rstream.TimeThrottled, - }, - } - - // Merge in copy states, which we've already fetched. - shardStreamId := fmt.Sprintf("%s/%d", tablet.Shard, stream.Id) - if copyState, ok := copyStatesByShardStreamId[shardStreamId]; ok { - stream.CopyStates = copyState - } - - if rstream.TimeUpdated == nil { - rstream.TimeUpdated = &vttimepb.Time{} - } - - switch { - case strings.Contains(strings.ToLower(stream.Message), "error"): - stream.State = binlogdatapb.VReplicationWorkflowState_Error.String() - case stream.State == binlogdatapb.VReplicationWorkflowState_Running.String() && len(stream.CopyStates) > 0: - stream.State = binlogdatapb.VReplicationWorkflowState_Copying.String() - case stream.State == binlogdatapb.VReplicationWorkflowState_Running.String() && int64(time.Now().Second())-rstream.TimeUpdated.Seconds > 10: - stream.State = binlogdatapb.VReplicationWorkflowState_Lagging.String() - } - - shardStreamKey := fmt.Sprintf("%s/%s", tablet.Shard, tablet.AliasString()) - shardStream, ok := workflow.ShardStreams[shardStreamKey] - if !ok { - ctx, cancel := context.WithTimeout(ctx, topo.RemoteOperationTimeout) - defer cancel() - - si, err := s.ts.GetShard(ctx, req.Keyspace, tablet.Shard) - if err != nil { - return err - } - - shardStream = &vtctldatapb.Workflow_ShardStream{ - Streams: nil, - TabletControls: si.TabletControls, - IsPrimaryServing: si.IsPrimaryServing, - } - - workflow.ShardStreams[shardStreamKey] = shardStream - } - - shardStream.Streams = append(shardStream.Streams, stream) - sourceShardsByWorkflow[workflow.Name].Insert(stream.BinlogSource.Shard) - targetShardsByWorkflow[workflow.Name].Insert(tablet.Shard) - - if ks, ok := sourceKeyspaceByWorkflow[workflow.Name]; ok && ks != stream.BinlogSource.Keyspace { - return vterrors.Wrapf(ErrMultipleSourceKeyspaces, "workflow = %v, ks1 = %v, ks2 = %v", workflow.Name, ks, stream.BinlogSource.Keyspace) - } - - sourceKeyspaceByWorkflow[workflow.Name] = stream.BinlogSource.Keyspace - - if ks, ok := targetKeyspaceByWorkflow[workflow.Name]; ok && ks != tablet.Keyspace { - return vterrors.Wrapf(ErrMultipleTargetKeyspaces, "workflow = %v, ks1 = %v, ks2 = %v", workflow.Name, ks, tablet.Keyspace) - } - - targetKeyspaceByWorkflow[workflow.Name] = tablet.Keyspace - - if stream.TimeUpdated == nil { - stream.TimeUpdated = &vttimepb.Time{} - } - timeUpdated := time.Unix(stream.TimeUpdated.Seconds, 0) - vreplicationLag := time.Since(timeUpdated) - - // MaxVReplicationLag represents the time since 
we last processed any event - // in the workflow. - if currentMaxLag, ok := maxVReplicationLagByWorkflow[workflow.Name]; ok { - if vreplicationLag.Seconds() > currentMaxLag { - maxVReplicationLagByWorkflow[workflow.Name] = vreplicationLag.Seconds() - } - } else { - maxVReplicationLagByWorkflow[workflow.Name] = vreplicationLag.Seconds() - } - - workflow.WorkflowType = res.WorkflowType.String() - workflow.WorkflowSubType = res.WorkflowSubType.String() - workflow.DeferSecondaryKeys = res.DeferSecondaryKeys - - // MaxVReplicationTransactionLag estimates the actual statement processing lag - // between the source and the target. If we are still processing source events it - // is the difference b/w current time and the timestamp of the last event. If - // heartbeats are more recent than the last event, then the lag is the time since - // the last heartbeat as there can be an actual event immediately after the - // heartbeat, but which has not yet been processed on the target. - // We don't allow switching during the copy phase, so in that case we just return - // a large lag. All timestamps are in seconds since epoch. - if _, ok := maxVReplicationTransactionLagByWorkflow[workflow.Name]; !ok { - maxVReplicationTransactionLagByWorkflow[workflow.Name] = 0 - } - if rstream.TransactionTimestamp == nil { - rstream.TransactionTimestamp = &vttimepb.Time{} - } - lastTransactionTime := rstream.TransactionTimestamp.Seconds - if rstream.TimeHeartbeat == nil { - rstream.TimeHeartbeat = &vttimepb.Time{} - } - lastHeartbeatTime := rstream.TimeHeartbeat.Seconds - if stream.State == binlogdatapb.VReplicationWorkflowState_Copying.String() { - maxVReplicationTransactionLagByWorkflow[workflow.Name] = math.MaxInt64 - } else { - if lastTransactionTime == 0 /* no new events after copy */ || - lastHeartbeatTime > lastTransactionTime /* no recent transactions, so all caught up */ { - - lastTransactionTime = lastHeartbeatTime - } - now := time.Now().Unix() /* seconds since epoch */ - transactionReplicationLag := float64(now - lastTransactionTime) - if transactionReplicationLag > maxVReplicationTransactionLagByWorkflow[workflow.Name] { - maxVReplicationTransactionLagByWorkflow[workflow.Name] = transactionReplicationLag - } - } - } - - return nil - } - - for tablet, result := range results { - // In the old implementation, we knew we had at most one (0 <= N <= 1) - // workflow for each shard primary we queried. There might be multiple - // rows (streams) comprising that workflow, so we would aggregate the - // rows for a given primary into a single value ("the workflow", - // ReplicationStatusResult in the old types). - // - // In this version, we have many (N >= 0) workflows for each shard - // primary we queried, so we need to determine if each row corresponds - // to a workflow we're already aggregating, or if it's a workflow we - // haven't seen yet for that shard primary. We use the workflow name to - // dedupe for this. 
- for _, wfres := range result.Workflows { - workflowName := wfres.Workflow - workflow, ok := workflowsMap[workflowName] - if !ok { - workflow = &vtctldatapb.Workflow{ - Name: workflowName, - ShardStreams: map[string]*vtctldatapb.Workflow_ShardStream{}, - } - - workflowsMap[workflowName] = workflow - sourceShardsByWorkflow[workflowName] = sets.New[string]() - targetShardsByWorkflow[workflowName] = sets.New[string]() - } - - if err := scanWorkflow(ctx, workflow, wfres, tablet); err != nil { - return nil, err - } - } - } - - var ( - fetchLogsWG sync.WaitGroup - vrepLogQuery = strings.TrimSpace(` -SELECT - id, - vrepl_id, - type, - state, - message, - created_at, - updated_at, - count -FROM - _vt.vreplication_log -WHERE vrepl_id IN %a -ORDER BY - vrepl_id ASC, - id ASC -`) - ) - - fetchStreamLogs := func(ctx context.Context, workflow *vtctldatapb.Workflow) { - span, ctx := trace.NewSpan(ctx, "workflow.Server.fetchStreamLogs") - defer span.Finish() - - span.Annotate("keyspace", req.Keyspace) - span.Annotate("workflow", workflow.Name) - - vreplIDs := make([]int64, 0, len(workflow.ShardStreams)) - for _, shardStream := range maps.Values(workflow.ShardStreams) { - for _, stream := range shardStream.Streams { - vreplIDs = append(vreplIDs, stream.Id) - } - } - idsBV, err := sqltypes.BuildBindVariable(vreplIDs) - if err != nil { - return - } - - query, err := sqlparser.ParseAndBind(vrepLogQuery, idsBV) - if err != nil { - return - } - - vx := vexec.NewVExec(req.Keyspace, workflow.Name, s.ts, s.tmc, s.SQLParser()) - results, err := vx.QueryContext(ctx, query) - if err != nil { - // Note that we do not return here. If there are any query results - // in the map (i.e. some tablets returned successfully), we will - // still try to read log rows from them on a best-effort basis. But, - // we will also pre-emptively record the top-level fetch error on - // every stream in every shard in the workflow. Further processing - // below may override the error message for certain streams. 
- for _, streams := range workflow.ShardStreams { - for _, stream := range streams.Streams { - stream.LogFetchError = err.Error() - } - } - } - - for target, p3qr := range results { - qr := sqltypes.Proto3ToResult(p3qr) - shardStreamKey := fmt.Sprintf("%s/%s", target.Shard, target.AliasString()) - - ss, ok := workflow.ShardStreams[shardStreamKey] - if !ok || ss == nil { - continue - } - - streams := ss.Streams - streamIdx := 0 - markErrors := func(err error) { - if streamIdx >= len(streams) { - return - } - - streams[streamIdx].LogFetchError = err.Error() - } - - for _, row := range qr.Rows { - id, err := row[0].ToCastInt64() - if err != nil { - markErrors(err) - continue - } - - streamID, err := row[1].ToCastInt64() - if err != nil { - markErrors(err) - continue - } - - typ := row[2].ToString() - state := row[3].ToString() - message := row[4].ToString() - - createdAt, err := time.Parse("2006-01-02 15:04:05", row[5].ToString()) - if err != nil { - markErrors(err) - continue - } - - updatedAt, err := time.Parse("2006-01-02 15:04:05", row[6].ToString()) - if err != nil { - markErrors(err) - continue - } - - count, err := row[7].ToCastInt64() - if err != nil { - markErrors(err) - continue - } - - streamLog := &vtctldatapb.Workflow_Stream_Log{ - Id: id, - StreamId: streamID, - Type: typ, - State: state, - CreatedAt: &vttimepb.Time{ - Seconds: createdAt.Unix(), - }, - UpdatedAt: &vttimepb.Time{ - Seconds: updatedAt.Unix(), - }, - Message: message, - Count: count, - } - - // Earlier, in the main loop where we called scanWorkflow for - // each _vt.vreplication row, we also sorted each ShardStreams - // slice by ascending id, and our _vt.vreplication_log query - // ordered by (stream_id ASC, id ASC), so we can walk the - // streams in index order in O(n) amortized over all the rows - // for this tablet. - for streamIdx < len(streams) { - stream := streams[streamIdx] - if stream.Id < streamLog.StreamId { - streamIdx++ - continue - } - - if stream.Id > streamLog.StreamId { - s.Logger().Warningf("Found stream log for nonexistent stream: %+v", streamLog) - // This can happen on manual/failed workflow cleanup so move to the next log. 
- break - } - - // stream.Id == streamLog.StreamId - stream.Logs = append(stream.Logs, streamLog) - break - } - } - } - } - - workflows := make([]*vtctldatapb.Workflow, 0, len(workflowsMap)) - - for name, workflow := range workflowsMap { - sourceShards, ok := sourceShardsByWorkflow[name] - if !ok { - return nil, vterrors.Wrapf(ErrInvalidWorkflow, "%s has no source shards", name) - } - - sourceKeyspace, ok := sourceKeyspaceByWorkflow[name] - if !ok { - return nil, vterrors.Wrapf(ErrInvalidWorkflow, "%s has no source keyspace", name) - } - - targetShards, ok := targetShardsByWorkflow[name] - if !ok { - return nil, vterrors.Wrapf(ErrInvalidWorkflow, "%s has no target shards", name) - } - - targetKeyspace, ok := targetKeyspaceByWorkflow[name] - if !ok { - return nil, vterrors.Wrapf(ErrInvalidWorkflow, "%s has no target keyspace", name) - } - - maxVReplicationLag, ok := maxVReplicationLagByWorkflow[name] - if !ok { - return nil, vterrors.Wrapf(ErrInvalidWorkflow, "%s has no tracked vreplication lag", name) - } - - maxVReplicationTransactionLag, ok := maxVReplicationTransactionLagByWorkflow[name] - if !ok { - return nil, vterrors.Wrapf(ErrInvalidWorkflow, "%s has no tracked vreplication transaction lag", name) - } - - workflow.Source = &vtctldatapb.Workflow_ReplicationLocation{ - Keyspace: sourceKeyspace, - Shards: sets.List(sourceShards), - } - - workflow.Target = &vtctldatapb.Workflow_ReplicationLocation{ - Keyspace: targetKeyspace, - Shards: sets.List(targetShards), - } - - workflow.MaxVReplicationLag = int64(maxVReplicationLag) - workflow.MaxVReplicationTransactionLag = int64(maxVReplicationTransactionLag) - - // Sort shard streams by stream_id ASC, to support an optimization - // in fetchStreamLogs below. - for _, shardStreams := range workflow.ShardStreams { - sort.Slice(shardStreams.Streams, func(i, j int) bool { - return shardStreams.Streams[i].Id < shardStreams.Streams[j].Id - }) - } - - workflows = append(workflows, workflow) - - if req.IncludeLogs { - // Fetch logs for all streams associated with this workflow in the background. - fetchLogsWG.Add(1) - go func(ctx context.Context, workflow *vtctldatapb.Workflow) { - defer fetchLogsWG.Done() - fetchStreamLogs(ctx, workflow) - }(ctx, workflow) - } + workflows, err := w.buildWorkflows(ctx, workflowsByShard, copyStatesByShardStreamId, req) + if err != nil { + return nil, err } - // Wait for all the log fetchers to finish. 
- fetchLogsWG.Wait() - return &vtctldatapb.GetWorkflowsResponse{ Workflows: workflows, }, nil @@ -1080,51 +558,6 @@ func (s *Server) getWorkflowState(ctx context.Context, targetKeyspace, workflowN return ts, state, nil } -func (s *Server) getWorkflowCopyStates(ctx context.Context, tablet *topo.TabletInfo, streamIds []int32) ([]*vtctldatapb.Workflow_Stream_CopyState, error) { - span, ctx := trace.NewSpan(ctx, "workflow.Server.getWorkflowCopyStates") - defer span.Finish() - - span.Annotate("keyspace", tablet.Keyspace) - span.Annotate("shard", tablet.Shard) - span.Annotate("tablet_alias", tablet.AliasString()) - span.Annotate("stream_ids", fmt.Sprintf("%#v", streamIds)) - - idsBV, err := sqltypes.BuildBindVariable(streamIds) - if err != nil { - return nil, err - } - query, err := sqlparser.ParseAndBind("select vrepl_id, table_name, lastpk from _vt.copy_state where vrepl_id in %a and id in (select max(id) from _vt.copy_state where vrepl_id in %a group by vrepl_id, table_name)", - idsBV, idsBV) - if err != nil { - return nil, err - } - qr, err := s.tmc.VReplicationExec(ctx, tablet.Tablet, query) - if err != nil { - return nil, err - } - - result := sqltypes.Proto3ToResult(qr) - if result == nil { - return nil, nil - } - - copyStates := make([]*vtctldatapb.Workflow_Stream_CopyState, len(result.Rows)) - for i, row := range result.Rows { - streamId, err := row[0].ToInt64() - if err != nil { - return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "failed to cast vrepl_id to int64: %v", err) - } - // These string fields are technically varbinary, but this is close enough. - copyStates[i] = &vtctldatapb.Workflow_Stream_CopyState{ - StreamId: streamId, - Table: row[1].ToString(), - LastPk: row[2].ToString(), - } - } - - return copyStates, nil -} - // LookupVindexCreate creates the lookup vindex in the specified // keyspace and creates a VReplication workflow to backfill that // vindex from the keyspace to the target/lookup table specified. 
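// Editorial sketch, not part of the patch: the large block removed above from
// GetWorkflows appears to be relocated into the new workflowFetcher
// (fetchWorkflowsByShard, fetchCopyStatesByShardStream, buildWorkflows). The removed
// comments describe the transaction-lag heuristic that presumably travels with it:
// while a stream is still in the copy phase, report an effectively infinite lag
// (traffic switching is not allowed then); otherwise take the newer of the last
// transaction timestamp and the last heartbeat as the "last processed" point and
// report the seconds elapsed since it, keeping the per-workflow maximum across
// streams. The standalone program below only restates that heuristic for clarity;
// its package, function, and variable names are hypothetical and appear nowhere in
// this patch.
package main

import (
	"fmt"
	"math"
	"time"
)

// transactionLagSeconds mirrors the heuristic documented in the removed comments.
func transactionLagSeconds(copying bool, lastTransaction, lastHeartbeat, now int64) float64 {
	if copying {
		// Copy phase: surface a huge lag so switching is never considered safe.
		return math.MaxInt64
	}
	last := lastTransaction
	if last == 0 || lastHeartbeat > last {
		// No events after the copy finished, or heartbeats are more recent than the
		// last event: measure from the last heartbeat instead.
		last = lastHeartbeat
	}
	return float64(now - last)
}

func main() {
	now := time.Now().Unix()
	// A stream whose last transaction was 30s ago but whose heartbeat is 5s old is
	// treated as 5s behind, since no newer transactions have arrived since then.
	fmt.Println(transactionLagSeconds(false, now-30, now-5, now)) // prints 5
}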
@@ -1545,7 +978,7 @@ func (s *Server) moveTablesCreate(ctx context.Context, req *vtctldatapb.MoveTabl } } if isStandardMoveTables() { // Non-standard ones do not use shard scoped mechanisms - if err := s.setupInitialDeniedTables(ctx, ts); err != nil { + if err := setupInitialDeniedTables(ctx, ts); err != nil { return nil, vterrors.Wrapf(err, "failed to put initial denied tables entries in place on the target shards") } } @@ -1600,7 +1033,7 @@ func (s *Server) moveTablesCreate(ctx context.Context, req *vtctldatapb.MoveTabl }) } -func (s *Server) validateRoutingRuleFlags(req *vtctldatapb.MoveTablesCreateRequest, mz *materializer) error { +func validateRoutingRuleFlags(req *vtctldatapb.MoveTablesCreateRequest, mz *materializer) error { if mz.IsMultiTenantMigration() { switch { case req.NoRoutingRules: @@ -1612,7 +1045,7 @@ func (s *Server) validateRoutingRuleFlags(req *vtctldatapb.MoveTablesCreateReque return nil } -func (s *Server) setupInitialDeniedTables(ctx context.Context, ts *trafficSwitcher) error { +func setupInitialDeniedTables(ctx context.Context, ts *trafficSwitcher) error { if ts.MigrationType() != binlogdatapb.MigrationType_TABLES { return nil } @@ -1630,7 +1063,7 @@ func (s *Server) setupInitialDeniedTables(ctx context.Context, ts *trafficSwitch } func (s *Server) setupInitialRoutingRules(ctx context.Context, req *vtctldatapb.MoveTablesCreateRequest, mz *materializer, tables []string) error { - if err := s.validateRoutingRuleFlags(req, mz); err != nil { + if err := validateRoutingRuleFlags(req, mz); err != nil { return err } @@ -4026,10 +3459,6 @@ func (s *Server) prepareCreateLookup(ctx context.Context, workflow, keyspace str if len(vindexFromCols) != 1 { return nil, nil, nil, nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "unique vindex 'from' should have only one column") } - } else { - if len(vindexFromCols) < 2 { - return nil, nil, nil, nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "non-unique vindex 'from' should have more than one column") - } } vindexToCol = vindex.Params["to"] // Make the vindex write_only. 
If one exists already in the vschema, diff --git a/go/vt/vtctl/workflow/server_test.go b/go/vt/vtctl/workflow/server_test.go index dbe06ab1a47..26d722f1de0 100644 --- a/go/vt/vtctl/workflow/server_test.go +++ b/go/vt/vtctl/workflow/server_test.go @@ -2470,7 +2470,7 @@ func TestGetWorkflowsStreamLogs(t *testing.T) { }, sourceShards, targetShards) logResult := sqltypes.MakeTestResult( - sqltypes.MakeTestFields("id|vrepl_id|type|state|message|created_at|updated_at|`count`", "int64|int64|varchar|varchar|varchar|varchar|varchar|int64"), + sqltypes.MakeTestFields("id|vrepl_id|type|state|message|created_at|updated_at|count", "int64|int64|varchar|varchar|varchar|varchar|varchar|int64"), "1|0|State Change|Running|test message for non-existent 1|2006-01-02 15:04:05|2006-01-02 15:04:05|1", "2|0|State Change|Stopped|test message for non-existent 2|2006-01-02 15:04:06|2006-01-02 15:04:06|1", "3|1|State Change|Running|log message|2006-01-02 15:04:07|2006-01-02 15:04:07|1", @@ -2499,3 +2499,63 @@ func TestGetWorkflowsStreamLogs(t *testing.T) { assert.Equal(t, gotLogs[0].State, "Running") assert.Equal(t, gotLogs[0].Id, int64(3)) } + +func TestWorkflowStatus(t *testing.T) { + ctx := context.Background() + + sourceKeyspace := "source_keyspace" + targetKeyspace := "target_keyspace" + workflow := "test_workflow" + + sourceShards := []string{"-"} + targetShards := []string{"-"} + + te := newTestMaterializerEnv(t, ctx, &vtctldatapb.MaterializeSettings{ + SourceKeyspace: sourceKeyspace, + TargetKeyspace: targetKeyspace, + Workflow: workflow, + TableSettings: []*vtctldatapb.TableMaterializeSettings{ + { + TargetTable: "table1", + SourceExpression: fmt.Sprintf("select * from %s", "table1"), + }, + { + TargetTable: "table2", + SourceExpression: fmt.Sprintf("select * from %s", "table2"), + }, + }, + }, sourceShards, targetShards) + + tablesResult := sqltypes.MakeTestResult(sqltypes.MakeTestFields("table_name", "varchar"), "table1", "table2") + te.tmc.expectVRQuery(200, "select distinct table_name from _vt.copy_state cs, _vt.vreplication vr where vr.id = cs.vrepl_id and vr.id = 1", tablesResult) + + tablesTargetCopyResult := sqltypes.MakeTestResult(sqltypes.MakeTestFields("table_name|table_rows|data_length", "varchar|int64|int64"), "table1|50|500", "table2|100|250") + te.tmc.expectVRQuery(200, "select table_name, table_rows, data_length from information_schema.tables where table_schema = 'vt_target_keyspace' and table_name in ('table1','table2')", tablesTargetCopyResult) + + tablesSourceCopyResult := sqltypes.MakeTestResult(sqltypes.MakeTestFields("table_name|table_rows|data_length", "varchar|int64|int64"), "table1|100|1000", "table2|200|500") + te.tmc.expectVRQuery(100, "select table_name, table_rows, data_length from information_schema.tables where table_schema = 'vt_source_keyspace' and table_name in ('table1','table2')", tablesSourceCopyResult) + + te.tmc.expectVRQuery(200, "select vrepl_id, table_name, lastpk from _vt.copy_state where vrepl_id in (1) and id in (select max(id) from _vt.copy_state where vrepl_id in (1) group by vrepl_id, table_name)", &sqltypes.Result{}) + + res, err := te.ws.WorkflowStatus(ctx, &vtctldatapb.WorkflowStatusRequest{ + Keyspace: targetKeyspace, + Workflow: workflow, + Shards: targetShards, + }) + + assert.NoError(t, err) + + require.NotNil(t, res.TableCopyState) + + stateTable1 := res.TableCopyState["table1"] + stateTable2 := res.TableCopyState["table2"] + require.NotNil(t, stateTable1) + require.NotNil(t, stateTable2) + + assert.Equal(t, int64(100), stateTable1.RowsTotal) + 
assert.Equal(t, int64(200), stateTable2.RowsTotal) + assert.Equal(t, int64(50), stateTable1.RowsCopied) + assert.Equal(t, int64(100), stateTable2.RowsCopied) + assert.Equal(t, float32(50), stateTable1.RowsPercentage) + assert.Equal(t, float32(50), stateTable2.RowsPercentage) +} diff --git a/go/vt/vtctl/workflow/traffic_switcher.go b/go/vt/vtctl/workflow/traffic_switcher.go index 937dffe70b3..4fc34992b0f 100644 --- a/go/vt/vtctl/workflow/traffic_switcher.go +++ b/go/vt/vtctl/workflow/traffic_switcher.go @@ -1135,30 +1135,45 @@ func (ts *trafficSwitcher) switchDeniedTables(ctx context.Context) error { return nil } +// cancelMigration attempts to revert all changes made during the migration so that we can get back to the +// state when traffic switching (or reversing) was initiated. func (ts *trafficSwitcher) cancelMigration(ctx context.Context, sm *StreamMigrator) { var err error + + if ctx.Err() != nil { + // Even though we create a new context later on we still record any context error: + // for forensics in case of failures. + ts.Logger().Infof("In Cancel migration: original context invalid: %s", ctx.Err()) + } + + // We create a new context while canceling the migration, so that we are independent of the original + // context being cancelled prior to or during the cancel operation. + cmTimeout := 60 * time.Second + cmCtx, cmCancel := context.WithTimeout(context.Background(), cmTimeout) + defer cmCancel() + if ts.MigrationType() == binlogdatapb.MigrationType_TABLES { - err = ts.switchDeniedTables(ctx) + err = ts.switchDeniedTables(cmCtx) } else { - err = ts.changeShardsAccess(ctx, ts.SourceKeyspaceName(), ts.SourceShards(), allowWrites) + err = ts.changeShardsAccess(cmCtx, ts.SourceKeyspaceName(), ts.SourceShards(), allowWrites) } if err != nil { - ts.Logger().Errorf("Cancel migration failed: %v", err) + ts.Logger().Errorf("Cancel migration failed: could not revert denied tables / shard access: %v", err) } - sm.CancelStreamMigrations(ctx) + sm.CancelStreamMigrations(cmCtx) err = ts.ForAllTargets(func(target *MigrationTarget) error { query := fmt.Sprintf("update _vt.vreplication set state='Running', message='' where db_name=%s and workflow=%s", encodeString(target.GetPrimary().DbName()), encodeString(ts.WorkflowName())) - _, err := ts.TabletManagerClient().VReplicationExec(ctx, target.GetPrimary().Tablet, query) + _, err := ts.TabletManagerClient().VReplicationExec(cmCtx, target.GetPrimary().Tablet, query) return err }) if err != nil { ts.Logger().Errorf("Cancel migration failed: could not restart vreplication: %v", err) } - err = ts.deleteReverseVReplication(ctx) + err = ts.deleteReverseVReplication(cmCtx) if err != nil { ts.Logger().Errorf("Cancel migration failed: could not delete reverse vreplication streams: %v", err) } diff --git a/go/vt/vtctl/workflow/traffic_switcher_test.go b/go/vt/vtctl/workflow/traffic_switcher_test.go index 325b405b6f0..b06c95b6c16 100644 --- a/go/vt/vtctl/workflow/traffic_switcher_test.go +++ b/go/vt/vtctl/workflow/traffic_switcher_test.go @@ -28,6 +28,7 @@ import ( "github.com/stretchr/testify/require" "vitess.io/vitess/go/sqlescape" + "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/mysqlctl/tmutils" "vitess.io/vitess/go/vt/proto/vschema" "vitess.io/vitess/go/vt/sqlparser" @@ -36,6 +37,7 @@ import ( "vitess.io/vitess/go/vt/vttablet/tabletmanager/vreplication" tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" ) @@ -649,3 +651,255 
@@ func TestTrafficSwitchPositionHandling(t *testing.T) { }) require.NoError(t, err) } + +func TestInitializeTargetSequences(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second) + defer cancel() + + workflowName := "wf1" + tableName := "t1" + sourceKeyspaceName := "sourceks" + targetKeyspaceName := "targetks" + + schema := map[string]*tabletmanagerdatapb.SchemaDefinition{ + tableName: { + TableDefinitions: []*tabletmanagerdatapb.TableDefinition{ + { + Name: tableName, + Schema: fmt.Sprintf("CREATE TABLE %s (id BIGINT, name VARCHAR(64), PRIMARY KEY (id))", tableName), + }, + }, + }, + } + + sourceKeyspace := &testKeyspace{ + KeyspaceName: sourceKeyspaceName, + ShardNames: []string{"0"}, + } + targetKeyspace := &testKeyspace{ + KeyspaceName: targetKeyspaceName, + ShardNames: []string{"0"}, + } + + env := newTestEnv(t, ctx, defaultCellName, sourceKeyspace, targetKeyspace) + defer env.close() + env.tmc.schema = schema + + ts, _, err := env.ws.getWorkflowState(ctx, targetKeyspaceName, workflowName) + require.NoError(t, err) + sw := &switcher{ts: ts, s: env.ws} + + sequencesByBackingTable := map[string]*sequenceMetadata{ + "my-seq1": { + backingTableName: "my-seq1", + backingTableKeyspace: sourceKeyspaceName, + backingTableDBName: fmt.Sprintf("vt_%s", sourceKeyspaceName), + usingTableName: tableName, + usingTableDBName: "vt_targetks", + usingTableDefinition: &vschema.Table{ + AutoIncrement: &vschema.AutoIncrement{ + Column: "my-col", + Sequence: fmt.Sprintf("%s.my-seq1", sourceKeyspace.KeyspaceName), + }, + }, + }, + } + + env.tmc.expectVRQuery(200, "/select max.*", sqltypes.MakeTestResult(sqltypes.MakeTestFields("maxval", "int64"), "34")) + // Expect the insert query to be executed with 35 as a params, since we provide a maxID of 34 in the last query + env.tmc.expectVRQuery(100, "/insert into.*35.*", &sqltypes.Result{RowsAffected: 1}) + + err = sw.initializeTargetSequences(ctx, sequencesByBackingTable) + assert.NoError(t, err) + + // Expect the queries to be cleared + assert.Empty(t, env.tmc.vrQueries[100]) + assert.Empty(t, env.tmc.vrQueries[200]) +} + +func TestAddTenantFilter(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second) + defer cancel() + + workflowName := "wf1" + tableName := "t1" + sourceKeyspaceName := "sourceks" + targetKeyspaceName := "targetks" + + sourceKeyspace := &testKeyspace{ + KeyspaceName: sourceKeyspaceName, + ShardNames: []string{"0"}, + } + targetKeyspace := &testKeyspace{ + KeyspaceName: targetKeyspaceName, + ShardNames: []string{"0"}, + } + + schema := map[string]*tabletmanagerdatapb.SchemaDefinition{ + tableName: { + TableDefinitions: []*tabletmanagerdatapb.TableDefinition{ + { + Name: tableName, + Schema: fmt.Sprintf("CREATE TABLE %s (id BIGINT, name VARCHAR(64), PRIMARY KEY (id))", tableName), + }, + }, + }, + } + + env := newTestEnv(t, ctx, defaultCellName, sourceKeyspace, targetKeyspace) + defer env.close() + env.tmc.schema = schema + + err := env.ts.SaveVSchema(ctx, targetKeyspaceName, &vschema.Keyspace{ + MultiTenantSpec: &vschema.MultiTenantSpec{ + TenantIdColumnName: "tenant_id", + TenantIdColumnType: sqltypes.Int64, + }, + }) + require.NoError(t, err) + + ts, _, err := env.ws.getWorkflowState(ctx, targetKeyspaceName, workflowName) + require.NoError(t, err) + + ts.options.TenantId = "123" + + filter, err := ts.addTenantFilter(ctx, fmt.Sprintf("select * from %s where id < 5", tableName)) + assert.NoError(t, err) + assert.Equal(t, "select * from t1 where tenant_id = 123 and id < 5", 
filter) +} + +func TestChangeShardRouting(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second) + defer cancel() + + workflowName := "wf1" + tableName := "t1" + sourceKeyspaceName := "sourceks" + targetKeyspaceName := "targetks" + + sourceKeyspace := &testKeyspace{ + KeyspaceName: sourceKeyspaceName, + ShardNames: []string{"0"}, + } + targetKeyspace := &testKeyspace{ + KeyspaceName: targetKeyspaceName, + ShardNames: []string{"0"}, + } + + schema := map[string]*tabletmanagerdatapb.SchemaDefinition{ + tableName: { + TableDefinitions: []*tabletmanagerdatapb.TableDefinition{ + { + Name: tableName, + Schema: fmt.Sprintf("CREATE TABLE %s (id BIGINT, name VARCHAR(64), PRIMARY KEY (id))", tableName), + }, + }, + }, + } + + env := newTestEnv(t, ctx, defaultCellName, sourceKeyspace, targetKeyspace) + defer env.close() + env.tmc.schema = schema + + ts, _, err := env.ws.getWorkflowState(ctx, targetKeyspaceName, workflowName) + require.NoError(t, err) + + err = env.ws.ts.UpdateSrvKeyspace(ctx, "cell", targetKeyspaceName, &topodatapb.SrvKeyspace{ + Partitions: []*topodatapb.SrvKeyspace_KeyspacePartition{ + { + ShardReferences: []*topodatapb.ShardReference{ + { + Name: "0", + }, + }, + }, + }, + }) + require.NoError(t, err) + + err = env.ws.ts.UpdateSrvKeyspace(ctx, "cell", sourceKeyspaceName, &topodatapb.SrvKeyspace{ + Partitions: []*topodatapb.SrvKeyspace_KeyspacePartition{ + { + ShardReferences: []*topodatapb.ShardReference{ + { + Name: "0", + }, + }, + }, + }, + }) + require.NoError(t, err) + + ctx, _, err = env.ws.ts.LockShard(ctx, targetKeyspaceName, "0", "targetks0") + require.NoError(t, err) + + ctx, _, err = env.ws.ts.LockKeyspace(ctx, targetKeyspaceName, "targetks0") + require.NoError(t, err) + + err = ts.changeShardRouting(ctx) + assert.NoError(t, err) + + sourceShardInfo, err := env.ws.ts.GetShard(ctx, sourceKeyspaceName, "0") + assert.NoError(t, err) + assert.False(t, sourceShardInfo.IsPrimaryServing, "source shard shouldn't have it's primary serving after changeShardRouting() is called.") + + targetShardInfo, err := env.ws.ts.GetShard(ctx, targetKeyspaceName, "0") + assert.NoError(t, err) + assert.True(t, targetShardInfo.IsPrimaryServing, "target shard should have it's primary serving after changeShardRouting() is called.") +} + +func TestAddParticipatingTablesToKeyspace(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second) + defer cancel() + + workflowName := "wf1" + tableName := "t1" + sourceKeyspaceName := "sourceks" + targetKeyspaceName := "targetks" + + sourceKeyspace := &testKeyspace{ + KeyspaceName: sourceKeyspaceName, + ShardNames: []string{"0"}, + } + targetKeyspace := &testKeyspace{ + KeyspaceName: targetKeyspaceName, + ShardNames: []string{"0"}, + } + + schema := map[string]*tabletmanagerdatapb.SchemaDefinition{ + tableName: { + TableDefinitions: []*tabletmanagerdatapb.TableDefinition{ + { + Name: tableName, + Schema: fmt.Sprintf("CREATE TABLE %s (id BIGINT, name VARCHAR(64), PRIMARY KEY (id))", tableName), + }, + }, + }, + } + + env := newTestEnv(t, ctx, defaultCellName, sourceKeyspace, targetKeyspace) + defer env.close() + env.tmc.schema = schema + + ts, _, err := env.ws.getWorkflowState(ctx, targetKeyspaceName, workflowName) + require.NoError(t, err) + + err = ts.addParticipatingTablesToKeyspace(ctx, sourceKeyspaceName, "") + assert.NoError(t, err) + + vs, err := env.ts.GetVSchema(ctx, sourceKeyspaceName) + assert.NoError(t, err) + assert.NotNil(t, vs.Tables["t1"]) + assert.Empty(t, vs.Tables["t1"]) + + 
specs := `{"t1":{"column_vindexes":[{"column":"col1","name":"v1"}, {"column":"col2","name":"v2"}]},"t2":{"column_vindexes":[{"column":"col2","name":"v2"}]}}`
+	err = ts.addParticipatingTablesToKeyspace(ctx, sourceKeyspaceName, specs)
+	assert.NoError(t, err)
+
+	vs, err = env.ts.GetVSchema(ctx, sourceKeyspaceName)
+	assert.NoError(t, err)
+	require.NotNil(t, vs.Tables["t1"])
+	require.NotNil(t, vs.Tables["t2"])
+	assert.Len(t, vs.Tables["t1"].ColumnVindexes, 2)
+	assert.Len(t, vs.Tables["t2"].ColumnVindexes, 1)
+}
diff --git a/go/vt/vtctl/workflow/workflows.go b/go/vt/vtctl/workflow/workflows.go
new file mode 100644
index 00000000000..da0ee5dfec7
--- /dev/null
+++ b/go/vt/vtctl/workflow/workflows.go
@@ -0,0 +1,672 @@
+/*
+Copyright 2024 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+/*
+This file provides functions for fetching and retrieving information about VReplication workflows.
+
+At the moment it is used by the `GetWorkflows` function in `server.go` and includes functionality to
+get the following:
+- Fetch workflows by shard
+- Fetch copy states by shard stream
+- Build workflows with metadata
+- Fetch stream logs
+*/
+
+package workflow
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"math"
+	"sort"
+	"strings"
+	"sync"
+	"time"
+
+	"golang.org/x/exp/maps"
+	"golang.org/x/sync/errgroup"
+
+	"vitess.io/vitess/go/sets"
+	"vitess.io/vitess/go/sqltypes"
+	"vitess.io/vitess/go/trace"
+	"vitess.io/vitess/go/vt/binlog/binlogplayer"
+	"vitess.io/vitess/go/vt/logutil"
+	"vitess.io/vitess/go/vt/sqlparser"
+	"vitess.io/vitess/go/vt/topo"
+	"vitess.io/vitess/go/vt/topo/topoproto"
+	"vitess.io/vitess/go/vt/vtctl/workflow/common"
+	"vitess.io/vitess/go/vt/vtctl/workflow/vexec"
+	"vitess.io/vitess/go/vt/vterrors"
+	"vitess.io/vitess/go/vt/vttablet/tmclient"
+
+	binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata"
+	tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata"
+	vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata"
+	vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc"
+	vttimepb "vitess.io/vitess/go/vt/proto/vttime"
+)
+
+// workflowFetcher is responsible for fetching and retrieving information
+// about VReplication workflows.
+type workflowFetcher struct { + ts *topo.Server + tmc tmclient.TabletManagerClient + + logger logutil.Logger + parser *sqlparser.Parser +} + +type workflowMetadata struct { + sourceKeyspace string + sourceShards sets.Set[string] + targetKeyspace string + targetShards sets.Set[string] + maxVReplicationLag float64 + maxVReplicationTransactionLag float64 +} + +var vrepLogQuery = strings.TrimSpace(` +SELECT + id, + vrepl_id, + type, + state, + message, + created_at, + updated_at, + count +FROM + _vt.vreplication_log +WHERE vrepl_id IN %a +ORDER BY + vrepl_id ASC, + id ASC +`) + +func (wf *workflowFetcher) fetchWorkflowsByShard( + ctx context.Context, + req *vtctldatapb.GetWorkflowsRequest, +) (map[*topo.TabletInfo]*tabletmanagerdatapb.ReadVReplicationWorkflowsResponse, error) { + readReq := &tabletmanagerdatapb.ReadVReplicationWorkflowsRequest{} + if req.Workflow != "" { + readReq.IncludeWorkflows = []string{req.Workflow} + } + if req.ActiveOnly { + readReq.ExcludeStates = []binlogdatapb.VReplicationWorkflowState{binlogdatapb.VReplicationWorkflowState_Stopped} + } + + m := sync.Mutex{} + + shards, err := common.GetShards(ctx, wf.ts, req.Keyspace, req.Shards) + if err != nil { + return nil, err + } + + results := make(map[*topo.TabletInfo]*tabletmanagerdatapb.ReadVReplicationWorkflowsResponse, len(shards)) + + err = wf.forAllShards(ctx, req.Keyspace, shards, func(ctx context.Context, si *topo.ShardInfo) error { + primary, err := wf.ts.GetTablet(ctx, si.PrimaryAlias) + if err != nil { + return err + } + if primary == nil { + return fmt.Errorf("%w %s/%s: tablet %v not found", vexec.ErrNoShardPrimary, req.Keyspace, si.ShardName(), topoproto.TabletAliasString(si.PrimaryAlias)) + } + // Clone the request so that we can set the correct DB name for tablet. + req := readReq.CloneVT() + wres, err := wf.tmc.ReadVReplicationWorkflows(ctx, primary.Tablet, req) + if err != nil { + return err + } + m.Lock() + defer m.Unlock() + results[primary] = wres + return nil + }) + if err != nil { + return nil, err + } + + return results, nil +} + +func (wf *workflowFetcher) fetchCopyStatesByShardStream( + ctx context.Context, + workflowsByShard map[*topo.TabletInfo]*tabletmanagerdatapb.ReadVReplicationWorkflowsResponse, +) (map[string][]*vtctldatapb.Workflow_Stream_CopyState, error) { + m := sync.Mutex{} + + copyStatesByShardStreamId := make(map[string][]*vtctldatapb.Workflow_Stream_CopyState, len(workflowsByShard)) + + fetchCopyStates := func(ctx context.Context, tablet *topo.TabletInfo, streamIds []int32) error { + span, ctx := trace.NewSpan(ctx, "workflowFetcher.workflow.fetchCopyStates") + defer span.Finish() + + span.Annotate("shard", tablet.Shard) + span.Annotate("tablet_alias", tablet.AliasString()) + + copyStates, err := wf.getWorkflowCopyStates(ctx, tablet, streamIds) + if err != nil { + return err + } + + m.Lock() + defer m.Unlock() + + for _, copyState := range copyStates { + shardStreamId := fmt.Sprintf("%s/%d", tablet.Shard, copyState.StreamId) + copyStatesByShardStreamId[shardStreamId] = append( + copyStatesByShardStreamId[shardStreamId], + copyState, + ) + } + + return nil + } + + fetchCopyStatesEg, fetchCopyStatesCtx := errgroup.WithContext(ctx) + for tablet, result := range workflowsByShard { + streamIds := make([]int32, 0, len(result.Workflows)) + for _, wf := range result.Workflows { + for _, stream := range wf.Streams { + streamIds = append(streamIds, stream.Id) + } + } + + if len(streamIds) == 0 { + continue + } + + fetchCopyStatesEg.Go(func() error { + return fetchCopyStates(fetchCopyStatesCtx, 
tablet, streamIds) + }) + } + if err := fetchCopyStatesEg.Wait(); err != nil { + return nil, err + } + + return copyStatesByShardStreamId, nil +} + +func (wf *workflowFetcher) getWorkflowCopyStates(ctx context.Context, tablet *topo.TabletInfo, streamIds []int32) ([]*vtctldatapb.Workflow_Stream_CopyState, error) { + span, ctx := trace.NewSpan(ctx, "workflowFetcher.workflow.getWorkflowCopyStates") + defer span.Finish() + + span.Annotate("keyspace", tablet.Keyspace) + span.Annotate("shard", tablet.Shard) + span.Annotate("tablet_alias", tablet.AliasString()) + span.Annotate("stream_ids", fmt.Sprintf("%#v", streamIds)) + + idsBV, err := sqltypes.BuildBindVariable(streamIds) + if err != nil { + return nil, err + } + query, err := sqlparser.ParseAndBind("select vrepl_id, table_name, lastpk from _vt.copy_state where vrepl_id in %a and id in (select max(id) from _vt.copy_state where vrepl_id in %a group by vrepl_id, table_name)", + idsBV, idsBV) + if err != nil { + return nil, err + } + qr, err := wf.tmc.VReplicationExec(ctx, tablet.Tablet, query) + if err != nil { + return nil, err + } + + result := sqltypes.Proto3ToResult(qr) + if result == nil { + return nil, nil + } + + copyStates := make([]*vtctldatapb.Workflow_Stream_CopyState, len(result.Rows)) + for i, row := range result.Named().Rows { + streamId, err := row["vrepl_id"].ToInt64() + if err != nil { + return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "failed to cast vrepl_id to int64: %v", err) + } + // These string fields are technically varbinary, but this is close enough. + copyStates[i] = &vtctldatapb.Workflow_Stream_CopyState{ + StreamId: streamId, + Table: row["table_name"].ToString(), + LastPk: row["lastpk"].ToString(), + } + } + + return copyStates, nil +} + +func (wf *workflowFetcher) buildWorkflows( + ctx context.Context, + results map[*topo.TabletInfo]*tabletmanagerdatapb.ReadVReplicationWorkflowsResponse, + copyStatesByShardStreamId map[string][]*vtctldatapb.Workflow_Stream_CopyState, + req *vtctldatapb.GetWorkflowsRequest, +) ([]*vtctldatapb.Workflow, error) { + workflowsMap := make(map[string]*vtctldatapb.Workflow, len(results)) + workflowMetadataMap := make(map[string]*workflowMetadata, len(results)) + + for tablet, result := range results { + // In the old implementation, we knew we had at most one (0 <= N <= 1) + // workflow for each shard primary we queried. There might be multiple + // rows (streams) comprising that workflow, so we would aggregate the + // rows for a given primary into a single value ("the workflow", + // ReplicationStatusResult in the old types). + // + // In this version, we have many (N >= 0) workflows for each shard + // primary we queried, so we need to determine if each row corresponds + // to a workflow we're already aggregating, or if it's a workflow we + // haven't seen yet for that shard primary. We use the workflow name to + // dedupe for this. 
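		// As a concrete illustration of the keys used during this aggregation
		// (values hypothetical): a stream with id 1 on shard -80, served by
		// tablet zone1-0000000100, ends up under
		//
		//	workflow.ShardStreams["-80/zone1-0000000100"]   // keyed by shard/tablet alias
		//	copyStatesByShardStreamId["-80/1"]              // keyed by shard/stream id (built above)
		//
		// scanWorkflow below joins the two by rebuilding the "<shard>/<stream id>" key.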
+ for _, wfres := range result.Workflows { + workflowName := wfres.Workflow + workflow, ok := workflowsMap[workflowName] + if !ok { + workflow = &vtctldatapb.Workflow{ + Name: workflowName, + ShardStreams: map[string]*vtctldatapb.Workflow_ShardStream{}, + } + + workflowsMap[workflowName] = workflow + workflowMetadataMap[workflowName] = &workflowMetadata{ + sourceShards: sets.New[string](), + targetShards: sets.New[string](), + } + } + + metadata := workflowMetadataMap[workflowName] + err := wf.scanWorkflow(ctx, workflow, wfres, tablet, metadata, copyStatesByShardStreamId, req.Keyspace) + if err != nil { + return nil, err + } + } + } + + for name, workflow := range workflowsMap { + meta := workflowMetadataMap[name] + updateWorkflowWithMetadata(workflow, meta) + + // Sort shard streams by stream_id ASC, to support an optimization + // in fetchStreamLogs below. + for _, shardStreams := range workflow.ShardStreams { + sort.Slice(shardStreams.Streams, func(i, j int) bool { + return shardStreams.Streams[i].Id < shardStreams.Streams[j].Id + }) + } + } + + if req.IncludeLogs { + var fetchLogsWG sync.WaitGroup + + for _, workflow := range workflowsMap { + // Fetch logs for all streams associated with this workflow in the background. + fetchLogsWG.Add(1) + go func(ctx context.Context, workflow *vtctldatapb.Workflow) { + defer fetchLogsWG.Done() + wf.fetchStreamLogs(ctx, req.Keyspace, workflow) + }(ctx, workflow) + } + + // Wait for all the log fetchers to finish. + fetchLogsWG.Wait() + } + + return maps.Values(workflowsMap), nil +} + +func (wf *workflowFetcher) scanWorkflow( + ctx context.Context, + workflow *vtctldatapb.Workflow, + res *tabletmanagerdatapb.ReadVReplicationWorkflowResponse, + tablet *topo.TabletInfo, + meta *workflowMetadata, + copyStatesByShardStreamId map[string][]*vtctldatapb.Workflow_Stream_CopyState, + keyspace string, +) error { + shardStreamKey := fmt.Sprintf("%s/%s", tablet.Shard, tablet.AliasString()) + shardStream, ok := workflow.ShardStreams[shardStreamKey] + if !ok { + ctx, cancel := context.WithTimeout(ctx, topo.RemoteOperationTimeout) + defer cancel() + + si, err := wf.ts.GetShard(ctx, keyspace, tablet.Shard) + if err != nil { + return err + } + + shardStream = &vtctldatapb.Workflow_ShardStream{ + Streams: nil, + TabletControls: si.TabletControls, + IsPrimaryServing: si.IsPrimaryServing, + } + + workflow.ShardStreams[shardStreamKey] = shardStream + } + + for _, rstream := range res.Streams { + // The value in the pos column can be compressed and thus not + // have a valid GTID consisting of valid UTF-8 characters so we + // have to decode it so that it's properly decompressed first + // when needed. 
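		// For instance, a successfully decoded position is a printable GTID
		// string such as "MySQL56/16b1039f-22b6-11ed-b765-0a43f95f28a3:1-615"
		// (value purely illustrative); an empty pos is left untouched.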
+ pos := rstream.Pos + if pos != "" { + mpos, err := binlogplayer.DecodePosition(pos) + if err != nil { + return err + } + pos = mpos.String() + } + + cells := strings.Split(res.Cells, ",") + for i := range cells { + cells[i] = strings.TrimSpace(cells[i]) + } + options := res.Options + if options != "" { + if err := json.Unmarshal([]byte(options), &workflow.Options); err != nil { + return err + } + } + + stream := &vtctldatapb.Workflow_Stream{ + Id: int64(rstream.Id), + Shard: tablet.Shard, + Tablet: tablet.Alias, + BinlogSource: rstream.Bls, + Position: pos, + StopPosition: rstream.StopPos, + State: rstream.State.String(), + DbName: tablet.DbName(), + TabletTypes: res.TabletTypes, + TabletSelectionPreference: res.TabletSelectionPreference, + Cells: cells, + TransactionTimestamp: rstream.TransactionTimestamp, + TimeUpdated: rstream.TimeUpdated, + Message: rstream.Message, + Tags: strings.Split(res.Tags, ","), + RowsCopied: rstream.RowsCopied, + ThrottlerStatus: &vtctldatapb.Workflow_Stream_ThrottlerStatus{ + ComponentThrottled: rstream.ComponentThrottled, + TimeThrottled: rstream.TimeThrottled, + }, + } + + // Merge in copy states, which we've already fetched. + shardStreamId := fmt.Sprintf("%s/%d", tablet.Shard, stream.Id) + if copyStates, ok := copyStatesByShardStreamId[shardStreamId]; ok { + stream.CopyStates = copyStates + } + + if rstream.TimeUpdated == nil { + rstream.TimeUpdated = &vttimepb.Time{} + } + + stream.State = getStreamState(stream, rstream) + + shardStream.Streams = append(shardStream.Streams, stream) + + meta.sourceShards.Insert(stream.BinlogSource.Shard) + meta.targetShards.Insert(tablet.Shard) + + if meta.sourceKeyspace != "" && meta.sourceKeyspace != stream.BinlogSource.Keyspace { + return vterrors.Wrapf(ErrMultipleSourceKeyspaces, "workflow = %v, ks1 = %v, ks2 = %v", workflow.Name, meta.sourceKeyspace, stream.BinlogSource.Keyspace) + } + + meta.sourceKeyspace = stream.BinlogSource.Keyspace + + if meta.targetKeyspace != "" && meta.targetKeyspace != tablet.Keyspace { + return vterrors.Wrapf(ErrMultipleTargetKeyspaces, "workflow = %v, ks1 = %v, ks2 = %v", workflow.Name, meta.targetKeyspace, tablet.Keyspace) + } + + meta.targetKeyspace = tablet.Keyspace + + if stream.TimeUpdated == nil { + stream.TimeUpdated = &vttimepb.Time{} + } + timeUpdated := time.Unix(stream.TimeUpdated.Seconds, 0) + vreplicationLag := time.Since(timeUpdated) + + // MaxVReplicationLag represents the time since we last processed any event + // in the workflow. + if vreplicationLag.Seconds() > meta.maxVReplicationLag { + meta.maxVReplicationLag = vreplicationLag.Seconds() + } + + workflow.WorkflowType = res.WorkflowType.String() + workflow.WorkflowSubType = res.WorkflowSubType.String() + workflow.DeferSecondaryKeys = res.DeferSecondaryKeys + + // MaxVReplicationTransactionLag estimates the actual statement processing lag + // between the source and the target. If we are still processing source events it + // is the difference b/w current time and the timestamp of the last event. If + // heartbeats are more recent than the last event, then the lag is the time since + // the last heartbeat as there can be an actual event immediately after the + // heartbeat, but which has not yet been processed on the target. + // We don't allow switching during the copy phase, so in that case we just return + // a large lag. All timestamps are in seconds since epoch. 
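		// A worked example of the calculation below, with hypothetical
		// timestamps: if now is 1700000065, the last transaction was at
		// 1700000000 and a more recent heartbeat arrived at 1700000060, the
		// heartbeat wins and the reported lag is 5s; had the heartbeat been
		// older than the transaction, the lag would be 65s. A stream that is
		// still Copying reports math.MaxInt64 instead.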
+ if rstream.TransactionTimestamp == nil { + rstream.TransactionTimestamp = &vttimepb.Time{} + } + lastTransactionTime := rstream.TransactionTimestamp.Seconds + if rstream.TimeHeartbeat == nil { + rstream.TimeHeartbeat = &vttimepb.Time{} + } + lastHeartbeatTime := rstream.TimeHeartbeat.Seconds + if stream.State == binlogdatapb.VReplicationWorkflowState_Copying.String() { + meta.maxVReplicationTransactionLag = math.MaxInt64 + } else { + if lastTransactionTime == 0 /* no new events after copy */ || + lastHeartbeatTime > lastTransactionTime /* no recent transactions, so all caught up */ { + + lastTransactionTime = lastHeartbeatTime + } + now := time.Now().Unix() /* seconds since epoch */ + transactionReplicationLag := float64(now - lastTransactionTime) + if transactionReplicationLag > meta.maxVReplicationTransactionLag { + meta.maxVReplicationTransactionLag = transactionReplicationLag + } + } + } + + return nil +} + +func updateWorkflowWithMetadata(workflow *vtctldatapb.Workflow, meta *workflowMetadata) { + workflow.Source = &vtctldatapb.Workflow_ReplicationLocation{ + Keyspace: meta.sourceKeyspace, + Shards: sets.List(meta.sourceShards), + } + + workflow.Target = &vtctldatapb.Workflow_ReplicationLocation{ + Keyspace: meta.targetKeyspace, + Shards: sets.List(meta.targetShards), + } + + workflow.MaxVReplicationLag = int64(meta.maxVReplicationLag) + workflow.MaxVReplicationTransactionLag = int64(meta.maxVReplicationTransactionLag) +} + +func (wf *workflowFetcher) fetchStreamLogs(ctx context.Context, keyspace string, workflow *vtctldatapb.Workflow) { + span, ctx := trace.NewSpan(ctx, "workflowFetcher.workflow.fetchStreamLogs") + defer span.Finish() + + span.Annotate("keyspace", keyspace) + span.Annotate("workflow", workflow.Name) + + vreplIDs := make([]int64, 0, len(workflow.ShardStreams)) + for _, shardStream := range maps.Values(workflow.ShardStreams) { + for _, stream := range shardStream.Streams { + vreplIDs = append(vreplIDs, stream.Id) + } + } + idsBV, err := sqltypes.BuildBindVariable(vreplIDs) + if err != nil { + return + } + + query, err := sqlparser.ParseAndBind(vrepLogQuery, idsBV) + if err != nil { + return + } + + vx := vexec.NewVExec(keyspace, workflow.Name, wf.ts, wf.tmc, wf.parser) + results, err := vx.QueryContext(ctx, query) + if err != nil { + // Note that we do not return here. If there are any query results + // in the map (i.e. some tablets returned successfully), we will + // still try to read log rows from them on a best-effort basis. But, + // we will also pre-emptively record the top-level fetch error on + // every stream in every shard in the workflow. Further processing + // below may override the error message for certain streams. 
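		// For example, if one target primary is unreachable while the others
		// answer, every stream first gets LogFetchError set to that error
		// here; streams on the healthy shards still have their Logs populated
		// from the rows processed below, and markErrors may later replace the
		// message with a row-level parse error.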
+ for _, streams := range workflow.ShardStreams { + for _, stream := range streams.Streams { + stream.LogFetchError = err.Error() + } + } + } + + for target, p3qr := range results { + qr := sqltypes.Proto3ToResult(p3qr) + shardStreamKey := fmt.Sprintf("%s/%s", target.Shard, target.AliasString()) + + ss, ok := workflow.ShardStreams[shardStreamKey] + if !ok || ss == nil { + continue + } + + streams := ss.Streams + streamIdx := 0 + markErrors := func(err error) { + if streamIdx >= len(streams) { + return + } + + streams[streamIdx].LogFetchError = err.Error() + } + + for _, row := range qr.Named().Rows { + id, err := row["id"].ToCastInt64() + if err != nil { + markErrors(err) + continue + } + + streamID, err := row["vrepl_id"].ToCastInt64() + if err != nil { + markErrors(err) + continue + } + + typ := row["type"].ToString() + state := row["state"].ToString() + message := row["message"].ToString() + + createdAt, err := time.Parse("2006-01-02 15:04:05", row["created_at"].ToString()) + if err != nil { + markErrors(err) + continue + } + + updatedAt, err := time.Parse("2006-01-02 15:04:05", row["updated_at"].ToString()) + if err != nil { + markErrors(err) + continue + } + + count, err := row["count"].ToCastInt64() + if err != nil { + markErrors(err) + continue + } + + streamLog := &vtctldatapb.Workflow_Stream_Log{ + Id: id, + StreamId: streamID, + Type: typ, + State: state, + CreatedAt: &vttimepb.Time{ + Seconds: createdAt.Unix(), + }, + UpdatedAt: &vttimepb.Time{ + Seconds: updatedAt.Unix(), + }, + Message: message, + Count: count, + } + + // Earlier, in buildWorkflows, we sorted each ShardStreams + // slice by ascending id, and our _vt.vreplication_log query + // ordered by (stream_id ASC, id ASC), so we can walk the + // streams in index order in O(n) amortized over all the rows + // for this tablet. + for streamIdx < len(streams) { + stream := streams[streamIdx] + if stream.Id < streamLog.StreamId { + streamIdx++ + continue + } + + if stream.Id > streamLog.StreamId { + wf.logger.Warningf("Found stream log for nonexistent stream: %+v", streamLog) + // This can happen on manual/failed workflow cleanup so move to the next log. 
+ break + } + + // stream.Id == streamLog.StreamId + stream.Logs = append(stream.Logs, streamLog) + break + } + } + } +} + +func (wf *workflowFetcher) forAllShards( + ctx context.Context, + keyspace string, + shards []string, + f func(ctx context.Context, shard *topo.ShardInfo) error, +) error { + eg, egCtx := errgroup.WithContext(ctx) + for _, shard := range shards { + eg.Go(func() error { + si, err := wf.ts.GetShard(ctx, keyspace, shard) + if err != nil { + return err + } + if si.PrimaryAlias == nil { + return fmt.Errorf("%w %s/%s", vexec.ErrNoShardPrimary, keyspace, shard) + } + + if err := f(egCtx, si); err != nil { + return err + } + return nil + }) + } + if err := eg.Wait(); err != nil { + return err + } + return nil +} + +func getStreamState(stream *vtctldatapb.Workflow_Stream, rstream *tabletmanagerdatapb.ReadVReplicationWorkflowResponse_Stream) string { + switch { + case strings.Contains(strings.ToLower(stream.Message), "error"): + return binlogdatapb.VReplicationWorkflowState_Error.String() + case stream.State == binlogdatapb.VReplicationWorkflowState_Running.String() && len(stream.CopyStates) > 0: + return binlogdatapb.VReplicationWorkflowState_Copying.String() + case stream.State == binlogdatapb.VReplicationWorkflowState_Running.String() && int64(time.Now().Second())-rstream.TimeUpdated.Seconds > 10: + return binlogdatapb.VReplicationWorkflowState_Lagging.String() + } + return rstream.State.String() +} diff --git a/go/vt/vtctl/workflow/workflows_test.go b/go/vt/vtctl/workflow/workflows_test.go new file mode 100644 index 00000000000..2015c8d1b7c --- /dev/null +++ b/go/vt/vtctl/workflow/workflows_test.go @@ -0,0 +1,260 @@ +/* +Copyright 2024 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package workflow + +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/vt/proto/binlogdata" + "vitess.io/vitess/go/vt/proto/vttime" + "vitess.io/vitess/go/vt/topo" + + tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" + vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" +) + +func TestGetStreamState(t *testing.T) { + testCases := []struct { + name string + stream *vtctldatapb.Workflow_Stream + rstream *tabletmanagerdatapb.ReadVReplicationWorkflowResponse_Stream + want string + }{ + { + name: "error state", + stream: &vtctldatapb.Workflow_Stream{ + Message: "test error", + }, + want: "Error", + }, + { + name: "copying state", + stream: &vtctldatapb.Workflow_Stream{ + State: "Running", + CopyStates: []*vtctldatapb.Workflow_Stream_CopyState{ + { + Table: "table1", + }, + }, + }, + want: "Copying", + }, + { + name: "lagging state", + stream: &vtctldatapb.Workflow_Stream{ + State: "Running", + }, + rstream: &tabletmanagerdatapb.ReadVReplicationWorkflowResponse_Stream{ + TimeUpdated: &vttime.Time{ + Seconds: int64(time.Now().Second()) - 11, + }, + }, + want: "Lagging", + }, + { + name: "non-running and error free", + stream: &vtctldatapb.Workflow_Stream{ + State: "Stopped", + }, + rstream: &tabletmanagerdatapb.ReadVReplicationWorkflowResponse_Stream{ + State: binlogdata.VReplicationWorkflowState_Stopped, + }, + want: "Stopped", + }, + } + + for _, tt := range testCases { + t.Run(tt.name, func(t *testing.T) { + state := getStreamState(tt.stream, tt.rstream) + assert.Equal(t, tt.want, state) + }) + } +} + +func TestGetWorkflowCopyStates(t *testing.T) { + ctx := context.Background() + + sourceShards := []string{"-"} + targetShards := []string{"-"} + + te := newTestMaterializerEnv(t, ctx, &vtctldatapb.MaterializeSettings{ + SourceKeyspace: "source_keyspace", + TargetKeyspace: "target_keyspace", + Workflow: "test_workflow", + TableSettings: []*vtctldatapb.TableMaterializeSettings{ + { + TargetTable: "table1", + SourceExpression: fmt.Sprintf("select * from %s", "table1"), + }, + { + TargetTable: "table2", + SourceExpression: fmt.Sprintf("select * from %s", "table2"), + }, + }, + }, sourceShards, targetShards) + + wf := workflowFetcher{ + ts: te.ws.ts, + tmc: te.tmc, + } + + tablet := &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + } + + query := "select vrepl_id, table_name, lastpk from _vt.copy_state where vrepl_id in (1) and id in (select max(id) from _vt.copy_state where vrepl_id in (1) group by vrepl_id, table_name)" + te.tmc.expectVRQuery(100, query, sqltypes.MakeTestResult( + sqltypes.MakeTestFields("vrepl_id|table_name|lastpk", "int64|varchar|varchar"), + "1|table1|2", "1|table2|1", + )) + + copyStates, err := wf.getWorkflowCopyStates(ctx, &topo.TabletInfo{ + Tablet: tablet, + }, []int32{1}) + assert.NoError(t, err) + assert.Len(t, copyStates, 2) + + state1 := &vtctldatapb.Workflow_Stream_CopyState{ + Table: "table1", + LastPk: "2", + StreamId: 1, + } + state2 := &vtctldatapb.Workflow_Stream_CopyState{ + Table: "table2", + LastPk: "1", + StreamId: 1, + } + assert.Contains(t, copyStates, state1) + assert.Contains(t, copyStates, state2) +} + +func TestFetchCopyStatesByShardStream(t *testing.T) { + ctx := context.Background() + + sourceShards := []string{"-"} + targetShards := []string{"-"} + + te := newTestMaterializerEnv(t, ctx, 
&vtctldatapb.MaterializeSettings{ + SourceKeyspace: "source_keyspace", + TargetKeyspace: "target_keyspace", + Workflow: "test_workflow", + TableSettings: []*vtctldatapb.TableMaterializeSettings{ + { + TargetTable: "table1", + SourceExpression: fmt.Sprintf("select * from %s", "table1"), + }, + { + TargetTable: "table2", + SourceExpression: fmt.Sprintf("select * from %s", "table2"), + }, + }, + }, sourceShards, targetShards) + + wf := workflowFetcher{ + ts: te.ws.ts, + tmc: te.tmc, + } + + tablet := &topodatapb.Tablet{ + Shard: "-80", + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + } + tablet2 := &topodatapb.Tablet{ + Shard: "80-", + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 101, + }, + } + + query := "select vrepl_id, table_name, lastpk from _vt.copy_state where vrepl_id in (1, 2) and id in (select max(id) from _vt.copy_state where vrepl_id in (1, 2) group by vrepl_id, table_name)" + te.tmc.expectVRQuery(100, query, sqltypes.MakeTestResult( + sqltypes.MakeTestFields("vrepl_id|table_name|lastpk", "int64|varchar|varchar"), + "1|table1|2", "2|table2|1", "2|table1|1", + )) + + te.tmc.expectVRQuery(101, query, sqltypes.MakeTestResult( + sqltypes.MakeTestFields("vrepl_id|table_name|lastpk", "int64|varchar|varchar"), + "1|table1|2", "1|table2|1", + )) + + ti := &topo.TabletInfo{ + Tablet: tablet, + } + ti2 := &topo.TabletInfo{ + Tablet: tablet2, + } + + readVReplicationResponse := map[*topo.TabletInfo]*tabletmanagerdatapb.ReadVReplicationWorkflowsResponse{ + ti: { + Workflows: []*tabletmanagerdatapb.ReadVReplicationWorkflowResponse{ + { + Streams: []*tabletmanagerdatapb.ReadVReplicationWorkflowResponse_Stream{ + { + Id: 1, + }, { + Id: 2, + }, + }, + }, + }, + }, + ti2: { + Workflows: []*tabletmanagerdatapb.ReadVReplicationWorkflowResponse{ + { + Streams: []*tabletmanagerdatapb.ReadVReplicationWorkflowResponse_Stream{ + { + Id: 1, + }, { + Id: 2, + }, + }, + }, + }, + }, + } + copyStatesByStreamId, err := wf.fetchCopyStatesByShardStream(ctx, readVReplicationResponse) + assert.NoError(t, err) + + copyStates1 := copyStatesByStreamId["-80/1"] + copyStates2 := copyStatesByStreamId["-80/2"] + copyStates3 := copyStatesByStreamId["80-/1"] + + require.NotNil(t, copyStates1) + require.NotNil(t, copyStates2) + require.NotNil(t, copyStates3) + + assert.Len(t, copyStates1, 1) + assert.Len(t, copyStates2, 2) + assert.Len(t, copyStates3, 2) + + assert.Nil(t, copyStatesByStreamId["80-/2"]) +} diff --git a/go/vt/vtexplain/vtexplain_vttablet.go b/go/vt/vtexplain/vtexplain_vttablet.go index 38a3ca7bbb3..65cd1a96181 100644 --- a/go/vt/vtexplain/vtexplain_vttablet.go +++ b/go/vt/vtexplain/vtexplain_vttablet.go @@ -22,6 +22,7 @@ import ( "reflect" "strings" "sync" + "time" "vitess.io/vitess/go/stats" "vitess.io/vitess/go/vt/sidecardb" @@ -113,8 +114,7 @@ func (vte *VTExplain) newTablet(ctx context.Context, env *vtenv.Environment, opt config := tabletenv.NewCurrentConfig() config.TrackSchemaVersions = false if opts.ExecutionMode == ModeTwoPC { - config.TwoPCAbandonAge = 1.0 - config.TwoPCEnable = true + config.TwoPCAbandonAge = 1 * time.Second } config.EnableOnlineDDL = false config.EnableTableGC = false diff --git a/go/vt/vtgate/buffer/buffer.go b/go/vt/vtgate/buffer/buffer.go index dec83e2c78c..eb937a6361c 100644 --- a/go/vt/vtgate/buffer/buffer.go +++ b/go/vt/vtgate/buffer/buffer.go @@ -208,6 +208,7 @@ func (b *Buffer) WaitForFailoverEnd(ctx context.Context, keyspace, shard string, } func (b *Buffer) HandleKeyspaceEvent(ksevent *discovery.KeyspaceEvent) { + log.Infof("Keyspace 
Event received for keyspace %v", ksevent.Keyspace) for _, shard := range ksevent.Shards { sb := b.getOrCreateBuffer(shard.Target.Keyspace, shard.Target.Shard) if sb != nil { diff --git a/go/vt/vtgate/buffer/shard_buffer.go b/go/vt/vtgate/buffer/shard_buffer.go index e1f02bb7f0e..66c6ee702e6 100644 --- a/go/vt/vtgate/buffer/shard_buffer.go +++ b/go/vt/vtgate/buffer/shard_buffer.go @@ -286,7 +286,7 @@ func (sb *shardBuffer) startBufferingLocked(ctx context.Context, kev *discovery. msg = "Dry-run: Would have started buffering" } starts.Add(sb.statsKey, 1) - log.Infof("%v for shard: %s (window: %v, size: %v, max failover duration: %v) (A failover was detected by this seen error: %v.)", + log.V(2).Infof("%v for shard: %s (window: %v, size: %v, max failover duration: %v) (A failover was detected by this seen error: %v.)", msg, topoproto.KeyspaceShardString(sb.keyspace, sb.shard), sb.buf.config.Window, @@ -488,7 +488,7 @@ func (sb *shardBuffer) recordKeyspaceEvent(alias *topodatapb.TabletAlias, stillS sb.mu.Lock() defer sb.mu.Unlock() - log.Infof("disruption in shard %s/%s resolved (serving: %v), movetable state %#v", + log.V(2).Infof("disruption in shard %s/%s resolved (serving: %v), movetable state %#v", sb.keyspace, sb.shard, stillServing, keyspaceEvent.MoveTablesState) if !topoproto.TabletAliasEqual(alias, sb.currentPrimary) { @@ -562,7 +562,7 @@ func (sb *shardBuffer) stopBufferingLocked(reason stopReason, details string) { if sb.mode == bufferModeDryRun { msg = "Dry-run: Would have stopped buffering" } - log.Infof("%v for shard: %s after: %.1f seconds due to: %v. Draining %d buffered requests now.", + log.V(2).Infof("%v for shard: %s after: %.1f seconds due to: %v. Draining %d buffered requests now.", msg, topoproto.KeyspaceShardString(sb.keyspace, sb.shard), d.Seconds(), details, len(q)) var clientEntryError error @@ -622,7 +622,7 @@ func (sb *shardBuffer) drain(q []*entry, err error) { wg.Wait() d := sb.timeNow().Sub(start) - log.Infof("Draining finished for shard: %s Took: %v for: %d requests.", topoproto.KeyspaceShardString(sb.keyspace, sb.shard), d, len(q)) + log.V(2).Infof("Draining finished for shard: %s Took: %v for: %d requests.", topoproto.KeyspaceShardString(sb.keyspace, sb.shard), d, len(q)) requestsDrained.Add(sb.statsKey, int64(len(q))) // Draining is done. Change state from "draining" to "idle". diff --git a/go/vt/vtgate/engine/plan_description.go b/go/vt/vtgate/engine/plan_description.go index dfcad4e5e6b..e8e763c1ee1 100644 --- a/go/vt/vtgate/engine/plan_description.go +++ b/go/vt/vtgate/engine/plan_description.go @@ -126,6 +126,133 @@ func (pd PrimitiveDescription) MarshalJSON() ([]byte, error) { return buf.Bytes(), nil } +// PrimitiveDescriptionFromString creates primitive description out of a data string. +func PrimitiveDescriptionFromString(data string) (pd PrimitiveDescription, err error) { + resultMap := make(map[string]any) + err = json.Unmarshal([]byte(data), &resultMap) + if err != nil { + return PrimitiveDescription{}, err + } + return PrimitiveDescriptionFromMap(resultMap) +} + +// PrimitiveDescriptionFromMap populates the fields of a PrimitiveDescription from a map representation. 
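// A minimal usage sketch (the JSON fragment is made up, not taken from the
// plan test data): PrimitiveDescriptionFromString above unmarshals the string
// and hands the resulting map to this function, so
//
//	pd, err := PrimitiveDescriptionFromString(`{"OperatorType":"Route","Variant":"Scatter","Inputs":[{"OperatorType":"Limit"}]}`)
//
// yields pd.OperatorType == "Route", pd.Variant == "Scatter", and a single
// nested input with OperatorType "Limit". WalkPrimitiveDescription below can
// then visit pd and its inputs in pre-order.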
+func PrimitiveDescriptionFromMap(data map[string]any) (pd PrimitiveDescription, err error) { + if opType, isPresent := data["OperatorType"]; isPresent { + pd.OperatorType = opType.(string) + } + if variant, isPresent := data["Variant"]; isPresent { + pd.Variant = variant.(string) + } + if ksMap, isPresent := data["Keyspace"]; isPresent { + ksMap := ksMap.(map[string]any) + pd.Keyspace = &vindexes.Keyspace{ + Name: ksMap["Name"].(string), + Sharded: ksMap["Sharded"].(bool), + } + } + if ttt, isPresent := data["TargetTabletType"]; isPresent { + pd.TargetTabletType = topodatapb.TabletType(ttt.(int)) + } + if other, isPresent := data["Other"]; isPresent { + pd.Other = other.(map[string]any) + } + if inpName, isPresent := data["InputName"]; isPresent { + pd.InputName = inpName.(string) + } + if avgRows, isPresent := data["AvgNumberOfRows"]; isPresent { + pd.RowsReceived = RowsReceived{ + int(avgRows.(float64)), + } + } + if sq, isPresent := data["ShardsQueried"]; isPresent { + sq := int(sq.(float64)) + pd.ShardsQueried = (*ShardsQueried)(&sq) + } + if inputs, isPresent := data["Inputs"]; isPresent { + inputs := inputs.([]any) + for _, input := range inputs { + inputMap := input.(map[string]any) + inp, err := PrimitiveDescriptionFromMap(inputMap) + if err != nil { + return PrimitiveDescription{}, err + } + pd.Inputs = append(pd.Inputs, inp) + } + } + return pd, nil +} + +// WalkPrimitiveDescription walks the primitive description. +func WalkPrimitiveDescription(pd PrimitiveDescription, f func(PrimitiveDescription)) { + f(pd) + for _, child := range pd.Inputs { + WalkPrimitiveDescription(child, f) + } +} + +func (pd PrimitiveDescription) Equals(other PrimitiveDescription) string { + if pd.Variant != other.Variant { + return fmt.Sprintf("Variant: %v != %v", pd.Variant, other.Variant) + } + + if pd.OperatorType != other.OperatorType { + return fmt.Sprintf("OperatorType: %v != %v", pd.OperatorType, other.OperatorType) + } + + // TODO (harshit): enable this to compare keyspace as well + // switch { + // case pd.Keyspace == nil && other.Keyspace == nil: + // // do nothing + // case pd.Keyspace != nil && other.Keyspace != nil: + // if pd.Keyspace.Name != other.Keyspace.Name { + // return fmt.Sprintf("Keyspace.Name: %v != %v", pd.Keyspace.Name, other.Keyspace.Name) + // } + // default: + // return "Keyspace is nil in one of the descriptions" + // } + + switch { + case pd.TargetDestination == nil && other.TargetDestination == nil: + // do nothing + case pd.TargetDestination != nil && other.TargetDestination != nil: + if pd.TargetDestination.String() != other.TargetDestination.String() { + return fmt.Sprintf("TargetDestination: %v != %v", pd.TargetDestination, other.TargetDestination) + } + default: + return "TargetDestination is nil in one of the descriptions" + } + + if pd.TargetTabletType != other.TargetTabletType { + return fmt.Sprintf("TargetTabletType: %v != %v", pd.TargetTabletType, other.TargetTabletType) + } + + switch { + case pd.Other == nil && other.Other == nil: + // do nothing + case pd.Other != nil && other.Other != nil: + if len(pd.Other) != len(other.Other) { + return fmt.Sprintf("Other length did not match: %v != %v", pd.Other, other.Other) + } + for ky, val := range pd.Other { + if other.Other[ky] != val { + return fmt.Sprintf("Other[%v]: %v != %v", ky, val, other.Other[ky]) + } + } + default: + return "Other is nil in one of the descriptions" + } + if len(pd.Inputs) != len(other.Inputs) { + return fmt.Sprintf("Inputs length did not match: %v != %v", len(pd.Inputs), len(other.Inputs)) 
+ } + for idx, input := range pd.Inputs { + if diff := input.Equals(other.Inputs[idx]); diff != "" { + return diff + } + } + return "" +} + func average(nums []int) float64 { total := 0 for _, num := range nums { diff --git a/go/vt/vtgate/executor.go b/go/vt/vtgate/executor.go index e84ab7fbb21..0bb47361f55 100644 --- a/go/vt/vtgate/executor.go +++ b/go/vt/vtgate/executor.go @@ -187,7 +187,6 @@ func NewExecutor( // setting the vcursor config. e.initVConfig(warnOnShardedOnly, pv) - vschemaacl.Init() // we subscribe to update from the VSchemaManager e.vm = &VSchemaManager{ subscriber: e.SaveVSchema, diff --git a/go/vt/vtgate/executor_set_test.go b/go/vt/vtgate/executor_set_test.go index 62101639a11..f8ed0b558c3 100644 --- a/go/vt/vtgate/executor_set_test.go +++ b/go/vt/vtgate/executor_set_test.go @@ -401,9 +401,9 @@ func TestExecutorSetMetadata(t *testing.T) { }) t.Run("Session 2", func(t *testing.T) { - vschemaacl.AuthorizedDDLUsers = "%" + vschemaacl.AuthorizedDDLUsers.Set(vschemaacl.NewAuthorizedDDLUsers("%")) defer func() { - vschemaacl.AuthorizedDDLUsers = "" + vschemaacl.AuthorizedDDLUsers.Set(vschemaacl.NewAuthorizedDDLUsers("")) }() executor, _, _, _, ctx := createExecutorEnv(t) @@ -411,11 +411,11 @@ func TestExecutorSetMetadata(t *testing.T) { set := "set @@vitess_metadata.app_keyspace_v1= '1'" _, err := executor.Execute(ctx, nil, "TestExecute", session, set, nil) - assert.NoError(t, err, "%s error: %v", set, err) + require.NoError(t, err, "%s error: %v", set, err) show := `show vitess_metadata variables like 'app\\_keyspace\\_v_'` result, err := executor.Execute(ctx, nil, "TestExecute", session, show, nil) - assert.NoError(t, err) + require.NoError(t, err) want := "1" got := result.Rows[0][1].ToString() @@ -424,11 +424,11 @@ func TestExecutorSetMetadata(t *testing.T) { // Update metadata set = "set @@vitess_metadata.app_keyspace_v2='2'" _, err = executor.Execute(ctx, nil, "TestExecute", session, set, nil) - assert.NoError(t, err, "%s error: %v", set, err) + require.NoError(t, err, "%s error: %v", set, err) show = `show vitess_metadata variables like 'app\\_keyspace\\_v%'` gotqr, err := executor.Execute(ctx, nil, "TestExecute", session, show, nil) - assert.NoError(t, err) + require.NoError(t, err) wantqr := &sqltypes.Result{ Fields: buildVarCharFields("Key", "Value"), diff --git a/go/vt/vtgate/executor_test.go b/go/vt/vtgate/executor_test.go index 2b6d4710bce..d3ab28d6600 100644 --- a/go/vt/vtgate/executor_test.go +++ b/go/vt/vtgate/executor_test.go @@ -335,9 +335,9 @@ func TestExecutorTransactionsAutoCommitStreaming(t *testing.T) { } func TestExecutorDeleteMetadata(t *testing.T) { - vschemaacl.AuthorizedDDLUsers = "%" + vschemaacl.AuthorizedDDLUsers.Set(vschemaacl.NewAuthorizedDDLUsers("%")) defer func() { - vschemaacl.AuthorizedDDLUsers = "" + vschemaacl.AuthorizedDDLUsers.Set(vschemaacl.NewAuthorizedDDLUsers("")) }() executor, _, _, _, ctx := createExecutorEnv(t) @@ -1318,9 +1318,9 @@ func TestExecutorDDLFk(t *testing.T) { } func TestExecutorAlterVSchemaKeyspace(t *testing.T) { - vschemaacl.AuthorizedDDLUsers = "%" + vschemaacl.AuthorizedDDLUsers.Set(vschemaacl.NewAuthorizedDDLUsers("%")) defer func() { - vschemaacl.AuthorizedDDLUsers = "" + vschemaacl.AuthorizedDDLUsers.Set(vschemaacl.NewAuthorizedDDLUsers("")) }() executor, _, _, _, ctx := createExecutorEnv(t) @@ -1347,9 +1347,9 @@ func TestExecutorAlterVSchemaKeyspace(t *testing.T) { } func TestExecutorCreateVindexDDL(t *testing.T) { - vschemaacl.AuthorizedDDLUsers = "%" + 
vschemaacl.AuthorizedDDLUsers.Set(vschemaacl.NewAuthorizedDDLUsers("%")) defer func() { - vschemaacl.AuthorizedDDLUsers = "" + vschemaacl.AuthorizedDDLUsers.Set(vschemaacl.NewAuthorizedDDLUsers("")) }() executor, sbc1, sbc2, sbclookup, ctx := createExecutorEnv(t) ks := "TestExecutor" @@ -1417,9 +1417,9 @@ func TestExecutorCreateVindexDDL(t *testing.T) { } func TestExecutorAddDropVschemaTableDDL(t *testing.T) { - vschemaacl.AuthorizedDDLUsers = "%" + vschemaacl.AuthorizedDDLUsers.Set(vschemaacl.NewAuthorizedDDLUsers("%")) defer func() { - vschemaacl.AuthorizedDDLUsers = "" + vschemaacl.AuthorizedDDLUsers.Set(vschemaacl.NewAuthorizedDDLUsers("")) }() executor, sbc1, sbc2, sbclookup, ctx := createExecutorEnv(t) ks := KsTestUnsharded @@ -1486,8 +1486,7 @@ func TestExecutorVindexDDLACL(t *testing.T) { require.EqualError(t, err, `User 'blueUser' is not authorized to perform vschema operations`) // test when all users are enabled - vschemaacl.AuthorizedDDLUsers = "%" - vschemaacl.Init() + vschemaacl.AuthorizedDDLUsers.Set(vschemaacl.NewAuthorizedDDLUsers("%")) _, err = executor.Execute(ctxRedUser, nil, "TestExecute", session, stmt, nil) if err != nil { t.Errorf("unexpected error '%v'", err) @@ -1499,8 +1498,7 @@ func TestExecutorVindexDDLACL(t *testing.T) { } // test when only one user is enabled - vschemaacl.AuthorizedDDLUsers = "orangeUser, blueUser, greenUser" - vschemaacl.Init() + vschemaacl.AuthorizedDDLUsers.Set(vschemaacl.NewAuthorizedDDLUsers("orangeUser, blueUser, greenUser")) _, err = executor.Execute(ctxRedUser, nil, "TestExecute", session, stmt, nil) require.EqualError(t, err, `User 'redUser' is not authorized to perform vschema operations`) @@ -1511,7 +1509,7 @@ func TestExecutorVindexDDLACL(t *testing.T) { } // restore the disallowed state - vschemaacl.AuthorizedDDLUsers = "" + vschemaacl.AuthorizedDDLUsers.Set(vschemaacl.NewAuthorizedDDLUsers("")) } func TestExecutorUnrecognized(t *testing.T) { diff --git a/go/vt/vtgate/executor_vschema_ddl_test.go b/go/vt/vtgate/executor_vschema_ddl_test.go index 825b65ab8f3..1acc1ba2362 100644 --- a/go/vt/vtgate/executor_vschema_ddl_test.go +++ b/go/vt/vtgate/executor_vschema_ddl_test.go @@ -135,9 +135,9 @@ func waitForColVindexes(t *testing.T, ks, table string, names []string, executor } func TestPlanExecutorAlterVSchemaKeyspace(t *testing.T) { - vschemaacl.AuthorizedDDLUsers = "%" + vschemaacl.AuthorizedDDLUsers.Set(vschemaacl.NewAuthorizedDDLUsers("%")) defer func() { - vschemaacl.AuthorizedDDLUsers = "" + vschemaacl.AuthorizedDDLUsers.Set(vschemaacl.NewAuthorizedDDLUsers("")) }() executor, _, _, _, ctx := createExecutorEnv(t) session := econtext.NewSafeSession(&vtgatepb.Session{TargetString: "@primary", Autocommit: true}) @@ -163,9 +163,9 @@ func TestPlanExecutorAlterVSchemaKeyspace(t *testing.T) { } func TestPlanExecutorCreateVindexDDL(t *testing.T) { - vschemaacl.AuthorizedDDLUsers = "%" + vschemaacl.AuthorizedDDLUsers.Set(vschemaacl.NewAuthorizedDDLUsers("%")) defer func() { - vschemaacl.AuthorizedDDLUsers = "" + vschemaacl.AuthorizedDDLUsers.Set(vschemaacl.NewAuthorizedDDLUsers("")) }() executor, _, _, _, ctx := createExecutorEnv(t) ks := "TestExecutor" @@ -205,9 +205,9 @@ func TestPlanExecutorCreateVindexDDL(t *testing.T) { } func TestPlanExecutorDropVindexDDL(t *testing.T) { - vschemaacl.AuthorizedDDLUsers = "%" + vschemaacl.AuthorizedDDLUsers.Set(vschemaacl.NewAuthorizedDDLUsers("%")) defer func() { - vschemaacl.AuthorizedDDLUsers = "" + vschemaacl.AuthorizedDDLUsers.Set(vschemaacl.NewAuthorizedDDLUsers("")) }() executor, _, _, _, ctx 
:= createExecutorEnv(t) ks := "TestExecutor" @@ -274,9 +274,9 @@ func TestPlanExecutorDropVindexDDL(t *testing.T) { } func TestPlanExecutorAddDropVschemaTableDDL(t *testing.T) { - vschemaacl.AuthorizedDDLUsers = "%" + vschemaacl.AuthorizedDDLUsers.Set(vschemaacl.NewAuthorizedDDLUsers("%")) defer func() { - vschemaacl.AuthorizedDDLUsers = "" + vschemaacl.AuthorizedDDLUsers.Set(vschemaacl.NewAuthorizedDDLUsers("")) }() executor, sbc1, sbc2, sbclookup, ctx := createExecutorEnv(t) ks := KsTestUnsharded @@ -331,9 +331,9 @@ func TestPlanExecutorAddDropVschemaTableDDL(t *testing.T) { } func TestExecutorAddSequenceDDL(t *testing.T) { - vschemaacl.AuthorizedDDLUsers = "%" + vschemaacl.AuthorizedDDLUsers.Set(vschemaacl.NewAuthorizedDDLUsers("%")) defer func() { - vschemaacl.AuthorizedDDLUsers = "" + vschemaacl.AuthorizedDDLUsers.Set(vschemaacl.NewAuthorizedDDLUsers("")) }() executor, _, _, _, ctx := createExecutorEnv(t) ks := KsTestUnsharded @@ -391,9 +391,9 @@ func TestExecutorAddSequenceDDL(t *testing.T) { } func TestExecutorDropSequenceDDL(t *testing.T) { - vschemaacl.AuthorizedDDLUsers = "%" + vschemaacl.AuthorizedDDLUsers.Set(vschemaacl.NewAuthorizedDDLUsers("%")) defer func() { - vschemaacl.AuthorizedDDLUsers = "" + vschemaacl.AuthorizedDDLUsers.Set(vschemaacl.NewAuthorizedDDLUsers("")) }() executor, _, _, _, ctx := createExecutorEnv(t) ks := KsTestUnsharded @@ -442,9 +442,9 @@ func TestExecutorDropSequenceDDL(t *testing.T) { } func TestExecutorDropAutoIncDDL(t *testing.T) { - vschemaacl.AuthorizedDDLUsers = "%" + vschemaacl.AuthorizedDDLUsers.Set(vschemaacl.NewAuthorizedDDLUsers("%")) defer func() { - vschemaacl.AuthorizedDDLUsers = "" + vschemaacl.AuthorizedDDLUsers.Set(vschemaacl.NewAuthorizedDDLUsers("")) }() executor, _, _, _, ctx := createExecutorEnv(t) ks := KsTestUnsharded @@ -484,9 +484,9 @@ func TestExecutorDropAutoIncDDL(t *testing.T) { } func TestExecutorAddDropVindexDDL(t *testing.T) { - vschemaacl.AuthorizedDDLUsers = "%" + vschemaacl.AuthorizedDDLUsers.Set(vschemaacl.NewAuthorizedDDLUsers("%")) defer func() { - vschemaacl.AuthorizedDDLUsers = "" + vschemaacl.AuthorizedDDLUsers.Set(vschemaacl.NewAuthorizedDDLUsers("")) }() executor, sbc1, sbc2, sbclookup, ctx := createExecutorEnv(t) ks := "TestExecutor" @@ -747,8 +747,7 @@ func TestPlanExecutorVindexDDLACL(t *testing.T) { require.EqualError(t, err, `User 'blueUser' is not authorized to perform vschema operations`) // test when all users are enabled - vschemaacl.AuthorizedDDLUsers = "%" - vschemaacl.Init() + vschemaacl.AuthorizedDDLUsers.Set(vschemaacl.NewAuthorizedDDLUsers("%")) _, err = executor.Execute(ctxRedUser, nil, "TestExecute", session, stmt, nil) if err != nil { t.Errorf("unexpected error '%v'", err) @@ -760,8 +759,7 @@ func TestPlanExecutorVindexDDLACL(t *testing.T) { } // test when only one user is enabled - vschemaacl.AuthorizedDDLUsers = "orangeUser, blueUser, greenUser" - vschemaacl.Init() + vschemaacl.AuthorizedDDLUsers.Set(vschemaacl.NewAuthorizedDDLUsers("orangeUser, blueUser, greenUser")) _, err = executor.Execute(ctxRedUser, nil, "TestExecute", session, stmt, nil) require.EqualError(t, err, `User 'redUser' is not authorized to perform vschema operations`) @@ -772,5 +770,5 @@ func TestPlanExecutorVindexDDLACL(t *testing.T) { } // restore the disallowed state - vschemaacl.AuthorizedDDLUsers = "" + vschemaacl.AuthorizedDDLUsers.Set(vschemaacl.NewAuthorizedDDLUsers("")) } diff --git a/go/vt/vtgate/executorcontext/vcursor_impl.go b/go/vt/vtgate/executorcontext/vcursor_impl.go index d6aac5cf5b0..c1f341b38cf 100644 
--- a/go/vt/vtgate/executorcontext/vcursor_impl.go +++ b/go/vt/vtgate/executorcontext/vcursor_impl.go @@ -31,6 +31,7 @@ import ( "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/mysql/config" "vitess.io/vitess/go/mysql/sqlerror" + "vitess.io/vitess/go/protoutil" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/callerid" "vitess.io/vitess/go/vt/discovery" @@ -1439,14 +1440,12 @@ func (vc *VCursorImpl) ThrottleApp(ctx context.Context, throttledAppRule *topoda throttlerConfig.ThrottledApps = make(map[string]*topodatapb.ThrottledAppRule) } if req.ThrottledApp != nil && req.ThrottledApp.Name != "" { - // TODO(shlomi) in v22: replace the following line with the commented out block - throttlerConfig.ThrottledApps[req.ThrottledApp.Name] = req.ThrottledApp - // timeNow := time.Now() - // if protoutil.TimeFromProto(req.ThrottledApp.ExpiresAt).After(timeNow) { - // throttlerConfig.ThrottledApps[req.ThrottledApp.Name] = req.ThrottledApp - // } else { - // delete(throttlerConfig.ThrottledApps, req.ThrottledApp.Name) - // } + timeNow := time.Now() + if protoutil.TimeFromProto(req.ThrottledApp.ExpiresAt).After(timeNow) { + throttlerConfig.ThrottledApps[req.ThrottledApp.Name] = req.ThrottledApp + } else { + delete(throttlerConfig.ThrottledApps, req.ThrottledApp.Name) + } } return throttlerConfig } diff --git a/go/vt/vtgate/planbuilder/plan_test.go b/go/vt/vtgate/planbuilder/plan_test.go index ccbc9821170..7135f4dff29 100644 --- a/go/vt/vtgate/planbuilder/plan_test.go +++ b/go/vt/vtgate/planbuilder/plan_test.go @@ -649,21 +649,12 @@ func createFkDefinition(childCols []string, parentTableName string, parentCols [ } } -type ( - planTest struct { - Comment string `json:"comment,omitempty"` - Query string `json:"query,omitempty"` - Plan json.RawMessage `json:"plan,omitempty"` - Skip bool `json:"skip,omitempty"` - } -) - func (s *planTestSuite) testFile(filename string, vschema *vschemawrapper.VSchemaWrapper, render bool) { opts := jsondiff.DefaultConsoleOptions() s.T().Run(filename, func(t *testing.T) { failed := false - var expected []planTest + var expected []PlanTest for _, tcase := range readJSONTests(filename) { testName := tcase.Comment if testName == "" { @@ -672,9 +663,10 @@ func (s *planTestSuite) testFile(filename string, vschema *vschemawrapper.VSchem if tcase.Query == "" { continue } - current := planTest{ - Comment: testName, + current := PlanTest{ + Comment: tcase.Comment, Query: tcase.Query, + SkipE2E: tcase.SkipE2E, } vschema.Version = Gen4 out := getPlanOutput(tcase, vschema, render) @@ -720,8 +712,8 @@ func (s *planTestSuite) testFile(filename string, vschema *vschemawrapper.VSchem }) } -func readJSONTests(filename string) []planTest { - var output []planTest +func readJSONTests(filename string) []PlanTest { + var output []PlanTest file, err := os.Open(locateFile(filename)) if err != nil { panic(err) @@ -735,7 +727,7 @@ func readJSONTests(filename string) []planTest { return output } -func getPlanOutput(tcase planTest, vschema *vschemawrapper.VSchemaWrapper, render bool) (out string) { +func getPlanOutput(tcase PlanTest, vschema *vschemawrapper.VSchemaWrapper, render bool) (out string) { defer func() { if r := recover(); r != nil { out = fmt.Sprintf("panicked: %v\n%s", r, string(debug.Stack())) @@ -867,7 +859,7 @@ func BenchmarkBaselineVsMirrored(b *testing.B) { }) } -func benchmarkPlanner(b *testing.B, version plancontext.PlannerVersion, testCases []planTest, vschema *vschemawrapper.VSchemaWrapper) { +func benchmarkPlanner(b *testing.B, version plancontext.PlannerVersion, 
testCases []PlanTest, vschema *vschemawrapper.VSchemaWrapper) { b.ReportAllocs() for n := 0; n < b.N; n++ { for _, tcase := range testCases { diff --git a/go/vt/vtgate/planbuilder/test_helper.go b/go/vt/vtgate/planbuilder/test_helper.go new file mode 100644 index 00000000000..25d6b7306d1 --- /dev/null +++ b/go/vt/vtgate/planbuilder/test_helper.go @@ -0,0 +1,27 @@ +/* +Copyright 2024 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package planbuilder + +import "encoding/json" + +type PlanTest struct { + Comment string `json:"comment,omitempty"` + Query string `json:"query,omitempty"` + Plan json.RawMessage `json:"plan,omitempty"` + Skip bool `json:"skip,omitempty"` + SkipE2E bool `json:"skip_e2e,omitempty"` +} diff --git a/go/vt/vtgate/planbuilder/testdata/aggr_cases.json b/go/vt/vtgate/planbuilder/testdata/aggr_cases.json index 8b268e367dd..49a03a8f05a 100644 --- a/go/vt/vtgate/planbuilder/testdata/aggr_cases.json +++ b/go/vt/vtgate/planbuilder/testdata/aggr_cases.json @@ -940,19 +940,44 @@ "Table": "`user`, user_extra" }, { - "OperatorType": "Route", + "OperatorType": "VindexLookup", "Variant": "EqualUnique", "Keyspace": { "Name": "user", "Sharded": true }, - "FieldQuery": "select music.`name` from music where 1 != 1", - "Query": "select music.`name` from music where music.id = :user_id", - "Table": "music", "Values": [ ":user_id" ], - "Vindex": "music_user_map" + "Vindex": "music_user_map", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "IN", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1", + "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals", + "Table": "name_user_vdx", + "Values": [ + "::name" + ], + "Vindex": "user_index" + }, + { + "OperatorType": "Route", + "Variant": "ByDestination", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select music.`name` from music where 1 != 1", + "Query": "select music.`name` from music where music.id = :user_id", + "Table": "music" + } + ] } ] } @@ -2992,19 +3017,44 @@ ] }, { - "OperatorType": "Route", + "OperatorType": "VindexLookup", "Variant": "EqualUnique", "Keyspace": { "Name": "user", "Sharded": true }, - "FieldQuery": "select 1 from music as m where 1 != 1", - "Query": "select 1 from music as m where m.id = :u2_val2", - "Table": "music", "Values": [ ":u2_val2" ], - "Vindex": "music_user_map" + "Vindex": "music_user_map", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "IN", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1", + "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals", + "Table": "name_user_vdx", + "Values": [ + "::name" + ], + "Vindex": "user_index" + }, + { + "OperatorType": "Route", + "Variant": "ByDestination", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select 1 from music as m where 1 != 1", + "Query": "select 1 from music 
as m where m.id = :u2_val2", + "Table": "music" + } + ] } ] } diff --git a/go/vt/vtgate/planbuilder/testdata/filter_cases.json b/go/vt/vtgate/planbuilder/testdata/filter_cases.json index 4194a369bd6..edce4ebd0cb 100644 --- a/go/vt/vtgate/planbuilder/testdata/filter_cases.json +++ b/go/vt/vtgate/planbuilder/testdata/filter_cases.json @@ -3404,19 +3404,44 @@ "QueryType": "SELECT", "Original": "select * from multicolvin where column_b = 1", "Instructions": { - "OperatorType": "Route", + "OperatorType": "VindexLookup", "Variant": "EqualUnique", "Keyspace": { "Name": "user", "Sharded": true }, - "FieldQuery": "select * from multicolvin where 1 != 1", - "Query": "select * from multicolvin where column_b = 1", - "Table": "multicolvin", "Values": [ "1" ], - "Vindex": "colb_colc_map" + "Vindex": "colb_colc_map", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "IN", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select colb, keyspace_id from colb_colc_map where 1 != 1", + "Query": "select colb, keyspace_id from colb_colc_map where colb in ::__vals", + "Table": "colb_colc_map", + "Values": [ + "::colb" + ], + "Vindex": "hash" + }, + { + "OperatorType": "Route", + "Variant": "ByDestination", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select * from multicolvin where 1 != 1", + "Query": "select * from multicolvin where column_b = 1", + "Table": "multicolvin" + } + ] }, "TablesUsed": [ "user.multicolvin" @@ -3430,19 +3455,44 @@ "QueryType": "SELECT", "Original": "select * from multicolvin where column_b = 1 and column_c = 2", "Instructions": { - "OperatorType": "Route", + "OperatorType": "VindexLookup", "Variant": "EqualUnique", "Keyspace": { "Name": "user", "Sharded": true }, - "FieldQuery": "select * from multicolvin where 1 != 1", - "Query": "select * from multicolvin where column_b = 1 and column_c = 2", - "Table": "multicolvin", "Values": [ "1" ], - "Vindex": "colb_colc_map" + "Vindex": "colb_colc_map", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "IN", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select colb, keyspace_id from colb_colc_map where 1 != 1", + "Query": "select colb, keyspace_id from colb_colc_map where colb in ::__vals", + "Table": "colb_colc_map", + "Values": [ + "::colb" + ], + "Vindex": "hash" + }, + { + "OperatorType": "Route", + "Variant": "ByDestination", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select * from multicolvin where 1 != 1", + "Query": "select * from multicolvin where column_b = 1 and column_c = 2", + "Table": "multicolvin" + } + ] }, "TablesUsed": [ "user.multicolvin" @@ -3456,19 +3506,44 @@ "QueryType": "SELECT", "Original": "select * from multicolvin where column_b = 1 and column_c = 2 and column_a = 3", "Instructions": { - "OperatorType": "Route", + "OperatorType": "VindexLookup", "Variant": "EqualUnique", "Keyspace": { "Name": "user", "Sharded": true }, - "FieldQuery": "select * from multicolvin where 1 != 1", - "Query": "select * from multicolvin where column_b = 1 and column_c = 2 and column_a = 3", - "Table": "multicolvin", "Values": [ "1" ], - "Vindex": "colb_colc_map" + "Vindex": "colb_colc_map", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "IN", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select colb, keyspace_id from colb_colc_map where 1 != 1", + "Query": "select colb, keyspace_id from colb_colc_map where colb in ::__vals", + "Table": "colb_colc_map", + "Values": [ + 
"::colb" + ], + "Vindex": "hash" + }, + { + "OperatorType": "Route", + "Variant": "ByDestination", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select * from multicolvin where 1 != 1", + "Query": "select * from multicolvin where column_b = 1 and column_c = 2 and column_a = 3", + "Table": "multicolvin" + } + ] }, "TablesUsed": [ "user.multicolvin" @@ -3482,19 +3557,44 @@ "QueryType": "SELECT", "Original": "select * from multicolvin where column_a = 3 and column_b = 1", "Instructions": { - "OperatorType": "Route", + "OperatorType": "VindexLookup", "Variant": "EqualUnique", "Keyspace": { "Name": "user", "Sharded": true }, - "FieldQuery": "select * from multicolvin where 1 != 1", - "Query": "select * from multicolvin where column_a = 3 and column_b = 1", - "Table": "multicolvin", "Values": [ "1" ], - "Vindex": "colb_colc_map" + "Vindex": "colb_colc_map", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "IN", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select colb, keyspace_id from colb_colc_map where 1 != 1", + "Query": "select colb, keyspace_id from colb_colc_map where colb in ::__vals", + "Table": "colb_colc_map", + "Values": [ + "::colb" + ], + "Vindex": "hash" + }, + { + "OperatorType": "Route", + "Variant": "ByDestination", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select * from multicolvin where 1 != 1", + "Query": "select * from multicolvin where column_a = 3 and column_b = 1", + "Table": "multicolvin" + } + ] }, "TablesUsed": [ "user.multicolvin" diff --git a/go/vt/vtgate/planbuilder/testdata/from_cases.json b/go/vt/vtgate/planbuilder/testdata/from_cases.json index 2e0fe429c1f..bec64fd7b1e 100644 --- a/go/vt/vtgate/planbuilder/testdata/from_cases.json +++ b/go/vt/vtgate/planbuilder/testdata/from_cases.json @@ -4709,19 +4709,44 @@ ] }, { - "OperatorType": "Route", + "OperatorType": "VindexLookup", "Variant": "EqualUnique", "Keyspace": { "Name": "user", "Sharded": true }, - "FieldQuery": "select 1 from music as m where 1 != 1", - "Query": "select 1 from music as m where m.user_id = 5 and m.id = 20 and m.col = :u_col /* INT16 */", - "Table": "music", "Values": [ "20" ], - "Vindex": "music_user_map" + "Vindex": "music_user_map", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "IN", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1", + "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals", + "Table": "name_user_vdx", + "Values": [ + "::name" + ], + "Vindex": "user_index" + }, + { + "OperatorType": "Route", + "Variant": "ByDestination", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select 1 from music as m where 1 != 1", + "Query": "select 1 from music as m where m.user_id = 5 and m.id = 20 and m.col = :u_col /* INT16 */", + "Table": "music" + } + ] } ] }, diff --git a/go/vt/vtgate/planbuilder/testdata/memory_sort_cases.json b/go/vt/vtgate/planbuilder/testdata/memory_sort_cases.json index 060f073a366..a35949cd4c1 100644 --- a/go/vt/vtgate/planbuilder/testdata/memory_sort_cases.json +++ b/go/vt/vtgate/planbuilder/testdata/memory_sort_cases.json @@ -318,19 +318,44 @@ "Vindex": "user_index" }, { - "OperatorType": "Route", + "OperatorType": "VindexLookup", "Variant": "EqualUnique", "Keyspace": { "Name": "user", "Sharded": true }, - "FieldQuery": "select music.col3 as c, weight_string(music.col3) from music where 1 != 1", - "Query": "select music.col3 
as c, weight_string(music.col3) from music where music.id = :user_id", - "Table": "music", "Values": [ ":user_id" ], - "Vindex": "music_user_map" + "Vindex": "music_user_map", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "IN", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1", + "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals", + "Table": "name_user_vdx", + "Values": [ + "::name" + ], + "Vindex": "user_index" + }, + { + "OperatorType": "Route", + "Variant": "ByDestination", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select music.col3 as c, weight_string(music.col3) from music where 1 != 1", + "Query": "select music.col3 as c, weight_string(music.col3) from music where music.id = :user_id", + "Table": "music" + } + ] } ] } @@ -379,19 +404,44 @@ "Vindex": "user_index" }, { - "OperatorType": "Route", + "OperatorType": "VindexLookup", "Variant": "EqualUnique", "Keyspace": { "Name": "user", "Sharded": true }, - "FieldQuery": "select music.col3, weight_string(music.col3) from music where 1 != 1", - "Query": "select music.col3, weight_string(music.col3) from music where music.id = :user_id", - "Table": "music", "Values": [ ":user_id" ], - "Vindex": "music_user_map" + "Vindex": "music_user_map", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "IN", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1", + "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals", + "Table": "name_user_vdx", + "Values": [ + "::name" + ], + "Vindex": "user_index" + }, + { + "OperatorType": "Route", + "Variant": "ByDestination", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select music.col3, weight_string(music.col3) from music where 1 != 1", + "Query": "select music.col3, weight_string(music.col3) from music where music.id = :user_id", + "Table": "music" + } + ] } ] } diff --git a/go/vt/vtgate/planbuilder/testdata/postprocess_cases.json b/go/vt/vtgate/planbuilder/testdata/postprocess_cases.json index 36f1472007d..6a8e94c0241 100644 --- a/go/vt/vtgate/planbuilder/testdata/postprocess_cases.json +++ b/go/vt/vtgate/planbuilder/testdata/postprocess_cases.json @@ -544,19 +544,44 @@ "Vindex": "user_index" }, { - "OperatorType": "Route", + "OperatorType": "VindexLookup", "Variant": "EqualUnique", "Keyspace": { "Name": "user", "Sharded": true }, - "FieldQuery": "select music.col3 from music where 1 != 1", - "Query": "select music.col3 from music where music.id = :user_id", - "Table": "music", "Values": [ ":user_id" ], - "Vindex": "music_user_map" + "Vindex": "music_user_map", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "IN", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1", + "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals", + "Table": "name_user_vdx", + "Values": [ + "::name" + ], + "Vindex": "user_index" + }, + { + "OperatorType": "Route", + "Variant": "ByDestination", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select music.col3 from music where 1 != 1", + "Query": "select music.col3 from music where music.id = :user_id", + "Table": "music" + } + ] } ] }, @@ -597,19 +622,44 @@ "Vindex": "user_index" }, { - "OperatorType": "Route", + "OperatorType": "VindexLookup", 
"Variant": "EqualUnique", "Keyspace": { "Name": "user", "Sharded": true }, - "FieldQuery": "select music.col3 from music where 1 != 1", - "Query": "select music.col3 from music where music.id = :user_id", - "Table": "music", "Values": [ ":user_id" ], - "Vindex": "music_user_map" + "Vindex": "music_user_map", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "IN", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1", + "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals", + "Table": "name_user_vdx", + "Values": [ + "::name" + ], + "Vindex": "user_index" + }, + { + "OperatorType": "Route", + "Variant": "ByDestination", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select music.col3 from music where 1 != 1", + "Query": "select music.col3 from music where music.id = :user_id", + "Table": "music" + } + ] } ] }, @@ -650,19 +700,44 @@ "Vindex": "user_index" }, { - "OperatorType": "Route", + "OperatorType": "VindexLookup", "Variant": "EqualUnique", "Keyspace": { "Name": "user", "Sharded": true }, - "FieldQuery": "select music.col3 from music where 1 != 1", - "Query": "select music.col3 from music where music.id = :user_id", - "Table": "music", "Values": [ ":user_id" ], - "Vindex": "music_user_map" + "Vindex": "music_user_map", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "IN", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1", + "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals", + "Table": "name_user_vdx", + "Values": [ + "::name" + ], + "Vindex": "user_index" + }, + { + "OperatorType": "Route", + "Variant": "ByDestination", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select music.col3 from music where 1 != 1", + "Query": "select music.col3 from music where music.id = :user_id", + "Table": "music" + } + ] } ] }, @@ -770,19 +845,44 @@ "Vindex": "user_index" }, { - "OperatorType": "Route", + "OperatorType": "VindexLookup", "Variant": "EqualUnique", "Keyspace": { "Name": "user", "Sharded": true }, - "FieldQuery": "select music.col3 from music where 1 != 1", - "Query": "select music.col3 from music where music.id = :user_id", - "Table": "music", "Values": [ ":user_id" ], - "Vindex": "music_user_map" + "Vindex": "music_user_map", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "IN", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1", + "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals", + "Table": "name_user_vdx", + "Values": [ + "::name" + ], + "Vindex": "user_index" + }, + { + "OperatorType": "Route", + "Variant": "ByDestination", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select music.col3 from music where 1 != 1", + "Query": "select music.col3 from music where music.id = :user_id", + "Table": "music" + } + ] } ] }, diff --git a/go/vt/vtgate/planbuilder/testdata/schemas/main.sql b/go/vt/vtgate/planbuilder/testdata/schemas/main.sql new file mode 100644 index 00000000000..8c15b99218c --- /dev/null +++ b/go/vt/vtgate/planbuilder/testdata/schemas/main.sql @@ -0,0 +1,12 @@ +CREATE TABLE `unsharded` ( + `id` INT NOT NULL PRIMARY KEY, + `col1` VARCHAR(255) DEFAULT NULL, + `col2` VARCHAR(255) DEFAULT NULL, + `name` VARCHAR(255) DEFAULT NULL +); + +CREATE 
TABLE `unsharded_auto` ( + `id` INT NOT NULL PRIMARY KEY, + `col1` VARCHAR(255) DEFAULT NULL, + `col2` VARCHAR(255) DEFAULT NULL +); \ No newline at end of file diff --git a/go/vt/vtgate/planbuilder/testdata/schemas/user.sql b/go/vt/vtgate/planbuilder/testdata/schemas/user.sql new file mode 100644 index 00000000000..55f4078557a --- /dev/null +++ b/go/vt/vtgate/planbuilder/testdata/schemas/user.sql @@ -0,0 +1,100 @@ +CREATE TABLE user +( + id INT PRIMARY KEY, + col BIGINT, + predef1 VARCHAR(255), + predef2 VARCHAR(255), + textcol1 VARCHAR(255), + intcol BIGINT, + textcol2 VARCHAR(255) +); + +CREATE TABLE user_metadata +( + user_id INT, + email VARCHAR(255), + address VARCHAR(255), + md5 VARCHAR(255), + non_planable VARCHAR(255), + PRIMARY KEY (user_id) +); + +CREATE TABLE music +( + user_id INT, + id INT, + PRIMARY KEY (user_id) +); + +CREATE TABLE samecolvin +( + col VARCHAR(255), + PRIMARY KEY (col) +); + +CREATE TABLE multicolvin +( + kid INT, + column_a VARCHAR(255), + column_b VARCHAR(255), + column_c VARCHAR(255), + PRIMARY KEY (kid) +); + +CREATE TABLE customer +( + id INT, + email VARCHAR(255), + phone VARCHAR(255), + PRIMARY KEY (id) +); + +CREATE TABLE multicol_tbl +( + cola VARCHAR(255), + colb VARCHAR(255), + colc VARCHAR(255), + name VARCHAR(255), + PRIMARY KEY (cola, colb) +); + +CREATE TABLE mixed_tbl +( + shard_key VARCHAR(255), + lkp_key VARCHAR(255), + PRIMARY KEY (shard_key) +); + +CREATE TABLE pin_test +( + id INT PRIMARY KEY +); + +CREATE TABLE cfc_vindex_col +( + c1 VARCHAR(255), + c2 VARCHAR(255), + PRIMARY KEY (c1) +); + +CREATE TABLE unq_lkp_idx +( + unq_key INT PRIMARY KEY, + keyspace_id VARCHAR(255) +); + +CREATE TABLE t1 +( + c1 INT, + c2 INT, + c3 INT, + PRIMARY KEY (c1) +); + +CREATE TABLE authoritative +( + user_id bigint NOT NULL, + col1 VARCHAR(255), + col2 bigint, + PRIMARY KEY (user_id) +) ENGINE=InnoDB; \ No newline at end of file diff --git a/go/vt/vtgate/planbuilder/testdata/select_cases.json b/go/vt/vtgate/planbuilder/testdata/select_cases.json index ab69df2cc47..eac13216380 100644 --- a/go/vt/vtgate/planbuilder/testdata/select_cases.json +++ b/go/vt/vtgate/planbuilder/testdata/select_cases.json @@ -92,7 +92,8 @@ "user.user", "user.user_metadata" ] - } + }, + "skip_e2e": true }, { "comment": "select with timeout directive sets QueryTimeout in the route", @@ -197,7 +198,8 @@ "TablesUsed": [ "main.unsharded" ] - } + }, + "skip_e2e": true }, { "comment": "select with partial scatter directive", @@ -402,7 +404,8 @@ { "comment": "test table lookup failure for authoritative code path", "query": "select a.* from authoritative", - "plan": "Unknown table 'a'" + "plan": "Unknown table 'a'", + "skip_e2e": true }, { "comment": "select * from qualified authoritative table", @@ -470,7 +473,8 @@ "user.authoritative", "user.user" ] - } + }, + "skip_e2e": true }, { "comment": "auto-resolve anonymous columns for simple route", @@ -493,7 +497,8 @@ "user.user", "user.user_extra" ] - } + }, + "skip_e2e": true }, { "comment": "json_arrayagg in single sharded query", @@ -519,7 +524,8 @@ "TablesUsed": [ "user.user" ] - } + }, + "skip_e2e": true }, { "comment": "json_objectagg in single sharded query", @@ -545,17 +551,20 @@ "TablesUsed": [ "user.user" ] - } + }, + "skip_e2e": true }, { "comment": "unsupported json aggregation expressions in scatter query", "query": "select count(1) from user where cola = 'abc' group by n_id having json_arrayagg(a_id) = '[]'", - "plan": "VT12001: unsupported: in scatter query: aggregation function 'json_arrayagg(a_id)'" + "plan": "VT12001: 
unsupported: in scatter query: aggregation function 'json_arrayagg(a_id)'", + "skip_e2e": true }, { "comment": "Cannot auto-resolve for cross-shard joins", "query": "select col from user join user_extra", - "plan": "Column 'col' in field list is ambiguous" + "plan": "Column 'col' in field list is ambiguous", + "skip_e2e": true }, { "comment": "Auto-resolve should work if unique vindex columns are referenced", @@ -597,7 +606,8 @@ "user.user", "user.user_extra" ] - } + }, + "skip_e2e": true }, { "comment": "database calls should be substituted", @@ -619,7 +629,8 @@ "TablesUsed": [ "main.dual" ] - } + }, + "skip_e2e": true }, { "comment": "last_insert_id for unsharded route", @@ -641,7 +652,8 @@ "TablesUsed": [ "main.unsharded" ] - } + }, + "skip_e2e": true }, { "comment": "select from dual on unqualified keyspace", @@ -694,7 +706,8 @@ { "comment": "prefixing dual with a keyspace should not work", "query": "select 1 from user.dual", - "plan": "table dual not found" + "plan": "table dual not found", + "skip_e2e": true }, { "comment": "RHS route referenced", @@ -736,7 +749,8 @@ "user.user", "user.user_extra" ] - } + }, + "skip_e2e": true }, { "comment": "Both routes referenced", @@ -778,7 +792,8 @@ "user.user", "user.user_extra" ] - } + }, + "skip_e2e": true }, { "comment": "Expression with single-route reference", @@ -820,7 +835,8 @@ "user.user", "user.user_extra" ] - } + }, + "skip_e2e": true }, { "comment": "subquery with an aggregation in order by that can be merged into a single route", @@ -847,7 +863,8 @@ "user.user", "user.user_extra" ] - } + }, + "skip_e2e": true }, { "comment": "Jumbled references", @@ -889,7 +906,8 @@ "user.user", "user.user_extra" ] - } + }, + "skip_e2e": true }, { "comment": "Comments", @@ -931,7 +949,8 @@ "user.user", "user.user_extra" ] - } + }, + "skip_e2e": true }, { "comment": "for update", @@ -973,7 +992,8 @@ "user.user", "user.user_extra" ] - } + }, + "skip_e2e": true }, { "comment": "Field query should work for joins select bind vars", @@ -1018,7 +1038,8 @@ "main.unsharded", "user.user" ] - } + }, + "skip_e2e": true }, { "comment": "Case preservation", @@ -1060,12 +1081,14 @@ "user.user", "user.user_extra" ] - } + }, + "skip_e2e": true }, { "comment": "syntax error", "query": "the quick brown fox", - "plan": "syntax error at position 4 near 'the'" + "plan": "syntax error at position 4 near 'the'", + "skip_e2e": true }, { "comment": "Hex number is not treated as a simple value", @@ -1113,7 +1136,8 @@ "TablesUsed": [ "user.user" ] - } + }, + "skip_e2e": true }, { "comment": "Selection but make the planner explicitly use a vindex", @@ -1164,12 +1188,14 @@ "TablesUsed": [ "user.user" ] - } + }, + "skip_e2e": true }, { "comment": "Vindex hint on a non-existing vindex", "query": "select * from user use vindex (does_not_exist) where id = 1", - "plan": "VT09021: Vindex 'does_not_exist' does not exist in table 'user.user'" + "plan": "VT09021: Vindex 'does_not_exist' does not exist in table 'user.user'", + "skip_e2e": true }, { "comment": "sharded limit offset", @@ -1231,7 +1257,8 @@ "TablesUsed": [ "user.music" ] - } + }, + "skip_e2e": true }, { "comment": "Sharding Key Condition in Parenthesis", @@ -1257,7 +1284,8 @@ "TablesUsed": [ "user.user" ] - } + }, + "skip_e2e": true }, { "comment": "Multiple parenthesized expressions", @@ -1283,7 +1311,8 @@ "TablesUsed": [ "user.user" ] - } + }, + "skip_e2e": true }, { "comment": "Multiple parenthesized expressions", @@ -1309,7 +1338,8 @@ "TablesUsed": [ "user.user" ] - } + }, + "skip_e2e": true }, { "comment": "Column 
Aliasing with Table.Column", @@ -1387,7 +1417,8 @@ "TablesUsed": [ "user.user" ] - } + }, + "skip_e2e": true }, { "comment": "Column as boolean-ish", @@ -1413,7 +1444,8 @@ "TablesUsed": [ "user.user" ] - } + }, + "skip_e2e": true }, { "comment": "PK as fake boolean, and column as boolean-ish", @@ -1439,7 +1471,8 @@ "TablesUsed": [ "user.user" ] - } + }, + "skip_e2e": true }, { "comment": "top level subquery in select", @@ -1484,7 +1517,8 @@ "main.unsharded", "user.user" ] - } + }, + "skip_e2e": true }, { "comment": "sub-expression subquery in select", @@ -1529,7 +1563,8 @@ "main.unsharded", "user.user" ] - } + }, + "skip_e2e": true }, { "comment": "select * from derived table expands specific columns", @@ -1571,17 +1606,20 @@ "user.user", "user.user_extra" ] - } + }, + "skip_e2e": true }, { "comment": "duplicate columns not allowed in derived table", "query": "select * from (select user.id, user_extra.id from user join user_extra) as t", - "plan": "Duplicate column name 'id'" + "plan": "Duplicate column name 'id'", + "skip_e2e": true }, { "comment": "non-existent symbol in cross-shard derived table", "query": "select t.col from (select user.id from user join user_extra) as t", - "plan": "column 't.col' not found" + "plan": "column 't.col' not found", + "skip_e2e": true }, { "comment": "union with the same target shard", @@ -1608,7 +1646,8 @@ "user.music", "user.user" ] - } + }, + "skip_e2e": true }, { "comment": "union with the same target shard last_insert_id", @@ -1635,7 +1674,8 @@ "user.music", "user.user" ] - } + }, + "skip_e2e": true }, { "comment": "unsharded union in derived table", @@ -1793,7 +1833,8 @@ "TablesUsed": [ "main.unsharded" ] - } + }, + "skip_e2e": true }, { "comment": "routing table on music", @@ -1815,7 +1856,8 @@ "TablesUsed": [ "user.music" ] - } + }, + "skip_e2e": true }, { "comment": "testing SingleRow Projection", @@ -1962,7 +2004,8 @@ "main.unsharded_a", "user.user" ] - } + }, + "skip_e2e": true }, { "comment": "Complex expression in a subquery used in NOT IN clause of an aggregate query", @@ -2015,7 +2058,8 @@ "main.unsharded_a", "user.user" ] - } + }, + "skip_e2e": true }, { "comment": "testing SingleRow Projection with arithmetics", @@ -2218,12 +2262,14 @@ { "comment": "sql_calc_found_rows in sub queries", "query": "select * from music where user_id IN (select sql_calc_found_rows * from music limit 10)", - "plan": "Incorrect usage/placement of 'SQL_CALC_FOUND_ROWS'" + "plan": "Incorrect usage/placement of 'SQL_CALC_FOUND_ROWS'", + "skip_e2e": true }, { "comment": "sql_calc_found_rows in derived table", "query": "select sql_calc_found_rows * from (select sql_calc_found_rows * from music limit 10) t limit 1", - "plan": "Incorrect usage/placement of 'SQL_CALC_FOUND_ROWS'" + "plan": "Incorrect usage/placement of 'SQL_CALC_FOUND_ROWS'", + "skip_e2e": true }, { "comment": "select from unsharded keyspace into dumpfile", @@ -2245,7 +2291,8 @@ "TablesUsed": [ "main.unsharded" ] - } + }, + "skip_e2e": true }, { "comment": "select from unsharded keyspace into outfile", @@ -2267,7 +2314,8 @@ "TablesUsed": [ "main.unsharded" ] - } + }, + "skip_e2e": true }, { "comment": "select from unsharded keyspace into outfile s3", @@ -2289,7 +2337,8 @@ "TablesUsed": [ "main.unsharded" ] - } + }, + "skip_e2e": true }, { "comment": "left join with a dual table on left - merge-able", @@ -2500,17 +2549,20 @@ { "comment": "Union after into outfile is incorrect", "query": "select id from user into outfile 'out_file_name' union all select id from music", - "plan": "syntax error at position 
55 near 'union'" + "plan": "syntax error at position 55 near 'union'", + "skip_e2e": true }, { "comment": "Into outfile s3 in derived table is incorrect", "query": "select id from (select id from user into outfile s3 'inner_outfile') as t2", - "plan": "syntax error at position 41 near 'into'" + "plan": "syntax error at position 41 near 'into'", + "skip_e2e": true }, { "comment": "Into outfile s3 in derived table with union incorrect", "query": "select id from (select id from user into outfile s3 'inner_outfile' union select 1) as t2", - "plan": "syntax error at position 41 near 'into'" + "plan": "syntax error at position 41 near 'into'", + "skip_e2e": true }, { "comment": "select (select u.id from user as u where u.id = 1), a.id from user as a where a.id = 1", @@ -2579,7 +2631,8 @@ "user.user", "user.user_extra" ] - } + }, + "skip_e2e": true }, { "comment": "((((select 1))))", @@ -2624,7 +2677,8 @@ "main.dual", "user.user" ] - } + }, + "skip_e2e": true }, { "comment": "subquery in select expression of derived table", @@ -2694,7 +2748,8 @@ "user.user", "user.user_extra" ] - } + }, + "skip_e2e": true }, { "comment": "ORDER BY subquery", @@ -2764,7 +2819,8 @@ "user.user", "user.user_extra" ] - } + }, + "skip_e2e": true }, { "comment": "plan test for a natural character set string", @@ -2831,7 +2887,8 @@ "user.user", "user.user_extra" ] - } + }, + "skip_e2e": true }, { "comment": "Straight Join ensures specific ordering of joins", @@ -2876,7 +2933,8 @@ "user.user", "user.user_extra" ] - } + }, + "skip_e2e": true }, { "comment": "Dual query should be handled on the vtgate even with a LIMIT", @@ -2950,7 +3008,8 @@ "user.user", "user.user_extra" ] - } + }, + "skip_e2e": true }, { "comment": "Straight Join preserved in MySQL query", @@ -2973,7 +3032,8 @@ "user.user", "user.user_extra" ] - } + }, + "skip_e2e": true }, { "comment": "correlated subquery in exists clause", @@ -3031,7 +3091,8 @@ "user.user", "user.user_extra" ] - } + }, + "skip_e2e": true }, { "comment": "correlated subquery in exists clause with an order by", @@ -3090,7 +3151,8 @@ "user.user", "user.user_extra" ] - } + }, + "skip_e2e": true }, { "comment": "correlated subquery having dependencies on two tables", @@ -3163,7 +3225,8 @@ "user.user", "user.user_extra" ] - } + }, + "skip_e2e": true }, { "comment": "correlated subquery using a column twice", @@ -3220,7 +3283,8 @@ "user.user", "user.user_extra" ] - } + }, + "skip_e2e": true }, { "comment": "correlated subquery that is dependent on one side of a join, fully mergeable", @@ -3271,7 +3335,8 @@ "user.user", "user.user_extra" ] - } + }, + "skip_e2e": true }, { "comment": "union as a derived table", @@ -3360,7 +3425,8 @@ "user.user", "user.user_extra" ] - } + }, + "skip_e2e": true }, { "comment": "mergeable derived table with order by and limit", @@ -3382,7 +3448,8 @@ "TablesUsed": [ "main.unsharded" ] - } + }, + "skip_e2e": true }, { "comment": "mergeable derived table with group by and limit", @@ -3404,7 +3471,8 @@ "TablesUsed": [ "main.unsharded" ] - } + }, + "skip_e2e": true }, { "comment": "select user.id, trim(leading 'x' from user.name) from user", @@ -3426,7 +3494,8 @@ "TablesUsed": [ "user.user" ] - } + }, + "skip_e2e": true }, { "comment": "json utility functions", @@ -3448,7 +3517,8 @@ "TablesUsed": [ "user.user" ] - } + }, + "skip_e2e": true }, { "comment": "dual query with exists clause", @@ -3546,7 +3616,8 @@ "user.user", "user.user_extra" ] - } + }, + "skip_e2e": true }, { "comment": "yeah, it does not make sense, but it's valid", @@ -3639,7 +3710,8 @@ "TablesUsed": 
[ "user.user" ] - } + }, + "skip_e2e": true }, { "comment": "groupe by with non aggregated columns and table alias", @@ -3661,7 +3733,8 @@ "TablesUsed": [ "user.user" ] - } + }, + "skip_e2e": true }, { "comment": "Functions that return JSON value attributes", @@ -3866,7 +3939,8 @@ "TablesUsed": [ "user.user" ] - } + }, + "skip_e2e": true }, { "comment": "gtid functions", @@ -3934,7 +4008,8 @@ "user.user_extra", "user.user_metadata" ] - } + }, + "skip_e2e": true }, { "comment": "Join across multiple tables, with conditions on different vindexes, but mergeable through join predicates", @@ -3962,7 +4037,8 @@ "user.music_extra", "user.user" ] - } + }, + "skip_e2e": true }, { "comment": "SQL_CALC_FOUND_ROWS with vindex lookup", @@ -4073,7 +4149,8 @@ "TablesUsed": [ "user.user" ] - } + }, + "skip_e2e": true }, { "comment": "`None` route being merged with another route via join predicate on Vindex columns", @@ -4122,7 +4199,8 @@ "TablesUsed": [ "user.music" ] - } + }, + "skip_e2e": true }, { "comment": "Subquery with `IN` condition using columns with matching lookup vindexes", @@ -4200,7 +4278,8 @@ "TablesUsed": [ "user.music" ] - } + }, + "skip_e2e": true }, { "comment": "Subquery with `IN` condition using columns with matching lookup vindexes", @@ -4314,7 +4393,8 @@ "TablesUsed": [ "user.music" ] - } + }, + "skip_e2e": true }, { "comment": "Mergeable scatter subquery with `GROUP BY` on unique vindex column", @@ -4336,7 +4416,8 @@ "TablesUsed": [ "user.music" ] - } + }, + "skip_e2e": true }, { "comment": "Unmergeable scatter subquery with `GROUP BY` on-non vindex column", @@ -4375,26 +4456,52 @@ }, { "InputName": "Outer", - "OperatorType": "Route", + "OperatorType": "VindexLookup", "Variant": "IN", "Keyspace": { "Name": "user", "Sharded": true }, - "FieldQuery": "select music.id from music where 1 != 1", - "Query": "select music.id from music where :__sq_has_values and music.id in ::__vals", - "Table": "music", "Values": [ "::__sq1" ], - "Vindex": "music_user_map" + "Vindex": "music_user_map", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "IN", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1", + "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals", + "Table": "name_user_vdx", + "Values": [ + "::name" + ], + "Vindex": "user_index" + }, + { + "OperatorType": "Route", + "Variant": "ByDestination", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select music.id from music where 1 != 1", + "Query": "select music.id from music where :__sq_has_values and music.id in ::__vals", + "Table": "music" + } + ] } ] }, "TablesUsed": [ "user.music" ] - } + }, + "skip_e2e": true }, { "comment": "Unmergeable scatter subquery with LIMIT", @@ -4430,26 +4537,52 @@ }, { "InputName": "Outer", - "OperatorType": "Route", + "OperatorType": "VindexLookup", "Variant": "IN", "Keyspace": { "Name": "user", "Sharded": true }, - "FieldQuery": "select music.id from music where 1 != 1", - "Query": "select music.id from music where :__sq_has_values and music.id in ::__vals", - "Table": "music", "Values": [ "::__sq1" ], - "Vindex": "music_user_map" + "Vindex": "music_user_map", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "IN", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1", + "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals", + "Table": "name_user_vdx", 
+ "Values": [ + "::name" + ], + "Vindex": "user_index" + }, + { + "OperatorType": "Route", + "Variant": "ByDestination", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select music.id from music where 1 != 1", + "Query": "select music.id from music where :__sq_has_values and music.id in ::__vals", + "Table": "music" + } + ] } ] }, "TablesUsed": [ "user.music" ] - } + }, + "skip_e2e": true }, { "comment": "Mergeable subquery with `MAX` aggregate and grouped by unique vindex", @@ -4483,26 +4616,52 @@ }, { "InputName": "Outer", - "OperatorType": "Route", + "OperatorType": "VindexLookup", "Variant": "IN", "Keyspace": { "Name": "user", "Sharded": true }, - "FieldQuery": "select music.id from music where 1 != 1", - "Query": "select music.id from music where :__sq_has_values and music.id in ::__vals", - "Table": "music", "Values": [ "::__sq1" ], - "Vindex": "music_user_map" + "Vindex": "music_user_map", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "IN", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1", + "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals", + "Table": "name_user_vdx", + "Values": [ + "::name" + ], + "Vindex": "user_index" + }, + { + "OperatorType": "Route", + "Variant": "ByDestination", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select music.id from music where 1 != 1", + "Query": "select music.id from music where :__sq_has_values and music.id in ::__vals", + "Table": "music" + } + ] } ] }, "TablesUsed": [ "user.music" ] - } + }, + "skip_e2e": true }, { "comment": "Unmergeable subquery with `MAX` aggregate", @@ -4543,19 +4702,44 @@ }, { "InputName": "Outer", - "OperatorType": "Route", + "OperatorType": "VindexLookup", "Variant": "IN", "Keyspace": { "Name": "user", "Sharded": true }, - "FieldQuery": "select music.id from music where 1 != 1", - "Query": "select music.id from music where :__sq_has_values and music.id in ::__vals", - "Table": "music", "Values": [ "::__sq1" ], - "Vindex": "music_user_map" + "Vindex": "music_user_map", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "IN", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1", + "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals", + "Table": "name_user_vdx", + "Values": [ + "::name" + ], + "Vindex": "user_index" + }, + { + "OperatorType": "Route", + "Variant": "ByDestination", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select music.id from music where 1 != 1", + "Query": "select music.id from music where :__sq_has_values and music.id in ::__vals", + "Table": "music" + } + ] } ] }, @@ -4596,19 +4780,44 @@ }, { "InputName": "Outer", - "OperatorType": "Route", + "OperatorType": "VindexLookup", "Variant": "IN", "Keyspace": { "Name": "user", "Sharded": true }, - "FieldQuery": "select music.id from music where 1 != 1", - "Query": "select music.id from music where :__sq_has_values and music.id in ::__vals", - "Table": "music", "Values": [ "::__sq1" ], - "Vindex": "music_user_map" + "Vindex": "music_user_map", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "IN", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1", + "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals", + 
"Table": "name_user_vdx", + "Values": [ + "::name" + ], + "Vindex": "user_index" + }, + { + "OperatorType": "Route", + "Variant": "ByDestination", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select music.id from music where 1 != 1", + "Query": "select music.id from music where :__sq_has_values and music.id in ::__vals", + "Table": "music" + } + ] } ] }, @@ -4649,26 +4858,52 @@ }, { "InputName": "Outer", - "OperatorType": "Route", + "OperatorType": "VindexLookup", "Variant": "IN", "Keyspace": { "Name": "user", "Sharded": true }, - "FieldQuery": "select music.id from music where 1 != 1", - "Query": "select music.id from music where :__sq_has_values and music.id in ::__vals", - "Table": "music", "Values": [ "::__sq1" ], - "Vindex": "music_user_map" + "Vindex": "music_user_map", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "IN", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1", + "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals", + "Table": "name_user_vdx", + "Values": [ + "::name" + ], + "Vindex": "user_index" + }, + { + "OperatorType": "Route", + "Variant": "ByDestination", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select music.id from music where 1 != 1", + "Query": "select music.id from music where :__sq_has_values and music.id in ::__vals", + "Table": "music" + } + ] } ] }, "TablesUsed": [ "user.music" ] - } + }, + "skip_e2e": true }, { "comment": "Mergeable subquery with multiple levels of derived statements", @@ -4760,26 +4995,52 @@ }, { "InputName": "Outer", - "OperatorType": "Route", + "OperatorType": "VindexLookup", "Variant": "IN", "Keyspace": { "Name": "user", "Sharded": true }, - "FieldQuery": "select music.id from music where 1 != 1", - "Query": "select music.id from music where :__sq_has_values and music.id in ::__vals", - "Table": "music", "Values": [ "::__sq1" ], - "Vindex": "music_user_map" + "Vindex": "music_user_map", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "IN", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1", + "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals", + "Table": "name_user_vdx", + "Values": [ + "::name" + ], + "Vindex": "user_index" + }, + { + "OperatorType": "Route", + "Variant": "ByDestination", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select music.id from music where 1 != 1", + "Query": "select music.id from music where :__sq_has_values and music.id in ::__vals", + "Table": "music" + } + ] } ] }, "TablesUsed": [ "user.music" ] - } + }, + "skip_e2e": true }, { "comment": "Unmergeable subquery with multiple levels of derived statements", @@ -4815,26 +5076,52 @@ }, { "InputName": "Outer", - "OperatorType": "Route", + "OperatorType": "VindexLookup", "Variant": "IN", "Keyspace": { "Name": "user", "Sharded": true }, - "FieldQuery": "select music.id from music where 1 != 1", - "Query": "select music.id from music where :__sq_has_values and music.id in ::__vals", - "Table": "music", "Values": [ "::__sq1" ], - "Vindex": "music_user_map" + "Vindex": "music_user_map", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "IN", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1", + "Query": "select `name`, keyspace_id from 
name_user_vdx where `name` in ::__vals", + "Table": "name_user_vdx", + "Values": [ + "::name" + ], + "Vindex": "user_index" + }, + { + "OperatorType": "Route", + "Variant": "ByDestination", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select music.id from music where 1 != 1", + "Query": "select music.id from music where :__sq_has_values and music.id in ::__vals", + "Table": "music" + } + ] } ] }, "TablesUsed": [ "user.music" ] - } + }, + "skip_e2e": true }, { "comment": "`None` subquery as top level predicate - outer query changes from `Scatter` to `None` on merge", @@ -5033,7 +5320,8 @@ "user.user", "user.user_extra" ] - } + }, + "skip_e2e": true }, { "comment": "select user.a, t.b from user join (select id, count(*) b, req from user_extra group by req, id) as t on user.id = t.id", @@ -5097,7 +5385,8 @@ "user.user", "user.user_extra" ] - } + }, + "skip_e2e": true }, { "comment": "SELECT music.id FROM (SELECT MAX(id) as maxt FROM music WHERE music.user_id = 5) other JOIN music ON other.maxt = music.id", @@ -5220,7 +5509,8 @@ "main.dual", "main.unsharded_a" ] - } + }, + "skip_e2e": true }, { "comment": "subquery having join table on clause, using column reference of outer select table", @@ -5269,7 +5559,8 @@ "main.unsharded", "main.unsharded_a" ] - } + }, + "skip_e2e": true }, { "comment": "ALL modifier on unsharded table works well", @@ -5292,7 +5583,8 @@ "main.unsharded", "main.unsharded_a" ] - } + }, + "skip_e2e": true }, { "comment": "allow last_insert_id with argument", @@ -5337,7 +5629,8 @@ "user.music_extra", "user.user" ] - } + }, + "skip_e2e": true }, { "comment": "Query with non-plannable lookup vindex", @@ -5363,7 +5656,8 @@ "TablesUsed": [ "user.user_metadata" ] - } + }, + "skip_e2e": true }, { "comment": "join query with lookup and join on different vindex column", @@ -5415,7 +5709,8 @@ "user.user", "user.user_metadata" ] - } + }, + "skip_e2e": true }, { "comment": "pick email as vindex lookup", @@ -5425,7 +5720,7 @@ "Original": "select * from customer where email = 'a@mail.com'", "Instructions": { "OperatorType": "VindexLookup", - "Variant": "Equal", + "Variant": "EqualUnique", "Keyspace": { "Name": "user", "Sharded": true @@ -5510,7 +5805,8 @@ "TablesUsed": [ "user.customer" ] - } + }, + "skip_e2e": true }, { "comment": "email vindex is costly than phone vindex - but phone vindex is backfiling hence ignored", @@ -5520,7 +5816,7 @@ "Original": "select * from customer where email = 'a@mail.com' and phone = 123456", "Instructions": { "OperatorType": "VindexLookup", - "Variant": "Equal", + "Variant": "EqualUnique", "Keyspace": { "Name": "user", "Sharded": true @@ -5571,7 +5867,7 @@ "Original": "select * from customer where phone = 123456 and email = 'a@mail.com'", "Instructions": { "OperatorType": "VindexLookup", - "Variant": "Equal", + "Variant": "EqualUnique", "Keyspace": { "Name": "user", "Sharded": true @@ -5634,7 +5930,8 @@ "TablesUsed": [ "user.samecolvin" ] - } + }, + "skip_e2e": true }, { "comment": "column with qualifier is correctly used", @@ -5677,7 +5974,8 @@ "user.user", "user.user_extra" ] - } + }, + "skip_e2e": true }, { "comment": "Derived tables going to a single shard still need to expand derived table columns", @@ -5722,7 +6020,8 @@ "main.unsharded", "user.user" ] - } + }, + "skip_e2e": true }, { "comment": "column name aliases in outer join queries", @@ -5777,7 +6076,8 @@ "user.user", "user.user_extra" ] - } + }, + "skip_e2e": true }, { "comment": "Over clause works for unsharded tables", @@ -5799,7 +6099,8 @@ "TablesUsed": [ 
"main.unsharded_a" ] - } + }, + "skip_e2e": true }, { "comment": "join with derived table with alias and join condition - merge into route", diff --git a/go/vt/vtgate/planbuilder/testdata/union_cases.json b/go/vt/vtgate/planbuilder/testdata/union_cases.json index 7feabb0a698..2927c1c6093 100644 --- a/go/vt/vtgate/planbuilder/testdata/union_cases.json +++ b/go/vt/vtgate/planbuilder/testdata/union_cases.json @@ -447,34 +447,84 @@ "OperatorType": "Concatenate", "Inputs": [ { - "OperatorType": "Route", + "OperatorType": "VindexLookup", "Variant": "EqualUnique", "Keyspace": { "Name": "user", "Sharded": true }, - "FieldQuery": "select 1 from music where 1 != 1", - "Query": "select distinct 1 from music where id = 1", - "Table": "music", "Values": [ "1" ], - "Vindex": "music_user_map" + "Vindex": "music_user_map", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "IN", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1", + "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals", + "Table": "name_user_vdx", + "Values": [ + "::name" + ], + "Vindex": "user_index" + }, + { + "OperatorType": "Route", + "Variant": "ByDestination", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select 1 from music where 1 != 1", + "Query": "select distinct 1 from music where id = 1", + "Table": "music" + } + ] }, { - "OperatorType": "Route", + "OperatorType": "VindexLookup", "Variant": "EqualUnique", "Keyspace": { "Name": "user", "Sharded": true }, - "FieldQuery": "select 1 from music where 1 != 1", - "Query": "select distinct 1 from music where id = 2", - "Table": "music", "Values": [ "2" ], - "Vindex": "music_user_map" + "Vindex": "music_user_map", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "IN", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1", + "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals", + "Table": "name_user_vdx", + "Values": [ + "::name" + ], + "Vindex": "user_index" + }, + { + "OperatorType": "Route", + "Variant": "ByDestination", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select 1 from music where 1 != 1", + "Query": "select distinct 1 from music where id = 2", + "Table": "music" + } + ] } ] } diff --git a/go/vt/vtgate/planbuilder/testdata/vschemas/schema.json b/go/vt/vtgate/planbuilder/testdata/vschemas/schema.json index 4fe275f2398..a5de9d3697e 100644 --- a/go/vt/vtgate/planbuilder/testdata/vschemas/schema.json +++ b/go/vt/vtgate/planbuilder/testdata/vschemas/schema.json @@ -58,34 +58,52 @@ "sharded": true, "vindexes": { "user_index": { - "type": "hash_test", + "type": "hash", "owner": "user" }, "kid_index": { - "type": "hash_test", + "type": "hash", "owner": "multicolvin" }, + "hash": { + "type": "hash" + }, "user_md5_index": { "type": "unicode_loose_md5" }, "music_user_map": { - "type": "lookup_test", - "owner": "music" + "type": "lookup_unique", + "owner": "music", + "params": { + "table": "name_user_vdx", + "from": "name", + "to": "keyspace_id" + } }, "cola_map": { - "type": "lookup_test", - "owner": "multicolvin" + "type": "lookup_unique", + "owner": "multicolvin", + "params": { + "table": "cola_map", + "from": "cola", + "to": "keyspace_id" + } }, "colb_colc_map": { - "type": "lookup_test", - "owner": "multicolvin" + "type": "lookup_unique", + "owner": "multicolvin", + "params": { + "table": 
"colb_colc_map", + "from": "colb,colc", + "to": "keyspace_id" + } }, "cola_kid_map": { - "type": "lookup_test", + "type": "lookup_unique", "owner": "overlap_vindex" }, "name_user_map": { - "type": "name_lkp_test", + "type": "lookup", "owner": "user", "params": { "table": "name_user_vdx", @@ -94,42 +112,56 @@ } }, "email_user_map": { - "type": "lookup_test", + "type": "lookup_unique", "owner": "user_metadata" }, "address_user_map": { - "type": "lookup_test", + "type": "lookup_unique", "owner": "user_metadata" }, "costly_map": { - "type": "costly", - "owner": "user" + "type": "lookup_cost", + "owner": "user", + "params": { + "table": "costly_map", + "from": "costly", + "to": "keyspace_id", + "cost": "100" + } }, "hash_dup": { - "type": "hash_test", + "type": "hash", "owner": "user" }, "vindex1": { - "type": "hash_test", + "type": "hash", "owner": "samecolvin" }, "vindex2": { - "type": "lookup_test", + "type": "lookup_unique", "owner": "samecolvin" }, "cfc": { "type": "cfc" }, "multicolIdx": { - "type": "multiCol_test" + "type": "multicol", + "params": { + "column_count": "2" + } }, "colc_map": { - "type": "lookup_test", + "type": "lookup_unique", "owner": "multicol_tbl" }, "name_muticoltbl_map": { - "type": "name_lkp_test", - "owner": "multicol_tbl" + "type": "lookup", + "owner": "multicol_tbl", + "params": { + "table": "name_user_vdx", + "from": "name", + "to": "keyspace_id" + } }, "non_planable_user_map": { "type": "lookup_unicodeloosemd5_hash", @@ -141,7 +173,7 @@ "owner": "user_metadata" }, "lkp_shard_map": { - "type": "name_lkp_test", + "type": "lookup_unique", "owner": "mixed_tbl", "params": { "table": "lkp_shard_vdx", @@ -153,18 +185,18 @@ "type": "xxhash" }, "unq_lkp_bf_vdx": { - "type": "unq_lkp_test", + "type": "lookup_unique", "owner": "customer", "params": { "table": "unq_lkp_idx", - "from": " ", + "from": "unq_key", "to": "keyspace_id", "cost": "100", "write_only": "true" } }, "unq_lkp_vdx": { - "type": "unq_lkp_test", + "type": "lookup_unique", "owner": "customer", "params": { "table": "unq_lkp_idx", @@ -174,11 +206,11 @@ } }, "lkp_bf_vdx": { - "type": "name_lkp_test", + "type": "lookup_unique", "owner": "customer", "params": { "table": "lkp_shard_vdx", - "from": " ", + "from": "unq_key", "to": "keyspace_id", "write_only": "true" } @@ -352,6 +384,22 @@ } ] }, + "cola_map": { + "column_vindexes": [ + { + "column": "cola", + "name": "hash" + } + ] + }, + "colb_colc_map": { + "column_vindexes": [ + { + "column": "colb", + "name": "hash" + } + ] + }, "overlap_vindex": { "column_vindexes": [ { @@ -462,6 +510,14 @@ } ] }, + "costly_map": { + "column_vindexes": [ + { + "column": "name", + "name": "user_md5_index" + } + ] + }, "mixed_tbl": { "column_vindexes": [ { @@ -641,7 +697,10 @@ "type": "hash_test" }, "multicolIdx": { - "type": "multiCol_test" + "type": "multicol", + "params": { + "column_count": "3" + } } }, "tables": { diff --git a/go/vt/vtgate/vindexes/cached_size.go b/go/vt/vtgate/vindexes/cached_size.go index a97411a6ac8..eeadb69b532 100644 --- a/go/vt/vtgate/vindexes/cached_size.go +++ b/go/vt/vtgate/vindexes/cached_size.go @@ -175,6 +175,18 @@ func (cached *Keyspace) CachedSize(alloc bool) int64 { size += hack.RuntimeAllocSize(int64(len(cached.Name))) return size } +func (cached *LookupCost) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(16) + } + // field LookupNonUnique *vitess.io/vitess/go/vt/vtgate/vindexes.LookupNonUnique + size += cached.LookupNonUnique.CachedSize(true) + return size +} func 
(cached *LookupHash) CachedSize(alloc bool) int64 { if cached == nil { return int64(0) diff --git a/go/vt/vtgate/vindexes/lookup_cost.go b/go/vt/vtgate/vindexes/lookup_cost.go new file mode 100644 index 00000000000..6556032cea5 --- /dev/null +++ b/go/vt/vtgate/vindexes/lookup_cost.go @@ -0,0 +1,70 @@ +/* +Copyright 2024 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package vindexes + +import ( + "strconv" +) + +var ( + _ SingleColumn = (*LookupCost)(nil) + _ Lookup = (*LookupCost)(nil) + _ LookupPlanable = (*LookupCost)(nil) +) + +func init() { + Register("lookup_cost", newLookupCost) +} + +const defaultCost = 5 + +// LookupCost defines a test vindex that uses the cost provided by the user. +// This is a test vindex. +type LookupCost struct { + *LookupNonUnique + cost int +} + +// Cost returns the cost of this vindex as provided. +func (lc *LookupCost) Cost() int { + return lc.cost +} + +func newLookupCost(name string, m map[string]string) (Vindex, error) { + lookup, err := newLookup(name, m) + if err != nil { + return nil, err + } + cost := getInt(m, "cost", defaultCost) + return &LookupCost{ + LookupNonUnique: lookup.(*LookupNonUnique), + cost: cost, + }, nil + +} + +func getInt(m map[string]string, key string, defaultVal int) int { + val, ok := m[key] + if !ok { + return defaultVal + } + intVal, err := strconv.Atoi(val) + if err != nil { + return defaultVal + } + return intVal +} diff --git a/go/vt/vtgate/vindexes/vschema_test.go b/go/vt/vtgate/vindexes/vschema_test.go index 25f8e135698..f9bcf43ddaa 100644 --- a/go/vt/vtgate/vindexes/vschema_test.go +++ b/go/vt/vtgate/vindexes/vschema_test.go @@ -21,6 +21,7 @@ import ( "encoding/json" "errors" "fmt" + "os" "reflect" "strings" "testing" @@ -3551,6 +3552,20 @@ func TestFindTableWithSequences(t *testing.T) { } } +func TestGlobalTables(t *testing.T) { + input, err := os.ReadFile("../planbuilder/testdata/vschemas/schema.json") + require.NoError(t, err) + + var vs vschemapb.SrvVSchema + err = json2.UnmarshalPB(input, &vs) + require.NoError(t, err) + + got := BuildVSchema(&vs, sqlparser.NewTestParser()) + tbl, err := got.findGlobalTable("user", false) + require.NoError(t, err) + assert.NotNil(t, tbl) +} + func vindexNames(vindexes []*ColumnVindex) (result []string) { for _, vindex := range vindexes { result = append(result, vindex.Name) diff --git a/go/vt/vtgate/vschemaacl/vschemaacl.go b/go/vt/vtgate/vschemaacl/vschemaacl.go index 5345d1437fc..08f6c2b0cd4 100644 --- a/go/vt/vtgate/vschemaacl/vschemaacl.go +++ b/go/vt/vtgate/vschemaacl/vschemaacl.go @@ -18,26 +18,67 @@ package vschemaacl import ( "strings" - "sync" "github.com/spf13/pflag" + "github.com/spf13/viper" - "vitess.io/vitess/go/vt/servenv" - + "vitess.io/vitess/go/viperutil" querypb "vitess.io/vitess/go/vt/proto/query" + "vitess.io/vitess/go/vt/servenv" ) -var ( - // AuthorizedDDLUsers specifies the users that can perform ddl operations - AuthorizedDDLUsers string - - // ddlAllowAll is true if the special value of "*" was specified +type authorizedDDLUsers struct { allowAll 
bool + acl map[string]struct{} + source string +} + +func NewAuthorizedDDLUsers(users string) *authorizedDDLUsers { + acl := make(map[string]struct{}) + allowAll := false + + switch users { + case "": + case "%": + allowAll = true + default: + for _, user := range strings.Split(users, ",") { + user = strings.TrimSpace(user) + acl[user] = struct{}{} + } + } + + return &authorizedDDLUsers{ + allowAll: allowAll, + acl: acl, + source: users, + } +} - // ddlACL contains a set of allowed usernames - acl map[string]struct{} +func (a *authorizedDDLUsers) String() string { + return a.source +} - initMu sync.Mutex +var ( + // AuthorizedDDLUsers specifies the users that can perform ddl operations + AuthorizedDDLUsers = viperutil.Configure( + "vschema_ddl_authorized_users", + viperutil.Options[*authorizedDDLUsers]{ + FlagName: "vschema_ddl_authorized_users", + Default: &authorizedDDLUsers{}, + Dynamic: true, + GetFunc: func(v *viper.Viper) func(key string) *authorizedDDLUsers { + return func(key string) *authorizedDDLUsers { + newVal := v.GetString(key) + curVal, ok := v.Get(key).(*authorizedDDLUsers) + if ok && newVal == curVal.source { + return curVal + } + return NewAuthorizedDDLUsers(newVal) + } + }, + }, + ) ) // RegisterSchemaACLFlags installs log flags on the given FlagSet. @@ -46,7 +87,8 @@ var ( // calls this function, or call this function directly before parsing // command-line arguments. func RegisterSchemaACLFlags(fs *pflag.FlagSet) { - fs.StringVar(&AuthorizedDDLUsers, "vschema_ddl_authorized_users", AuthorizedDDLUsers, "List of users authorized to execute vschema ddl operations, or '%' to allow all users.") + fs.String("vschema_ddl_authorized_users", "", "List of users authorized to execute vschema ddl operations, or '%' to allow all users.") + viperutil.BindFlags(fs, AuthorizedDDLUsers) } func init() { @@ -55,33 +97,14 @@ func init() { } } -// Init parses the users option and sets allowAll / acl accordingly -func Init() { - initMu.Lock() - defer initMu.Unlock() - acl = make(map[string]struct{}) - allowAll = false - - if AuthorizedDDLUsers == "%" { - allowAll = true - return - } else if AuthorizedDDLUsers == "" { - return - } - - for _, user := range strings.Split(AuthorizedDDLUsers, ",") { - user = strings.TrimSpace(user) - acl[user] = struct{}{} - } -} - // Authorized returns true if the given caller is allowed to execute vschema operations func Authorized(caller *querypb.VTGateCallerID) bool { - if allowAll { + users := AuthorizedDDLUsers.Get() + if users.allowAll { return true } user := caller.GetUsername() - _, ok := acl[user] + _, ok := users.acl[user] return ok } diff --git a/go/vt/vtgate/vschemaacl/vschemaacl_test.go b/go/vt/vtgate/vschemaacl/vschemaacl_test.go index faa2dbfc294..cfd1de705af 100644 --- a/go/vt/vtgate/vschemaacl/vschemaacl_test.go +++ b/go/vt/vtgate/vschemaacl/vschemaacl_test.go @@ -35,8 +35,7 @@ func TestVschemaAcl(t *testing.T) { } // Test wildcard - AuthorizedDDLUsers = "%" - Init() + AuthorizedDDLUsers.Set(NewAuthorizedDDLUsers("%")) if !Authorized(&redUser) { t.Errorf("user should be authorized") @@ -46,8 +45,7 @@ func TestVschemaAcl(t *testing.T) { } // Test user list - AuthorizedDDLUsers = "oneUser, twoUser, redUser, blueUser" - Init() + AuthorizedDDLUsers.Set(NewAuthorizedDDLUsers("oneUser, twoUser, redUser, blueUser")) if !Authorized(&redUser) { t.Errorf("user should be authorized") @@ -57,8 +55,7 @@ func TestVschemaAcl(t *testing.T) { } // Revert to baseline state for other tests - AuthorizedDDLUsers = "" - Init() + 
AuthorizedDDLUsers.Set(NewAuthorizedDDLUsers("")) // By default no users are allowed in if Authorized(&redUser) { diff --git a/go/vt/vtorc/logic/keyspace_shard_discovery.go b/go/vt/vtorc/logic/keyspace_shard_discovery.go index b1e93fe2a01..0dd17cb65fd 100644 --- a/go/vt/vtorc/logic/keyspace_shard_discovery.go +++ b/go/vt/vtorc/logic/keyspace_shard_discovery.go @@ -29,17 +29,16 @@ import ( ) // RefreshAllKeyspacesAndShards reloads the keyspace and shard information for the keyspaces that vtorc is concerned with. -func RefreshAllKeyspacesAndShards() { +func RefreshAllKeyspacesAndShards(ctx context.Context) error { var keyspaces []string if len(clustersToWatch) == 0 { // all known keyspaces - ctx, cancel := context.WithTimeout(context.Background(), topo.RemoteOperationTimeout) + ctx, cancel := context.WithTimeout(ctx, topo.RemoteOperationTimeout) defer cancel() var err error // Get all the keyspaces keyspaces, err = ts.GetKeyspaces(ctx) if err != nil { - log.Error(err) - return + return err } } else { // Parse input and build list of keyspaces @@ -55,14 +54,14 @@ func RefreshAllKeyspacesAndShards() { } if len(keyspaces) == 0 { log.Errorf("Found no keyspaces for input: %+v", clustersToWatch) - return + return nil } } // Sort the list of keyspaces. // The list can have duplicates because the input to clusters to watch may have multiple shards of the same keyspace sort.Strings(keyspaces) - refreshCtx, refreshCancel := context.WithTimeout(context.Background(), topo.RemoteOperationTimeout) + refreshCtx, refreshCancel := context.WithTimeout(ctx, topo.RemoteOperationTimeout) defer refreshCancel() var wg sync.WaitGroup for idx, keyspace := range keyspaces { @@ -83,6 +82,8 @@ func RefreshAllKeyspacesAndShards() { }(keyspace) } wg.Wait() + + return nil } // RefreshKeyspaceAndShard refreshes the keyspace record and shard record for the given keyspace and shard. diff --git a/go/vt/vtorc/logic/keyspace_shard_discovery_test.go b/go/vt/vtorc/logic/keyspace_shard_discovery_test.go index 097865db84a..5cbe139728b 100644 --- a/go/vt/vtorc/logic/keyspace_shard_discovery_test.go +++ b/go/vt/vtorc/logic/keyspace_shard_discovery_test.go @@ -92,7 +92,7 @@ func TestRefreshAllKeyspaces(t *testing.T) { // Set clusters to watch to only watch ks1 and ks3 onlyKs1and3 := []string{"ks1/-80", "ks3/-80", "ks3/80-"} clustersToWatch = onlyKs1and3 - RefreshAllKeyspacesAndShards() + require.NoError(t, RefreshAllKeyspacesAndShards(context.Background())) // Verify that we only have ks1 and ks3 in vtorc's db. 
verifyKeyspaceInfo(t, "ks1", keyspaceDurabilityNone, "") @@ -107,7 +107,7 @@ func TestRefreshAllKeyspaces(t *testing.T) { clustersToWatch = nil // Change the durability policy of ks1 reparenttestutil.SetKeyspaceDurability(ctx, t, ts, "ks1", "semi_sync") - RefreshAllKeyspacesAndShards() + require.NoError(t, RefreshAllKeyspacesAndShards(context.Background())) // Verify that all the keyspaces are correctly reloaded verifyKeyspaceInfo(t, "ks1", keyspaceDurabilitySemiSync, "") diff --git a/go/vt/vtorc/logic/tablet_discovery.go b/go/vt/vtorc/logic/tablet_discovery.go index e62c0652e62..990192a23f7 100644 --- a/go/vt/vtorc/logic/tablet_discovery.go +++ b/go/vt/vtorc/logic/tablet_discovery.go @@ -27,7 +27,6 @@ import ( "time" "github.com/spf13/pflag" - "google.golang.org/protobuf/encoding/prototext" "google.golang.org/protobuf/proto" @@ -38,7 +37,6 @@ import ( "vitess.io/vitess/go/vt/vtorc/config" "vitess.io/vitess/go/vt/vtorc/db" "vitess.io/vitess/go/vt/vtorc/inst" - "vitess.io/vitess/go/vt/vtorc/process" "vitess.io/vitess/go/vt/vttablet/tmclient" topodatapb "vitess.io/vitess/go/vt/proto/topodata" @@ -69,36 +67,33 @@ func OpenTabletDiscovery() <-chan time.Time { if _, err := db.ExecVTOrc("DELETE FROM vitess_tablet"); err != nil { log.Error(err) } - // We refresh all information from the topo once before we start the ticks to do it on a timer. - populateAllInformation() + // We refresh all information from the topo once before we start the ticks to do + // it on a timer. + ctx, cancel := context.WithTimeout(context.Background(), topo.RemoteOperationTimeout) + defer cancel() + if err := refreshAllInformation(ctx); err != nil { + log.Errorf("failed to initialize topo information: %+v", err) + } return time.Tick(config.GetTopoInformationRefreshDuration()) //nolint SA1015: using time.Tick leaks the underlying ticker } -// populateAllInformation initializes all the information for VTOrc to function. -func populateAllInformation() { - refreshAllInformation() - // We have completed one full discovery cycle. We should update the process health. 
- process.FirstDiscoveryCycleComplete.Store(true) -} - // refreshAllTablets reloads the tablets from topo and discovers the ones which haven't been refreshed in a while -func refreshAllTablets() { - refreshTabletsUsing(func(tabletAlias string) { +func refreshAllTablets(ctx context.Context) error { + return refreshTabletsUsing(ctx, func(tabletAlias string) { DiscoverInstance(tabletAlias, false /* forceDiscovery */) }, false /* forceRefresh */) } -func refreshTabletsUsing(loader func(tabletAlias string), forceRefresh bool) { +func refreshTabletsUsing(ctx context.Context, loader func(tabletAlias string), forceRefresh bool) error { if len(clustersToWatch) == 0 { // all known clusters - ctx, cancel := context.WithTimeout(context.Background(), topo.RemoteOperationTimeout) + ctx, cancel := context.WithTimeout(ctx, topo.RemoteOperationTimeout) defer cancel() cells, err := ts.GetKnownCells(ctx) if err != nil { - log.Error(err) - return + return err } - refreshCtx, refreshCancel := context.WithTimeout(context.Background(), topo.RemoteOperationTimeout) + refreshCtx, refreshCancel := context.WithTimeout(ctx, topo.RemoteOperationTimeout) defer refreshCancel() var wg sync.WaitGroup for _, cell := range cells { @@ -119,7 +114,7 @@ func refreshTabletsUsing(loader func(tabletAlias string), forceRefresh bool) { keyspaceShards = append(keyspaceShards, &topo.KeyspaceShard{Keyspace: input[0], Shard: input[1]}) } else { // Assume this is a keyspace and find all shards in keyspace - ctx, cancel := context.WithTimeout(context.Background(), topo.RemoteOperationTimeout) + ctx, cancel := context.WithTimeout(ctx, topo.RemoteOperationTimeout) defer cancel() shards, err := ts.GetShardNames(ctx, ks) if err != nil { @@ -138,9 +133,9 @@ func refreshTabletsUsing(loader func(tabletAlias string), forceRefresh bool) { } if len(keyspaceShards) == 0 { log.Errorf("Found no keyspaceShards for input: %+v", clustersToWatch) - return + return nil } - refreshCtx, refreshCancel := context.WithTimeout(context.Background(), topo.RemoteOperationTimeout) + refreshCtx, refreshCancel := context.WithTimeout(ctx, topo.RemoteOperationTimeout) defer refreshCancel() var wg sync.WaitGroup for _, ks := range keyspaceShards { @@ -152,6 +147,7 @@ func refreshTabletsUsing(loader func(tabletAlias string), forceRefresh bool) { } wg.Wait() } + return nil } func refreshTabletsInCell(ctx context.Context, cell string, loader func(tabletAlias string), forceRefresh bool) { diff --git a/go/vt/vtorc/logic/tablet_discovery_test.go b/go/vt/vtorc/logic/tablet_discovery_test.go index bc9eeba1fb7..f6a7af38382 100644 --- a/go/vt/vtorc/logic/tablet_discovery_test.go +++ b/go/vt/vtorc/logic/tablet_discovery_test.go @@ -37,7 +37,6 @@ import ( "vitess.io/vitess/go/vt/vtctl/grpcvtctldserver/testutil" "vitess.io/vitess/go/vt/vtorc/db" "vitess.io/vitess/go/vt/vtorc/inst" - "vitess.io/vitess/go/vt/vtorc/process" ) var ( @@ -369,25 +368,6 @@ func TestGetLockAction(t *testing.T) { } } -// TestProcessHealth tests that the health of the process reflects that we have run the first discovery once correctly. -func TestProcessHealth(t *testing.T) { - require.False(t, process.FirstDiscoveryCycleComplete.Load()) - originalTs := ts - defer func() { - ts = originalTs - process.FirstDiscoveryCycleComplete.Store(false) - }() - // Verify in the beginning, we have the first DiscoveredOnce field false. 
- _, discoveredOnce := process.HealthTest() - require.False(t, discoveredOnce) - ts = memorytopo.NewServer(context.Background(), cell1) - populateAllInformation() - require.True(t, process.FirstDiscoveryCycleComplete.Load()) - // Verify after we populate all information, we have the first DiscoveredOnce field true. - _, discoveredOnce = process.HealthTest() - require.True(t, discoveredOnce) -} - func TestSetReadOnly(t *testing.T) { tests := []struct { name string diff --git a/go/vt/vtorc/logic/vtorc.go b/go/vt/vtorc/logic/vtorc.go index b8cf404d050..39326525ce2 100644 --- a/go/vt/vtorc/logic/vtorc.go +++ b/go/vt/vtorc/logic/vtorc.go @@ -17,12 +17,14 @@ package logic import ( + "context" "sync" "sync/atomic" "time" "github.com/patrickmn/go-cache" "github.com/sjmudd/stopwatch" + "golang.org/x/sync/errgroup" "vitess.io/vitess/go/stats" "vitess.io/vitess/go/vt/log" @@ -32,6 +34,7 @@ import ( "vitess.io/vitess/go/vt/vtorc/discovery" "vitess.io/vitess/go/vt/vtorc/inst" ometrics "vitess.io/vitess/go/vt/vtorc/metrics" + "vitess.io/vitess/go/vt/vtorc/process" "vitess.io/vitess/go/vt/vtorc/util" ) @@ -304,30 +307,34 @@ func ContinuousDiscovery() { go inst.SnapshotTopologies() }() case <-tabletTopoTick: - refreshAllInformation() + ctx, cancel := context.WithTimeout(context.Background(), config.GetTopoInformationRefreshDuration()) + if err := refreshAllInformation(ctx); err != nil { + log.Errorf("failed to refresh topo information: %+v", err) + } + cancel() } } } // refreshAllInformation refreshes both shard and tablet information. This is meant to be run on tablet topo ticks. -func refreshAllInformation() { - // Create a wait group - var wg sync.WaitGroup +func refreshAllInformation(ctx context.Context) error { + // Create an errgroup + eg, ctx := errgroup.WithContext(ctx) // Refresh all keyspace information. - wg.Add(1) - go func() { - defer wg.Done() - RefreshAllKeyspacesAndShards() - }() + eg.Go(func() error { + return RefreshAllKeyspacesAndShards(ctx) + }) // Refresh all tablets. - wg.Add(1) - go func() { - defer wg.Done() - refreshAllTablets() - }() + eg.Go(func() error { + return refreshAllTablets(ctx) + }) // Wait for both the refreshes to complete - wg.Wait() + err := eg.Wait() + if err == nil { + process.FirstDiscoveryCycleComplete.Store(true) + } + return err } diff --git a/go/vt/vtorc/logic/vtorc_test.go b/go/vt/vtorc/logic/vtorc_test.go index c8f2ac3bfdc..edd8141e8b7 100644 --- a/go/vt/vtorc/logic/vtorc_test.go +++ b/go/vt/vtorc/logic/vtorc_test.go @@ -1,11 +1,17 @@ package logic import ( + "context" "sync/atomic" "testing" "time" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/vt/topo/memorytopo" + "vitess.io/vitess/go/vt/vtorc/db" + "vitess.io/vitess/go/vt/vtorc/process" ) func TestWaitForLocksRelease(t *testing.T) { @@ -54,3 +60,41 @@ func waitForLocksReleaseAndGetTimeWaitedFor() time.Duration { waitForLocksRelease() return time.Since(start) } + +func TestRefreshAllInformation(t *testing.T) { + // Store the old flags and restore on test completion + oldTs := ts + defer func() { + ts = oldTs + }() + + // Clear the database after the test. The easiest way to do that is to run all the initialization commands again. + defer func() { + db.ClearVTOrcDatabase() + }() + + // Verify in the beginning, we have the first DiscoveredOnce field false. 
+ _, discoveredOnce := process.HealthTest() + require.False(t, discoveredOnce) + + // Create a memory topo-server and create the keyspace and shard records + ts = memorytopo.NewServer(context.Background(), cell1) + _, err := ts.GetOrCreateShard(context.Background(), keyspace, shard) + require.NoError(t, err) + + // Test error + ctx, cancel := context.WithCancel(context.Background()) + cancel() // cancel context to simulate timeout + require.Error(t, refreshAllInformation(ctx)) + require.False(t, process.FirstDiscoveryCycleComplete.Load()) + _, discoveredOnce = process.HealthTest() + require.False(t, discoveredOnce) + + // Test success + ctx2, cancel2 := context.WithCancel(context.Background()) + defer cancel2() + require.NoError(t, refreshAllInformation(ctx2)) + require.True(t, process.FirstDiscoveryCycleComplete.Load()) + _, discoveredOnce = process.HealthTest() + require.True(t, discoveredOnce) +} diff --git a/go/vt/vtorc/process/health.go b/go/vt/vtorc/process/health.go index f72d7b05210..d448f03bb83 100644 --- a/go/vt/vtorc/process/health.go +++ b/go/vt/vtorc/process/health.go @@ -62,7 +62,7 @@ func writeHealthToDatabase() bool { func HealthTest() (health *NodeHealth, discoveredOnce bool) { ThisNodeHealth.LastReported = time.Now() discoveredOnce = FirstDiscoveryCycleComplete.Load() - ThisNodeHealth.Healthy = writeHealthToDatabase() + ThisNodeHealth.Healthy = discoveredOnce && writeHealthToDatabase() return ThisNodeHealth, discoveredOnce } diff --git a/go/vt/vtorc/process/health_test.go b/go/vt/vtorc/process/health_test.go new file mode 100644 index 00000000000..c198deda4e4 --- /dev/null +++ b/go/vt/vtorc/process/health_test.go @@ -0,0 +1,46 @@ +/* +Copyright 2024 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package process + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestHealthTest(t *testing.T) { + defer func() { + FirstDiscoveryCycleComplete.Store(false) + ThisNodeHealth = &NodeHealth{} + }() + + require.Zero(t, ThisNodeHealth.LastReported) + require.False(t, ThisNodeHealth.Healthy) + + ThisNodeHealth = &NodeHealth{} + health, discoveredOnce := HealthTest() + require.False(t, health.Healthy) + require.False(t, discoveredOnce) + require.NotZero(t, ThisNodeHealth.LastReported) + + ThisNodeHealth = &NodeHealth{} + FirstDiscoveryCycleComplete.Store(true) + health, discoveredOnce = HealthTest() + require.True(t, health.Healthy) + require.True(t, discoveredOnce) + require.NotZero(t, ThisNodeHealth.LastReported) +} diff --git a/go/vt/vttablet/endtoend/connecttcp/main_test.go b/go/vt/vttablet/endtoend/connecttcp/main_test.go index 9d52b1287a1..43be05893cc 100644 --- a/go/vt/vttablet/endtoend/connecttcp/main_test.go +++ b/go/vt/vttablet/endtoend/connecttcp/main_test.go @@ -22,6 +22,7 @@ import ( "fmt" "os" "testing" + "time" "vitess.io/vitess/go/mysql" vttestpb "vitess.io/vitess/go/vt/proto/vttest" @@ -86,8 +87,7 @@ func TestMain(m *testing.M) { defer cancel() config := tabletenv.NewDefaultConfig() - config.TwoPCEnable = true - config.TwoPCAbandonAge = 1 + config.TwoPCAbandonAge = 1 * time.Second if err := framework.StartCustomServer(ctx, connParams, connAppDebugParams, cluster.DbName(), config); err != nil { fmt.Fprintf(os.Stderr, "%v", err) diff --git a/go/vt/vttablet/endtoend/framework/server.go b/go/vt/vttablet/endtoend/framework/server.go index 0124bb992ba..3374aadb450 100644 --- a/go/vt/vttablet/endtoend/framework/server.go +++ b/go/vt/vttablet/endtoend/framework/server.go @@ -108,8 +108,7 @@ func StartCustomServer(ctx context.Context, connParams, connAppDebugParams mysql func StartServer(ctx context.Context, connParams, connAppDebugParams mysql.ConnParams, dbName string) error { config := tabletenv.NewDefaultConfig() config.StrictTableACL = true - config.TwoPCEnable = true - config.TwoPCAbandonAge = 1 + config.TwoPCAbandonAge = 1 * time.Second config.HotRowProtection.Mode = tabletenv.Enable config.TrackSchemaVersions = true config.GracePeriods.Shutdown = 2 * time.Second diff --git a/go/vt/vttablet/endtoend/twopc/main_test.go b/go/vt/vttablet/endtoend/twopc/main_test.go index 090751503d4..3b68ce273e1 100644 --- a/go/vt/vttablet/endtoend/twopc/main_test.go +++ b/go/vt/vttablet/endtoend/twopc/main_test.go @@ -22,6 +22,7 @@ import ( "fmt" "os" "testing" + "time" "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/vt/vttablet/endtoend/framework" @@ -83,8 +84,7 @@ func TestMain(m *testing.M) { defer cancel() config := tabletenv.NewDefaultConfig() - config.TwoPCEnable = true - config.TwoPCAbandonAge = 1 + config.TwoPCAbandonAge = 1 * time.Second err := framework.StartCustomServer(ctx, connParams, connAppDebugParams, cluster.DbName(), config) if err != nil { fmt.Fprintf(os.Stderr, "%v", err) diff --git a/go/vt/vttablet/grpctmserver/server.go b/go/vt/vttablet/grpctmserver/server.go index 889448a7cd3..6f0fd2aa4dc 100644 --- a/go/vt/vttablet/grpctmserver/server.go +++ b/go/vt/vttablet/grpctmserver/server.go @@ -354,7 +354,7 @@ func (s *server) MysqlHostMetrics(ctx context.Context, request *tabletmanagerdat func (s *server) ReplicationStatus(ctx context.Context, request *tabletmanagerdatapb.ReplicationStatusRequest) (response *tabletmanagerdatapb.ReplicationStatusResponse, err error) { defer s.tm.HandleRPCPanic(ctx, "ReplicationStatus", request, response, false 
/*verbose*/, &err) ctx = callinfo.GRPCCallInfo(ctx) - response = &tabletmanagerdatapb.ReplicationStatusResponse{BackupRunning: s.tm.IsBackupRunning()} + response = &tabletmanagerdatapb.ReplicationStatusResponse{} status, err := s.tm.ReplicationStatus(ctx) if err == nil { response.Status = status @@ -638,8 +638,6 @@ func (s *server) StopReplicationAndGetStatus(ctx context.Context, request *table response.Status = statusResponse.Status } - response.BackupRunning = s.tm.IsBackupRunning() - return response, err } diff --git a/go/vt/vttablet/tabletmanager/rpc_replication.go b/go/vt/vttablet/tabletmanager/rpc_replication.go index 90e4d835a79..f13efa66124 100644 --- a/go/vt/vttablet/tabletmanager/rpc_replication.go +++ b/go/vt/vttablet/tabletmanager/rpc_replication.go @@ -46,7 +46,11 @@ func (tm *TabletManager) ReplicationStatus(ctx context.Context) (*replicationdat if err != nil { return nil, err } - return replication.ReplicationStatusToProto(status), nil + + protoStatus := replication.ReplicationStatusToProto(status) + protoStatus.BackupRunning = tm.IsBackupRunning() + + return protoStatus, nil } // FullStatus returns the full status of MySQL including the replication information, semi-sync information, GTID information among others @@ -893,6 +897,7 @@ func (tm *TabletManager) StopReplicationAndGetStatus(ctx context.Context, stopRe return StopReplicationAndGetStatusResponse{}, vterrors.Wrap(err, "before status failed") } before := replication.ReplicationStatusToProto(rs) + before.BackupRunning = tm.IsBackupRunning() if stopReplicationMode == replicationdatapb.StopReplicationMode_IOTHREADONLY { if !rs.IOHealthy() { @@ -939,6 +944,7 @@ func (tm *TabletManager) StopReplicationAndGetStatus(ctx context.Context, stopRe }, vterrors.Wrap(err, "acquiring replication status failed") } after := replication.ReplicationStatusToProto(rsAfter) + after.BackupRunning = tm.IsBackupRunning() rs.Position = rsAfter.Position rs.RelayLogPosition = rsAfter.RelayLogPosition diff --git a/go/vt/vttablet/tabletmanager/rpc_vreplication_test.go b/go/vt/vttablet/tabletmanager/rpc_vreplication_test.go index 0a5bd9f26fd..3f8bc85ac7f 100644 --- a/go/vt/vttablet/tabletmanager/rpc_vreplication_test.go +++ b/go/vt/vttablet/tabletmanager/rpc_vreplication_test.go @@ -305,7 +305,6 @@ func TestCreateVReplicationWorkflow(t *testing.T) { // results returned. Followed by ensuring that SwitchTraffic // and ReverseTraffic also work as expected. 
func TestMoveTablesUnsharded(t *testing.T) { - t.Skip("Skipping test temporarily as it is flaky on CI, pending investigation") ctx, cancel := context.WithCancel(context.Background()) defer cancel() sourceKs := "sourceks" @@ -403,6 +402,9 @@ func TestMoveTablesUnsharded(t *testing.T) { ftc.vrdbClient.AddInvariant(getCopyStateQuery, &sqltypes.Result{}) tenv.tmc.setVReplicationExecResults(ftc.tablet, getCopyState, &sqltypes.Result{}) ftc.vrdbClient.ExpectRequest(fmt.Sprintf(readAllWorkflows, tenv.dbName, ""), &sqltypes.Result{}, nil) + for _, table := range defaultSchema.TableDefinitions { + tenv.db.AddQuery(fmt.Sprintf(getNonEmptyTableQuery, table.Name), &sqltypes.Result{}) + } insert := fmt.Sprintf(`%s values ('%s', 'keyspace:"%s" shard:"%s" filter:{rules:{match:"t1" filter:"select * from t1"}}', '', 0, 0, '%s', 'primary,replica,rdonly', now(), 0, 'Stopped', '%s', %d, 0, 0, '{}')`, insertVReplicationPrefix, wf, sourceKs, sourceShard, tenv.cells[0], tenv.dbName, vreplID) ftc.vrdbClient.ExpectRequest(insert, &sqltypes.Result{InsertID: 1}, nil) @@ -1780,7 +1782,7 @@ func addInvariants(dbClient *binlogplayer.MockDBClient, vreplID, sourceTabletUID "0", )) dbClient.AddInvariant(fmt.Sprintf(updatePickedSourceTablet, cell, sourceTabletUID, vreplID), &sqltypes.Result{}) - + dbClient.AddInvariant("update _vt.vreplication set state='Running', message='' where id=1", &sqltypes.Result{}) } func addMaterializeSettingsTablesToSchema(ms *vtctldatapb.MaterializeSettings, tenv *testEnv, venv *vtenv.Environment) { diff --git a/go/vt/vttablet/tabletmanager/vreplication/external_connector_test.go b/go/vt/vttablet/tabletmanager/vreplication/external_connector_test.go index e00a5578171..c671d2a086d 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/external_connector_test.go +++ b/go/vt/vttablet/tabletmanager/vreplication/external_connector_test.go @@ -163,8 +163,7 @@ func TestExternalConnectorPlay(t *testing.T) { expectDBClientAndVreplicationQueries(t, []string{ "begin", - "insert into tab1(id,val) values (1,_binary'a')", - "insert into tab1(id,val) values (2,_binary'b')", + "insert into tab1(id,val) values (1,_binary'a'), (2,_binary'b')", "/update _vt.vreplication set pos=", "commit", }, pos) diff --git a/go/vt/vttablet/tabletmanager/vreplication/framework_test.go b/go/vt/vttablet/tabletmanager/vreplication/framework_test.go index fe8b62d3cef..12a05a69dbc 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/framework_test.go +++ b/go/vt/vttablet/tabletmanager/vreplication/framework_test.go @@ -141,7 +141,6 @@ func setup(ctx context.Context) (func(), int) { resetBinlogClient() vttablet.InitVReplicationConfigDefaults() - vttablet.DefaultVReplicationConfig.ExperimentalFlags = 0 // Engines cannot be initialized in testenv because it introduces circular dependencies. streamerEngine = vstreamer.NewEngine(env.TabletEnv, env.SrvTopo, env.SchemaEngine, nil, env.Cells[0]) diff --git a/go/vt/vttablet/tabletmanager/vreplication/vcopier_test.go b/go/vt/vttablet/tabletmanager/vreplication/vcopier_test.go index a95e0bf17c5..a7e4794ba76 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/vcopier_test.go +++ b/go/vt/vttablet/tabletmanager/vreplication/vcopier_test.go @@ -684,6 +684,14 @@ func testPlayerCopyBigTable(t *testing.T) { reset := vstreamer.AdjustPacketSize(1) defer reset() + // The test is written to match the behavior w/o + // VReplicationExperimentalFlagOptimizeInserts enabled. 
+ origExperimentalFlags := vttablet.DefaultVReplicationConfig.ExperimentalFlags + vttablet.DefaultVReplicationConfig.ExperimentalFlags = 0 + defer func() { + vttablet.DefaultVReplicationConfig.ExperimentalFlags = origExperimentalFlags + }() + savedCopyPhaseDuration := vttablet.DefaultVReplicationConfig.CopyPhaseDuration // copyPhaseDuration should be low enough to have time to send one row. vttablet.DefaultVReplicationConfig.CopyPhaseDuration = 500 * time.Millisecond @@ -814,6 +822,14 @@ func testPlayerCopyWildcardRule(t *testing.T) { reset := vstreamer.AdjustPacketSize(1) defer reset() + // The test is written to match the behavior w/o + // VReplicationExperimentalFlagOptimizeInserts enabled. + origExperimentalFlags := vttablet.DefaultVReplicationConfig.ExperimentalFlags + vttablet.DefaultVReplicationConfig.ExperimentalFlags = 0 + defer func() { + vttablet.DefaultVReplicationConfig.ExperimentalFlags = origExperimentalFlags + }() + savedCopyPhaseDuration := vttablet.DefaultVReplicationConfig.CopyPhaseDuration // copyPhaseDuration should be low enough to have time to send one row. vttablet.DefaultVReplicationConfig.CopyPhaseDuration = 500 * time.Millisecond diff --git a/go/vt/vttablet/tabletmanager/vreplication/vdbclient.go b/go/vt/vttablet/tabletmanager/vreplication/vdbclient.go index 8a4409db06c..63538be881d 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/vdbclient.go +++ b/go/vt/vttablet/tabletmanager/vreplication/vdbclient.go @@ -30,6 +30,8 @@ import ( "vitess.io/vitess/go/vt/vterrors" ) +const beginStmtLen = int64(len("begin;")) + // vdbClient is a wrapper on binlogplayer.DBClient. // It allows us to retry a failed transactions on lock errors. type vdbClient struct { @@ -56,16 +58,19 @@ func (vc *vdbClient) Begin() error { if vc.InTransaction { return nil } - if err := vc.DBClient.Begin(); err != nil { - return err + if vc.maxBatchSize > 0 { + // We are batching the contents of the transaction, which + // starts with the BEGIN and ends with the COMMIT, so we + // do not send a BEGIN down the wire ahead of time. + vc.queriesPos = int64(len(vc.queries)) + vc.batchSize = beginStmtLen + } else { + // We're not batching so we start the transaction here + // by sending the BEGIN down the wire. + if err := vc.DBClient.Begin(); err != nil { + return err + } } - - // If we're batching, we only batch the contents of the - // transaction, which starts with the begin and ends with - // the commit. - vc.queriesPos = int64(len(vc.queries)) - vc.batchSize = 6 // begin and semicolon - vc.queries = append(vc.queries, "begin") vc.InTransaction = true vc.startTime = time.Now() diff --git a/go/vt/vttablet/tabletmanager/vreplication/vplayer.go b/go/vt/vttablet/tabletmanager/vreplication/vplayer.go index db2f3f341ac..98e36119622 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/vplayer.go +++ b/go/vt/vttablet/tabletmanager/vreplication/vplayer.go @@ -133,7 +133,8 @@ func newVPlayer(vr *vreplicator, settings binlogplayer.VRSettings, copyState map return vr.dbClient.Commit() } batchMode := false - if vr.workflowConfig.ExperimentalFlags&vttablet.VReplicationExperimentalFlagVPlayerBatching != 0 { + // We only do batching in the running/replicating phase. 
+ if len(copyState) == 0 && vr.workflowConfig.ExperimentalFlags&vttablet.VReplicationExperimentalFlagVPlayerBatching != 0 { batchMode = true } if batchMode { diff --git a/go/vt/vttablet/tabletmanager/vreplication/vplayer_flaky_test.go b/go/vt/vttablet/tabletmanager/vreplication/vplayer_flaky_test.go index 0cc568c1cf1..50d93e60e5a 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/vplayer_flaky_test.go +++ b/go/vt/vttablet/tabletmanager/vreplication/vplayer_flaky_test.go @@ -628,7 +628,6 @@ func TestPlayerStatementModeWithFilterAndErrorHandling(t *testing.T) { // It does not work when filter is enabled output := qh.Expect( - "begin", "rollback", fmt.Sprintf("/update _vt.vreplication set message='%s", expectedMsg), ) @@ -975,8 +974,7 @@ func TestPlayerFilters(t *testing.T) { input: "insert into src4 values (1,100,'aaa'),(2,200,'bbb'),(3,100,'ccc')", output: qh.Expect( "begin", - "insert into dst4(id1,val) values (1,_binary'aaa')", - "insert into dst4(id1,val) values (3,_binary'ccc')", + "insert into dst4(id1,val) values (1,_binary'aaa'), (3,_binary'ccc')", "/update _vt.vreplication set pos=", "commit", ), @@ -987,8 +985,7 @@ func TestPlayerFilters(t *testing.T) { input: "insert into src5 values (1,100,'abc'),(2,200,'xyz'),(3,100,'xyz'),(4,300,'abc'),(5,200,'xyz')", output: qh.Expect( "begin", - "insert into dst5(id1,val) values (1,_binary'abc')", - "insert into dst5(id1,val) values (4,_binary'abc')", + "insert into dst5(id1,val) values (1,_binary'abc'), (4,_binary'abc')", "/update _vt.vreplication set pos=", "commit", ), @@ -1495,9 +1492,7 @@ func TestPlayerRowMove(t *testing.T) { }) expectDBClientQueries(t, qh.Expect( "begin", - "insert into dst(val1,sval2,rcount) values (1,ifnull(1, 0),1) on duplicate key update sval2=sval2+ifnull(values(sval2), 0), rcount=rcount+1", - "insert into dst(val1,sval2,rcount) values (2,ifnull(2, 0),1) on duplicate key update sval2=sval2+ifnull(values(sval2), 0), rcount=rcount+1", - "insert into dst(val1,sval2,rcount) values (2,ifnull(3, 0),1) on duplicate key update sval2=sval2+ifnull(values(sval2), 0), rcount=rcount+1", + "insert into dst(val1,sval2,rcount) values (1,ifnull(1, 0),1), (2,ifnull(2, 0),1), (2,ifnull(3, 0),1) on duplicate key update sval2=sval2+ifnull(values(sval2), 0), rcount=rcount+1", "/update _vt.vreplication set pos=", "commit", )) @@ -1505,7 +1500,7 @@ func TestPlayerRowMove(t *testing.T) { {"1", "1", "1"}, {"2", "5", "2"}, }) - validateQueryCountStat(t, "replicate", 3) + validateQueryCountStat(t, "replicate", 1) execStatements(t, []string{ "update src set val1=1, val2=4 where id=3", @@ -1521,7 +1516,7 @@ func TestPlayerRowMove(t *testing.T) { {"1", "5", "2"}, {"2", "2", "1"}, }) - validateQueryCountStat(t, "replicate", 5) + validateQueryCountStat(t, "replicate", 3) } func TestPlayerTypes(t *testing.T) { @@ -2179,6 +2174,14 @@ func TestPlayerSplitTransaction(t *testing.T) { func TestPlayerLockErrors(t *testing.T) { defer deleteTablet(addTablet(100)) + // The immediate retry behavior does not apply when doing + // VPlayer Batching. 
+ origExperimentalFlags := vttablet.DefaultVReplicationConfig.ExperimentalFlags + vttablet.DefaultVReplicationConfig.ExperimentalFlags = 0 + defer func() { + vttablet.DefaultVReplicationConfig.ExperimentalFlags = origExperimentalFlags + }() + execStatements(t, []string{ "create table t1(id int, val varchar(128), primary key(id))", fmt.Sprintf("create table %s.t1(id int, val varchar(128), primary key(id))", vrepldb), @@ -2258,6 +2261,14 @@ func TestPlayerLockErrors(t *testing.T) { func TestPlayerCancelOnLock(t *testing.T) { defer deleteTablet(addTablet(100)) + // The immediate retry behavior does not apply when doing + // VPlayer Batching. + origExperimentalFlags := vttablet.DefaultVReplicationConfig.ExperimentalFlags + vttablet.DefaultVReplicationConfig.ExperimentalFlags = 0 + defer func() { + vttablet.DefaultVReplicationConfig.ExperimentalFlags = origExperimentalFlags + }() + execStatements(t, []string{ "create table t1(id int, val varchar(128), primary key(id))", fmt.Sprintf("create table %s.t1(id int, val varchar(128), primary key(id))", vrepldb), diff --git a/go/vt/vttablet/tabletmanager/vreplication/vreplicator.go b/go/vt/vttablet/tabletmanager/vreplication/vreplicator.go index 9ec274ab0ea..42701288a44 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/vreplicator.go +++ b/go/vt/vttablet/tabletmanager/vreplication/vreplicator.go @@ -508,8 +508,14 @@ func (vr *vreplicator) setState(state binlogdatapb.VReplicationWorkflowState, me } vr.stats.State.Store(state.String()) query := fmt.Sprintf("update _vt.vreplication set state='%v', message=%v where id=%v", state, encodeString(binlogplayer.MessageTruncate(message)), vr.id) - if _, err := vr.dbClient.ExecuteFetch(query, 1); err != nil { - return fmt.Errorf("could not set state: %v: %v", query, err) + // If we're batching a transaction, then include the state update + // in the current transaction batch. 
+ if vr.dbClient.InTransaction && vr.dbClient.maxBatchSize > 0 { + vr.dbClient.AddQueryToTrxBatch(query) + } else { // Otherwise, send it down the wire + if _, err := vr.dbClient.ExecuteFetch(query, 1); err != nil { + return fmt.Errorf("could not set state: %v: %v", query, err) + } } if state == vr.state { return nil diff --git a/go/vt/vttablet/tabletserver/dt_executor_test.go b/go/vt/vttablet/tabletserver/dt_executor_test.go index d5322f352f9..b21667392d6 100644 --- a/go/vt/vttablet/tabletserver/dt_executor_test.go +++ b/go/vt/vttablet/tabletserver/dt_executor_test.go @@ -705,8 +705,10 @@ func TestNoTwopc(t *testing.T) { want := "2pc is not enabled" for _, tc := range testcases { - err := tc.fun() - require.EqualError(t, err, want) + t.Run(tc.desc, func(t *testing.T) { + err := tc.fun() + require.EqualError(t, err, want) + }) } } diff --git a/go/vt/vttablet/tabletserver/query_executor_test.go b/go/vt/vttablet/tabletserver/query_executor_test.go index 78daad2e616..ade79ecaef5 100644 --- a/go/vt/vttablet/tabletserver/query_executor_test.go +++ b/go/vt/vttablet/tabletserver/query_executor_test.go @@ -1514,20 +1514,17 @@ func newTestTabletServer(ctx context.Context, flags executorFlags, db *fakesqldb } else { cfg.StrictTableACL = false } - if flags&noTwopc > 0 { - cfg.TwoPCEnable = false - } else { - cfg.TwoPCEnable = true - } if flags&disableOnlineDDL > 0 { cfg.EnableOnlineDDL = false } else { cfg.EnableOnlineDDL = true } - if flags&shortTwopcAge > 0 { - cfg.TwoPCAbandonAge = 0.5 + if flags&noTwopc > 0 { + cfg.TwoPCAbandonAge = 0 + } else if flags&shortTwopcAge > 0 { + cfg.TwoPCAbandonAge = 500 * time.Millisecond } else { - cfg.TwoPCAbandonAge = 10 + cfg.TwoPCAbandonAge = 10 * time.Second } if flags&smallResultSize > 0 { cfg.Oltp.MaxRows = 2 diff --git a/go/vt/vttablet/tabletserver/tabletenv/config.go b/go/vt/vttablet/tabletserver/tabletenv/config.go index 158f40d5202..994999f2368 100644 --- a/go/vt/vttablet/tabletserver/tabletenv/config.go +++ b/go/vt/vttablet/tabletserver/tabletenv/config.go @@ -156,8 +156,12 @@ func registerTabletEnvFlags(fs *pflag.FlagSet) { fs.BoolVar(¤tConfig.WatchReplication, "watch_replication_stream", false, "When enabled, vttablet will stream the MySQL replication stream from the local server, and use it to update schema when it sees a DDL.") fs.BoolVar(¤tConfig.TrackSchemaVersions, "track_schema_versions", false, "When enabled, vttablet will store versions of schemas at each position that a DDL is applied and allow retrieval of the schema corresponding to a position") fs.Int64Var(¤tConfig.SchemaVersionMaxAgeSeconds, "schema-version-max-age-seconds", 0, "max age of schema version records to kept in memory by the vreplication historian") - fs.BoolVar(¤tConfig.TwoPCEnable, "twopc_enable", defaultConfig.TwoPCEnable, "if the flag is on, 2pc is enabled. Other 2pc flags must be supplied.") - SecondsVar(fs, ¤tConfig.TwoPCAbandonAge, "twopc_abandon_age", defaultConfig.TwoPCAbandonAge, "time in seconds. Any unresolved transaction older than this time will be sent to the coordinator to be resolved.") + + _ = fs.Bool("twopc_enable", true, "TwoPC is enabled") + _ = fs.MarkDeprecated("twopc_enable", "TwoPC is always enabled, the transaction abandon age can be configured") + flagutil.FloatDuration(fs, ¤tConfig.TwoPCAbandonAge, "twopc_abandon_age", defaultConfig.TwoPCAbandonAge, + "Any unresolved transaction older than this time will be sent to the coordinator to be resolved. NOTE: Providing time as seconds (float64) is deprecated. 
Use time.Duration format (e.g., '1s', '2m', '1h').") + // Tx throttler config flagutil.DualFormatBoolVar(fs, ¤tConfig.EnableTxThrottler, "enable_tx_throttler", defaultConfig.EnableTxThrottler, "If true replication-lag-based throttling on transactions will be enabled.") flagutil.DualFormatVar(fs, currentConfig.TxThrottlerConfig, "tx_throttler_config", "The configuration of the transaction throttler as a text-formatted throttlerdata.Configuration protocol buffer message.") @@ -331,12 +335,11 @@ type TabletConfig struct { ExternalConnections map[string]*dbconfigs.DBConfigs `json:"externalConnections,omitempty"` - SanitizeLogMessages bool `json:"-"` - StrictTableACL bool `json:"-"` - EnableTableACLDryRun bool `json:"-"` - TableACLExemptACL string `json:"-"` - TwoPCEnable bool `json:"-"` - TwoPCAbandonAge Seconds `json:"-"` + SanitizeLogMessages bool `json:"-"` + StrictTableACL bool `json:"-"` + EnableTableACLDryRun bool `json:"-"` + TableACLExemptACL string `json:"-"` + TwoPCAbandonAge time.Duration `json:"-"` EnableTxThrottler bool `json:"-"` TxThrottlerConfig *TxThrottlerConfigFlag `json:"-"` @@ -1054,6 +1057,8 @@ var defaultConfig = TabletConfig{ }, EnablePerWorkloadTableMetrics: false, + + TwoPCAbandonAge: 15 * time.Minute, } // defaultTxThrottlerConfig returns the default TxThrottlerConfigFlag object based on diff --git a/go/vt/vttablet/tabletserver/tabletenv/seconds.go b/go/vt/vttablet/tabletserver/tabletenv/seconds.go deleted file mode 100644 index ae11121f2de..00000000000 --- a/go/vt/vttablet/tabletserver/tabletenv/seconds.go +++ /dev/null @@ -1,42 +0,0 @@ -/* -Copyright 2020 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package tabletenv - -import ( - "time" - - "github.com/spf13/pflag" -) - -// Seconds provides convenience functions for extracting -// duration from float64 seconds values. -type Seconds float64 - -// SecondsVar is like a flag.Float64Var, but it works for Seconds. -func SecondsVar(fs *pflag.FlagSet, p *Seconds, name string, value Seconds, usage string) { - fs.Float64Var((*float64)(p), name, float64(value), usage) -} - -// Get converts Seconds to time.Duration -func (s Seconds) Get() time.Duration { - return time.Duration(s * Seconds(1*time.Second)) -} - -// Set sets the value from time.Duration -func (s *Seconds) Set(d time.Duration) { - *s = Seconds(d) / Seconds(1*time.Second) -} diff --git a/go/vt/vttablet/tabletserver/tabletenv/seconds_test.go b/go/vt/vttablet/tabletserver/tabletenv/seconds_test.go deleted file mode 100644 index dc09a3f419f..00000000000 --- a/go/vt/vttablet/tabletserver/tabletenv/seconds_test.go +++ /dev/null @@ -1,53 +0,0 @@ -/* -Copyright 2020 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package tabletenv - -import ( - "testing" - "time" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "vitess.io/vitess/go/yaml2" -) - -func TestSecondsYaml(t *testing.T) { - type testSecond struct { - Value Seconds `json:"value"` - } - - ts := testSecond{ - Value: 1, - } - gotBytes, err := yaml2.Marshal(&ts) - require.NoError(t, err) - wantBytes := "value: 1\n" - assert.Equal(t, wantBytes, string(gotBytes)) - - var gotts testSecond - err = yaml2.Unmarshal([]byte(wantBytes), &gotts) - require.NoError(t, err) - assert.Equal(t, ts, gotts) -} - -func TestSecondsGetSet(t *testing.T) { - var val Seconds - val.Set(2 * time.Second) - assert.Equal(t, Seconds(2), val) - assert.Equal(t, 2*time.Second, val.Get()) -} diff --git a/go/vt/vttablet/tabletserver/tx_engine.go b/go/vt/vttablet/tabletserver/tx_engine.go index b6e0e69b86d..d0ca4ec498c 100644 --- a/go/vt/vttablet/tabletserver/tx_engine.go +++ b/go/vt/vttablet/tabletserver/tx_engine.go @@ -116,20 +116,19 @@ func NewTxEngine(env tabletenv.Env, dxNotifier func()) *TxEngine { te.txPool = NewTxPool(env, limiter) // We initially allow twoPC (handles vttablet restarts). // We will disallow them for a few reasons - - // 1. when a new tablet is promoted if semi-sync is turned off. + // 1. When a new tablet is promoted if semi-sync is turned off. // 2. TabletControls have been set by a Resharding workflow. 
te.twopcAllowed = make([]bool, TwoPCAllowed_Len) for idx := range te.twopcAllowed { te.twopcAllowed[idx] = true } - te.twopcEnabled = config.TwoPCEnable - if te.twopcEnabled { - if config.TwoPCAbandonAge <= 0 { - log.Error("2PC abandon age not specified: Disabling 2PC") - te.twopcEnabled = false - } + te.twopcEnabled = true + if config.TwoPCAbandonAge <= 0 { + log.Error("2PC abandon age not specified: Disabling 2PC") + te.twopcEnabled = false } - te.abandonAge = config.TwoPCAbandonAge.Get() + + te.abandonAge = config.TwoPCAbandonAge te.ticks = timer.NewTimer(te.abandonAge / 2) // Set the prepared pool capacity to something lower than diff --git a/go/vt/vttablet/tabletserver/tx_engine_test.go b/go/vt/vttablet/tabletserver/tx_engine_test.go index f4dd0596691..cba7bf86e8f 100644 --- a/go/vt/vttablet/tabletserver/tx_engine_test.go +++ b/go/vt/vttablet/tabletserver/tx_engine_test.go @@ -613,8 +613,7 @@ func TestCheckReceivedError(t *testing.T) { cfg := tabletenv.NewDefaultConfig() cfg.DB = newDBConfigs(db) env := tabletenv.NewEnv(vtenv.NewTestEnv(), cfg, "TabletServerTest") - env.Config().TwoPCEnable = true - env.Config().TwoPCAbandonAge = 5 + env.Config().TwoPCAbandonAge = 5 * time.Second te := NewTxEngine(env, nil) te.AcceptReadWrite() @@ -791,8 +790,7 @@ func TestPrepareTx(t *testing.T) { db.AddRejectedQuery("retryable query", sqlerror.NewSQLError(sqlerror.CRConnectionError, "", "Retryable error")) cfg := tabletenv.NewDefaultConfig() cfg.DB = newDBConfigs(db) - cfg.TwoPCEnable = true - cfg.TwoPCAbandonAge = 200 + cfg.TwoPCAbandonAge = 200 * time.Second te := NewTxEngine(tabletenv.NewEnv(vtenv.NewTestEnv(), cfg, "TabletServerTest"), nil) te.AcceptReadWrite() db.ResetQueryLog() diff --git a/go/vt/vttablet/tabletserver/vstreamer/vstreamer.go b/go/vt/vttablet/tabletserver/vstreamer/vstreamer.go index ea7f75cdc38..59db723ff2b 100644 --- a/go/vt/vttablet/tabletserver/vstreamer/vstreamer.go +++ b/go/vt/vttablet/tabletserver/vstreamer/vstreamer.go @@ -375,7 +375,7 @@ func (vs *vstreamer) parseEvents(ctx context.Context, events <-chan mysql.Binlog } return fmt.Errorf("unexpected server EOF") } - vevents, err := vs.parseEvent(ev) + vevents, err := vs.parseEvent(ev, bufferAndTransmit) if err != nil { vs.vse.errorCounts.Add("ParseEvent", 1) return err @@ -416,7 +416,11 @@ func (vs *vstreamer) parseEvents(ctx context.Context, events <-chan mysql.Binlog } // parseEvent parses an event from the binlog and converts it to a list of VEvents. -func (vs *vstreamer) parseEvent(ev mysql.BinlogEvent) ([]*binlogdatapb.VEvent, error) { +// The bufferAndTransmit function must be passed if the event is a TransactionPayloadEvent +// as for larger payloads (> ZstdInMemoryDecompressorMaxSize) the internal events need +// to be streamed directly here in order to avoid holding the entire payload's contents, +// which can be 10s or even 100s of GiBs, all in memory. +func (vs *vstreamer) parseEvent(ev mysql.BinlogEvent, bufferAndTransmit func(vevent *binlogdatapb.VEvent) error) ([]*binlogdatapb.VEvent, error) { if !ev.IsValid() { return nil, fmt.Errorf("can't parse binlog event: invalid data: %#v", ev) } @@ -672,11 +676,31 @@ func (vs *vstreamer) parseEvent(ev mysql.BinlogEvent) ([]*binlogdatapb.VEvent, e } return nil, err } - tpvevents, err := vs.parseEvent(tpevent) + tpvevents, err := vs.parseEvent(tpevent, nil) // Parse the internal event if err != nil { return nil, vterrors.Wrap(err, "failed to parse transaction payload's internal event") } - vevents = append(vevents, tpvevents...) 
+ if tp.StreamingContents { + // Transmit each internal event individually to avoid buffering + // the large transaction's entire payload of events in memory, as + // the uncompressed size can be 10s or even 100s of GiBs in size. + if bufferAndTransmit == nil { + return nil, vterrors.New(vtrpcpb.Code_INTERNAL, "[bug] cannot stream compressed transaction payload's internal events as no bufferAndTransmit function was provided") + } + for _, tpvevent := range tpvevents { + tpvevent.Timestamp = int64(ev.Timestamp()) + tpvevent.CurrentTime = time.Now().UnixNano() + if err := bufferAndTransmit(tpvevent); err != nil { + if err == io.EOF { + return nil, nil + } + vs.vse.errorCounts.Add("TransactionPayloadBufferAndTransmit", 1) + return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "error sending compressed transaction payload's internal event: %v", err) + } + } + } else { // Process the payload's internal events all at once + vevents = append(vevents, tpvevents...) + } } vs.vse.vstreamerCompressedTransactionsDecoded.Add(1) } diff --git a/go/vt/wrangler/materializer.go b/go/vt/wrangler/materializer.go index 7e24945cde7..bd7ae553130 100644 --- a/go/vt/wrangler/materializer.go +++ b/go/vt/wrangler/materializer.go @@ -551,11 +551,8 @@ func (wr *Wrangler) prepareCreateLookup(ctx context.Context, keyspace string, sp if len(vindexFromCols) != 1 { return nil, nil, nil, fmt.Errorf("unique vindex 'from' should have only one column: %v", vindex) } - } else { - if len(vindexFromCols) < 2 { - return nil, nil, nil, fmt.Errorf("non-unique vindex 'from' should have more than one column: %v", vindex) - } } + vindexToCol = vindex.Params["to"] // Make the vindex write_only. If one exists already in the vschema, // it will need to match this vindex exactly, including the write_only setting. 
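For context on the vstreamer.go hunk above: parseEvent now takes a bufferAndTransmit callback so that, when a compressed transaction payload is too large to decode fully in memory (larger than ZstdInMemoryDecompressorMaxSize), its internal events are sent one at a time instead of being appended to the returned slice. The following stand-alone Go sketch is not part of the patch; the Event type, the size threshold, and the transmit callback are illustrative assumptions that only mirror the shape of that buffer-versus-stream decision.

package main

import (
	"errors"
	"fmt"
)

// Event stands in for a decoded binlog event; the real vstreamer types are richer.
type Event struct {
	Name string
}

// errNoTransmit mirrors the guard in the patch: streaming mode requires a callback.
var errNoTransmit = errors.New("cannot stream payload events: no transmit function provided")

// handlePayload either returns the payload's internal events to the caller
// (small payloads) or pushes them one at a time through transmit (large
// payloads). In the real code the events of a large payload are produced and
// sent incrementally; here they are pre-collected only to keep the sketch short.
func handlePayload(events []Event, uncompressedSize, maxInMemory int, transmit func(Event) error) ([]Event, error) {
	if uncompressedSize <= maxInMemory {
		// Small enough: hand the events back so the caller buffers them as before.
		return events, nil
	}
	if transmit == nil {
		return nil, errNoTransmit
	}
	for _, ev := range events {
		// Stream each internal event individually.
		if err := transmit(ev); err != nil {
			return nil, fmt.Errorf("sending payload event %q: %w", ev.Name, err)
		}
	}
	// Nothing left to buffer; the events were already delivered.
	return nil, nil
}

func main() {
	evs := []Event{{Name: "insert t1"}, {Name: "insert t2"}}
	// Simulate a large payload: events are transmitted as they are handled.
	if _, err := handlePayload(evs, 1<<30, 1<<20, func(ev Event) error {
		fmt.Println("transmitted:", ev.Name)
		return nil
	}); err != nil {
		fmt.Println(err)
	}
}

In the actual patch the threshold check happens while decoding the payload and the callback is the bufferAndTransmit closure from parseEvents; the sketch only reflects the decision between buffering and streaming.
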
diff --git a/go/vt/wrangler/materializer_test.go b/go/vt/wrangler/materializer_test.go index 1728ba6efc2..1871d778c6b 100644 --- a/go/vt/wrangler/materializer_test.go +++ b/go/vt/wrangler/materializer_test.go @@ -1599,7 +1599,7 @@ func TestCreateLookupVindexFailures(t *testing.T) { }, err: "unique vindex 'from' should have only one column", }, { - description: "non-unique lookup should have more than one column", + description: "non-unique lookup can have only one column", input: &vschemapb.Keyspace{ Vindexes: map[string]*vschemapb.Vindex{ "v": { @@ -1612,7 +1612,7 @@ func TestCreateLookupVindexFailures(t *testing.T) { }, }, }, - err: "non-unique vindex 'from' should have more than one column", + err: "", }, { description: "vindex not found", input: &vschemapb.Keyspace{ diff --git a/go/vt/wrangler/testlib/backup_test.go b/go/vt/wrangler/testlib/backup_test.go index cb61c4bab99..b540fc9f8f0 100644 --- a/go/vt/wrangler/testlib/backup_test.go +++ b/go/vt/wrangler/testlib/backup_test.go @@ -28,7 +28,6 @@ import ( "github.com/stretchr/testify/require" "vitess.io/vitess/go/mysql" - "vitess.io/vitess/go/mysql/capabilities" "vitess.io/vitess/go/mysql/fakesqldb" "vitess.io/vitess/go/mysql/replication" "vitess.io/vitess/go/sqltypes" @@ -36,6 +35,7 @@ import ( "vitess.io/vitess/go/vt/logutil" "vitess.io/vitess/go/vt/mysqlctl" "vitess.io/vitess/go/vt/mysqlctl/backupstorage" + "vitess.io/vitess/go/vt/mysqlctl/blackbox" "vitess.io/vitess/go/vt/mysqlctl/filebackupstorage" "vitess.io/vitess/go/vt/topo/memorytopo" "vitess.io/vitess/go/vt/topo/topoproto" @@ -132,7 +132,7 @@ func testBackupRestore(t *testing.T, cDetails *compressionDetails) error { require.NoError(t, os.MkdirAll(s, os.ModePerm)) } - needIt, err := needInnoDBRedoLogSubdir() + needIt, err := blackbox.NeedInnoDBRedoLogSubdir() require.NoError(t, err) if needIt { newPath := path.Join(sourceInnodbLogDir, mysql.DynamicRedoLogSubdir) @@ -371,7 +371,7 @@ func TestBackupRestoreLagged(t *testing.T) { } require.NoError(t, os.WriteFile(path.Join(sourceInnodbDataDir, "innodb_data_1"), []byte("innodb data 1 contents"), os.ModePerm)) - needIt, err := needInnoDBRedoLogSubdir() + needIt, err := blackbox.NeedInnoDBRedoLogSubdir() require.NoError(t, err) if needIt { newPath := path.Join(sourceInnodbLogDir, mysql.DynamicRedoLogSubdir) @@ -591,7 +591,7 @@ func TestRestoreUnreachablePrimary(t *testing.T) { } require.NoError(t, os.WriteFile(path.Join(sourceInnodbDataDir, "innodb_data_1"), []byte("innodb data 1 contents"), os.ModePerm)) - needIt, err := needInnoDBRedoLogSubdir() + needIt, err := blackbox.NeedInnoDBRedoLogSubdir() require.NoError(t, err) if needIt { newPath := path.Join(sourceInnodbLogDir, mysql.DynamicRedoLogSubdir) @@ -767,7 +767,7 @@ func TestDisableActiveReparents(t *testing.T) { } require.NoError(t, os.WriteFile(path.Join(sourceInnodbDataDir, "innodb_data_1"), []byte("innodb data 1 contents"), os.ModePerm)) - needIt, err := needInnoDBRedoLogSubdir() + needIt, err := blackbox.NeedInnoDBRedoLogSubdir() require.NoError(t, err) if needIt { newPath := path.Join(sourceInnodbLogDir, mysql.DynamicRedoLogSubdir) @@ -877,25 +877,3 @@ func TestDisableActiveReparents(t *testing.T) { assert.False(t, destTablet.FakeMysqlDaemon.Replicating) assert.True(t, destTablet.FakeMysqlDaemon.Running) } - -// needInnoDBRedoLogSubdir indicates whether we need to create a redo log subdirectory. -// Starting with MySQL 8.0.30, the InnoDB redo logs are stored in a subdirectory of the -// (/. by default) called "#innodb_redo". 
See: -// -// https://dev.mysql.com/doc/refman/8.0/en/innodb-redo-log.html#innodb-modifying-redo-log-capacity -func needInnoDBRedoLogSubdir() (needIt bool, err error) { - mysqldVersionStr, err := mysqlctl.GetVersionString() - if err != nil { - return needIt, err - } - _, sv, err := mysqlctl.ParseVersionString(mysqldVersionStr) - if err != nil { - return needIt, err - } - versionStr := fmt.Sprintf("%d.%d.%d", sv.Major, sv.Minor, sv.Patch) - capableOf := mysql.ServerVersionCapableOf(versionStr) - if capableOf == nil { - return needIt, fmt.Errorf("cannot determine database flavor details for version %s", versionStr) - } - return capableOf(capabilities.DynamicRedoLogCapacityFlavorCapability) -} diff --git a/proto/replicationdata.proto b/proto/replicationdata.proto index 76ca3e02103..e788fc64bc3 100644 --- a/proto/replicationdata.proto +++ b/proto/replicationdata.proto @@ -66,7 +66,6 @@ message Configuration { message StopReplicationStatus { replicationdata.Status before = 1; replicationdata.Status after = 2; - bool backup_running = 3; } // StopReplicationMode is used to provide controls over how replication is stopped. diff --git a/proto/tabletmanagerdata.proto b/proto/tabletmanagerdata.proto index 9bb60184267..bb20e712e7f 100644 --- a/proto/tabletmanagerdata.proto +++ b/proto/tabletmanagerdata.proto @@ -360,7 +360,6 @@ message ReplicationStatusRequest { message ReplicationStatusResponse { replicationdata.Status status = 1; - bool backup_running = 2; } message PrimaryStatusRequest { @@ -549,7 +548,6 @@ message StopReplicationAndGetStatusResponse { // Status represents the replication status call right before, and right after telling the replica to stop. replicationdata.StopReplicationStatus status = 2; - bool backup_running = 3; } message PromoteReplicaRequest { diff --git a/test.go b/test.go index 14f51a06e9d..95d62af892f 100755 --- a/test.go +++ b/test.go @@ -77,7 +77,7 @@ For example: // Flags var ( flavor = flag.String("flavor", "mysql80", "comma-separated bootstrap flavor(s) to run against (when using Docker mode). 
Available flavors: all,"+flavors) - bootstrapVersion = flag.String("bootstrap-version", "38", "the version identifier to use for the docker images") + bootstrapVersion = flag.String("bootstrap-version", "39", "the version identifier to use for the docker images") runCount = flag.Int("runs", 1, "run each test this many times") retryMax = flag.Int("retry", 3, "max number of retries, to detect flaky tests") logPass = flag.Bool("log-pass", false, "log test output even if it passes") diff --git a/test/ci_workflow_gen.go b/test/ci_workflow_gen.go index 7956c491408..b24db1154fb 100644 --- a/test/ci_workflow_gen.go +++ b/test/ci_workflow_gen.go @@ -103,6 +103,7 @@ var ( "vtgate_vindex_heavy", "vtgate_vschema", "vtgate_queries", + "vtgate_plantests", "vtgate_schema_tracker", "vtgate_foreignkey_stress", "vtorc", @@ -157,6 +158,9 @@ var ( "vreplication_migrate", "vreplication_vtctldclient_vdiff2_movetables_tz", } + clusterRequiringMinio = []string{ + "21", + } ) type unitTest struct { @@ -174,6 +178,7 @@ type clusterTest struct { EnableBinlogTransactionCompression bool PartialKeyspace bool Cores16 bool + NeedsMinio bool } type vitessTesterTest struct { @@ -286,6 +291,13 @@ func generateClusterWorkflows(list []string, tpl string) { break } } + minioClusters := canonnizeList(clusterRequiringMinio) + for _, minioCluster := range minioClusters { + if minioCluster == cluster { + test.NeedsMinio = true + break + } + } if mysqlVersion == mysql57 { test.Platform = string(mysql57) } diff --git a/test/config.json b/test/config.json index 1e278546c7a..da0026f0125 100644 --- a/test/config.json +++ b/test/config.json @@ -136,6 +136,15 @@ "RetryMax": 1, "Tags": [] }, + "backup_s3": { + "File": "unused.go", + "Args": ["vitess.io/vitess/go/test/endtoend/backup/s3", "-timeout", "30m"], + "Command": [], + "Manual": false, + "Shard": "21", + "RetryMax": 1, + "Tags": [] + }, "backup_only": { "File": "unused.go", "Args": ["vitess.io/vitess/go/test/endtoend/backup/vtbackup", "-timeout", "20m"], @@ -340,17 +349,6 @@ "RetryMax": 1, "Tags": [] }, - "pitr": { - "File": "unused.go", - "Args": ["vitess.io/vitess/go/test/endtoend/recovery/pitr"], - "Command": [], - "Manual": false, - "Shard": "10", - "RetryMax": 1, - "Tags": [ - "site_test" - ] - }, "recovery": { "File": "unused.go", "Args": ["vitess.io/vitess/go/test/endtoend/recovery/unshardedrecovery"], @@ -887,6 +885,15 @@ "RetryMax": 1, "Tags": [] }, + "vtgate_plantests": { + "File": "unused.go", + "Args": ["vitess.io/vitess/go/test/endtoend/vtgate/plan_tests"], + "Command": [], + "Manual": false, + "Shard": "vtgate_plantests", + "RetryMax": 1, + "Tags": [] + }, "vtgate_unsharded": { "File": "unused.go", "Args": ["vitess.io/vitess/go/test/endtoend/vtgate/unsharded"], @@ -1384,6 +1391,15 @@ "RetryMax": 1, "Tags": [] }, + "loopkup_index": { + "File": "unused.go", + "Args": ["vitess.io/vitess/go/test/endtoend/vreplication", "-run", "TestLookupIndex"], + "Command": [], + "Manual": false, + "Shard": "vreplication_vtctldclient_vdiff2_movetables_tz", + "RetryMax": 1, + "Tags": [] + }, "vtadmin": { "File": "unused.go", "Args": ["vitess.io/vitess/go/test/endtoend/vtadmin"], diff --git a/test/templates/cluster_endtoend_test.tpl b/test/templates/cluster_endtoend_test.tpl index 6fe58fae361..8d0a2f650b5 100644 --- a/test/templates/cluster_endtoend_test.tpl +++ b/test/templates/cluster_endtoend_test.tpl @@ -157,6 +157,15 @@ jobs: {{end}} + {{if .NeedsMinio }} + - name: Install Minio + if: steps.skip-workflow.outputs.skip-workflow == 'false' + run: | + wget 
https://dl.min.io/server/minio/release/linux-amd64/minio + chmod +x minio + mv minio /usr/local/bin + {{end}} + {{if .MakeTools}} - name: Installing zookeeper and consul diff --git a/test/templates/dockerfile.tpl b/test/templates/dockerfile.tpl index 82388850947..af4376d3ca9 100644 --- a/test/templates/dockerfile.tpl +++ b/test/templates/dockerfile.tpl @@ -1,4 +1,4 @@ -ARG bootstrap_version=38 +ARG bootstrap_version=39 ARG image="vitess/bootstrap:${bootstrap_version}-{{.Platform}}" FROM "${image}" diff --git a/web/vtadmin/package-lock.json b/web/vtadmin/package-lock.json index 6004278321a..8ad7c67a5b4 100644 --- a/web/vtadmin/package-lock.json +++ b/web/vtadmin/package-lock.json @@ -10863,15 +10863,16 @@ } }, "node_modules/nanoid": { - "version": "3.3.7", - "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.7.tgz", - "integrity": "sha512-eSRppjcPIatRIMC1U6UngP8XFcz8MQWGQdt1MTBQ7NaAmvXDfvNxbvWV3x2y6CdEUciCSsDHDQZbhYaB8QEo2g==", + "version": "3.3.8", + "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.8.tgz", + "integrity": "sha512-WNLf5Sd8oZxOm+TzppcYk8gVOgP+l58xNy58D0nbUnOxOWRWvlcCV4kUF7ltmI6PsrLl/BgKEyS4mqsGChFN0w==", "funding": [ { "type": "github", "url": "https://github.com/sponsors/ai" } ], + "license": "MIT", "bin": { "nanoid": "bin/nanoid.cjs" }, diff --git a/web/vtadmin/src/proto/vtadmin.d.ts b/web/vtadmin/src/proto/vtadmin.d.ts index 410aaa644ff..7b921fc034a 100644 --- a/web/vtadmin/src/proto/vtadmin.d.ts +++ b/web/vtadmin/src/proto/vtadmin.d.ts @@ -26590,9 +26590,6 @@ export namespace tabletmanagerdata { /** ReplicationStatusResponse status */ status?: (replicationdata.IStatus|null); - - /** ReplicationStatusResponse backup_running */ - backup_running?: (boolean|null); } /** Represents a ReplicationStatusResponse. */ @@ -26607,9 +26604,6 @@ export namespace tabletmanagerdata { /** ReplicationStatusResponse status. */ public status?: (replicationdata.IStatus|null); - /** ReplicationStatusResponse backup_running. */ - public backup_running: boolean; - /** * Creates a new ReplicationStatusResponse instance using the specified properties. * @param [properties] Properties to set @@ -31004,9 +30998,6 @@ export namespace tabletmanagerdata { /** StopReplicationAndGetStatusResponse status */ status?: (replicationdata.IStopReplicationStatus|null); - - /** StopReplicationAndGetStatusResponse backup_running */ - backup_running?: (boolean|null); } /** Represents a StopReplicationAndGetStatusResponse. */ @@ -31021,9 +31012,6 @@ export namespace tabletmanagerdata { /** StopReplicationAndGetStatusResponse status. */ public status?: (replicationdata.IStopReplicationStatus|null); - /** StopReplicationAndGetStatusResponse backup_running. */ - public backup_running: boolean; - /** * Creates a new StopReplicationAndGetStatusResponse instance using the specified properties. * @param [properties] Properties to set @@ -48219,9 +48207,6 @@ export namespace replicationdata { /** StopReplicationStatus after */ after?: (replicationdata.IStatus|null); - - /** StopReplicationStatus backup_running */ - backup_running?: (boolean|null); } /** Represents a StopReplicationStatus. */ @@ -48239,9 +48224,6 @@ export namespace replicationdata { /** StopReplicationStatus after. */ public after?: (replicationdata.IStatus|null); - /** StopReplicationStatus backup_running. */ - public backup_running: boolean; - /** * Creates a new StopReplicationStatus instance using the specified properties. 
* @param [properties] Properties to set diff --git a/web/vtadmin/src/proto/vtadmin.js b/web/vtadmin/src/proto/vtadmin.js index b8ab0c1186a..e5bcf766b40 100644 --- a/web/vtadmin/src/proto/vtadmin.js +++ b/web/vtadmin/src/proto/vtadmin.js @@ -61392,7 +61392,6 @@ export const tabletmanagerdata = $root.tabletmanagerdata = (() => { * @memberof tabletmanagerdata * @interface IReplicationStatusResponse * @property {replicationdata.IStatus|null} [status] ReplicationStatusResponse status - * @property {boolean|null} [backup_running] ReplicationStatusResponse backup_running */ /** @@ -61418,14 +61417,6 @@ export const tabletmanagerdata = $root.tabletmanagerdata = (() => { */ ReplicationStatusResponse.prototype.status = null; - /** - * ReplicationStatusResponse backup_running. - * @member {boolean} backup_running - * @memberof tabletmanagerdata.ReplicationStatusResponse - * @instance - */ - ReplicationStatusResponse.prototype.backup_running = false; - /** * Creates a new ReplicationStatusResponse instance using the specified properties. * @function create @@ -61452,8 +61443,6 @@ export const tabletmanagerdata = $root.tabletmanagerdata = (() => { writer = $Writer.create(); if (message.status != null && Object.hasOwnProperty.call(message, "status")) $root.replicationdata.Status.encode(message.status, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); - if (message.backup_running != null && Object.hasOwnProperty.call(message, "backup_running")) - writer.uint32(/* id 2, wireType 0 =*/16).bool(message.backup_running); return writer; }; @@ -61492,10 +61481,6 @@ export const tabletmanagerdata = $root.tabletmanagerdata = (() => { message.status = $root.replicationdata.Status.decode(reader, reader.uint32()); break; } - case 2: { - message.backup_running = reader.bool(); - break; - } default: reader.skipType(tag & 7); break; @@ -61536,9 +61521,6 @@ export const tabletmanagerdata = $root.tabletmanagerdata = (() => { if (error) return "status." 
+ error; } - if (message.backup_running != null && message.hasOwnProperty("backup_running")) - if (typeof message.backup_running !== "boolean") - return "backup_running: boolean expected"; return null; }; @@ -61559,8 +61541,6 @@ export const tabletmanagerdata = $root.tabletmanagerdata = (() => { throw TypeError(".tabletmanagerdata.ReplicationStatusResponse.status: object expected"); message.status = $root.replicationdata.Status.fromObject(object.status); } - if (object.backup_running != null) - message.backup_running = Boolean(object.backup_running); return message; }; @@ -61577,14 +61557,10 @@ export const tabletmanagerdata = $root.tabletmanagerdata = (() => { if (!options) options = {}; let object = {}; - if (options.defaults) { + if (options.defaults) object.status = null; - object.backup_running = false; - } if (message.status != null && message.hasOwnProperty("status")) object.status = $root.replicationdata.Status.toObject(message.status, options); - if (message.backup_running != null && message.hasOwnProperty("backup_running")) - object.backup_running = message.backup_running; return object; }; @@ -70588,7 +70564,6 @@ export const tabletmanagerdata = $root.tabletmanagerdata = (() => { * @memberof tabletmanagerdata * @interface IStopReplicationAndGetStatusResponse * @property {replicationdata.IStopReplicationStatus|null} [status] StopReplicationAndGetStatusResponse status - * @property {boolean|null} [backup_running] StopReplicationAndGetStatusResponse backup_running */ /** @@ -70614,14 +70589,6 @@ export const tabletmanagerdata = $root.tabletmanagerdata = (() => { */ StopReplicationAndGetStatusResponse.prototype.status = null; - /** - * StopReplicationAndGetStatusResponse backup_running. - * @member {boolean} backup_running - * @memberof tabletmanagerdata.StopReplicationAndGetStatusResponse - * @instance - */ - StopReplicationAndGetStatusResponse.prototype.backup_running = false; - /** * Creates a new StopReplicationAndGetStatusResponse instance using the specified properties. * @function create @@ -70648,8 +70615,6 @@ export const tabletmanagerdata = $root.tabletmanagerdata = (() => { writer = $Writer.create(); if (message.status != null && Object.hasOwnProperty.call(message, "status")) $root.replicationdata.StopReplicationStatus.encode(message.status, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); - if (message.backup_running != null && Object.hasOwnProperty.call(message, "backup_running")) - writer.uint32(/* id 3, wireType 0 =*/24).bool(message.backup_running); return writer; }; @@ -70688,10 +70653,6 @@ export const tabletmanagerdata = $root.tabletmanagerdata = (() => { message.status = $root.replicationdata.StopReplicationStatus.decode(reader, reader.uint32()); break; } - case 3: { - message.backup_running = reader.bool(); - break; - } default: reader.skipType(tag & 7); break; @@ -70732,9 +70693,6 @@ export const tabletmanagerdata = $root.tabletmanagerdata = (() => { if (error) return "status." 
+ error; } - if (message.backup_running != null && message.hasOwnProperty("backup_running")) - if (typeof message.backup_running !== "boolean") - return "backup_running: boolean expected"; return null; }; @@ -70755,8 +70713,6 @@ export const tabletmanagerdata = $root.tabletmanagerdata = (() => { throw TypeError(".tabletmanagerdata.StopReplicationAndGetStatusResponse.status: object expected"); message.status = $root.replicationdata.StopReplicationStatus.fromObject(object.status); } - if (object.backup_running != null) - message.backup_running = Boolean(object.backup_running); return message; }; @@ -70773,14 +70729,10 @@ export const tabletmanagerdata = $root.tabletmanagerdata = (() => { if (!options) options = {}; let object = {}; - if (options.defaults) { + if (options.defaults) object.status = null; - object.backup_running = false; - } if (message.status != null && message.hasOwnProperty("status")) object.status = $root.replicationdata.StopReplicationStatus.toObject(message.status, options); - if (message.backup_running != null && message.hasOwnProperty("backup_running")) - object.backup_running = message.backup_running; return object; }; @@ -117256,7 +117208,6 @@ export const replicationdata = $root.replicationdata = (() => { * @interface IStopReplicationStatus * @property {replicationdata.IStatus|null} [before] StopReplicationStatus before * @property {replicationdata.IStatus|null} [after] StopReplicationStatus after - * @property {boolean|null} [backup_running] StopReplicationStatus backup_running */ /** @@ -117290,14 +117241,6 @@ export const replicationdata = $root.replicationdata = (() => { */ StopReplicationStatus.prototype.after = null; - /** - * StopReplicationStatus backup_running. - * @member {boolean} backup_running - * @memberof replicationdata.StopReplicationStatus - * @instance - */ - StopReplicationStatus.prototype.backup_running = false; - /** * Creates a new StopReplicationStatus instance using the specified properties. * @function create @@ -117326,8 +117269,6 @@ export const replicationdata = $root.replicationdata = (() => { $root.replicationdata.Status.encode(message.before, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); if (message.after != null && Object.hasOwnProperty.call(message, "after")) $root.replicationdata.Status.encode(message.after, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); - if (message.backup_running != null && Object.hasOwnProperty.call(message, "backup_running")) - writer.uint32(/* id 3, wireType 0 =*/24).bool(message.backup_running); return writer; }; @@ -117370,10 +117311,6 @@ export const replicationdata = $root.replicationdata = (() => { message.after = $root.replicationdata.Status.decode(reader, reader.uint32()); break; } - case 3: { - message.backup_running = reader.bool(); - break; - } default: reader.skipType(tag & 7); break; @@ -117419,9 +117356,6 @@ export const replicationdata = $root.replicationdata = (() => { if (error) return "after." 
+ error; } - if (message.backup_running != null && message.hasOwnProperty("backup_running")) - if (typeof message.backup_running !== "boolean") - return "backup_running: boolean expected"; return null; }; @@ -117447,8 +117381,6 @@ export const replicationdata = $root.replicationdata = (() => { throw TypeError(".replicationdata.StopReplicationStatus.after: object expected"); message.after = $root.replicationdata.Status.fromObject(object.after); } - if (object.backup_running != null) - message.backup_running = Boolean(object.backup_running); return message; }; @@ -117468,14 +117400,11 @@ export const replicationdata = $root.replicationdata = (() => { if (options.defaults) { object.before = null; object.after = null; - object.backup_running = false; } if (message.before != null && message.hasOwnProperty("before")) object.before = $root.replicationdata.Status.toObject(message.before, options); if (message.after != null && message.hasOwnProperty("after")) object.after = $root.replicationdata.Status.toObject(message.after, options); - if (message.backup_running != null && message.hasOwnProperty("backup_running")) - object.backup_running = message.backup_running; return object; };
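Note on the relocated test helper: the hunks in go/vt/wrangler/testlib/backup_test.go above delete the local needInnoDBRedoLogSubdir function and repoint its call sites at blackbox.NeedInnoDBRedoLogSubdir, with the new import "vitess.io/vitess/go/vt/mysqlctl/blackbox". The sketch below is a reconstruction of what the blackbox-side helper is assumed to look like; the package path, exported name, and signature come from the import and call sites shown in this diff, while the body simply mirrors the code removed here and may differ from the actual implementation in that package.

// Sketch of the relocated helper, reconstructed from the body removed from
// go/vt/wrangler/testlib/backup_test.go in this diff. Assumed to live in
// go/vt/mysqlctl/blackbox; not copied from that package.
package blackbox

import (
	"fmt"

	"vitess.io/vitess/go/mysql"
	"vitess.io/vitess/go/mysql/capabilities"
	"vitess.io/vitess/go/vt/mysqlctl"
)

// NeedInnoDBRedoLogSubdir reports whether the local mysqld stores its InnoDB
// redo logs in the "#innodb_redo" subdirectory (MySQL 8.0.30 and later), so
// test setup knows whether to create that directory before restoring.
func NeedInnoDBRedoLogSubdir() (needIt bool, err error) {
	// Ask the installed mysqld for its version string, e.g. "mysqld  Ver 8.0.36 ...".
	mysqldVersionStr, err := mysqlctl.GetVersionString()
	if err != nil {
		return needIt, err
	}
	// Parse it into a structured server version (major/minor/patch).
	_, sv, err := mysqlctl.ParseVersionString(mysqldVersionStr)
	if err != nil {
		return needIt, err
	}
	versionStr := fmt.Sprintf("%d.%d.%d", sv.Major, sv.Minor, sv.Patch)
	// Map the version to a capability checker and query the dynamic
	// redo-log-capacity capability, which implies the "#innodb_redo" layout.
	capableOf := mysql.ServerVersionCapableOf(versionStr)
	if capableOf == nil {
		return needIt, fmt.Errorf("cannot determine database flavor details for version %s", versionStr)
	}
	return capableOf(capabilities.DynamicRedoLogCapacityFlavorCapability)
}

The decision keys off capabilities.DynamicRedoLogCapacityFlavorCapability, so the redo-log subdirectory is only created for server versions that report dynamic redo-log capacity, matching the behavior of the helper this diff removes from the testlib package.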