From b4db9b2ef545640c9b9b160ecde36bed6cbd47bd Mon Sep 17 00:00:00 2001 From: Matias S <83959431+mativm02@users.noreply.github.com> Date: Fri, 9 Sep 2022 18:52:48 +0200 Subject: [PATCH 001/102] TT-3514 - Adding CosmosDB support (#481) * adding cosmosDB support + unit tests * adding unit tests * adding switch statement and DBs errors as constants * adding info about different mongo instances in docs * adding Indexes field to unit tests * refactoring code based on golangci-lint * refactoring code based on golangci-lint --- README.md | 3 ++- pumps/mongo.go | 15 ++++++++++++--- pumps/mongo_aggregate.go | 19 +++++++++++-------- pumps/mongo_selective.go | 19 +++++++++++-------- pumps/mongo_test.go | 39 ++++++++++++++++++++++++++++++--------- 5 files changed, 66 insertions(+), 29 deletions(-) diff --git a/README.md b/README.md index d8bb189b8..e4a689b52 100644 --- a/README.md +++ b/README.md @@ -298,7 +298,8 @@ TYK_PMP_PUMPS_STATSD_META_SEPARATEDMETHOD=false ## Mongo & Tyk Dashboard. There are 3 mongo pumps. You may use one or multiple depending on the data you want. -The Tyk Dashboard uses various Mongo collections to store and visualize API traffic analytics. Please visit [this link](https://tyk.io/docs/tyk-pump/tyk-pump-configuration/tyk-pump-dashboard-config/) for steps on configuration. +The Tyk Dashboard uses various Mongo collections to store and visualize API traffic analytics. Please visit [this link](https://tyk.io/docs/tyk-pump/tyk-pump-configuration/tyk-pump-dashboard-config/) for steps on configuration. +Available Mongo instances are: Standard Mongo, DocumentDB (AWS), CosmosDB (Azure). All of them using the same configuration (CosmosDB does not support "expireAt" index, so it will be skipped) ###### JSON / Conf File diff --git a/pumps/mongo.go b/pumps/mongo.go index 2b1f168bc..79aef9826 100644 --- a/pumps/mongo.go +++ b/pumps/mongo.go @@ -49,6 +49,12 @@ type MongoType int const ( StandardMongo MongoType = iota AWSDocumentDB + CosmosDB +) + +const ( + AWSDBError = 303 + CosmosDBError = 115 ) type BaseMongoConf struct { @@ -68,7 +74,7 @@ type BaseMongoConf struct { // Path to the PEM file which contains both client certificate and private key. This is // required for Mutual TLS. MongoSSLPEMKeyfile string `json:"mongo_ssl_pem_keyfile" mapstructure:"mongo_ssl_pem_keyfile"` - // Specifies the mongo DB Type. If it's 0, it means that you are using standard mongo db, but if it's 1 it means you are using AWS Document DB. + // Specifies the mongo DB Type. If it's 0, it means that you are using standard mongo db, if it's 1 it means you are using AWS Document DB, if it's 2, it means you are using CosmosDB. // Defaults to Standard mongo (0). MongoDBType MongoType `json:"mongo_db_type" mapstructure:"mongo_db_type"` // Set to true to disable the default tyk index creation. 
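For illustration (not part of the patch itself), a Mongo pump pointed at a CosmosDB instance would select the new database type through the `mongo_db_type` field documented above; type `2` corresponds to CosmosDB per the constants introduced in this change. The collection name and connection URL below are placeholders:

```.json
{
  "pumps": {
    "mongo": {
      "type": "mongo",
      "meta": {
        "collection_name": "tyk_analytics",
        "mongo_url": "mongodb://<account>:<key>@<account>.mongo.cosmos.azure.com:10255/tyk_analytics?ssl=true",
        "mongo_db_type": 2
      }
    }
  }
}
```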
@@ -162,9 +168,12 @@ func mongoType(session *mgo.Session) MongoType { } session.Run("features", &result) - if result.Code == 303 { + switch result.Code { + case AWSDBError: return AWSDocumentDB - } else { + case CosmosDBError: + return CosmosDB + default: return StandardMongo } } diff --git a/pumps/mongo_aggregate.go b/pumps/mongo_aggregate.go index 95640a7f3..683251eaa 100644 --- a/pumps/mongo_aggregate.go +++ b/pumps/mongo_aggregate.go @@ -221,15 +221,18 @@ func (m *MongoAggregatePump) ensureIndexes(c *mgo.Collection) error { } var err error - ttlIndex := mgo.Index{ - Key: []string{"expireAt"}, - ExpireAfter: 0, - Background: m.dbConf.MongoDBType == StandardMongo, - } + // CosmosDB does not support "expireAt" option + if m.dbConf.MongoDBType != CosmosDB { + ttlIndex := mgo.Index{ + Key: []string{"expireAt"}, + ExpireAfter: 0, + Background: m.dbConf.MongoDBType == StandardMongo, + } - err = mgohacks.EnsureTTLIndex(c, ttlIndex) - if err != nil { - return err + err = mgohacks.EnsureTTLIndex(c, ttlIndex) + if err != nil { + return err + } } apiIndex := mgo.Index{ diff --git a/pumps/mongo_selective.go b/pumps/mongo_selective.go index fbb6fa544..616c1890f 100644 --- a/pumps/mongo_selective.go +++ b/pumps/mongo_selective.go @@ -137,15 +137,18 @@ func (m *MongoSelectivePump) ensureIndexes(c *mgo.Collection) error { } var err error - ttlIndex := mgo.Index{ - Key: []string{"expireAt"}, - ExpireAfter: 0, - Background: m.dbConf.MongoDBType == StandardMongo, - } + // CosmosDB does not support "expireAt" option + if m.dbConf.MongoDBType != CosmosDB { + ttlIndex := mgo.Index{ + Key: []string{"expireAt"}, + ExpireAfter: 0, + Background: m.dbConf.MongoDBType == StandardMongo, + } - err = mgohacks.EnsureTTLIndex(c, ttlIndex) - if err != nil { - return err + err = mgohacks.EnsureTTLIndex(c, ttlIndex) + if err != nil { + return err + } } apiIndex := mgo.Index{ diff --git a/pumps/mongo_test.go b/pumps/mongo_test.go index 08792c267..26f761097 100644 --- a/pumps/mongo_test.go +++ b/pumps/mongo_test.go @@ -55,52 +55,73 @@ func TestMongoPumpOmitIndexCreation(t *testing.T) { tcs := []struct { testName string shouldDropCollection bool - Indexes int + ExpectedIndexes int OmitIndexCreation bool dbType MongoType }{ { testName: "omitting index creation - StandardMongo", shouldDropCollection: true, - Indexes: 1, //1 index corresponding to _id + ExpectedIndexes: 1, // 1 index corresponding to _id OmitIndexCreation: true, dbType: StandardMongo, }, { testName: "not omitting index creation but mongo collection already exists - StandardMongo", shouldDropCollection: false, - Indexes: 1, //1 index corresponding to _id + ExpectedIndexes: 1, // 1 index corresponding to _id OmitIndexCreation: false, dbType: StandardMongo, }, { testName: "not omitting index creation but mongo collection doesn't exists - StandardMongo", shouldDropCollection: true, - Indexes: 4, //1 index corresponding to _id + 3 from tyk + ExpectedIndexes: 4, // 1 index corresponding to _id + 3 from tyk OmitIndexCreation: false, dbType: StandardMongo, }, { testName: "omitting index creation - DocDB", shouldDropCollection: true, - Indexes: 1, //1 index corresponding to _id + ExpectedIndexes: 1, // 1 index corresponding to _id OmitIndexCreation: true, dbType: AWSDocumentDB, }, { testName: "not omitting index creation but mongo collection already exists - DocDB", shouldDropCollection: false, - Indexes: 4, //1 index corresponding to _id + 3 from tyk + ExpectedIndexes: 4, // 1 index corresponding to _id + 3 from tyk OmitIndexCreation: false, dbType: AWSDocumentDB, }, { 
testName: "not omitting index creation but mongo collection doesn't exists - DocDB", shouldDropCollection: true, - Indexes: 4, //1 index corresponding to _id + 3 from tyk + ExpectedIndexes: 4, // 1 index corresponding to _id + 3 from tyk OmitIndexCreation: false, dbType: AWSDocumentDB, }, + { + testName: "omitting index creation - CosmosDB", + shouldDropCollection: true, + ExpectedIndexes: 1, // 1 index corresponding to _id + OmitIndexCreation: true, + dbType: CosmosDB, + }, + { + testName: "not omitting index creation but mongo collection already exists - CosmosDB", + shouldDropCollection: false, + ExpectedIndexes: 4, // 1 index corresponding to _id + 3 from tyk + OmitIndexCreation: false, + dbType: CosmosDB, + }, + { + testName: "not omitting index creation but mongo collection doesn't exists - CosmosDB", + shouldDropCollection: true, + ExpectedIndexes: 4, // 1 index corresponding to _id + 3 from tyk + OmitIndexCreation: false, + dbType: CosmosDB, + }, } for _, tc := range tcs { @@ -125,8 +146,8 @@ func TestMongoPumpOmitIndexCreation(t *testing.T) { t.Error("error getting indexes:", errIndexes) } - if len(indexes) != tc.Indexes { - t.Errorf("wanted %v index but got %v indexes", tc.Indexes, len(indexes)) + if len(indexes) != tc.ExpectedIndexes { + t.Errorf("wanted %v index but got %v indexes", tc.ExpectedIndexes, len(indexes)) } }) } From fac0819107b34c92e56d0423e8c5b90feaed6e8d Mon Sep 17 00:00:00 2001 From: Mladen Kolavcic <45770178+kolavcic@users.noreply.github.com> Date: Mon, 12 Sep 2022 11:17:58 +0200 Subject: [PATCH 002/102] Bump version to v1.7 (#487) * Bump version to v1.7 * Update pumps/version.go Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: Tomas Buchaillot Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> --- pumps/version.go | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/pumps/version.go b/pumps/version.go index 7ac2ae97f..26c4f5c1f 100644 --- a/pumps/version.go +++ b/pumps/version.go @@ -1,4 +1,6 @@ package pumps -var VERSION = "v1.5.1" -var builtBy, Commit, buildDate string +var ( + VERSION = "v1.7" + builtBy, Commit, buildDate string +) From bc27ee0aa465d87dc27d95bc8e0ab89b4575d327 Mon Sep 17 00:00:00 2001 From: Esteban Ricardo Mirizio <35462288+ermirizio@users.noreply.github.com> Date: Tue, 11 Oct 2022 10:14:03 -0300 Subject: [PATCH 003/102] Releng sync (#495) * Releng sync Co-authored-by: Gromit --- .github/dependabot.yml | 4 ++- .github/workflows/del-env.yml | 2 +- .github/workflows/release.yml | 56 ++++++++++++++++---------------- ci/Dockerfile.std | 2 +- ci/aws/byol.pkr.hcl | 2 +- ci/goreleaser/goreleaser-el7.yml | 3 +- ci/goreleaser/goreleaser.yml | 3 +- ci/install/before_install.sh | 2 +- ci/install/post_install.sh | 2 +- ci/install/post_remove.sh | 2 +- ci/install/post_trans.sh | 2 +- ci/terraform/outputs.tf | 2 +- 12 files changed, 41 insertions(+), 41 deletions(-) diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 7408ef51e..04c636919 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -1,6 +1,6 @@ # Generated by: gromit policy -# Generated on: Mon Aug 8 03:03:25 UTC 2022 +# Generated on: Mon Oct 10 21:30:09 UTC 2022 version: 2 updates: @@ -9,6 +9,8 @@ updates: directory: "/" schedule: interval: "weekly" + reviewers: + - "TykTechnologies/devops" - package-ecosystem: "gomod" # Look for `go.mod` file in the `root` directory diff --git a/.github/workflows/del-env.yml b/.github/workflows/del-env.yml index 
adc7bcc54..b7f57244b 100644 --- a/.github/workflows/del-env.yml +++ b/.github/workflows/del-env.yml @@ -1,6 +1,6 @@ # Generated by: gromit policy -# Generated on: Mon Aug 8 03:03:25 UTC 2022 +# Generated on: Mon Oct 10 21:30:09 UTC 2022 name: Retiring dev env diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 769bcb9c1..9db6234d9 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -1,6 +1,6 @@ # Generated by: gromit policy -# Generated on: Mon Aug 8 03:03:25 UTC 2022 +# Generated on: Mon Oct 10 21:30:09 UTC 2022 # Distribution channels covered by this workflow @@ -55,23 +55,23 @@ jobs: git config --global url."https://${TOKEN}@github.com".insteadOf "https://github.com" - name: Checkout of tyk-pump - uses: actions/checkout@v2 + uses: actions/checkout@v3 with: fetch-depth: 1 - - uses: docker/setup-qemu-action@v1 + - uses: docker/setup-qemu-action@v2 - - uses: docker/setup-buildx-action@v1 + - uses: docker/setup-buildx-action@v2 - name: Login to DockerHub if: startsWith(github.ref, 'refs/tags') - uses: docker/login-action@v1 + uses: docker/login-action@v2 with: username: ${{ secrets.DOCKER_USERNAME }} password: ${{ secrets.DOCKER_PASSWORD }} - name: Login to Cloudsmith if: startsWith(github.ref, 'refs/tags') - uses: docker/login-action@v1 + uses: docker/login-action@v2 with: registry: docker.tyk.io username: ${{ secrets.CLOUDSMITH_USERNAME }} @@ -104,7 +104,7 @@ jobs: *.txt.sig *.txt - - uses: goreleaser/goreleaser-action@v2 + - uses: goreleaser/goreleaser-action@v3 with: version: latest args: release --rm-dist -f ${{ matrix.goreleaser }} @@ -120,7 +120,7 @@ jobs: RPMVERS: ${{ matrix.rpmvers }} PACKAGECLOUD_TOKEN: ${{ secrets.PACKAGECLOUD_TOKEN }} - - uses: actions/upload-artifact@v2 + - uses: actions/upload-artifact@v3 with: name: deb retention-days: 1 @@ -128,7 +128,7 @@ jobs: dist/*.deb !dist/*PAYG*.deb - - uses: actions/upload-artifact@v2 + - uses: actions/upload-artifact@v3 with: name: rpm retention-days: 1 @@ -142,11 +142,11 @@ jobs: steps: - name: Shallow checkout of tyk-pump - uses: actions/checkout@v2 + uses: actions/checkout@v3 with: fetch-depth: 1 - name: Setup Terraform - uses: hashicorp/setup-terraform@v1 + uses: hashicorp/setup-terraform@v2 with: cli_config_credentials_token: ${{ secrets.TF_API_TOKEN }} terraform_wrapper: false @@ -175,16 +175,16 @@ jobs: id: login-ecr uses: aws-actions/amazon-ecr-login@v1 - - uses: actions/download-artifact@v2 + - uses: actions/download-artifact@v3 with: name: deb - - uses: docker/setup-qemu-action@v1 + - uses: docker/setup-qemu-action@v2 - - uses: docker/setup-buildx-action@v1 + - uses: docker/setup-buildx-action@v2 - name: CI build - uses: docker/build-push-action@v2 + uses: docker/build-push-action@v3 with: push: true context: "." 
@@ -212,7 +212,7 @@ jobs: --author 'Bender' \ --author-icon 'https://hcoop.net/~alephnull/bender/bender-arms.jpg' \ --author-link 'https://github.com/TykTechnologies/tyk-ci' \ - --channel '#integration' \ + --channel '#service-integration' \ --color $colour \ --fields '{"title": "Repo", "value": "${{ github.repository }}", "short": false}' \ --footer 'github-actions' \ @@ -239,17 +239,17 @@ jobs: - debian:bullseye steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 with: fetch-depth: 1 - - uses: actions/download-artifact@v2 + - uses: actions/download-artifact@v3 with: name: deb - - uses: docker/setup-qemu-action@v1 + - uses: docker/setup-qemu-action@v2 - - uses: docker/setup-buildx-action@v1 + - uses: docker/setup-buildx-action@v2 - name: generate dockerfile run: | @@ -262,7 +262,7 @@ jobs: ' > Dockerfile - name: install on ${{ matrix.distro }} - uses: docker/build-push-action@v2 + uses: docker/build-push-action@v3 with: context: "." platforms: linux/${{ matrix.arch }} @@ -280,15 +280,15 @@ jobs: - ubi8/ubi steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 with: fetch-depth: 1 - - uses: actions/download-artifact@v2 + - uses: actions/download-artifact@v3 with: name: rpm - - uses: docker/setup-buildx-action@v1 + - uses: docker/setup-buildx-action@v2 - name: generate dockerfile run: | @@ -302,7 +302,7 @@ jobs: ' > Dockerfile - name: install on ${{ matrix.distro }} - uses: docker/build-push-action@v2 + uses: docker/build-push-action@v3 with: context: "." file: Dockerfile @@ -315,7 +315,7 @@ jobs: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 with: fetch-depth: 1 @@ -363,11 +363,11 @@ jobs: steps: - name: Checkout tyk-pump - uses: actions/checkout@v2 + uses: actions/checkout@v3 with: fetch-depth: 1 - - uses: actions/download-artifact@v2 + - uses: actions/download-artifact@v3 with: name: rpm path: aws diff --git a/ci/Dockerfile.std b/ci/Dockerfile.std index be28c0d02..ca2e28612 100644 --- a/ci/Dockerfile.std +++ b/ci/Dockerfile.std @@ -1,6 +1,6 @@ # Generated by: gromit policy -# Generated on: Mon Aug 8 03:03:25 UTC 2022 +# Generated on: Mon Oct 10 21:30:09 UTC 2022 FROM debian:bullseye-slim ARG TARGETARCH diff --git a/ci/aws/byol.pkr.hcl b/ci/aws/byol.pkr.hcl index 0d237bb05..e54a55435 100644 --- a/ci/aws/byol.pkr.hcl +++ b/ci/aws/byol.pkr.hcl @@ -1,6 +1,6 @@ # Generated by: gromit policy -# Generated on: Mon Aug 8 03:03:25 UTC 2022 +# Generated on: Mon Oct 10 21:30:09 UTC 2022 packer { required_plugins { diff --git a/ci/goreleaser/goreleaser-el7.yml b/ci/goreleaser/goreleaser-el7.yml index b7dae67c3..edae1c741 100644 --- a/ci/goreleaser/goreleaser-el7.yml +++ b/ci/goreleaser/goreleaser-el7.yml @@ -1,8 +1,7 @@ # Generated by: gromit policy -# Generated on: Mon Aug 8 03:03:25 UTC 2022 +# Generated on: Mon Oct 10 21:30:09 UTC 2022 # Check the documentation at http://goreleaser.com # This project needs CGO_ENABLED=1 and the cross-compiler toolchains for # - arm64 # - amd64 - diff --git a/ci/goreleaser/goreleaser.yml b/ci/goreleaser/goreleaser.yml index baa6128e7..0927e9ccc 100644 --- a/ci/goreleaser/goreleaser.yml +++ b/ci/goreleaser/goreleaser.yml @@ -1,11 +1,10 @@ # Generated by: gromit policy -# Generated on: Mon Aug 8 03:03:25 UTC 2022 +# Generated on: Mon Oct 10 21:30:09 UTC 2022 # Check the documentation at http://goreleaser.com # This project needs CGO_ENABLED=1 and the cross-compiler toolchains for # - arm64 # - amd64 - builds: - id: std ldflags: diff --git a/ci/install/before_install.sh 
b/ci/install/before_install.sh index 741d69e12..c48789750 100755 --- a/ci/install/before_install.sh +++ b/ci/install/before_install.sh @@ -1,7 +1,7 @@ #!/bin/bash # Generated by: gromit policy -# Generated on: Mon Aug 8 03:03:25 UTC 2022 +# Generated on: Mon Oct 10 21:30:09 UTC 2022 echo "Creating user and group..." GROUPNAME="tyk" diff --git a/ci/install/post_install.sh b/ci/install/post_install.sh index 8312d8548..5f1a0cb8f 100755 --- a/ci/install/post_install.sh +++ b/ci/install/post_install.sh @@ -2,7 +2,7 @@ # Generated by: gromit policy -# Generated on: Mon Aug 8 03:03:25 UTC 2022 +# Generated on: Mon Oct 10 21:30:09 UTC 2022 # If "True" the install directory ownership will be changed to "tyk:tyk" change_ownership="True" diff --git a/ci/install/post_remove.sh b/ci/install/post_remove.sh index 1cc122148..a57d5195d 100755 --- a/ci/install/post_remove.sh +++ b/ci/install/post_remove.sh @@ -1,7 +1,7 @@ #!/bin/sh # Generated by: gromit policy -# Generated on: Mon Aug 8 03:03:25 UTC 2022 +# Generated on: Mon Oct 10 21:30:09 UTC 2022 cleanRemove() { diff --git a/ci/install/post_trans.sh b/ci/install/post_trans.sh index 270321c24..b3adcd737 100644 --- a/ci/install/post_trans.sh +++ b/ci/install/post_trans.sh @@ -1,7 +1,7 @@ #!/bin/sh # Generated by: gromit policy -# Generated on: Mon Aug 8 03:03:25 UTC 2022 +# Generated on: Mon Oct 10 21:30:09 UTC 2022 if command -V systemctl >/dev/null 2>&1; then if [ ! -f /lib/systemd/system/tyk-pump.service ]; then diff --git a/ci/terraform/outputs.tf b/ci/terraform/outputs.tf index 1ce74343a..2197a6bb2 100644 --- a/ci/terraform/outputs.tf +++ b/ci/terraform/outputs.tf @@ -1,6 +1,6 @@ # Generated by: gromit policy -# Generated on: Mon Aug 8 03:03:25 UTC 2022 +# Generated on: Mon Oct 10 21:30:09 UTC 2022 From 4c490cbe6e8b76d7f4947d9b6af00084c7b3a5d4 Mon Sep 17 00:00:00 2001 From: Matias <83959431+mativm02@users.noreply.github.com> Date: Thu, 13 Oct 2022 15:03:58 -0300 Subject: [PATCH 004/102] TT-6550 | Document size is mis-calculated in Mongo Selective Pump (#496) * modifying sizeBytes formula * converting RawResponse to bytes * adding unit tests * adding comments and removing testing prints * fixing linter errors --- pumps/mgo_helper_test.go | 12 +++++ pumps/mongo_selective.go | 3 +- pumps/mongo_selective_test.go | 98 +++++++++++++++++++++++++++++++++++ 3 files changed, 112 insertions(+), 1 deletion(-) create mode 100644 pumps/mongo_selective_test.go diff --git a/pumps/mgo_helper_test.go b/pumps/mgo_helper_test.go index ca230b514..d338a3b01 100644 --- a/pumps/mgo_helper_test.go +++ b/pumps/mgo_helper_test.go @@ -97,3 +97,15 @@ func defaultConf() MongoConf { return conf } + +func defaultSelectiveConf() MongoSelectiveConf { + conf := MongoSelectiveConf{ + MaxInsertBatchSizeBytes: 10 * MiB, + MaxDocumentSizeBytes: 10 * MiB, + } + + conf.MongoURL = dbAddr + conf.MongoSSLInsecureSkipVerify = true + + return conf +} diff --git a/pumps/mongo_selective.go b/pumps/mongo_selective.go index 616c1890f..d55d6a29b 100644 --- a/pumps/mongo_selective.go +++ b/pumps/mongo_selective.go @@ -244,7 +244,8 @@ func (m *MongoSelectivePump) AccumulateSet(data []interface{}) [][]interface{} { if thisItem.ResponseCode == -1 { continue } - sizeBytes := len([]byte(thisItem.RawRequest)) + len([]byte(thisItem.RawRequest)) + // Add 1 KB for metadata as average + sizeBytes := len([]byte(thisItem.RawRequest)) + len([]byte(thisItem.RawResponse)) + 1024 skip := false if sizeBytes > m.dbConf.MaxDocumentSizeBytes { diff --git a/pumps/mongo_selective_test.go b/pumps/mongo_selective_test.go new 
file mode 100644 index 000000000..bad1d8952 --- /dev/null +++ b/pumps/mongo_selective_test.go @@ -0,0 +1,98 @@ +package pumps + +import ( + "testing" + + "github.com/TykTechnologies/tyk-pump/analytics" + "github.com/stretchr/testify/assert" +) + +func TestMongoSelectivePump_AccumulateSet(t *testing.T) { + run := func(recordsGenerator func(numRecords int) []interface{}, expectedRecordsCount, maxDocumentSizeBytes int) func(t *testing.T) { + return func(t *testing.T) { + mPump := MongoSelectivePump{} + conf := defaultSelectiveConf() + conf.MaxDocumentSizeBytes = maxDocumentSizeBytes + + numRecords := 100 + + mPump.dbConf = &conf + mPump.log = log.WithField("prefix", mongoPrefix) + + data := recordsGenerator(numRecords) + expectedGraphRecordSkips := 0 + for _, recordData := range data { + record, ok := recordData.(analytics.AnalyticsRecord) + if !ok { + continue + } + if record.IsGraphRecord() { + expectedGraphRecordSkips++ + } + } + set := mPump.AccumulateSet(data) + + recordsCount := 0 + for _, setEntry := range set { + recordsCount += len(setEntry) + } + assert.Equal(t, expectedRecordsCount, recordsCount) + } + } + + t.Run("should accumulate all records", run( + func(numRecords int) []interface{} { + record := analytics.AnalyticsRecord{} + data := make([]interface{}, 0) + for i := 0; i < numRecords; i++ { + data = append(data, record) + } + return data + }, + 100, + 5120, + )) + + t.Run("should accumulate 0 records because maxDocumentSizeBytes < 1024", run( + func(numRecords int) []interface{} { + record := analytics.AnalyticsRecord{} + data := make([]interface{}, 0) + for i := 0; i < numRecords; i++ { + data = append(data, record) + } + return data + }, + 0, + 100, + )) + + t.Run("should accumulate 0 records because the length of the data (1500) is > 1024", run( + func(numRecords int) []interface{} { + record := analytics.AnalyticsRecord{} + record.RawResponse = "1" + data := make([]interface{}, 0) + for i := 0; i < 1500; i++ { + data = append(data, record) + } + return data + }, + 0, + 1024, + )) + + t.Run("should accumulate 99 records because one of the 100 records exceeds the limit of 1024", run( + func(numRecords int) []interface{} { + data := make([]interface{}, 0) + for i := 0; i < 100; i++ { + record := analytics.AnalyticsRecord{} + if i == 94 { + record.RawResponse = "1" + } + data = append(data, record) + } + return data + }, + 99, + 1024, + )) +} From 97bbeccbe5c08b43c3d6fc26dcf0277bab91127e Mon Sep 17 00:00:00 2001 From: Sredny M Date: Tue, 18 Oct 2022 02:34:12 -0500 Subject: [PATCH 005/102] TT-6482 Histogram type label validation (#497) * added validation for histograms to have always the type label * remove debug line * added test to TestEnsureLabels * do not export ensureLabels func * fix lintern errors * added the label in the first position * fix typo --- pumps/prometheus.go | 34 +++++++++++++----- pumps/prometheus_test.go | 75 +++++++++++++++++++++++++++++++++++++++- 2 files changed, 100 insertions(+), 9 deletions(-) diff --git a/pumps/prometheus.go b/pumps/prometheus.go index fc4beb246..4f1b61c20 100644 --- a/pumps/prometheus.go +++ b/pumps/prometheus.go @@ -270,7 +270,8 @@ func (p *PrometheusPump) WriteData(ctx context.Context, data []interface{}) erro // InitVec inits the prometheus metric based on the metric_type. 
It only can create counter and histogram, // if the metric_type is anything else it returns an error func (pm *PrometheusMetric) InitVec() error { - if pm.MetricType == "counter" { + switch pm.MetricType { + case COUNTER_TYPE: pm.counterVec = prometheus.NewCounterVec( prometheus.CounterOpts{ Name: pm.Name, @@ -280,11 +281,13 @@ func (pm *PrometheusMetric) InitVec() error { ) pm.counterMap = make(map[string]uint64) prometheus.MustRegister(pm.counterVec) - } else if pm.MetricType == "histogram" { + case HISTOGRAM_TYPE: bkts := pm.Buckets if len(bkts) == 0 { bkts = buckets } + + pm.ensureLabels() pm.histogramVec = prometheus.NewHistogramVec( prometheus.HistogramOpts{ Name: pm.Name, @@ -295,7 +298,7 @@ func (pm *PrometheusMetric) InitVec() error { ) pm.histogramMap = make(map[string]histogramCounter) prometheus.MustRegister(pm.histogramVec) - } else { + default: return errors.New("invalid metric type:" + pm.MetricType) } @@ -303,6 +306,26 @@ func (pm *PrometheusMetric) InitVec() error { return nil } +// EnsureLabels ensure the data validity and consistency of the metric labels +func (pm *PrometheusMetric) ensureLabels() { + // for histograms we need to be sure that type was added + if pm.MetricType == HISTOGRAM_TYPE { + // remove all references to `type` + var i int + for _, label := range pm.Labels { + if label == "type" { + continue + } + pm.Labels[i] = label + i++ + } + pm.Labels = pm.Labels[:i] + + // then add `type` at the beginning + pm.Labels = append([]string{"type"}, pm.Labels...) + } +} + // GetLabelsValues return a list of string values based on the custom metric labels. func (pm *PrometheusMetric) GetLabelsValues(decoded analytics.AnalyticsRecord) []string { values := []string{} @@ -339,11 +362,6 @@ func (pm *PrometheusMetric) GetLabelsValues(decoded analytics.AnalyticsRecord) [ func (pm *PrometheusMetric) Inc(values ...string) error { switch pm.MetricType { case COUNTER_TYPE: - // "response_code", "api_name", "method" - // key = map[500--apitest-GET] = 4 - - //map[] - pm.counterMap[strings.Join(values, "--")] += 1 default: return errors.New("invalid metric type:" + pm.MetricType) diff --git a/pumps/prometheus_test.go b/pumps/prometheus_test.go index 647285643..47ed40c56 100644 --- a/pumps/prometheus_test.go +++ b/pumps/prometheus_test.go @@ -36,6 +36,16 @@ func TestInitVec(t *testing.T) { expectedErr: nil, isEnabled: true, }, + { + testName: "Histogram metric without type label set", + customMetric: PrometheusMetric{ + Name: "testHistogramMetricWithoutTypeSet", + MetricType: HISTOGRAM_TYPE, + Labels: []string{"api_id"}, + }, + expectedErr: nil, + isEnabled: true, + }, { testName: "RandomType metric", customMetric: PrometheusMetric{ @@ -65,7 +75,7 @@ func TestInitVec(t *testing.T) { } else if tc.customMetric.MetricType == HISTOGRAM_TYPE { assert.NotNil(t, tc.customMetric.histogramVec) assert.Equal(t, tc.isEnabled, prometheus.Unregister(tc.customMetric.histogramVec)) - + assert.Equal(t, tc.customMetric.Labels[0], "type") } }) @@ -528,3 +538,66 @@ func TestPrometheusCreateBasicMetrics(t *testing.T) { assert.Equal(t, 1, actualMetricTypeCounter[HISTOGRAM_TYPE]) } + +func TestEnsureLabels(t *testing.T) { + testCases := []struct { + name string + metricType string + labels []string + typeLabelShouldExist bool + }{ + { + name: "histogram type, type label should be added if not exist", + labels: []string{"response_code", "api_name", "method", "api_key", "alias", "path"}, + metricType: HISTOGRAM_TYPE, + typeLabelShouldExist: true, + }, + { + name: "counter type, type label should not be 
added", + labels: []string{"response_code", "api_name", "method", "api_key", "alias", "path"}, + metricType: COUNTER_TYPE, + typeLabelShouldExist: false, + }, + { + name: "histogram type, type label should not be repeated and in the 1st position", + labels: []string{"type", "response_code", "api_name", "method", "api_key", "alias", "path"}, + metricType: HISTOGRAM_TYPE, + typeLabelShouldExist: true, + }, + { + name: "histogram type, type label should not be repeated (even if user repeated it), and always in the 1st position", + labels: []string{"response_code", "api_name", "type", "method", "api_key", "alias", "path", "type"}, + metricType: HISTOGRAM_TYPE, + typeLabelShouldExist: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + pm := PrometheusMetric{ + MetricType: tc.metricType, + Labels: tc.labels, + } + + pm.ensureLabels() + typeLabelFound := false + numberOfTimesOfTypeLabel := 0 + + for _, label := range pm.Labels { + if label == "type" { + typeLabelFound = true + numberOfTimesOfTypeLabel++ + } + } + + assert.Equal(t, tc.typeLabelShouldExist, typeLabelFound) + + // if should exist then it should be only one time + if tc.typeLabelShouldExist { + assert.Equal(t, 1, numberOfTimesOfTypeLabel) + // label `type` should be in the 1st position always + assert.Equal(t, pm.Labels[0], "type") + } + }) + } +} From ba8a4b56cbdc25942df2c4eb0633c0b033955b25 Mon Sep 17 00:00:00 2001 From: Kofo Okesola Date: Tue, 18 Oct 2022 09:01:27 +0100 Subject: [PATCH 006/102] [TT-6012] Implement New Graph Granular Mongo Pump (#489) * convert record to graph data * implemented the pump and modidfied old mongo pump * separate graph record logic * fix go vet * staticcheck and golangci-lint * increased test coverage and fixed bugs * goimports * Use ast constants for operation type * rename variable * fix failing tests * pr comments * added tests for mongo graph pump * reorganized tests * added some comments * modifying tests * added pump tests * added test cases for empty bodies * test changes * modified test and documentation --- README.md | 20 ++ analytics/aggregate.go | 16 +- analytics/aggregate_test.go | 1 - analytics/analytics.go | 8 +- analytics/analytics_filters_test.go | 2 - analytics/demo/demo.go | 10 +- analytics/graph_record.go | 321 +++++++++++++++++++ analytics/graph_record_test.go | 460 ++++++++++++++++++++++++++++ analytics/uptime_data.go | 2 +- go.mod | 14 +- go.sum | 314 +++++++++++++++++-- pumps/graph_mongo.go | 149 +++++++++ pumps/graph_mongo_test.go | 343 +++++++++++++++++++++ pumps/init.go | 1 + pumps/mongo.go | 15 +- pumps/mongo_test.go | 2 +- 16 files changed, 1620 insertions(+), 58 deletions(-) create mode 100644 analytics/graph_record.go create mode 100644 analytics/graph_record_test.go create mode 100644 pumps/graph_mongo.go create mode 100644 pumps/graph_mongo_test.go diff --git a/README.md b/README.md index e4a689b52..cd156b282 100644 --- a/README.md +++ b/README.md @@ -345,6 +345,26 @@ TYK_PMP_PUMPS_MONGOAGG_TYPE=mongo-pump-aggregate TYK_PMP_PUMPS_MONGOAGG_META_USEMIXEDCOLLECTION=true ``` +###### Mongo Graph Pump +As of Pump 1.7+, a new mongo is available called the `mongo_graph` pump. This pump is specifically for parsing +GraphQL and UDG requests, tracking information like types requested, fields requested, specific graphql body errors etc. + +A sample config looks like this: +```.json +{ + ... + "pumps": { + ... 
+ "mongo-graph": { + "type": "mongo-graph", + "meta": { + "collection_name": "graph_analytics", + "mongo_url": "mongodb://localhost:27017/tyk_analytics" + } + } +} +``` + ## Elasticsearch Config `"index_name"` - The name of the index that all the analytics data will be placed in. Defaults to "tyk_analytics" diff --git a/analytics/aggregate.go b/analytics/aggregate.go index 7c00cc85a..a2f82e37d 100644 --- a/analytics/aggregate.go +++ b/analytics/aggregate.go @@ -167,12 +167,12 @@ func OnConflictAssignments(tableName string, tempTable string) map[string]interf colName := "counter_" + jsonTag switch jsonTag { - //hits, error, success"s, open_connections, closed_connections, bytes_in, bytes_out,total_request_time, total_upstream_latency, total_latency + // hits, error, success"s, open_connections, closed_connections, bytes_in, bytes_out,total_request_time, total_upstream_latency, total_latency case "hits", "error", "success", "open_connections", "closed_connections", "bytes_in", "bytes_out", "total_request_time", "total_latency", "total_upstream_latency": assignments[colName] = gorm.Expr(tableName + "." + colName + " + " + tempTable + "." + colName) - //request_time, upstream_latency,latency + // request_time, upstream_latency,latency case "request_time", "upstream_latency", "latency": - //AVG = (oldTotal + newTotal ) / (oldHits + newHits) + // AVG = (oldTotal + newTotal ) / (oldHits + newHits) var totalVal, totalCol string switch jsonTag { case "request_time": @@ -187,13 +187,13 @@ func OnConflictAssignments(tableName string, tempTable string) map[string]interf assignments[colName] = gorm.Expr("(" + tableName + "." + totalCol + " +" + totalVal + ")/CAST( " + tableName + ".counter_hits + " + tempTable + ".counter_hits" + " AS REAL)") case "max_upstream_latency", "max_latency": - //math max: 0.5 * ((@val1 + @val2) + ABS(@val1 - @val2)) + // math max: 0.5 * ((@val1 + @val2) + ABS(@val1 - @val2)) val1 := tableName + "." + colName val2 := tempTable + "." + colName assignments[colName] = gorm.Expr("0.5 * ((" + val1 + " + " + val2 + ") + ABS(" + val1 + " - " + val2 + "))") case "min_latency", "min_upstream_latency": - //math min: 0.5 * ((@val1 + @val2) - ABS(@val1 - @val2)) + // math min: 0.5 * ((@val1 + @val2) - ABS(@val1 - @val2)) val1 := tableName + "." + colName val2 := tempTable + "." + colName assignments[colName] = gorm.Expr("0.5 * ((" + val1 + " + " + val2 + ") - ABS(" + val1 + " - " + val2 + ")) ") @@ -225,7 +225,6 @@ func (f AnalyticsRecordAggregate) New() AnalyticsRecordAggregate { } func (f *AnalyticsRecordAggregate) generateBSONFromProperty(parent, thisUnit string, incVal *Counter, newUpdate bson.M) bson.M { - constructor := parent + "." + thisUnit + "." if parent == "" { constructor = thisUnit + "." @@ -262,7 +261,6 @@ func (f *AnalyticsRecordAggregate) generateBSONFromProperty(parent, thisUnit str } func (f *AnalyticsRecordAggregate) generateSetterForTime(parent, thisUnit string, realTime float64, newUpdate bson.M) bson.M { - constructor := parent + "." + thisUnit + "." if parent == "" { constructor = thisUnit + "." @@ -430,7 +428,7 @@ func (f *AnalyticsRecordAggregate) AsTimeUpdate() bson.M { // We need to create lists of API data so that we can aggregate across the list // in order to present top-20 style lists of APIs, Tokens etc. 
- //apis := make([]Counter, 0) + // apis := make([]Counter, 0) newUpdate["$set"].(bson.M)["lists.apiid"] = f.getRecords("apiid", f.APIID, newUpdate) newUpdate["$set"].(bson.M)["lists.errors"] = f.getRecords("errors", f.Errors, newUpdate) @@ -471,7 +469,7 @@ func (f *AnalyticsRecordAggregate) AsTimeUpdate() bson.M { return newUpdate } -//DiscardAggregations this method discard the aggregations of X field specified in the aggregated pump configuration +// DiscardAggregations this method discard the aggregations of X field specified in the aggregated pump configuration func (f *AnalyticsRecordAggregate) DiscardAggregations(fields []string) { for _, field := range fields { switch field { diff --git a/analytics/aggregate_test.go b/analytics/aggregate_test.go index 72bd42739..6e92b6f97 100644 --- a/analytics/aggregate_test.go +++ b/analytics/aggregate_test.go @@ -19,7 +19,6 @@ func TestCode_ProcessStatusCodes(t *testing.T) { assert.Equal(t, 4, c.Code400) assert.Equal(t, 5, c.Code4x) - } func TestAggregate_Tags(t *testing.T) { diff --git a/analytics/analytics.go b/analytics/analytics.go index d10a3fa3a..7edbd29f3 100644 --- a/analytics/analytics.go +++ b/analytics/analytics.go @@ -72,10 +72,15 @@ type AnalyticsRecord struct { ApiSchema string `json:"api_schema" bson:"-" gorm:"-"` } -func (ar *AnalyticsRecord) TableName() string { +func (a *AnalyticsRecord) TableName() string { return SQLTable } +type GraphError struct { + Message string `json:"message"` + Path []interface{} `json:"path"` +} + type Country struct { ISOCode string `maxminddb:"iso_code" json:"iso_code"` } @@ -313,7 +318,6 @@ func (a *AnalyticsRecord) GetGeo(ipStr string, GeoIPDB *maxminddb.Reader) { a.Geo.Location = geo.Location a.Geo.Country = geo.Country a.Geo.City = geo.City - } func GeoIPLookup(ipStr string, GeoIPDB *maxminddb.Reader) (*GeoData, error) { diff --git a/analytics/analytics_filters_test.go b/analytics/analytics_filters_test.go index 0c729f6ce..e3ca415d9 100644 --- a/analytics/analytics_filters_test.go +++ b/analytics/analytics_filters_test.go @@ -102,7 +102,6 @@ func TestShouldFilter(t *testing.T) { assert.Equal(t, tc.expectedFiltering, shouldFilter) }) } - } func TestHasFilter(t *testing.T) { @@ -120,5 +119,4 @@ func TestHasFilter(t *testing.T) { if hasFilter == false { t.Fatal("HasFilter should be true.") } - } diff --git a/analytics/demo/demo.go b/analytics/demo/demo.go index 334b992ab..b0975e444 100644 --- a/analytics/demo/demo.go +++ b/analytics/demo/demo.go @@ -12,9 +12,11 @@ import ( uuid "github.com/satori/go.uuid" ) -var apiKeys []string -var apiID string -var apiVersion string +var ( + apiKeys []string + apiID string + apiVersion string +) func DemoInit(orgId, apiId, version string) { apiID = apiId @@ -31,7 +33,7 @@ func randomInRange(min, max int) int { } func randomMethod() string { - var methods = []string{"GET", "PUT", "POST", "DELETE", "OPTIONS", "HEAD"} + methods := []string{"GET", "PUT", "POST", "DELETE", "OPTIONS", "HEAD"} rand.Seed(time.Now().Unix()) return methods[rand.Intn(len(methods))] diff --git a/analytics/graph_record.go b/analytics/graph_record.go new file mode 100644 index 000000000..ddfb7500f --- /dev/null +++ b/analytics/graph_record.go @@ -0,0 +1,321 @@ +package analytics + +import ( + "bufio" + "bytes" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "net/http" + + "github.com/TykTechnologies/graphql-go-tools/pkg/ast" + "github.com/TykTechnologies/graphql-go-tools/pkg/astnormalization" + "github.com/TykTechnologies/graphql-go-tools/pkg/astparser" + gql 
"github.com/TykTechnologies/graphql-go-tools/pkg/graphql" + "github.com/TykTechnologies/graphql-go-tools/pkg/operationreport" + "github.com/buger/jsonparser" +) + +type GraphRecord struct { + Types map[string][]string + + AnalyticsRecord `bson:",inline"` + + OperationType string + Variables string + Errors []GraphError + HasErrors bool +} + +func (a *AnalyticsRecord) ToGraphRecord() (GraphRecord, error) { + record := GraphRecord{ + AnalyticsRecord: *a, + } + if a.ResponseCode >= 400 { + record.HasErrors = true + } + rawRequest, err := base64.StdEncoding.DecodeString(a.RawRequest) + if err != nil { + return record, fmt.Errorf("error decoding raw request: %w", err) + } + + schemaBody, err := base64.StdEncoding.DecodeString(a.ApiSchema) + if err != nil { + return record, fmt.Errorf("error decoding schema: %w", err) + } + + request, schema, operationName, err := generateNormalizedDocuments(rawRequest, schemaBody) + if err != nil { + return record, fmt.Errorf("error generating documents: %w", err) + } + if len(request.Input.Variables) != 0 && string(request.Input.Variables) != "null" { + record.Variables = base64.StdEncoding.EncodeToString(request.Input.Variables) + } + + // get the operation ref + operationRef := 0 + if operationName != "" { + for i := range request.OperationDefinitions { + if request.OperationDefinitionNameString(i) == operationName { + operationRef = i + break + } + } + } else if len(request.OperationDefinitions) > 1 { + return record, errors.New("no operation name specified") + } + + // get operation type + switch request.OperationDefinitions[operationRef].OperationType { + case ast.OperationTypeMutation: + record.OperationType = string(ast.DefaultMutationTypeName) + case ast.OperationTypeSubscription: + record.OperationType = string(ast.DefaultSubscriptionTypeName) + case ast.OperationTypeQuery: + record.OperationType = string(ast.DefaultQueryTypeName) + } + + // get the selection set types to start with + fieldTypeList, err := extractTypesOfSelectionSet(operationRef, request, schema) + if err != nil { + log.WithError(err).Error("error extracting selection set types") + return record, err + } + typesToFieldsMap := make(map[string][]string) + for fieldRef, typeDefRef := range fieldTypeList { + extractTypesAndFields(fieldRef, typeDefRef, typesToFieldsMap, request, schema) + } + record.Types = typesToFieldsMap + + // get response and check to see errors + responseDecoded, err := base64.StdEncoding.DecodeString(a.RawResponse) + if err != nil { + return record, nil + } + resp, err := http.ReadResponse(bufio.NewReader(bytes.NewReader(responseDecoded)), nil) + if err != nil { + log.WithError(err).Error("error reading raw response") + return record, err + } + defer resp.Body.Close() + + dat, err := ioutil.ReadAll(resp.Body) + if err != nil { + log.WithError(err).Error("error reading response body") + return record, err + } + errBytes, t, _, err := jsonparser.Get(dat, "errors") + if err != nil && err != jsonparser.KeyPathNotFoundError { + log.WithError(err).Error("error getting response errors") + return record, err + } + if t != jsonparser.NotExist { + if err := json.Unmarshal(errBytes, &record.Errors); err != nil { + log.WithError(err).Error("error parsing graph errors") + return record, err + } + record.HasErrors = true + } + + return record, nil +} + +// extractTypesOfSelectionSet extracts all type names of the selection sets in the operation +// it returns a map of the FieldRef in the req to the type Definition in the schema +func extractTypesOfSelectionSet(operationRef int, 
req, schema *ast.Document) (map[int]int, error) { + fieldTypeMap := make(map[int]int) + operationDef := req.OperationDefinitions[operationRef] + if !operationDef.HasSelections { + return nil, errors.New("operation has no selection set") + } + + for _, selRef := range req.SelectionSets[operationDef.SelectionSet].SelectionRefs { + sel := req.Selections[selRef] + if sel.Kind != ast.SelectionKindField { + continue + } + // get selection field def + selFieldDefRef, err := getOperationSelectionFieldDefinition(operationDef.OperationType, req.FieldNameString(sel.Ref), schema) + if selFieldDefRef == ast.InvalidRef || err != nil { + if err != nil { + log.WithError(err).Error("error getting operation field definition") + } + return nil, errors.New("error getting selection set") + } + + typeRef := schema.ResolveUnderlyingType(schema.FieldDefinitions[selFieldDefRef].Type) + if schema.TypeIsScalar(typeRef, schema) || schema.TypeIsEnum(typeRef, schema) { + continue + } + fieldTypeMap[sel.Ref] = getObjectTypeRefWithName(schema.TypeNameString(typeRef), schema) + } + return fieldTypeMap, nil +} + +// extractTypesAndFields extracts all types and type fields used in this request +func extractTypesAndFields(fieldRef, typeDef int, resp map[string][]string, req, schema *ast.Document) { + field := req.Fields[fieldRef] + fieldListForType := make([]string, 0) + + if !field.HasSelections { + return + } + for _, selRef := range req.SelectionSets[field.SelectionSet].SelectionRefs { + sel := req.Selections[selRef] + if sel.Kind != ast.SelectionKindField { + continue + } + fieldListForType = append(fieldListForType, req.FieldNameString(sel.Ref)) + + // get the field definition and run this function on it + fieldDefRef := getObjectFieldRefWithName(req.FieldNameString(sel.Ref), typeDef, schema) + if fieldDefRef == ast.InvalidRef { + continue + } + + fieldDefType := schema.ResolveUnderlyingType(schema.FieldDefinitions[fieldDefRef].Type) + if schema.TypeIsScalar(fieldDefType, schema) || schema.TypeIsEnum(fieldDefType, schema) { + continue + } + + objTypeRef := getObjectTypeRefWithName(schema.TypeNameString(fieldDefType), schema) + if objTypeRef == ast.InvalidRef { + continue + } + + extractTypesAndFields(sel.Ref, objTypeRef, resp, req, schema) + } + + objectTypeName := schema.ObjectTypeDefinitionNameString(typeDef) + _, ok := resp[objectTypeName] + if ok { + resp[objectTypeName] = append(resp[objectTypeName], fieldListForType...) 
+ } else { + resp[objectTypeName] = fieldListForType + } + + resp[objectTypeName] = fieldListForType +} + +// getObjectFieldRefWithName gets the object field reference from the object type using the name from the schame +func getObjectFieldRefWithName(name string, objTypeRef int, schema *ast.Document) int { + objectTypeDefinition := schema.ObjectTypeDefinitions[objTypeRef] + if !objectTypeDefinition.HasFieldDefinitions { + return ast.InvalidRef + } + for _, r := range objectTypeDefinition.FieldsDefinition.Refs { + if schema.FieldDefinitionNameString(r) == name { + return r + } + } + return ast.InvalidRef +} + +// getObjectTypeRefWithName gets the ref of the type from the schema using the name +func getObjectTypeRefWithName(name string, schema *ast.Document) int { + n, ok := schema.Index.FirstNodeByNameStr(name) + if !ok { + return ast.InvalidRef + } + if n.Kind != ast.NodeKindObjectTypeDefinition { + return ast.InvalidRef + } + return n.Ref +} + +// generateNormalizedDocuments generates and normalizes the ast documents from the raw request and the raw schema +func generateNormalizedDocuments(requestRaw, schemaRaw []byte) (r, s *ast.Document, operationName string, err error) { + httpRequest, err := http.ReadRequest(bufio.NewReader(bytes.NewReader(requestRaw))) + if err != nil { + log.WithError(err).Error("error parsing request") + return + } + var gqlRequest gql.Request + err = gql.UnmarshalRequest(httpRequest.Body, &gqlRequest) + if err != nil { + log.WithError(err).Error("error unmarshalling request") + return + } + operationName = gqlRequest.OperationName + + schema, err := gql.NewSchemaFromString(string(schemaRaw)) + if err != nil { + return + } + schemaDoc, operationReport := astparser.ParseGraphqlDocumentBytes(schema.Document()) + if operationReport.HasErrors() { + err = operationReport + return + } + s = &schemaDoc + + requestDoc, operationReport := astparser.ParseGraphqlDocumentString(gqlRequest.Query) + if operationReport.HasErrors() { + err = operationReport + log.WithError(err).Error("error parsing request document") + return + } + r = &requestDoc + r.Input.Variables = gqlRequest.Variables + normalizer := astnormalization.NewWithOpts( + astnormalization.WithRemoveFragmentDefinitions(), + ) + + var report operationreport.Report + if operationName != "" { + normalizer.NormalizeNamedOperation(r, s, []byte(operationName), &report) + } else { + normalizer.NormalizeOperation(r, s, &report) + } + if report.HasErrors() { + log.WithError(report).Error("error normalizing") + err = report + return + } + return +} + +// getOperationSelectionFieldDefinition gets the schema's field definition ref for the selection set of the operation type in question +func getOperationSelectionFieldDefinition(operationType ast.OperationType, opSelectionName string, schema *ast.Document) (int, error) { + var ( + node ast.Node + found bool + ) + switch operationType { + case ast.OperationTypeQuery: + node, found = schema.Index.FirstNodeByNameBytes(schema.Index.QueryTypeName) + if !found { + return ast.InvalidRef, fmt.Errorf("missing query type declaration") + } + case ast.OperationTypeMutation: + node, found = schema.Index.FirstNodeByNameBytes(schema.Index.MutationTypeName) + if !found { + return ast.InvalidRef, fmt.Errorf("missing mutation type declaration") + } + case ast.OperationTypeSubscription: + node, found = schema.Index.FirstNodeByNameBytes(schema.Index.SubscriptionTypeName) + if !found { + return ast.InvalidRef, fmt.Errorf("missing subscription type declaration") + } + default: + return 
ast.InvalidRef, fmt.Errorf("unknown operation") + } + if node.Kind != ast.NodeKindObjectTypeDefinition { + return ast.InvalidRef, fmt.Errorf("invalid node type") + } + + operationObjDefinition := schema.ObjectTypeDefinitions[node.Ref] + if !operationObjDefinition.HasFieldDefinitions { + return ast.InvalidRef, nil + } + + for _, fieldRef := range operationObjDefinition.FieldsDefinition.Refs { + if opSelectionName == schema.FieldDefinitionNameString(fieldRef) { + return fieldRef, nil + } + } + + return ast.InvalidRef, fmt.Errorf("field not found") +} diff --git a/analytics/graph_record_test.go b/analytics/graph_record_test.go new file mode 100644 index 000000000..a2788320c --- /dev/null +++ b/analytics/graph_record_test.go @@ -0,0 +1,460 @@ +package analytics + +import ( + "encoding/base64" + "fmt" + "testing" + "time" + + "github.com/TykTechnologies/graphql-go-tools/pkg/ast" + "github.com/TykTechnologies/graphql-go-tools/pkg/astparser" + gql "github.com/TykTechnologies/graphql-go-tools/pkg/graphql" + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "github.com/stretchr/testify/assert" +) + +const ( + requestTemplate = "POST / HTTP/1.1\r\nHost: localhost:8281\r\nUser-Agent: test-agent\r\nContent-Length: %d\r\n\r\n%s" + responseTemplate = "HTTP/0.0 200 OK\r\nContent-Length: %d\r\nConnection: close\r\nContent-Type: application/json\r\n\r\n%s" +) + +const sampleSchema = ` +type Query { + characters(filter: FilterCharacter, page: Int): Characters + listCharacters(): [Characters]! +} + +type Mutation { + changeCharacter(): String +} + +type Subscription { + listenCharacter(): Characters +} +input FilterCharacter { + name: String + status: String + species: String + type: String + gender: String! = "M" +} +type Characters { + info: Info + secondInfo: String + results: [Character] +} +type Info { + count: Int + next: Int + pages: Int + prev: Int +} +type Character { + gender: String + id: ID + name: String +} + +type EmptyType{ +}` + +func getSampleSchema() (*ast.Document, error) { + schema, err := gql.NewSchemaFromString(string(sampleSchema)) + if err != nil { + return nil, err + } + schemaDoc, operationReport := astparser.ParseGraphqlDocumentBytes(schema.Document()) + if operationReport.HasErrors() { + return nil, operationReport + } + return &schemaDoc, nil +} + +func TestAnalyticsRecord_ToGraphRecord(t *testing.T) { + recordSample := AnalyticsRecord{ + TimeStamp: time.Date(2022, 1, 1, 0, 0, 0, 0, time.UTC), + Method: "POST", + Host: "localhost:8281", + Path: "/", + RawPath: "/", + APIName: "test-api", + APIID: "test-api", + ApiSchema: base64.StdEncoding.EncodeToString([]byte(sampleSchema)), + ResponseCode: 200, + Day: 1, + Month: 1, + Year: 2022, + Hour: 0, + } + graphRecordSample := GraphRecord{ + AnalyticsRecord: recordSample, + Types: make(map[string][]string), + } + + testCases := []struct { + expected func(string, string) GraphRecord + modifyRecord func(a AnalyticsRecord) AnalyticsRecord + title string + request string + response string + expectedErr string + }{ + { + title: "no error", + request: `{"query":"query{\n characters(filter: {\n \n }){\n info{\n count\n }\n }\n}"}`, + response: `{"data":{"characters":{"info":{"count":758}}}}`, + expected: func(request, response string) GraphRecord { + g := graphRecordSample + g.HasErrors = false + g.Types = map[string][]string{ + "Characters": {"info"}, + "Info": {"count"}, + } + g.OperationType = "Query" + return g + }, + }, + { + title: "no error mutation", + request: `{"query":"mutation{\n changeCharacter()\n}"}`, + 
response: `{"data":{"characters":{"info":{"count":758}}}}`, + expected: func(request, response string) GraphRecord { + g := graphRecordSample + g.HasErrors = false + g.OperationType = "Mutation" + return g + }, + }, + { + title: "no error subscription", + request: `{"query":"subscription{\n listenCharacter(){\n info{\n count\n }\n }\n}"}`, + response: `{"data":{"characters":{"info":{"count":758}}}}`, + expected: func(request, response string) GraphRecord { + g := graphRecordSample + g.HasErrors = false + g.Types = map[string][]string{ + "Characters": {"info"}, + "Info": {"count"}, + } + g.OperationType = "Subscription" + return g + }, + }, + { + title: "bad document", + request: `{"query":"subscriptiona{\n listenCharacter(){\n info{\n count\n }\n }\n}"}`, + response: `{"data":{"characters":{"info":{"count":758}}}}`, + expected: func(request, response string) GraphRecord { + return GraphRecord{} + }, + expectedErr: "error generating documents", + }, + { + title: "no error list operation", + request: `{"query":"query{\n listCharacters(){\n info{\n count\n }\n }\n}"}`, + response: `{"data":{"characters":{"info":{"count":758}}}}`, + expected: func(request, response string) GraphRecord { + g := graphRecordSample + g.HasErrors = false + g.Types = map[string][]string{ + "Characters": {"info"}, + "Info": {"count"}, + } + g.OperationType = "Query" + return g + }, + }, + { + title: "has variables", + request: `{"query":"query{\n characters(filter: {\n \n }){\n info{\n count\n }\n }\n}","variables":{"a":"test"}}`, + response: `{"data":{"characters":{"info":{"count":758}}}}`, + expected: func(request, response string) GraphRecord { + g := graphRecordSample + g.HasErrors = false + g.Types = map[string][]string{ + "Characters": {"info"}, + "Info": {"count"}, + } + g.OperationType = "Query" + g.Variables = base64.StdEncoding.EncodeToString([]byte(`{"a":"test"}`)) + return g + }, + }, + { + title: "no operation", + request: `{"query":"query main {\ncharacters {\ninfo\n}\n}\n\nquery second {\nlistCharacters{\ninfo\n}\n}","variables":null,"operationName":""}`, + response: `{"data":{"characters":{"info":{"count":758}}}}`, + expected: func(request, response string) GraphRecord { + return GraphRecord{} + }, + expectedErr: "no operation name specified", + }, + { + title: "operation name specified", + request: `{"query":"query main {\ncharacters {\ninfo\n}\n}\n\nquery second {\nlistCharacters{\ninfo\n secondInfo}\n}","variables":null,"operationName":"second"}`, + response: `{"data":{"characters":{"info":{"count":758}}}}`, + expected: func(request, response string) GraphRecord { + g := graphRecordSample + g.HasErrors = false + g.Types = map[string][]string{ + "Characters": {"info", "secondInfo"}, + } + g.OperationType = "Query" + return g + }, + expectedErr: "", + }, + { + title: "has errors", + request: `{"query":"query{\n characters(filter: {\n \n }){\n info{\n count\n }\n }\n}"}`, + response: `{ + "errors": [ + { + "message": "Name for character with ID 1002 could not be fetched.", + "locations": [{ "line": 6, "column": 7 }], + "path": ["hero", "heroFriends", 1, "name"] + } + ] +}`, + expected: func(request, response string) GraphRecord { + g := graphRecordSample + g.HasErrors = true + g.Types = map[string][]string{ + "Characters": {"info"}, + "Info": {"count"}, + } + g.OperationType = "Query" + g.Errors = append(g.Errors, GraphError{ + Message: "Name for character with ID 1002 could not be fetched.", + Path: []interface{}{"hero", "heroFriends", float64(1), "name"}, + }) + return g + }, + }, + { + title: 
"corrupted raw request should error out", + modifyRecord: func(a AnalyticsRecord) AnalyticsRecord { + a.RawRequest = "this isn't a base64 is it?" + return a + }, + expectedErr: "error decoding raw request", + expected: func(s, s2 string) GraphRecord { + return GraphRecord{} + }, + }, + { + title: "corrupted schema should error out", + modifyRecord: func(a AnalyticsRecord) AnalyticsRecord { + a.ApiSchema = "this isn't a base64 is it?" + return a + }, + expectedErr: "error decoding schema", + expected: func(s, s2 string) GraphRecord { + return GraphRecord{} + }, + }, + } + + for _, testCase := range testCases { + t.Run(testCase.title, func(t *testing.T) { + a := recordSample + a.RawRequest = base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf( + requestTemplate, + len(testCase.request), + testCase.request, + ))) + a.RawResponse = base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf( + responseTemplate, + len(testCase.response), + testCase.response, + ))) + if testCase.modifyRecord != nil { + a = testCase.modifyRecord(a) + } + expected := testCase.expected(testCase.request, testCase.response) + expected.AnalyticsRecord = a + gotten, err := a.ToGraphRecord() + if testCase.expectedErr != "" { + assert.ErrorContains(t, err, testCase.expectedErr) + return + } + assert.NoError(t, err) + if diff := cmp.Diff(expected, gotten, cmpopts.IgnoreFields(GraphRecord{}, "RawRequest", "RawResponse")); diff != "" { + t.Fatal(diff) + } + }) + } +} + +func Test_getObjectTypeRefWithName(t *testing.T) { + schema, err := getSampleSchema() + assert.NoError(t, err) + + testCases := []struct { + name string + typeName string + expectedRef int + }{ + { + name: "fail", + typeName: "invalidType", + expectedRef: -1, + }, + { + name: "successful", + typeName: "Character", + expectedRef: 5, + }, + { + name: "invalid because input", + typeName: "FilterCharacter", + expectedRef: -1, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + ref := getObjectTypeRefWithName(tc.typeName, schema) + assert.Equal(t, tc.expectedRef, ref) + }) + } +} + +func Test_getObjectFieldRefWithName(t *testing.T) { + schema, err := getSampleSchema() + assert.NoError(t, err) + + testCases := []struct { + name string + fieldName string + objectName string + expectedRef int + }{ + { + name: "successful run", + fieldName: "info", + objectName: "Characters", + expectedRef: 8, + }, + { + name: "failed run due to invalid field", + fieldName: "infos", + objectName: "Characters", + expectedRef: -1, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + objRef := getObjectTypeRefWithName(tc.objectName, schema) + assert.NotEqual(t, -1, objRef) + ref := getObjectFieldRefWithName(tc.fieldName, objRef, schema) + assert.Equal(t, tc.expectedRef, ref) + }) + } +} + +func Test_generateNormalizedDocuments(t *testing.T) { + rQuery := `{"query":"mutation{\n changeCharacter()\n}"}` + sampleQuery := []byte(fmt.Sprintf(requestTemplate, len(rQuery), rQuery)) + + t.Run("test valid request", func(t *testing.T) { + _, _, _, err := generateNormalizedDocuments(sampleQuery, []byte(sampleSchema)) + assert.NoError(t, err) + }) + t.Run("test invalid request", func(t *testing.T) { + _, _, _, err := generateNormalizedDocuments(sampleQuery[:10], []byte(sampleSchema)) + assert.ErrorContains(t, err, `malformed HTTP version "HTT"`) + }) + t.Run("invalid schema", func(t *testing.T) { + _, _, _, err := generateNormalizedDocuments(sampleQuery, []byte(`type Test{`)) + assert.Error(t, err) + }) + t.Run("invalid request for 
normalization", func(t *testing.T) { + query := `{"query":"mutation{\n changeCharactersss()\n}"}` + _, _, _, err := generateNormalizedDocuments([]byte(fmt.Sprintf(requestTemplate, len(query), query)), []byte(sampleSchema)) + assert.Error(t, err) + }) +} + +func Test_getOperationSelectionFieldDefinition(t *testing.T) { + schema, err := getSampleSchema() + assert.NoError(t, err) + + testCases := []struct { + modifySchema func(ast.Document) *ast.Document + name string + operationName string + expectedErr string + expectedRef int + operationType ast.OperationType + }{ + { + name: "successful query", + operationType: ast.OperationTypeQuery, + operationName: "characters", + expectedRef: 0, + expectedErr: "", + }, + { + name: "invalid query", + operationType: ast.OperationTypeQuery, + operationName: "invalidQuery", + expectedRef: -1, + expectedErr: "field not found", + }, + { + name: "invalid query type name", + operationType: ast.OperationTypeQuery, + operationName: "testOperation", + expectedRef: -1, + expectedErr: "missing query type declaration", + modifySchema: func(document ast.Document) *ast.Document { + document.Index.QueryTypeName = ast.ByteSlice("Querys") + return &document + }, + }, + { + name: "invalid mutation type name", + operationType: ast.OperationTypeMutation, + operationName: "testOperation", + expectedRef: -1, + expectedErr: "missing mutation type declaration", + modifySchema: func(document ast.Document) *ast.Document { + document.Index.MutationTypeName = ast.ByteSlice("Mutations") + return &document + }, + }, + { + name: "invalid subscription type name", + operationType: ast.OperationTypeSubscription, + operationName: "testOperation", + expectedRef: -1, + expectedErr: "missing subscription type declaration", + modifySchema: func(document ast.Document) *ast.Document { + document.Index.SubscriptionTypeName = ast.ByteSlice("Subscriptions") + return &document + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + var sc *ast.Document + if tc.modifySchema != nil { + sc = tc.modifySchema(*schema) + } else { + sc = schema + } + ref, err := getOperationSelectionFieldDefinition(tc.operationType, tc.operationName, sc) + if tc.expectedErr != "" { + assert.ErrorContains(t, err, tc.expectedErr) + } else { + assert.NoError(t, err) + } + + assert.Equal(t, tc.expectedRef, ref) + }) + } +} diff --git a/analytics/uptime_data.go b/analytics/uptime_data.go index 0393e7e4b..9db8f0f15 100644 --- a/analytics/uptime_data.go +++ b/analytics/uptime_data.go @@ -188,7 +188,7 @@ func AggregateUptimeData(data []UptimeReportData) map[string]UptimeReportAggrega if (thisV.ResponseCode < 300) && (thisV.ResponseCode >= 200) { thisCounter.Success = 1 thisAggregate.Total.Success++ - //using the errorMap as ResponseCode Map for SQL purpose + // using the errorMap as ResponseCode Map for SQL purpose thisCounter.ErrorMap[strconv.Itoa(thisV.ResponseCode)]++ thisAggregate.Total.ErrorMap[strconv.Itoa(thisV.ResponseCode)]++ } diff --git a/go.mod b/go.mod index 7c660df38..7991791da 100644 --- a/go.mod +++ b/go.mod @@ -4,8 +4,8 @@ go 1.15 require ( github.com/DataDog/datadog-go v4.7.0+incompatible - github.com/Microsoft/go-winio v0.5.0 // indirect github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d // indirect + github.com/TykTechnologies/graphql-go-tools v1.6.2-0.20220811124354-8d1f142966f8 github.com/TykTechnologies/murmur3 v0.0.0-20180602122059-1915e687e465 github.com/TykTechnologies/tyk v0.0.0-20200207055804-cf1d1ad81206 github.com/aws/aws-sdk-go-v2 v1.16.14 @@ -13,12 
+13,13 @@ require ( github.com/aws/aws-sdk-go-v2/service/timestreamwrite v1.9.0 github.com/beeker1121/goque v0.0.0-20170321141813-4044bc29b280 // indirect github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 // indirect + github.com/buger/jsonparser v1.1.1 github.com/fatih/structs v1.1.0 github.com/go-ole/go-ole v1.2.4 // indirect github.com/go-redis/redis/v8 v8.3.1 github.com/gocraft/health v0.0.0-20170925182251-8675af27fef0 - github.com/golang/protobuf v1.4.3 - github.com/google/go-cmp v0.5.8 + github.com/golang/protobuf v1.5.0 + github.com/google/go-cmp v0.5.9 github.com/gorilla/mux v1.8.0 github.com/influxdata/influxdb v1.8.3 github.com/influxdata/influxdb-client-go/v2 v2.6.0 @@ -27,7 +28,7 @@ require ( github.com/lintianzhi/graylogd v0.0.0-20180503131252-dc68342f04dc // indirect github.com/logzio/logzio-go v0.0.0-20200316143903-ac8fc0e2910e github.com/lonelycode/mgohacks v0.0.0-20150820024025-f9c291f7e57e - github.com/mitchellh/mapstructure v1.1.2 + github.com/mitchellh/mapstructure v1.2.2 github.com/moesif/moesifapi-go v1.0.6 github.com/olivere/elastic v6.2.31+incompatible // indirect github.com/olivere/elastic/v7 v7.0.28 @@ -42,14 +43,13 @@ require ( github.com/segmentio/kafka-go v0.3.6 github.com/shirou/gopsutil v3.20.11+incompatible // indirect github.com/sirupsen/logrus v1.8.1 - github.com/stretchr/testify v1.7.0 + github.com/stretchr/testify v1.8.0 github.com/syndtr/goleveldb v0.0.0-20190318030020-c3a204f8e965 // indirect github.com/x-cray/logrus-prefixed-formatter v0.5.2 github.com/xtgo/uuid v0.0.0-20140804021211-a0b114877d4c // indirect golang.org/x/lint v0.0.0-20200302205851-738671d3881b // indirect golang.org/x/net v0.0.0-20210614182718-04defd469f4e - golang.org/x/tools v0.0.0-20200623185156-456ad74e1464 // indirect - google.golang.org/protobuf v1.26.0-rc.1 + google.golang.org/protobuf v1.27.1 gopkg.in/alecthomas/kingpin.v2 v2.2.6 gopkg.in/mgo.v2 v2.0.0-20190816093944-a6b53ec6cb22 gopkg.in/olivere/elastic.v3 v3.0.56 diff --git a/go.sum b/go.sum index b41a27eef..1d569414e 100644 --- a/go.sum +++ b/go.sum @@ -18,6 +18,9 @@ cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiy cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= collectd.org v0.3.0/go.mod h1:A/8DzQBkF6abtvrT2j/AU/4tiBgJWYyh0y/oB/4MlWE= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/99designs/gqlgen v0.13.1-0.20210728041543-7e38dd46943c h1:tEDQ6XnvZQ98sZd7iqq5pe4YsstBu7TOS6T5GhNsp2s= +github.com/99designs/gqlgen v0.13.1-0.20210728041543-7e38dd46943c/go.mod h1:S7z4boV+Nx4VvzMUpVrY/YuHjFX4n7rDyuTqvAkuoRE= +github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/DATA-DOG/go-sqlmock v1.3.3/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= @@ -25,12 +28,18 @@ github.com/DataDog/datadog-go v4.7.0+incompatible h1:setZNZoivEjeG87iK0abKZ9XHwH github.com/DataDog/datadog-go v4.7.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= github.com/Jeffail/gabs v1.4.0/go.mod h1:6xMvQMK4k33lb7GUUpaAPh6nKMmemQeg5d4gn7/bOXc= github.com/Jeffail/tunny v0.0.0-20171107125207-452a8e97d6a3/go.mod h1:BX3q3G70XX0UmIkDWfDHoDRquDS1xFJA5VTbMf+14wM= +github.com/Masterminds/goutils v1.1.0 
h1:zukEsf/1JZwCMgHiK3GZftabmxiCw4apj3a28RPBiVg= github.com/Masterminds/goutils v1.1.0/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= +github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww= github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= -github.com/Microsoft/go-winio v0.5.0 h1:Elr9Wn+sGKPlkaBvwu4mTrxtmOp3F3yV9qhaHbXGjwU= -github.com/Microsoft/go-winio v0.5.0/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= +github.com/Masterminds/sprig v2.22.0+incompatible h1:z4yfnGrZ7netVz+0EDJ0Wi+5VZCSYp4Z0m2dk6cEM60= +github.com/Masterminds/sprig v2.22.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o= +github.com/Microsoft/go-winio v0.5.2 h1:a9IhgEQBCUEk6QCdml9CiJGhAws+YwffDHEMp1VMrpA= +github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY= +github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= +github.com/Shopify/sarama v1.29.1/go.mod h1:mdtqvCSg8JOxk8PmpTNGyo6wzd4BMm4QXSfDnTXmgkE= github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d h1:G0m3OIz70MZUWq3EgK3CesDbo8upS2Vm9/P3FtgI+Jk= github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= @@ -45,12 +54,17 @@ github.com/TykTechnologies/gorm v1.20.7-0.20210409171139-b5c340f85ed0/go.mod h1: github.com/TykTechnologies/gorpc v0.0.0-20190515174534-b9c10befc5f4 h1:hTjM5Uubg3w9VjNc8WjrDrLiGX14Ih8/ItyXEn2tNUs= github.com/TykTechnologies/gorpc v0.0.0-20190515174534-b9c10befc5f4/go.mod h1:vqhQRhIHefD4jdFo55j+m0vD5NMjx2liq/ubnshQpaY= github.com/TykTechnologies/goverify v0.0.0-20160822133757-7ccc57452ade/go.mod h1:mkS8jKcz8otdfEXhJs1QQ/DKoIY1NFFsRPKS0RwQENI= +github.com/TykTechnologies/graphql-go-tools v1.6.2-0.20220811124354-8d1f142966f8 h1:CA59ssz4bwLkd7pzkDpZOnlMzzraq/TEbJ6xvQpSPCc= +github.com/TykTechnologies/graphql-go-tools v1.6.2-0.20220811124354-8d1f142966f8/go.mod h1:Cxpyt1EQHf8bRqAfZStqbgHif8YWngLga7tpnHRSRwU= github.com/TykTechnologies/leakybucket v0.0.0-20170301023702-71692c943e3c/go.mod h1:GnHUbsQx+ysI10osPhUdTmsxcE7ef64cVp38Fdyd7e0= github.com/TykTechnologies/murmur3 v0.0.0-20180602122059-1915e687e465 h1:A2gBjoX8aF0G3GHEpHyj2f0ixuPkCgcGqmPdKHSkW+0= github.com/TykTechnologies/murmur3 v0.0.0-20180602122059-1915e687e465/go.mod h1:sqH/SPFr11m9cahie7ulBuBX9TOhfBX1sp+qf9jh3Vg= github.com/TykTechnologies/openid2go v0.0.0-20200122120050-1b642583380a/go.mod h1:rGlqNE4CvxZIeiHp0mgrw+/jdGSjJzkZ0n78hhHMdfM= github.com/TykTechnologies/tyk v0.0.0-20200207055804-cf1d1ad81206 h1:dYSY3KkcFkITF+q8FWpPS87ggv1Rex1Vmmu9q4t4Pwc= github.com/TykTechnologies/tyk v0.0.0-20200207055804-cf1d1ad81206/go.mod h1:+WNQ0t1t4ZCh0Z+mDnnyNAQZc5hVJ490iqLOWKPLIMI= +github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM= +github.com/agnivade/levenshtein v1.1.0 h1:n6qGwyHG61v3ABce1rPVZklEYRT8NFpCMrpZdBUbYGM= +github.com/agnivade/levenshtein v1.1.0/go.mod h1:veldBMzWxcCG2ZvUTKD2kJNRdCk5hVbJomOvKkmgYbo= github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod 
h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 h1:JYp7IbQjafoB+tBA3gMyHYHrpOtNuDiK/uB5uXxq5wM= @@ -59,9 +73,13 @@ github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRF github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d h1:UQZhZ2O0vMHr2cI+DC1Mbh0TJxzA3RcLoMsFw+aXw7E= github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= +github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883 h1:bvNMNQO63//z+xNgfBlViaCIJKLlCJ6/fmUseuG0wVQ= github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= github.com/apache/arrow/go/arrow v0.0.0-20191024131854-af6fa24be0db/go.mod h1:VTxUBvSJ3s3eHAg65PNgrsn5BtqCRPdmyXh6rAfdxN0= +github.com/arbovm/levenshtein v0.0.0-20160628152529-48b4e1c0c4d0 h1:jfIu9sQUG6Ig+0+Ap1h4unLjW6YQJpKZVmUzxsD4E/Q= +github.com/arbovm/levenshtein v0.0.0-20160628152529-48b4e1c0c4d0/go.mod h1:t2tdKJDJF9BV14lnkjHmOQgcvEKgtqs5a1N3LNdJhGE= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= +github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/aws/aws-sdk-go v1.29.11/go.mod h1:1KvfttTE3SPKMpo8g2c6jL3ZKfXtFvKscTgahTma5Xg= @@ -98,6 +116,8 @@ github.com/aws/smithy-go v1.13.2 h1:TBLKyeJfXTrTXRHmsv4qWt9IQGYyWThLYaJWSahTOGE= github.com/aws/smithy-go v1.13.2/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA= github.com/beeker1121/goque v0.0.0-20170321141813-4044bc29b280 h1:ZgW7EEoTQvz27wleAVF3XVBqc6eBFqB4BNw4Awg4BN8= github.com/beeker1121/goque v0.0.0-20170321141813-4044bc29b280/go.mod h1:L6dOWBhDOnxUVQsb0wkLve0VCnt2xJW/MI8pdRX4ANw= +github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= +github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= @@ -108,19 +128,24 @@ github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dR github.com/bmizerany/pat v0.0.0-20170815010413-6226ea591a40/go.mod h1:8rLXio+WjiTceGBHIoTvn60HIbs7Hm7bcHjyrSqYB9c= github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps= github.com/bshuster-repo/logrus-logstash-hook v0.4.1/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk= -github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23 h1:D21IyuvjDCshj1/qq+pCNd3VZOAEI9jy6Bi131YlXgI= github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= +github.com/buger/jsonparser v1.1.1 h1:2PnMjfWD7wBILjqQbt530v576A/cAbQvEW9gGIpYMUs= +github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0= github.com/c-bata/go-prompt v0.2.2/go.mod h1:VzqtzE2ksDBcdln8G7mk2RX9QyGjH+OVqOCSiVIqS34= github.com/cenk/backoff 
v2.2.1+incompatible/go.mod h1:7FtoeaSnHoZnmZzz47cM35Y9nSW7tNyaidugnHTaFDE= +github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/certifi/gocertifi v0.0.0-20190905060710-a5e0173ced67/go.mod h1:GJKEexRPVJrBSOjoqN5VNOIKJ5Q3RViH6eu3puDRwx4= github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= +github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/checkpoint-restore/go-criu/v5 v5.3.0/go.mod h1:E/eQpaFtUKGOOSEBZgmKAcn+zUUwWxqcaKZlF54wK8E= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/cilium/ebpf v0.7.0/go.mod h1:/oI2+1shJiTGAMgl6/RgJr36Eo1jzrRcAWbcXO2usCA= github.com/clbanning/mxj v1.8.4 h1:HuhwZtbyvyOw+3Z1AowPkU87JkJUSv751ELWaiTpj8I= github.com/clbanning/mxj v1.8.4/go.mod h1:BVjHeAH+rl9rs6f+QIpeRl0tfu10SXn1pUSa5PVGJng= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= @@ -128,11 +153,22 @@ github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGX github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I= github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= +github.com/containerd/console v1.0.3/go.mod h1:7LqA/THxQ86k76b8c/EMSiaJ3h1eZkMkXar0TQ1gf3U= +github.com/containerd/continuity v0.3.0/go.mod h1:wJEAIwKOm/pBZuBd0JmeTvnLquTB1Ag8espWhkykbPM= +github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= +github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= +github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/cyberdelia/templates v0.0.0-20141128023046-ca7fffd4298c/go.mod h1:GyV+0YP4qX0UQ7r2MoYZ+AvYDp12OF5yg4q8rGnyNh4= +github.com/cyphar/filepath-securejoin v0.2.3/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= github.com/dave/jennifer 
v1.2.0/go.mod h1:fIb+770HOpJ2fmN9EPPKOqm1vMGhB+TwXKMZhrIygKg= +github.com/dave/jennifer v1.4.0/go.mod h1:fIb+770HOpJ2fmN9EPPKOqm1vMGhB+TwXKMZhrIygKg= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -142,10 +178,16 @@ github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZm github.com/dgryski/go-bitstream v0.0.0-20180413035011-3522498ce2c8/go.mod h1:VMaSuZ+SZcx/wljOQKvp5srsbCiKDEb6K2wC4+PiBmQ= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= +github.com/dgryski/trifles v0.0.0-20200323201526-dd97f9abfb48 h1:fRzb/w+pyskVMQ+UbP35JkH8yB7MYb4q/qhBarqZE6g= +github.com/dgryski/trifles v0.0.0-20200323201526-dd97f9abfb48/go.mod h1:if7Fbed8SFyPtHLHbg49SI7NAdJiC5WIA09pe59rfAA= +github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= +github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= +github.com/eapache/go-resiliency v1.2.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 h1:YEetp8/yCZMuEPMUDHG0CW/brkkEp8mzqk2+ODEitlw= github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= +github.com/eclipse/paho.mqtt.golang v1.2.0 h1:1F8mhG9+aO5/xpdtFkW4SxOJB67ukuDC3t2y2qayIX0= github.com/eclipse/paho.mqtt.golang v1.2.0/go.mod h1:H9keYFcgq3Qr5OUJm/JZI/i6U7joQ8SYLhZwfeOo6Ts= github.com/emanoelxavier/openid2go v0.0.0-20190718021401-6345b638bfc9/go.mod h1:hahZBazACLtwLVO5XoLT8pPXTGfRt5bK6XddHEy/XUk= github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g= @@ -154,6 +196,8 @@ github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.m github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/evalphobia/logrus_sentry v0.8.2/go.mod h1:pKcp+vriitUqu9KiWj/VRFbRfFNUwz95/UkgG8a6MNc= +github.com/evanphx/json-patch/v5 v5.1.0 h1:B0aXl1o/1cP8NbviYiBMkcHBtUjIJ1/Ccg6b+SwCLQg= +github.com/evanphx/json-patch/v5 v5.1.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4= github.com/facebookgo/atomicfile v0.0.0-20151019160806-2de1f203e7d5/go.mod h1:JpoxHjuQauoxiFMl1ie8Xc/7TfLuMZ5eOCONd1sUBHg= github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a/go.mod h1:7Ga40egUymuWXxAe151lTNnCv97MddSOVsjpPPkityA= github.com/facebookgo/pidfile v0.0.0-20150612191647-f242e2999868/go.mod h1:3Hzo46xzfVpIdv4lJw7YBp9fUJ7HpUgbjH1fFDgy4qM= @@ -167,6 +211,8 @@ github.com/franela/goblin v0.0.0-20181003173013-ead4ad1d2727 h1:eouy4stZdUKn7n98 github.com/franela/goblin v0.0.0-20181003173013-ead4ad1d2727/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8 h1:a9ENSRDFBUPkJ5lCgVZh26+ZbGyoVJG7yb5SSzF5H54= github.com/franela/goreq 
v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20= +github.com/frankban/quicktest v1.11.3 h1:8sXhOn0uLys67V8EsXLc6eszDs8VXWxL3iRvebPhedY= +github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= @@ -174,6 +220,10 @@ github.com/gemnasium/logrus-graylog-hook v2.0.7+incompatible/go.mod h1:85jwR23cg github.com/getkin/kin-openapi v0.61.0/go.mod h1:7Yn5whZr5kJi6t+kShccXS8ae1APpYTW6yheSwk8Yi4= github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE= +github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= +github.com/gin-gonic/gin v1.6.3 h1:ahKqKTFpO5KTPHxWZjEdPScmYaGtLo8Y4DMHoEsnp14= +github.com/gin-gonic/gin v1.6.3/go.mod h1:75u5sXoLsGZoRN5Sgbi1eraJ4GU3++wFwWzhwvtwp4M= github.com/glycerine/go-unsnap-stream v0.0.0-20180323001048-9f0cb55181dd/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE= github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24= github.com/go-chi/chi/v5 v5.0.0/go.mod h1:BBug9lr0cqtdAhsu6R4AAdvufI0/XBzAQSsUqJpoZOs= @@ -190,6 +240,14 @@ github.com/go-ole/go-ole v1.2.4 h1:nNBDSCOigTSiarFpYE9J/KtEA1IOW4CNeqT9TQDqCxI= github.com/go-ole/go-ole v1.2.4/go.mod h1:XCwSNxSkXRo4vlyPy93sltvi/qJq0jqQhjqQNIwKuxM= github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-playground/assert/v2 v2.0.1 h1:MsBgLAaY856+nPRTKrp3/OZK38U/wa0CcBYNjji3q3A= +github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= +github.com/go-playground/locales v0.13.0 h1:HyWk6mgj5qFqCT5fjGBuRArbVDfE4hi8+e8ceBS/t7Q= +github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8= +github.com/go-playground/universal-translator v0.17.0 h1:icxd5fm+REJzpZx7ZfpaD876Lmtgy7VtROAbHHXk8no= +github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA= +github.com/go-playground/validator/v10 v10.2.0 h1:KgJ0snyC2R9VXYN2rneOtQcw5aHQB1Vv0sFl1UcHBOY= +github.com/go-playground/validator/v10 v10.2.0/go.mod h1:uOYAAleCW8F/7oMFd6aG0GOhaH6EGOAJShg8Id5JGkI= github.com/go-redis/redis v6.15.6+incompatible h1:H9evprGPLI8+ci7fxQx6WNZHJSb7be8FqJQRhdQZ5Sg= github.com/go-redis/redis v6.15.6+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA= github.com/go-redis/redis/v8 v8.3.1 h1:jEPCgHQopfNaABun3NVN9pv2K7RjstY/7UJD6UEKFEY= @@ -199,8 +257,20 @@ github.com/go-sql-driver/mysql v1.5.0 h1:ozyZYNQW3x3HtqT1jira07DN2PArx2v7/mN66gG github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-test/deep v1.0.2-0.20181118220953-042da051cf31/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= +github.com/go-test/deep v1.0.4 h1:u2CU3YKy9I2pmu9pX0eq50wCgjfGIt539SqR7FbHiho= +github.com/go-test/deep v1.0.4/go.mod 
h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= +github.com/go-zookeeper/zk v1.0.2/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw= +github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee h1:s+21KNqlpePfkah2I+gwHF8xmJWRjooY+5248k6m4A0= +github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee/go.mod h1:L0fX3K22YWvt/FAX9NnzrNzcI4wNYi9Yku4O0LKYflo= +github.com/gobwas/pool v0.2.0 h1:QEmUOlnSjWtnpRGHF3SauEiOsy82Cup83Vf2LcMlnc8= +github.com/gobwas/pool v0.2.0/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw= +github.com/gobwas/ws v1.0.2/go.mod h1:szmBTxLgaFppYjEmNtny/v3w89xOydFnnZMcgRRu/EM= +github.com/gobwas/ws v1.0.4 h1:5eXU1CZhpQdq5kXbKb+sECH5Ia5KiO6CYzIzdlVx6Bs= +github.com/gobwas/ws v1.0.4/go.mod h1:szmBTxLgaFppYjEmNtny/v3w89xOydFnnZMcgRRu/EM= github.com/gocraft/health v0.0.0-20170925182251-8675af27fef0 h1:pKjeDsx7HGGbjr7VGI1HksxDJqSjaGED3cSw9GeSI98= github.com/gocraft/health v0.0.0-20170925182251-8675af27fef0/go.mod h1:rWibcVfwbUxi/QXW84U7vNTcIcZFd6miwbt8ritxh/Y= +github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/godbus/dbus/v5 v5.0.6/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gofrs/uuid v3.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gofrs/uuid v3.3.0+incompatible h1:8K4tyRfvU1CYPgJsveYFQMhpFd/wXNM7iK6rR7UHz84= github.com/gofrs/uuid v3.3.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= @@ -208,6 +278,7 @@ github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFG github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= github.com/golang/geo v0.0.0-20190916061304-5b978397cfec/go.mod h1:QZ0nwyI2jOfgRAoBvP+ab5aRr7c9x7lhGEJrKvBwjWI= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= @@ -217,9 +288,13 @@ github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4er github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.1 h1:ocYkMQY5RrXTYgXl7ICpV0IXwlEQGwKIsery4gyXa1U= +github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= @@ -227,11 
+302,13 @@ github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:W github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0 h1:LUVKkCeviFUMKqHa4tXIIij/lbhnMbP7Fn5wKdKkRh4= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.3 h1:fHPg5GQYlCeLIPB9BZqMVR5nR9A+IM5zcgeTdjMYmLA= +github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golangci/lint-1 v0.0.0-20181222135242-d2cdd8c08219/go.mod h1:/X8TswGSh1pIozq4ZwCfxS0WA5JGXguxk94ar/4c87Y= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= @@ -246,8 +323,11 @@ github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg= github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= @@ -255,16 +335,24 @@ github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OI github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gorilla/context v0.0.0-20160226214623-1ea25387ff6f/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= 
github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= +github.com/gorilla/mux v1.6.1/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= +github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4= +github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM= github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc= +github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gotestyourself/gotestyourself v1.4.0/go.mod h1:zZKM6oeNM8k+FRljX1mnzVYeS8wiGgQyvST1/GafPbY= github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE= github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= @@ -285,10 +373,13 @@ github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjG github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-version v1.1.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= +github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= @@ -300,8 +391,13 @@ github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKe github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/huandu/xstrings v1.2.0/go.mod h1:DvyZB1rfVYsBIigL8HwpZgxHwXozlTgGqn63UyNX5k4= +github.com/huandu/xstrings v1.2.1 h1:v6IdmkCnDhJG/S0ivr58PeIfg+tyhqQYy4YsCsQ0Pdc= +github.com/huandu/xstrings v1.2.1/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= +github.com/iancoleman/strcase v0.0.0-20191112232945-16388991a334/go.mod h1:SK73tn/9oHe+/Y0h39VT4UCxmurVJkR5NA7kMEAOgSE= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/imdario/mergo v0.3.7/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= 
+github.com/imdario/mergo v0.3.8 h1:CGgOkSJeqMRmt0D9XLWExdT4m4F1vd3FV3VPt+0VxkQ= +github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/influxdata/flux v0.65.1/go.mod h1:J754/zds0vvpfwuq7Gc2wRdVwEodfpCFM7mYlOw2LqY= github.com/influxdata/influxdb v1.8.3 h1:WEypI1BQFTT4teLM+1qkEcvUi0dAvopAI/ir0vAiBg8= @@ -368,8 +464,23 @@ github.com/jackc/puddle v0.0.0-20190608224051-11cab39313c9/go.mod h1:m4B5Dj62Y0f github.com/jackc/puddle v1.1.0/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= github.com/jackc/puddle v1.1.1/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= github.com/jackc/puddle v1.1.2/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= +github.com/jcmturner/aescts/v2 v2.0.0/go.mod h1:AiaICIRyfYg35RUkr8yESTqvSy7csK90qZ5xfvvsoNs= +github.com/jcmturner/dnsutils/v2 v2.0.0/go.mod h1:b0TnjGOvI/n42bZa+hmXL+kFJZsFT7G4t3HTlQ184QM= +github.com/jcmturner/gofork v1.0.0/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o= +github.com/jcmturner/goidentity/v6 v6.0.1/go.mod h1:X1YW3bgtvwAXju7V3LCIMpY0Gbxyjn/mY9zx4tFonSg= +github.com/jcmturner/gokrb5/v8 v8.4.2/go.mod h1:sb+Xq/fTY5yktf/VxLsE3wlfPqQjp0aWNYyvBVK62bc= +github.com/jcmturner/rpc/v2 v2.0.3/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc= github.com/jehiah/go-strftime v0.0.0-20151206194810-2efbe75097a5 h1:E1bpycfzgfdJWK32+GOJDYVrep2fbX6cN6tYiXd+CGY= github.com/jehiah/go-strftime v0.0.0-20151206194810-2efbe75097a5/go.mod h1:cJ6Cj7dQo+O6GJNiMx+Pa94qKj+TG8ONdKHgMNIyyag= +github.com/jensneuse/abstractlogger v0.0.4 h1:sa4EH8fhWk3zlTDbSncaWKfwxYM8tYSlQ054ETLyyQY= +github.com/jensneuse/abstractlogger v0.0.4/go.mod h1:6WuamOHuykJk8zED/R0LNiLhWR6C7FIAo43ocUEB3mo= +github.com/jensneuse/byte-template v0.0.0-20200214152254-4f3cf06e5c68 h1:E80wOd3IFQcoBxLkAUpUQ3BoGrZ4DxhQdP21+HH1s6A= +github.com/jensneuse/byte-template v0.0.0-20200214152254-4f3cf06e5c68/go.mod h1:0D5r/VSW6D/o65rKLL9xk7sZxL2+oku2HvFPYeIMFr4= +github.com/jensneuse/diffview v1.0.0 h1:4b6FQJ7y3295JUHU3tRko6euyEboL825ZsXeZZM47Z4= +github.com/jensneuse/diffview v1.0.0/go.mod h1:i6IacuD8LnEaPuiyzMHA+Wfz5mAuycMOf3R/orUY9y4= +github.com/jensneuse/pipeline v0.0.0-20200117120358-9fb4de085cd6 h1:y8hvuqbuVGFNpEos+vB5I5X+QxWm0uyTk+5oeOinMjY= +github.com/jensneuse/pipeline v0.0.0-20200117120358-9fb4de085cd6/go.mod h1:UsfzaMt+keVOxa007GcCJMFeTHr6voRfBGMQEW7DkdM= +github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E= github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc= github.com/jinzhu/now v1.1.1 h1:g39TucaRWyV3dwDO++eEc6qf8TVIQ/Da48WmqjZ3i7E= @@ -381,7 +492,9 @@ github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8Hm github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.11 h1:uVUAXhF2To8cbw/3xN3pxj6kk7TYKs98NIrTqPlMWAQ= github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= 
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= @@ -395,10 +508,14 @@ github.com/jwilder/encoding v0.0.0-20170811194829-b4e1701a28ef/go.mod h1:Ct9fl0F github.com/kelseyhightower/envconfig v1.4.0 h1:Im6hONhd3pLkfDFsbRgu68RDNkGF1r3dvMUtDTo2cv8= github.com/kelseyhightower/envconfig v1.4.0/go.mod h1:cccZRl6mQpaq41TPp5QxidR+Sa3axMbJDNb//FQX6Gg= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.4.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= -github.com/klauspost/compress v1.9.8 h1:VMAMUUOh+gaxKTMk+zqbjsSjsIcUcL/LF4o63i82QyA= github.com/klauspost/compress v1.9.8/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= +github.com/klauspost/compress v1.10.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/klauspost/compress v1.11.12/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/klauspost/compress v1.12.2 h1:2KCfW3I9M7nSc5wOqXAlW2v2U6v+w6cbjvbfp+OykW8= +github.com/klauspost/compress v1.12.2/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6/go.mod h1:+ZoRqAPRLkC4NPOvfYeR5KNOrY6TD+/sAC3HXPZgDYg= github.com/klauspost/pgzip v1.0.2-0.20170402124221-0bf5dcad4ada/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= @@ -407,21 +524,28 @@ github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxv github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.2.0 h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw= -github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/labstack/echo/v4 v4.2.1/go.mod h1:AA49e0DZ8kk5jTOOCKNuPR6oTnBS0dYiM4FW1e6jwpg= github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL7NoOu+k= +github.com/leodido/go-urn v1.2.0 h1:hpXL4XnriNwQ/ABnpepYM/1vCLWNDfUNts8dX3xTG6Y= +github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.2.0/go.mod 
h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= -github.com/lib/pq v1.3.0 h1:/qkRGz8zljWiDcFvgpwUpwIAPu3r07TDvs3Rws+o/pU= github.com/lib/pq v1.3.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.10.6 h1:jbk+ZieJ0D7EVGJYpL9QTz7/YW6UHbmdnZWYyK5cdBs= +github.com/lib/pq v1.10.6/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/lintianzhi/graylogd v0.0.0-20180503131252-dc68342f04dc h1:7f0qjuEBw/5vUrP2lyIUgAihl0A6H0E79kswNy6edeE= github.com/lintianzhi/graylogd v0.0.0-20180503131252-dc68342f04dc/go.mod h1:WTHfLzkGmTEe+nyJqdZhFbAWUkyI30IVS9ytgHDJj0I= +github.com/logrusorgru/aurora v0.0.0-20200102142835-e9ef32dff381 h1:bqDmpDG49ZRnB5PcgP0RXtQvnMSgIF14M7CBd2shtXs= +github.com/logrusorgru/aurora v0.0.0-20200102142835-e9ef32dff381/go.mod h1:7rIyQOR62GCctdiQpZ/zOJlFyk6y+94wXzv6RNZgaR4= github.com/logzio/logzio-go v0.0.0-20200316143903-ac8fc0e2910e h1:j4tDETg2tUX0AZq2CClOpW8rBf9rPEBNjiXgQoso4Z8= github.com/logzio/logzio-go v0.0.0-20200316143903-ac8fc0e2910e/go.mod h1:OBprCVuGvtyYcaCmYjE32bF12d5AAHeXS5xI0QbIXMI= github.com/lonelycode/go-uuid v0.0.0-20141202165402-ed3ca8a15a93 h1:WcaWCUFwpiRpIjcM7u27kuy2p5zPLC1KRxB3/bJ7XsI= @@ -431,12 +555,14 @@ github.com/lonelycode/mgohacks v0.0.0-20150820024025-f9c291f7e57e/go.mod h1:xVJq github.com/lonelycode/osin v0.0.0-20160423095202-da239c9dacb6 h1:G2UYdR7/shMh7NMp2ETozj6zlqU5M8b0VqRbdxTXciU= github.com/lonelycode/osin v0.0.0-20160423095202-da239c9dacb6/go.mod h1:x4kc0i0iLfRkNWchVMcLjy+Txcz3XqNbr8iRUGFduLQ= github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= +github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.7.1/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/matryer/moq v0.0.0-20190312154309-6cfb0558e1bd/go.mod h1:9ELz6aaclSIGnZBoaSLZ3NAl1VTufbOrXBPvtcy6WiQ= +github.com/matryer/moq v0.0.0-20200106131100-75d0ddfc0007/go.mod h1:9ELz6aaclSIGnZBoaSLZ3NAl1VTufbOrXBPvtcy6WiQ= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= @@ -465,7 +591,10 @@ github.com/mavricknz/ldap v0.0.0-20160227184754-f5a958005e43/go.mod h1:z76yvVwVu github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b h1:j7+1HpAFS1zy5+Q4qx1fWh90gTKwiN4QCGoY9TWyyO4= github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= +github.com/minio/highwayhash v1.0.1 h1:dZ6IIu8Z14VlC0VpfKofAhCy74wu/Qb5gcn52yWoz/0= +github.com/minio/highwayhash v1.0.1/go.mod h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLTk+kldvVxY= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= +github.com/mitchellh/copystructure v1.0.0 h1:Laisrj+bAB6b/yJwB5Bt3ITZhGJdqmxquMKeZ+mmkFQ= github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= 
github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= @@ -475,18 +604,38 @@ github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUb github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= +github.com/mitchellh/mapstructure v0.0.0-20180203102830-a4e142e9c047/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.2.2 h1:dxe5oCinTXiTIcfgmZecdCzPmAJKd46KsCWc35r0TV4= +github.com/mitchellh/mapstructure v1.2.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/reflectwalk v1.0.0 h1:9D+8oIskB4VJBN5SFlmc27fSlIBZaov1Wpk/IfikLNY= github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/moby/sys/mountinfo v0.5.0/go.mod h1:3bMD3Rg+zkqx8MRYPi7Pyb0Ie97QEBmdxbhnCLlSvSU= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/moesif/moesifapi-go v1.0.6 h1:r3ppy6p5jxzdauziRI3lMtcjDpVH/zW2an2rYXLkNWE= github.com/moesif/moesifapi-go v1.0.6/go.mod h1:wRGgVy0QeiCgnjFEiD13HD2Aa7reI8nZXtCnddNnZGs= +github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ= github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae/go.mod h1:qAyveg+e4CE+eKJXWVjKXM4ck2QobLqTDytGJbLLhJg= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/nats-io/jwt v1.2.2 h1:w3GMTO969dFg+UOKTmmyuu7IGdusK+7Ytlt//OYH/uU= +github.com/nats-io/jwt v1.2.2/go.mod h1:/xX356yQA6LuXI9xWW7mZNpxgF2mBmGecH+Fj34sP5Q= +github.com/nats-io/jwt/v2 v2.0.2 h1:ejVCLO8gu6/4bOKIHQpmB5UhhUJfAQw55yvLWpfmKjI= +github.com/nats-io/jwt/v2 v2.0.2/go.mod h1:VRP+deawSXyhNjXmxPCHskrR6Mq50BqpEI5SEcNiGlY= +github.com/nats-io/nats-server/v2 v2.3.2 h1:SGJLWrjBHsl0DsdY8PeTR3YKEfiUEYVVq2STw9d8MSY= +github.com/nats-io/nats-server/v2 v2.3.2/go.mod h1:dUf7Cm5z5LbciFVwWx54owyCKm8x4/hL6p7rrljhLFY= +github.com/nats-io/nats.go v1.11.1-0.20210623165838-4b75fc59ae30 h1:9GqilBhZaR3xYis0JgMlJjNw933WIobdjKhilXm+Vls= +github.com/nats-io/nats.go v1.11.1-0.20210623165838-4b75fc59ae30/go.mod h1:BPko4oXsySz4aSWeFgOHLZs3G4Jq4ZAyE6/zMCxRT6w= +github.com/nats-io/nkeys v0.2.0/go.mod h1:XdZpAbhgyyODYqjTawOnIOI7VlbKSarI9Gfy1tqEu/s= +github.com/nats-io/nkeys v0.3.0 
h1:cgM5tL53EvYRU+2YLXIK0G2mJtK12Ft9oeooSZMA2G8= +github.com/nats-io/nkeys v0.3.0/go.mod h1:gvUNGjVcM2IPr5rCsRsC6Wb3Hr2CQAm08dsxtV6A5y4= +github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw= +github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= github.com/newrelic/go-agent v2.13.0+incompatible/go.mod h1:a8Fv1b/fYhFSReoTU6HDkTYIMZeSVNffmoS726Y0LzQ= github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= @@ -506,22 +655,32 @@ github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7J github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.10.2 h1:aY/nuoWlKJud2J6U0E3NWsjlg+0GtwXxgEqthRdzlcs= github.com/onsi/gomega v1.10.2/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/opencontainers/runc v1.1.2/go.mod h1:Tj1hFw6eFWp/o33uxGf5yF2BX5yz2Z6iptFpuvbbKqc= +github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/selinux v1.10.0/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI= +github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74= github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/opentracing/opentracing-go v1.0.3-0.20180606204148-bd9c31933947/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= +github.com/ory/dockertest v3.3.5+incompatible/go.mod h1:1vX4m9wsvi00u5bseYwXaSnhNrne+V0E6LAcBILJdPs= github.com/oschwald/maxminddb-golang v1.5.0 h1:rmyoIV6z2/s9TCJedUuDiKht2RN12LWJ1L7iRGtWY64= github.com/oschwald/maxminddb-golang v1.5.0/go.mod h1:3jhIUymTJ5VREKyIhWm66LJiQt04F0UCDdodShpjWsY= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/paulbellamy/ratecounter v0.2.0/go.mod h1:Hfx1hDpSGoqxkVVpBi/IlYD7kChlfo5C6hzIHwPqfFE= +github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/pelletier/go-toml v1.6.0/go.mod h1:5N711Q9dKgbdkxHL+MEfF31hpT7l0S0s/t2kKREewys= github.com/peterbourgon/g2s v0.0.0-20170223122336-d4e7ad98afea/go.mod h1:1VcHEd3ro4QMoHfiNl/j7Jkln9+KQuorp0PItHMJYNg= github.com/peterh/liner v1.0.1-0.20180619022028-8c1271fcf47f/go.mod h1:xIteQHvHuaLYG9IFj6mSxM0fCKrs34IrEQUhOYuGPHc= github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU= github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= -github.com/pierrec/lz4 v2.0.5+incompatible h1:2xWsjqPFWcplujydGg4WmhC/6fZqK42wMM8aXeqhl0I= github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pierrec/lz4 v2.6.0+incompatible 
h1:Ix9yFKn1nSPBLFl/yZknTp8TU5G4Ps0JDmguYK6iH1A= +github.com/pierrec/lz4 v2.6.0+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pires/go-proxyproto v0.0.0-20190615163442-2c19fd512994/go.mod h1:6/gX3+E/IYGa0wMORlSMla999awQFdbaeQCHjSMKIzY= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -554,24 +713,35 @@ github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsT github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/qri-io/jsonpointer v0.1.1 h1:prVZBZLL6TW5vsSB9fFHFAMBLI4b0ri5vribQlTJiBA= +github.com/qri-io/jsonpointer v0.1.1/go.mod h1:DnJPaYgiKu56EuDp8TU5wFLdZIcAnb/uH9v37ZaMV64= +github.com/qri-io/jsonschema v0.2.1 h1:NNFoKms+kut6ABPf6xiKNM5214jzxAhDBrPHCJ97Wg0= +github.com/qri-io/jsonschema v0.2.1/go.mod h1:g7DPkiOsK1xv6T/Ao5scXRkd+yTFygcANPBaaqW+VrI= github.com/quipo/statsd v0.0.0-20160923160612-75b7afedf0d2 h1:IvjiJDGCF8L8TjKHQKmLAjWztpKDCAaRifiRMdGzWk0= github.com/quipo/statsd v0.0.0-20160923160612-75b7afedf0d2/go.mod h1:1COUodqytMiv/GkAVUGhc0CA6e8xak5U4551TY7iEe0= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/retailnext/hllpp v1.0.1-0.20180308014038-101a6d2f8b52/go.mod h1:RDpi1RftBQPUCDRw6SmxeaREsAaRKnOclghuzp/WRzc= github.com/robertkowalski/graylog-golang v0.0.0-20151121031040-e5295cfa2827 h1:D2Xs0bSuqpKnUOOlK4yu6lloeOs4+oD+pjbOfsxgWu0= github.com/robertkowalski/graylog-golang v0.0.0-20151121031040-e5295cfa2827/go.mod h1:jONcYFk83vUF1lv0aERAwaFtDM9wUW4BMGmlnpLJyZY= github.com/robertkrimen/otto v0.0.0-20180617131154-15f95af6e78d/go.mod h1:xvqspoSXJTIpemEonrMDFq6XzwHYYgToXWj5eRX1OtY= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rs/cors v1.6.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU= github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc= +github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= +github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= github.com/satori/go.uuid v1.2.0 h1:0uYX9dsZ2yD7q2RtLRtPSdGDWzjeM3TbMJP9utgA0ww= github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= +github.com/sebdah/goldie v0.0.0-20180424091453-8784dd1ab561 h1:IY+sDBJR/wRtsxq+626xJnt4Tw7/ROA9cDIR8MMhWyg= +github.com/sebdah/goldie 
v0.0.0-20180424091453-8784dd1ab561/go.mod h1:lvjGftC8oe7XPtyrOidaMi0rp5B9+XY/ZRUynGnuaxQ= +github.com/seccomp/libseccomp-golang v0.9.2-0.20210429002308-3879420cc921/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg= github.com/segmentio/analytics-go v0.0.0-20160711225931-bdb0aeca8a99 h1:EDTpauhQs+xCzVCaO24ODBl5du/xVcJgHj6RciiFWgA= github.com/segmentio/analytics-go v0.0.0-20160711225931-bdb0aeca8a99/go.mod h1:C7CYBtQWk4vRk2RyLu0qOcbHJ18E3F1HV2C/8JvKN48= github.com/segmentio/backo-go v0.0.0-20160424052352-204274ad699c h1:rsRTAcCR5CeNLkvgBVSjQoDGRRt6kggsE6XYBqCv2KQ= @@ -581,11 +751,16 @@ github.com/segmentio/kafka-go v0.2.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfP github.com/segmentio/kafka-go v0.3.6 h1:+JauPDvHurc4XSJVGniNwFuv4NmRLr1CxWvhWkRAtXA= github.com/segmentio/kafka-go v0.3.6/go.mod h1:8rEphJEczp+yDE/R5vwmaqZgF1wllrl4ioQcNKB8wVA= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= +github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0= +github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= github.com/shirou/gopsutil v3.20.11+incompatible h1:LJr4ZQK4mPpIV5gOa4jCOKOGb4ty4DZO54I4FGqIpto= github.com/shirou/gopsutil v3.20.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4= github.com/shopspring/decimal v0.0.0-20200227202807-02e2044944cc h1:jUIKcSPO9MoMJBbEoyE/RJoE8vz7Mb8AjvifMMwSyvY= github.com/shopspring/decimal v0.0.0-20200227202807-02e2044944cc/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= +github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg= +github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/shurcooL/vfsgen v0.0.0-20180121065927-ffb13db8def0/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= @@ -601,39 +776,70 @@ github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9 github.com/smartystreets/gunit v1.1.3/go.mod h1:EH5qMBab2UclzXUcpR8b93eHsIlp9u+pDQIRp5DZNzQ= github.com/smartystreets/gunit v1.4.2/go.mod h1:ZjM1ozSIMJlAz/ay4SG8PeKF00ckUp+zMHZXV9/bvak= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= +github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= github.com/square/go-jose v2.4.1+incompatible/go.mod h1:7MxpAF/1WTVUu8Am+T5kNy+t0902CaLWM4Z745MkOa8= github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= github.com/stretchr/objx v0.1.0/go.mod 
h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48= github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= +github.com/stretchr/objx v0.4.0 h1:M2gUjqZET1qApGOWNSnZ49BAIMX4F/1plDv3+l31EJ4= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/testify v1.2.0/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.2.1/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/syndtr/goleveldb v0.0.0-20190318030020-c3a204f8e965 h1:V/AztY/q2oW5ghho7YMgUJQkKvSACHRxpeDyT5DxpIo= github.com/syndtr/goleveldb v0.0.0-20190318030020-c3a204f8e965/go.mod h1:9OrXJhf154huy1nPWmuSrkgjPUtUNhA+Zmy+6AESzuA= +github.com/tidwall/gjson v1.11.0 h1:C16pk7tQNiH6VlCrtIXL1w8GaOsi1X3W8KDkE1BuYd4= +github.com/tidwall/gjson v1.11.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= +github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA= +github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= +github.com/tidwall/pretty v1.2.0 h1:RWIZEg2iJ8/g6fDDYzMpobmaoGh5OLl4AXtGUGPcqCs= +github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= +github.com/tidwall/sjson v1.0.4 h1:UcdIRXff12Lpnu3OLtZvnc03g4vH2suXDXhBwBqmzYg= +github.com/tidwall/sjson v1.0.4/go.mod h1:bURseu1nuBkFpIES5cz6zBtjmYeOQmEESshn7VpF15Y= github.com/tinylib/msgp v1.0.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= github.com/uber-go/atomic v1.4.0/go.mod h1:/Ct5t2lcmbJ4OSe/waGBoaVvVqtO0bmtfVNex1PFV8g= github.com/uber/jaeger-client-go v2.19.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= github.com/uber/jaeger-lib v2.2.0+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= +github.com/ugorji/go v1.1.7 h1:/68gy2h+1mWMrwZFeD1kQialdSzAb432dtpeJ42ovdo= +github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= +github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= +github.com/ugorji/go/codec v1.1.7 h1:2SvQaVZ1ouYrrKKwoSk2pzd4A9evlKJb9oTL+OaLUSs= +github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= +github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/urfave/cli/v2 v2.1.1/go.mod 
h1:SE9GqnLQmjVa0iPEY0f1w3ygNIYcIJ0OKPMoW2caLfQ= github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8= github.com/valyala/fasttemplate v1.2.1/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ= +github.com/vektah/dataloaden v0.2.1-0.20190515034641-a19b9a6e7c9e/go.mod h1:/HUdMve7rvxZma+2ZELQeNh88+003LL7Pf/CZ089j8U= +github.com/vektah/gqlparser/v2 v2.2.0 h1:bAc3slekAAJW6sZTi07aGq0OrfaCjj4jxARAaC7g2EM= +github.com/vektah/gqlparser/v2 v2.2.0/go.mod h1:i3mQIGIrbK2PD1RrCeMTlVbkF2FJ6WkU1KJlJlC+3F4= +github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE= +github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU= github.com/willf/bitset v1.1.3/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= github.com/x-cray/logrus-prefixed-formatter v0.5.2 h1:00txxvfBM9muc0jiLIEAkAcIMJzfthRT6usrui8uGmg= github.com/x-cray/logrus-prefixed-formatter v0.5.2/go.mod h1:2duySbKsL6M18s5GU7VPsoEPHyzalCE06qoARUCeBBE= -github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c h1:u40Z8hqBAAQyv+vATcGgV0YCnDjqSL7/q/JyPhhJSPk= github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I= -github.com/xdg/stringprep v1.0.0 h1:d9X0esnoa3dFsV0FG35rAT0RIhYFlPq7MiP+DW89La0= +github.com/xdg/scram v1.0.3 h1:nTadYh2Fs4BK2xdldEa2g5bbaZp0/+1nJMMPtPxS/to= +github.com/xdg/scram v1.0.3/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I= github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= +github.com/xdg/stringprep v1.0.3 h1:cmL5Enob4W83ti/ZHuZLuKD/xqJfus4fVPwE+/BDm+4= +github.com/xdg/stringprep v1.0.3/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo= github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0= @@ -642,9 +848,11 @@ github.com/xeipuuv/gojsonschema v0.0.0-20171025060643-212d8a0df7ac h1:4VBKAdTNqx github.com/xeipuuv/gojsonschema v0.0.0-20171025060643-212d8a0df7ac/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs= github.com/xenolf/lego v0.3.2-0.20170618175828-28ead50ff1ca/go.mod h1:fwiGnfsIjG7OHPfOvgK7Y/Qo6+2Ox0iozjNTkZICKbY= github.com/xlab/treeprint v0.0.0-20180616005107-d6fb6747feb6/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg= +github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= github.com/xtgo/uuid v0.0.0-20140804021211-a0b114877d4c h1:3lbZUMbMiGUW/LMkfsEABsc5zNT9+b1CvsJx47JzJ8g= github.com/xtgo/uuid v0.0.0-20140804021211-a0b114877d4c/go.mod h1:UrdRz5enIKZ63MEE3IF9l2/ebyx59GyGgPi+tICQdmM= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= @@ -655,15 +863,27 @@ go.opentelemetry.io/otel v0.13.0 h1:2isEnyzjjJZq6r2EKMsFj4TxiQiexsM04AVhwbR/oBA= 
go.opentelemetry.io/otel v0.13.0/go.mod h1:dlSNewoRYikTkotEnxdmuBHgzT+k/idJSfDv/FxEnOY= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/atomic v1.6.0 h1:Ezj3JGmsOnG1MoRWQkPBsKLe9DwWD9QeXzTRzzldNVk= +go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= +go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/goleak v1.1.10 h1:z+mqJhf6ss6BSfSM671tgKyZBFPTTJM+HLxnhPC3wu0= +go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= +go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4= +go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= +go.uber.org/zap v1.18.1 h1:CSUJ2mjFszzEWt4CdKISEuChVIXGBn3lAPwkRGyVrc4= +go.uber.org/zap v1.18.1/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190411191339-88737f569e3a/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= golang.org/x/crypto v0.0.0-20190506204251-e1dfcc566284/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= @@ -673,11 +893,15 @@ golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191227163750-53104e6ec876/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad h1:DN0cp81fZ3njFcrLCytUHRSUkqBjfTo4Tx9RJTWs0EY= +golang.org/x/crypto v0.0.0-20201112155050-0c6587e931a9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= 
golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= +golang.org/x/crypto v0.0.0-20210314154223-e6e6c4f2bb5b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e h1:gsTQYXdTw2Gq7RBsWvlQ91b+aEQ6bXFUngBGuR8sPpI= +golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -706,8 +930,9 @@ golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCc golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.2.0 h1:KU7oHjnv3XNWfa5COkzUifxZmxp1TyI7ImMXqFxLwvQ= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -727,14 +952,18 @@ golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191009170851-d66e71096ffb/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210614182718-04defd469f4e 
h1:XpT3nA5TvE525Ne3hInMh6+GETgn27Zfm9dxsThnX2Q= golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -748,15 +977,19 @@ golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201207232520-09787c993a3a h1:DcqTD9SDLc+1P/r1EmRBwnVsrOwW+kk2vWf9n+1sGhs= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190129075346-302c3dd5f1cc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190130150945-aca44879d564/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -766,6 +999,7 @@ golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -774,6 +1008,7 @@ golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -790,11 +1025,18 @@ golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40 h1:JWgyZ1qgdTaF3N3oxC+MdTV7qvEEgHo3otj+HB5CM7Q= golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211116061358-0a5406a5449c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220405210540-1e041c57c461 h1:kHVeDEnfKn3T238CvrUcz6KeEsFHVaKh4kMTt6Wsysg= +golang.org/x/sys v0.0.0-20220405210540-1e041c57c461/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 h1:v+OssWQX+hTHEmOBgwxdZxK4zHq3yOs8F9J7mk0PY8E= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= @@ -806,12 +1048,15 @@ golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba h1:O8mE0/t419eoIwhTFpKVkHiTs/Igowgfkj25AcZrtiE= 
golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190125232054-d66bd3c5d5a6/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= @@ -821,6 +1066,7 @@ golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3 golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190425163242-31fd60d6bfdc/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190515012406-7d7faa4812bd/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= @@ -831,6 +1077,7 @@ golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -839,14 +1086,16 @@ golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapK golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200108203644-89082a384178/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200623185156-456ad74e1464 h1:3pMGuJd09Tet0JddXuSU1doOjbRkkVtNjNG+/x8cmC8= -golang.org/x/tools v0.0.0-20200623185156-456ad74e1464/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a 
h1:CB3a9Nez8M13wwlr/E2YtwoU+qYHKfC+JrDa45RXXoQ= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= gonum.org/v1/gonum v0.0.0-20181121035319-3f7ecaa7e8ca/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= gonum.org/v1/gonum v0.6.0/go.mod h1:9mxDZsDKxgMAuccQkewq682L+0eCu4dCN2yonUJTCLU= @@ -903,16 +1152,19 @@ google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2 google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -google.golang.org/protobuf v1.26.0-rc.1 h1:7QnIQpGRHE5RnLKnESfDoxm2dTapTZua5a0kS0A+VXQ= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ= +google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= gopkg.in/Masterminds/sprig.v2 v2.21.0/go.mod h1:DtHmW+kdrJpYMY6Mk6OHFNi/8EBAnNYVRUffwRCNHgA= gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/inconshreveable/log15.v2 v2.0.0-20180818164646-67afb5ed74ec/go.mod h1:aPpfJ7XW+gOuirDoZ8gHhLh3kZ1B08FtV2bbmy7Jv3s= @@ -939,8 +1191,10 @@ gopkg.in/yaml.v2 v2.2.5/go.mod 
h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gorm.io/driver/mysql v1.0.3 h1:+JKBYPfn1tygR1/of/Fh2T8iwuVwzt+PEJmKaXzMQXg= gorm.io/driver/mysql v1.0.3/go.mod h1:twGxftLBlFgNVNakL7F+P/x9oYqoymG3YYT8cAfI9oI= gorm.io/driver/postgres v1.0.5 h1:raX6ezL/ciUmaYTvOq48jq1GE95aMC0CmxQYbxQ4Ufw= @@ -952,6 +1206,12 @@ honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +nhooyr.io/websocket v1.8.7 h1:usjR2uOr/zjjkVMy0lW+PPohFok7PCow5sDjLgX4P4g= +nhooyr.io/websocket v1.8.7/go.mod h1:B70DZP8IakI65RVQ51MsWP/8jndNma26DVA/nFSCgW0= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/letsencrypt v0.0.2/go.mod h1:buyQKZ6IXrRnB7TdkHP0RyEybLx18HHyOSoTyoOLqNY= rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= +sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU= +sourcegraph.com/sourcegraph/appdash-data v0.0.0-20151005221446-73f23eafcf67/go.mod h1:L5q+DGLGOQFpo1snNEkLOJT2d1YTW66rWNzatr3He1k= diff --git a/pumps/graph_mongo.go b/pumps/graph_mongo.go new file mode 100644 index 000000000..13bf40103 --- /dev/null +++ b/pumps/graph_mongo.go @@ -0,0 +1,149 @@ +package pumps + +import ( + "context" + "fmt" + "strings" + + "github.com/TykTechnologies/tyk-pump/analytics" + "github.com/mitchellh/mapstructure" + "github.com/sirupsen/logrus" +) + +const mongoGraphPrefix = "mongo-graph-pump" + +type GraphMongoPump struct { + CommonPumpConfig + MongoPump +} + +func (g *GraphMongoPump) New() Pump { + return &GraphMongoPump{} +} + +func (g *GraphMongoPump) GetEnvPrefix() string { + return g.dbConf.EnvPrefix +} + +func (g *GraphMongoPump) GetName() string { + return "MongoDB Graph Pump" +} + +func (g *GraphMongoPump) Init(config interface{}) error { + g.dbConf = &MongoConf{} + g.log = log.WithField("prefix", mongoGraphPrefix) + g.MongoPump.CommonPumpConfig = g.CommonPumpConfig + + err := mapstructure.Decode(config, &g.dbConf) + if err != nil { + g.log.WithError(err).Warn("Failed to decode configuration: ") + return err + } + g.log.WithFields(logrus.Fields{ + "url": g.dbConf.GetBlurredURL(), + "collection_name": g.dbConf.CollectionName, + }).Info("Init") + + if err := mapstructure.Decode(config, &g.dbConf.BaseMongoConf); err != nil { + return err + } + + if g.dbConf.MaxInsertBatchSizeBytes == 0 { + g.log.Info("-- No max batch size 
set, defaulting to 10MB") + g.dbConf.MaxInsertBatchSizeBytes = 10 * MiB + } + + if g.dbConf.MaxDocumentSizeBytes == 0 { + g.log.Info("-- No max document size set, defaulting to 10MB") + g.dbConf.MaxDocumentSizeBytes = 10 * MiB + } + + g.connect() + + g.capCollection() + + indexCreateErr := g.ensureIndexes() + if indexCreateErr != nil { + g.log.Error(indexCreateErr) + } + + g.log.Debug("MongoDB DB CS: ", g.dbConf.GetBlurredURL()) + g.log.Debug("MongoDB Col: ", g.dbConf.CollectionName) + + g.log.Info(g.GetName() + " Initialized") + + return nil +} + +func (g *GraphMongoPump) WriteData(ctx context.Context, data []interface{}) error { + collectionName := g.dbConf.CollectionName + if collectionName == "" { + g.log.Warn("no collection name") + return fmt.Errorf("no collection name") + } + + g.log.Debug("Attempting to write ", len(data), " records...") + + for g.dbSession == nil { + g.log.Debug("Connecting to analytics store") + g.connect() + } + accumulateSet := g.AccumulateSet(data, true) + + errCh := make(chan error, len(accumulateSet)) + for _, dataSet := range accumulateSet { + go func(dataSet []interface{}, errCh chan error) { + sess := g.dbSession.Copy() + defer sess.Close() + + // make a graph record array with variable length in case there are errors with some conversion + finalSet := make([]interface{}, 0) + for _, d := range dataSet { + r, ok := d.(analytics.AnalyticsRecord) + if !ok { + continue + } + gr, err := r.ToGraphRecord() + if err != nil { + errCh <- err + g.log.WithError(err).Warn("error converting 1 record to graph record") + continue + } + finalSet = append(finalSet, gr) + } + + analyticsCollection := sess.DB("").C(collectionName) + + g.log.WithFields(logrus.Fields{ + "collection": collectionName, + "number of records": len(finalSet), + }).Debug("Attempt to purge records") + + err := analyticsCollection.Insert(finalSet...) 
+ if err != nil { + g.log.WithFields(logrus.Fields{"collection": collectionName, "number of records": len(finalSet)}).Error("Problem inserting to mongo collection: ", err) + + if strings.Contains(strings.ToLower(err.Error()), "closed explicitly") { + g.log.Warning("--> Detected connection failure!") + } + errCh <- err + return + } + errCh <- nil + g.log.WithFields(logrus.Fields{ + "collection": collectionName, + "number of records": len(finalSet), + }).Info("Completed purging the records") + }(dataSet, errCh) + } + + for range accumulateSet { + err := <-errCh + if err != nil { + return err + } + } + g.log.Info("Purged ", len(data), " records...") + + return nil +} diff --git a/pumps/graph_mongo_test.go b/pumps/graph_mongo_test.go new file mode 100644 index 000000000..c7ec8e172 --- /dev/null +++ b/pumps/graph_mongo_test.go @@ -0,0 +1,343 @@ +package pumps + +import ( + "context" + "encoding/base64" + "testing" + + "github.com/TykTechnologies/tyk-pump/analytics" + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "github.com/stretchr/testify/assert" +) + +const rawGQLRequest = `POST / HTTP/1.1 +Host: localhost:8181 +User-Agent: PostmanRuntime/7.29.2 +Content-Length: 58 +Accept: */* +Accept-Encoding: gzip, deflate, br +Content-Type: application/json +Postman-Token: e6d4bc44-3268-40ae-888b-d84bb5ea07fd + +{"query":"{\n country(code: \"NGN\"){\n code\n }\n}"}` + +const rawGQLResponse = `HTTP/0.0 200 OK +Content-Length: 25 +Connection: close +Content-Type: application/json +X-Ratelimit-Limit: 0 +X-Ratelimit-Remaining: 0 +X-Ratelimit-Reset: 0 + +{"data":{"country":null}}` + +const rawGQLResponseWithError = `HTTP/0.0 200 OK +Content-Length: 61 +Connection: close +Content-Type: application/json +X-Ratelimit-Limit: 0 +X-Ratelimit-Remaining: 0 +X-Ratelimit-Reset: 0 + +{"data":{"country":null},"errors":[{"message":"test error"}]}` + +const schema = `type Query { + countries(filter: CountryFilterInput): [Country!]! + country(code: ID!): Country + continents(filter: ContinentFilterInput): [Continent!]! + continent(code: ID!): Continent + languages(filter: LanguageFilterInput): [Language!]! + language(code: ID!): Language +} + +type Country { + code: ID! + name: String! + native: String! + phone: String! + continent: Continent! + capital: String + currency: String + languages: [Language!]! + emoji: String! + emojiU: String! + states: [State!]! +} + +type Continent { + code: ID! + name: String! + countries: [Country!]! +} + +type Language { + code: ID! + name: String + native: String + rtl: Boolean! +} + +type State { + code: String + name: String! + country: Country! 
+} + +input StringQueryOperatorInput { + eq: String + ne: String + in: [String] + nin: [String] + regex: String + glob: String +} + +input CountryFilterInput { + code: StringQueryOperatorInput + currency: StringQueryOperatorInput + continent: StringQueryOperatorInput +} + +input ContinentFilterInput { + code: StringQueryOperatorInput +} + +input LanguageFilterInput { + code: StringQueryOperatorInput +}` + +const rawHTTPReq = `GET /get HTTP/1.1 +Host: localhost:8181 +User-Agent: PostmanRuntime/7.29.2 +Accept: */* +Accept-Encoding: gzip, deflate, br +Postman-Token: a67c3054-aa1a-47f3-9bca-5dbde04c8565 +` + +const rawHTTPResponse = ` +HTTP/1.1 200 OK +Content-Length: 376 +Access-Control-Allow-Credentials: true +Access-Control-Allow-Origin: * +Connection: close +Content-Type: application/json +Date: Tue, 04 Oct 2022 06:33:23 GMT +Server: gunicorn/19.9.0 +X-Ratelimit-Limit: 0 +X-Ratelimit-Remaining: 0 +X-Ratelimit-Reset: 0 + +{ + "args": {}, + "headers": { + "Accept": "*/*", + "Accept-Encoding": "gzip, deflate, br", + "Host": "httpbin.org", + "Postman-Token": "a67c3054-aa1a-47f3-9bca-5dbde04c8565", + "User-Agent": "PostmanRuntime/7.29.2", + "X-Amzn-Trace-Id": "Root=1-633bd3b3-6345504724f3295b68d7dcd3" + }, + "origin": "::1, 102.89.45.253", + "url": "http://httpbin.org/get" +} + +` + +func TestGraphMongoPump_WriteData(t *testing.T) { + c := Conn{} + c.ConnectDb() + defer c.CleanDb() + + conf := defaultConf() + pump := GraphMongoPump{ + MongoPump: MongoPump{ + dbConf: &conf, + }, + } + pump.log = log.WithField("prefix", mongoPrefix) + pump.MongoPump.CommonPumpConfig = pump.CommonPumpConfig + pump.dbConf.CollectionCapEnable = true + pump.dbConf.CollectionCapMaxSizeBytes = 0 + + type customRecord struct { + rawRequest string + rawResponse string + schema string + tags []string + responseCode int + } + + testCases := []struct { + expectedError string + name string + expectedGraphRecords []analytics.GraphRecord + records []customRecord + }{ + { + name: "all records written", + records: []customRecord{ + { + rawRequest: rawGQLRequest, + rawResponse: rawGQLResponse, + schema: schema, + tags: []string{analytics.PredefinedTagGraphAnalytics}, + }, + { + rawRequest: rawGQLRequest, + rawResponse: rawGQLResponseWithError, + schema: schema, + tags: []string{analytics.PredefinedTagGraphAnalytics}, + }, + { + rawRequest: rawGQLRequest, + rawResponse: rawGQLResponse, + schema: schema, + tags: []string{analytics.PredefinedTagGraphAnalytics}, + responseCode: 500, + }, + }, + expectedGraphRecords: []analytics.GraphRecord{ + { + Types: map[string][]string{ + "Country": {"code"}, + }, + OperationType: "Query", + HasErrors: false, + Errors: []analytics.GraphError{}, + }, + { + Types: map[string][]string{ + "Country": {"code"}, + }, + OperationType: "Query", + HasErrors: true, + Errors: []analytics.GraphError{ + { + Message: "test error", + Path: []interface{}{}, + }, + }, + }, + { + Types: map[string][]string{ + "Country": {"code"}, + }, + OperationType: "Query", + HasErrors: true, + Errors: []analytics.GraphError{}, + }, + }, + }, + { + name: "contains non graph records", + records: []customRecord{ + { + rawRequest: rawGQLRequest, + rawResponse: rawGQLResponse, + schema: schema, + tags: []string{analytics.PredefinedTagGraphAnalytics}, + }, + { + rawRequest: rawHTTPReq, + rawResponse: rawHTTPResponse, + }, + }, + expectedGraphRecords: []analytics.GraphRecord{ + { + Types: map[string][]string{ + "Country": {"code"}, + }, + OperationType: "Query", + HasErrors: false, + Errors: []analytics.GraphError{}, + }, + }, + }, + 
{ + name: "skip empty request response", + records: []customRecord{ + { + rawRequest: "", + rawResponse: rawGQLResponse, + schema: schema, + tags: []string{analytics.PredefinedTagGraphAnalytics}, + }, + { + rawResponse: "", + rawRequest: rawGQLRequest, + schema: schema, + tags: []string{analytics.PredefinedTagGraphAnalytics}, + }, + { + rawRequest: rawGQLRequest, + rawResponse: rawGQLResponse, + tags: []string{analytics.PredefinedTagGraphAnalytics}, + }, + }, + }, + } + + // clean db before start + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + records := make([]interface{}, 0) + for _, cr := range tc.records { + r := analytics.AnalyticsRecord{ + APIName: "Test API", + Path: "POST", + RawRequest: base64.StdEncoding.EncodeToString([]byte(cr.rawRequest)), + RawResponse: base64.StdEncoding.EncodeToString([]byte(cr.rawResponse)), + ApiSchema: base64.StdEncoding.EncodeToString([]byte(cr.schema)), + Tags: cr.tags, + } + if cr.responseCode != 0 { + r.ResponseCode = cr.responseCode + } + records = append(records, r) + } + + err := pump.WriteData(context.Background(), records) + if tc.expectedError != "" { + assert.ErrorContains(t, err, tc.expectedError) + } else { + assert.NoError(t, err) + } + + // now check for the written data + sess := pump.dbSession.Copy() + defer func() { + if err := sess.DB("").C(conf.CollectionName).DropCollection(); err != nil { + pump.log.WithError(err).Warn("error dropping collection") + } + }() + analyticsColl := sess.DB("").C(conf.CollectionName) + var results []analytics.GraphRecord + query := analyticsColl.Find(nil) + assert.NoError(t, query.All(&results)) + if diff := cmp.Diff(tc.expectedGraphRecords, results, cmpopts.IgnoreFields(analytics.GraphRecord{}, "AnalyticsRecord")); diff != "" { + t.Error(diff) + } + }) + } +} + +func TestGraphMongoPump_Init(t *testing.T) { + pump := GraphMongoPump{} + t.Run("successful init", func(t *testing.T) { + conf := defaultConf() + assert.NoError(t, pump.Init(conf)) + }) + t.Run("invalid conf type", func(t *testing.T) { + assert.ErrorContains(t, pump.Init("test"), "expected a map") + }) + t.Run("max document and insert size set", func(t *testing.T) { + conf := defaultConf() + conf.MaxInsertBatchSizeBytes = 0 + conf.MaxDocumentSizeBytes = 0 + err := pump.Init(conf) + assert.NoError(t, err) + assert.Equal(t, 10*MiB, pump.dbConf.MaxDocumentSizeBytes) + assert.Equal(t, 10*MiB, pump.dbConf.MaxInsertBatchSizeBytes) + }) +} diff --git a/pumps/init.go b/pumps/init.go index a93ce69f7..7cf49f39a 100644 --- a/pumps/init.go +++ b/pumps/init.go @@ -32,4 +32,5 @@ func init() { AvailablePumps["sql_aggregate"] = &SQLAggregatePump{} AvailablePumps["stdout"] = &StdOutPump{} AvailablePumps["timestream"] = &TimestreamPump{} + AvailablePumps["mongo-graph"] = &GraphMongoPump{} } diff --git a/pumps/mongo.go b/pumps/mongo.go index 79aef9826..b1d968244 100644 --- a/pumps/mongo.go +++ b/pumps/mongo.go @@ -495,7 +495,7 @@ func (m *MongoPump) WriteData(ctx context.Context, data []interface{}) error { m.log.Debug("Connecting to analytics store") m.connect() } - accumulateSet := m.AccumulateSet(data) + accumulateSet := m.AccumulateSet(data, false) errCh := make(chan error, len(accumulateSet)) for _, dataSet := range accumulateSet { @@ -540,8 +540,7 @@ func (m *MongoPump) WriteData(ctx context.Context, data []interface{}) error { return nil } -func (m *MongoPump) AccumulateSet(data []interface{}) [][]interface{} { - +func (m *MongoPump) AccumulateSet(data []interface{}, isForGraphRecords bool) [][]interface{} { accumulatorTotal := 0 
returnArray := make([][]interface{}, 0) thisResultSet := make([]interface{}, 0) @@ -553,9 +552,14 @@ func (m *MongoPump) AccumulateSet(data []interface{}) [][]interface{} { } // Skip this record if it is a graph analytics record, they will be handled in a different pump - if thisItem.IsGraphRecord() { + if thisItem.IsGraphRecord() != isForGraphRecords { continue } + if isForGraphRecords { + if thisItem.RawRequest == "" || thisItem.RawResponse == "" || thisItem.ApiSchema == "" { + continue + } + } // Add 1 KB for metadata as average sizeBytes := len(thisItem.RawRequest) + len(thisItem.RawResponse) + 1024 @@ -592,6 +596,9 @@ func (m *MongoPump) AccumulateSet(data []interface{}) [][]interface{} { } } + if len(thisResultSet) > 0 && len(returnArray) == 0 { + returnArray = append(returnArray, thisResultSet) + } return returnArray } diff --git a/pumps/mongo_test.go b/pumps/mongo_test.go index 26f761097..cccd420cb 100644 --- a/pumps/mongo_test.go +++ b/pumps/mongo_test.go @@ -297,7 +297,7 @@ func TestMongoPump_AccumulateSet(t *testing.T) { const dataSize = 1024 totalData := dataSize * (numRecords - expectedGraphRecordSkips) - set := mPump.AccumulateSet(data) + set := mPump.AccumulateSet(data, false) recordsCount := 0 for _, setEntry := range set { From 155b05aecf43a182fb1dea790cb142be6cd69d02 Mon Sep 17 00:00:00 2001 From: Hassan Syed <91477794+hsyed-dojo@users.noreply.github.com> Date: Tue, 18 Oct 2022 13:23:58 +0100 Subject: [PATCH 007/102] TT-6799 Prometheus pump: Support to disable certain metric families from exposition. (#492) * support disabling prometheus metric families * update readme and changelog * fix language * disabling of metrics should only work for the base metrics * do not collect and log out disabled metrics * exclude base metrics entirely if they are disabled * checkpoint * update docs --- CHANGELOG.md | 3 ++- README.md | 2 ++ go.sum | 1 - pumps/prometheus.go | 50 ++++++++++++++++++++++++++-------------- pumps/prometheus_test.go | 41 ++++++++++++++++++++++++++++---- 5 files changed, 74 insertions(+), 23 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index abe521324..bf72308a8 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,8 +4,9 @@ - Initial connector which outputs all analytics into a single elasticsearch index - Enabled the ES connecter to save all output to a rolling index rather than a static index +- Add support to the Prometheus pump to exclude metric families from exposition ## v0.1 - Initial connector to replace MongoDB connector in Tyk -- Added support for uptime purging (redis/mongo only) \ No newline at end of file +- Added support for uptime purging (redis/mongo only) diff --git a/README.md b/README.md index cd156b282..9db400c0a 100644 --- a/README.md +++ b/README.md @@ -483,6 +483,7 @@ Tyk expose the following counters: And the following Histogram for latencies: - tyk_latency{type, api} +Note: base metric families can be removed by configuring the `disabled_metrics` property. #### Custom Prometheus metrics From Pump 1.6+ it's possible to add custom prometheus metrics using the `custom_metrics` configuration. 
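For illustration, a Prometheus pump `meta` fragment combining the new `disabled_metrics` option with a `custom_metrics` entry might look roughly like this (a sketch only: field names follow the struct tags in `pumps/prometheus.go`, the metric itself mirrors the env-var example added later in this series, and `tyk_http_status_per_path` is one of the base metric families from `CreateBasicMetrics`):

```json
{
  "disabled_metrics": ["tyk_http_status_per_path"],
  "custom_metrics": [
    {
      "name": "tyk_http_requests_total",
      "description": "Total of API requests",
      "metric_type": "counter",
      "labels": ["response_code", "api_name"]
    }
  ]
}
```

Metrics listed in `disabled_metrics` are trimmed from `p.allMetrics` during `initBaseMetrics`, so they are neither collected while writing records nor exposed during scrapes.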
@@ -536,6 +537,7 @@ TYK_PMP_PUMPS_PROMETHEUS_TYPE=prometheus TYK_PMP_PUMPS_PROMETHEUS_META_ADDR=localhost:9090 TYK_PMP_PUMPS_PROMETHEUS_META_PATH=/metrics TYK_PMP_PUMPS_PROMETHEUS_META_CUSTOMMETRICS=[] +TYK_PMP_PUMPS_PROMETHEUS_META_DISABLED_METRICS=[] ``` ## DogStatsD diff --git a/go.sum b/go.sum index 1d569414e..21b20d942 100644 --- a/go.sum +++ b/go.sum @@ -324,7 +324,6 @@ github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= -github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg= github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= diff --git a/pumps/prometheus.go b/pumps/prometheus.go index 4f1b61c20..dcd54b511 100644 --- a/pumps/prometheus.go +++ b/pumps/prometheus.go @@ -39,6 +39,8 @@ type PrometheusConf struct { // This will enable an experimental feature that will aggregate the histogram metrics request time values before exposing them to prometheus. // Enabling this will reduce the CPU usage of your prometheus pump but you will loose histogram precision. Experimental. AggregateObservations bool `json:"aggregate_observations" mapstructure:"aggregate_observations"` + // Metrics to exclude from exposition. Currently, excludes only the base metrics. + DisabledMetrics []string `json:"disabled_metrics" mapstructure:"disabled_metrics"` // Custom Prometheus metrics. CustomMetrics []PrometheusMetric `json:"custom_metrics" mapstructure:"custom_metrics"` } @@ -69,7 +71,7 @@ type PrometheusMetric struct { aggregatedObservations bool } -//histogramCounter is a helper struct to mantain the totalRequestTime and hits in memory +// histogramCounter is a helper struct to mantain the totalRequestTime and hits in memory type histogramCounter struct { totalRequestTime uint64 hits uint64 @@ -91,7 +93,7 @@ func (p *PrometheusPump) New() Pump { return &newPump } -//CreateBasicMetrics stores all the predefined pump metrics in allMetrics slice +// CreateBasicMetrics stores all the predefined pump metrics in allMetrics slice func (p *PrometheusPump) CreateBasicMetrics() { //counter metrics @@ -160,13 +162,7 @@ func (p *PrometheusPump) Init(conf interface{}) error { } //first we init the base metrics - for _, metric := range p.allMetrics { - metric.aggregatedObservations = p.conf.AggregateObservations - errInit := metric.InitVec() - if errInit != nil { - p.log.Error(errInit) - } - } + p.initBaseMetrics() //then we check the custom ones p.InitCustomMetrics() @@ -183,7 +179,27 @@ func (p *PrometheusPump) Init(conf interface{}) error { return nil } -//InitCustomMetrics initialise custom prometheus metrics based on p.conf.CustomMetrics and add them into p.allMetrics +func (p *PrometheusPump) initBaseMetrics() { + toDisableSet := map[string]struct{}{} + for _, metric := range p.conf.DisabledMetrics { + toDisableSet[metric] = struct{}{} + } + // exclude disabled base metrics if needed. This disables exposition entirely during scrapes. 
+ trimmedAllMetrics := make([]*PrometheusMetric, 0, len(p.allMetrics)) + for _, metric := range p.allMetrics { + if _, isDisabled := toDisableSet[metric.Name]; isDisabled { + continue + } + metric.aggregatedObservations = p.conf.AggregateObservations + if errInit := metric.InitVec(); errInit != nil { + p.log.Error(errInit) + } + trimmedAllMetrics = append(trimmedAllMetrics, metric) + } + p.allMetrics = trimmedAllMetrics +} + +// InitCustomMetrics initialise custom prometheus metrics based on p.conf.CustomMetrics and add them into p.allMetrics func (p *PrometheusPump) InitCustomMetrics() { if len(p.conf.CustomMetrics) > 0 { customMetrics := []*PrometheusMetric{} @@ -358,7 +374,7 @@ func (pm *PrometheusMetric) GetLabelsValues(decoded analytics.AnalyticsRecord) [ return values } -//Inc is going to fill counterMap and histogramMap with the data from record. +// Inc is going to fill counterMap and histogramMap with the data from record. func (pm *PrometheusMetric) Inc(values ...string) error { switch pm.MetricType { case COUNTER_TYPE: @@ -370,7 +386,7 @@ func (pm *PrometheusMetric) Inc(values ...string) error { return nil } -//Observe will fill hitogramMap with the sum of totalRequest and hits per label value if aggregate_observations is true. If aggregate_observations is set to false (default) it will execute prometheus Observe directly. +// Observe will fill hitogramMap with the sum of totalRequest and hits per label value if aggregate_observations is true. If aggregate_observations is set to false (default) it will execute prometheus Observe directly. func (pm *PrometheusMetric) Observe(requestTime int64, values ...string) error { switch pm.MetricType { case HISTOGRAM_TYPE: @@ -399,10 +415,10 @@ func (pm *PrometheusMetric) Observe(requestTime int64, values ...string) error { return nil } -//Expose executes prometheus library functions using the counter/histogram vector from the PrometheusMetric struct. -//If the PrometheusMetric is COUNTER_TYPE, it will execute prometheus client Add function to add the counters from counterMap to the labels value metric -//If the PrometheusMetric is HISTOGRAM_TYPE and aggregate_observations config is true, it will calculate the average value of the metrics in the histogramMap and execute prometheus Observe. -//If aggregate_observations is false, it won't do anything since it means that we already exposed the metric. +// Expose executes prometheus library functions using the counter/histogram vector from the PrometheusMetric struct. +// If the PrometheusMetric is COUNTER_TYPE, it will execute prometheus client Add function to add the counters from counterMap to the labels value metric +// If the PrometheusMetric is HISTOGRAM_TYPE and aggregate_observations config is true, it will calculate the average value of the metrics in the histogramMap and execute prometheus Observe. +// If aggregate_observations is false, it won't do anything since it means that we already exposed the metric. func (pm *PrometheusMetric) Expose() error { switch pm.MetricType { case COUNTER_TYPE: @@ -426,7 +442,7 @@ func (pm *PrometheusMetric) Expose() error { return nil } -//getAverageRequestTime returns the average request time of an histogramCounter dividing the sum of all the RequestTimes by the hits. +// getAverageRequestTime returns the average request time of an histogramCounter dividing the sum of all the RequestTimes by the hits. 
func (c histogramCounter) getAverageRequestTime() float64 { return float64(c.totalRequestTime / c.hits) } diff --git a/pumps/prometheus_test.go b/pumps/prometheus_test.go index 47ed40c56..024d51f65 100644 --- a/pumps/prometheus_test.go +++ b/pumps/prometheus_test.go @@ -2,14 +2,16 @@ package pumps import ( "errors" + "io" "testing" "github.com/TykTechnologies/tyk-pump/analytics" "github.com/prometheus/client_golang/prometheus" + "github.com/sirupsen/logrus" "github.com/stretchr/testify/assert" ) -func TestInitVec(t *testing.T) { +func TestPrometheusInitVec(t *testing.T) { tcs := []struct { testName string customMetric PrometheusMetric @@ -82,7 +84,7 @@ func TestInitVec(t *testing.T) { } } -func TestInitCustomMetrics(t *testing.T) { +func TestPrometheusInitCustomMetrics(t *testing.T) { tcs := []struct { testName string metrics []PrometheusMetric @@ -183,7 +185,7 @@ func TestInitCustomMetrics(t *testing.T) { } } -func TestGetLabelsValues(t *testing.T) { +func TestPrometheusGetLabelsValues(t *testing.T) { tcs := []struct { testName string customMetric PrometheusMetric @@ -539,7 +541,7 @@ func TestPrometheusCreateBasicMetrics(t *testing.T) { } -func TestEnsureLabels(t *testing.T) { +func TestPrometheusEnsureLabels(t *testing.T) { testCases := []struct { name string metricType string @@ -601,3 +603,34 @@ func TestEnsureLabels(t *testing.T) { }) } } + +func TestPrometheusDisablingMetrics(t *testing.T) { + p := &PrometheusPump{} + newPump := p.New().(*PrometheusPump) + + log := logrus.New() + log.Out = io.Discard + newPump.log = logrus.NewEntry(log) + + newPump.conf = &PrometheusConf{DisabledMetrics: []string{"tyk_http_status_per_path"}} + + newPump.initBaseMetrics() + + defer func() { + for i := range newPump.allMetrics { + if newPump.allMetrics[i].MetricType == COUNTER_TYPE { + prometheus.Unregister(newPump.allMetrics[i].counterVec) + } else if newPump.allMetrics[i].MetricType == HISTOGRAM_TYPE { + prometheus.Unregister(newPump.allMetrics[i].histogramVec) + } + } + }() + + metricMap := map[string]*PrometheusMetric{} + for _, metric := range newPump.allMetrics { + metricMap[metric.Name] = metric + } + + assert.Contains(t, metricMap, "tyk_http_status") + assert.NotContains(t, metricMap, "tyk_http_status_per_path") +} \ No newline at end of file From ff7574e405ca64ea5ca27f4416d17f0c0fb034aa Mon Sep 17 00:00:00 2001 From: Kofo Okesola Date: Tue, 25 Oct 2022 21:20:31 +0100 Subject: [PATCH 008/102] [TT-6012] mongo graph records ignore max_document_size_bytes (#504) * mongo records ignore `max_document_size_bytes` * add tests * fix breaking error * gofmt linting --- pumps/elasticsearch.go | 2 +- pumps/graph_mongo_test.go | 3 ++- pumps/mongo.go | 10 +++------- pumps/mongo_test.go | 36 ++++++++++++++++++++++++++++++++++++ pumps/prometheus_test.go | 2 +- pumps/splunk_test.go | 2 +- 6 files changed, 44 insertions(+), 11 deletions(-) diff --git a/pumps/elasticsearch.go b/pumps/elasticsearch.go index b386c9660..31bd75426 100644 --- a/pumps/elasticsearch.go +++ b/pumps/elasticsearch.go @@ -120,7 +120,7 @@ type ApiKeyTransport struct { APIKeyID string } -//RoundTrip for ApiKeyTransport auth +// RoundTrip for ApiKeyTransport auth func (t *ApiKeyTransport) RoundTrip(r *http.Request) (*http.Response, error) { auth := t.APIKeyID + ":" + t.APIKey key := base64.StdEncoding.EncodeToString([]byte(auth)) diff --git a/pumps/graph_mongo_test.go b/pumps/graph_mongo_test.go index c7ec8e172..3e3a93f8c 100644 --- a/pumps/graph_mongo_test.go +++ b/pumps/graph_mongo_test.go @@ -255,7 +255,7 @@ func 
TestGraphMongoPump_WriteData(t *testing.T) { }, }, { - name: "skip empty request response", + name: "should error on empty request response", records: []customRecord{ { rawRequest: "", @@ -275,6 +275,7 @@ func TestGraphMongoPump_WriteData(t *testing.T) { tags: []string{analytics.PredefinedTagGraphAnalytics}, }, }, + expectedError: "error generating documents", }, } diff --git a/pumps/mongo.go b/pumps/mongo.go index b1d968244..4560bfc47 100644 --- a/pumps/mongo.go +++ b/pumps/mongo.go @@ -552,21 +552,17 @@ func (m *MongoPump) AccumulateSet(data []interface{}, isForGraphRecords bool) [] } // Skip this record if it is a graph analytics record, they will be handled in a different pump - if thisItem.IsGraphRecord() != isForGraphRecords { + isGraphRecord := thisItem.IsGraphRecord() + if isGraphRecord != isForGraphRecords { continue } - if isForGraphRecords { - if thisItem.RawRequest == "" || thisItem.RawResponse == "" || thisItem.ApiSchema == "" { - continue - } - } // Add 1 KB for metadata as average sizeBytes := len(thisItem.RawRequest) + len(thisItem.RawResponse) + 1024 m.log.Debug("Size is: ", sizeBytes) - if sizeBytes > m.dbConf.MaxDocumentSizeBytes { + if sizeBytes > m.dbConf.MaxDocumentSizeBytes && !isGraphRecord { m.log.Warning("Document too large, not writing raw request and raw response!") thisItem.RawRequest = "" diff --git a/pumps/mongo_test.go b/pumps/mongo_test.go index cccd420cb..b3eea0a1c 100644 --- a/pumps/mongo_test.go +++ b/pumps/mongo_test.go @@ -2,6 +2,7 @@ package pumps import ( "context" + "encoding/base64" "strconv" "testing" @@ -338,3 +339,38 @@ func TestMongoPump_AccumulateSet(t *testing.T) { 50, )) } + +func TestMongoPump_AccumulateSetIgnoreDocSize(t *testing.T) { + bloat := base64.StdEncoding.EncodeToString(make([]byte, 2048)) + pump := newPump() + conf := defaultConf() + conf.MaxDocumentSizeBytes = 2048 + mPump, ok := pump.(*MongoPump) + assert.True(t, ok) + mPump.dbConf = &conf + mPump.log = log.WithField("prefix", mongoPrefix) + + dataSet := make([]interface{}, 100) + for i := 0; i < 100; i++ { + record := analytics.AnalyticsRecord{} + if i%2 == 0 { + record.Tags = []string{analytics.PredefinedTagGraphAnalytics} + record.RawRequest = bloat + record.RawResponse = bloat + record.ApiSchema = bloat + } + dataSet[i] = record + } + + accumulated := mPump.AccumulateSet(dataSet, true) + for _, x := range accumulated { + for _, y := range x { + rec, ok := y.(analytics.AnalyticsRecord) + assert.True(t, ok) + if rec.IsGraphRecord() { + assert.NotEmpty(t, rec.RawRequest) + assert.NotEmpty(t, rec.RawResponse) + } + } + } +} diff --git a/pumps/prometheus_test.go b/pumps/prometheus_test.go index 024d51f65..87214d55a 100644 --- a/pumps/prometheus_test.go +++ b/pumps/prometheus_test.go @@ -633,4 +633,4 @@ func TestPrometheusDisablingMetrics(t *testing.T) { assert.Contains(t, metricMap, "tyk_http_status") assert.NotContains(t, metricMap, "tyk_http_status_per_path") -} \ No newline at end of file +} diff --git a/pumps/splunk_test.go b/pumps/splunk_test.go index 6ef56e51f..22aeb000e 100644 --- a/pumps/splunk_test.go +++ b/pumps/splunk_test.go @@ -151,7 +151,7 @@ func Test_SplunkWriteDataBatch(t *testing.T) { } -//getEventBytes returns the bytes amount of the marshalled events struct +// getEventBytes returns the bytes amount of the marshalled events struct func getEventBytes(records []interface{}) int { result := 0 From 19765113c064cb7f81469c4cef5259af65cf5887 Mon Sep 17 00:00:00 2001 From: Tomas Buchaillot Date: Wed, 26 Oct 2022 18:10:38 +0200 Subject: [PATCH 009/102] TT-6890 
Prometheus custom metrics env vars (#505) * linting * rollback log level * adding example in readme * adding more test scenarios * fmting --- README.md | 2 +- pumps/prometheus.go | 71 +++++++++------- pumps/prometheus_test.go | 169 ++++++++++++++++++++++++++++++--------- 3 files changed, 172 insertions(+), 70 deletions(-) diff --git a/README.md b/README.md index 9db400c0a..75affc42c 100644 --- a/README.md +++ b/README.md @@ -536,7 +536,7 @@ The available values are: `["host","method", "path", "response_code", "api_key" TYK_PMP_PUMPS_PROMETHEUS_TYPE=prometheus TYK_PMP_PUMPS_PROMETHEUS_META_ADDR=localhost:9090 TYK_PMP_PUMPS_PROMETHEUS_META_PATH=/metrics -TYK_PMP_PUMPS_PROMETHEUS_META_CUSTOMMETRICS=[] +TYK_PMP_PUMPS_PROMETHEUS_META_CUSTOMMETRICS='[{"name":"tyk_http_requests_total","description":"Total of API requests","metric_type":"counter","labels":["response_code","api_name"]}]' TYK_PMP_PUMPS_PROMETHEUS_META_DISABLED_METRICS=[] ``` diff --git a/pumps/prometheus.go b/pumps/prometheus.go index dcd54b511..20b54b51d 100644 --- a/pumps/prometheus.go +++ b/pumps/prometheus.go @@ -2,6 +2,7 @@ package pumps import ( "context" + "encoding/json" "errors" "fmt" "net/http" @@ -42,7 +43,13 @@ type PrometheusConf struct { // Metrics to exclude from exposition. Currently, excludes only the base metrics. DisabledMetrics []string `json:"disabled_metrics" mapstructure:"disabled_metrics"` // Custom Prometheus metrics. - CustomMetrics []PrometheusMetric `json:"custom_metrics" mapstructure:"custom_metrics"` + CustomMetrics CustomMetrics `json:"custom_metrics" mapstructure:"custom_metrics"` +} + +type CustomMetrics []PrometheusMetric + +func (metrics *CustomMetrics) Set(data string) error { + return json.Unmarshal([]byte(data), &metrics) } type PrometheusMetric struct { @@ -77,11 +84,15 @@ type histogramCounter struct { hits uint64 } -const COUNTER_TYPE = "counter" -const HISTOGRAM_TYPE = "histogram" +const ( + counterType = "counter" + histogramType = "histogram" +) -var prometheusPrefix = "prometheus-pump" -var prometheusDefaultENV = PUMPS_ENV_PREFIX + "_PROMETHEUS" +var ( + prometheusPrefix = "prometheus-pump" + prometheusDefaultENV = PUMPS_ENV_PREFIX + "_PROMETHEUS" +) var buckets = []float64{1, 2, 5, 7, 10, 15, 20, 25, 30, 40, 50, 60, 70, 80, 90, 100, 200, 300, 400, 500, 1000, 2000, 5000, 10000, 30000, 60000} @@ -95,38 +106,37 @@ func (p *PrometheusPump) New() Pump { // CreateBasicMetrics stores all the predefined pump metrics in allMetrics slice func (p *PrometheusPump) CreateBasicMetrics() { - - //counter metrics + // counter metrics totalStatusMetric := &PrometheusMetric{ Name: "tyk_http_status", Help: "HTTP status codes per API", - MetricType: COUNTER_TYPE, + MetricType: counterType, Labels: []string{"code", "api"}, } pathStatusMetrics := &PrometheusMetric{ Name: "tyk_http_status_per_path", Help: "HTTP status codes per API path and method", - MetricType: COUNTER_TYPE, + MetricType: counterType, Labels: []string{"code", "api", "path", "method"}, } keyStatusMetrics := &PrometheusMetric{ Name: "tyk_http_status_per_key", Help: "HTTP status codes per API key", - MetricType: COUNTER_TYPE, + MetricType: counterType, Labels: []string{"code", "key"}, } oauthStatusMetrics := &PrometheusMetric{ Name: "tyk_http_status_per_oauth_client", Help: "HTTP status codes per oAuth client id", - MetricType: COUNTER_TYPE, + MetricType: counterType, Labels: []string{"code", "client_id"}, } - //histogram metrics + // histogram metrics totalLatencyMetrics := &PrometheusMetric{ Name: "tyk_latency", Help: "Latency added by Tyk, 
Total Latency, and upstream latency per API", - MetricType: HISTOGRAM_TYPE, + MetricType: histogramType, Buckets: buckets, Labels: []string{"type", "api"}, } @@ -152,7 +162,6 @@ func (p *PrometheusPump) Init(conf interface{}) error { } processPumpEnvVars(p, p.log, p.conf, prometheusDefaultENV) - if p.conf.Path == "" { p.conf.Path = "/metrics" } @@ -164,7 +173,7 @@ func (p *PrometheusPump) Init(conf interface{}) error { //first we init the base metrics p.initBaseMetrics() - //then we check the custom ones + // then we check the custom ones p.InitCustomMetrics() p.log.Info("Starting prometheus listener on:", p.conf.Addr) @@ -230,17 +239,17 @@ func (p *PrometheusPump) WriteData(ctx context.Context, data []interface{}) erro default: } record := item.(analytics.AnalyticsRecord) - //we loop through all the metrics avaialble. + // we loop through all the metrics available. for _, metric := range p.allMetrics { if metric.enabled { p.log.Debug("Processing metric:", metric.Name) - //we get the values for that metric required labels + // we get the values for that metric required labels values := metric.GetLabelsValues(record) switch metric.MetricType { - case COUNTER_TYPE: + case counterType: if metric.counterVec != nil { - //if the metric is a counter, we increment the counter memory map + // if the metric is a counter, we increment the counter memory map err := metric.Inc(values...) if err != nil { p.log.WithFields(logrus.Fields{ @@ -249,9 +258,9 @@ func (p *PrometheusPump) WriteData(ctx context.Context, data []interface{}) erro }).Error("error incrementing prometheus metric value:", err) } } - case HISTOGRAM_TYPE: + case histogramType: if metric.histogramVec != nil { - //if the metric is an histogram, we Observe the request time with the given values + // if the metric is an histogram, we Observe the request time with the given values err := metric.Observe(record.RequestTime, values...) if err != nil { p.log.WithFields(logrus.Fields{ @@ -267,7 +276,7 @@ func (p *PrometheusPump) WriteData(ctx context.Context, data []interface{}) erro } } - //after looping through all the analytics records, we expose the metrics to prometheus endpoint + // after looping through all the analytics records, we expose the metrics to prometheus endpoint for _, customMetric := range p.allMetrics { err := customMetric.Expose() if err != nil { @@ -287,7 +296,7 @@ func (p *PrometheusPump) WriteData(ctx context.Context, data []interface{}) erro // if the metric_type is anything else it returns an error func (pm *PrometheusMetric) InitVec() error { switch pm.MetricType { - case COUNTER_TYPE: + case counterType: pm.counterVec = prometheus.NewCounterVec( prometheus.CounterOpts{ Name: pm.Name, @@ -297,7 +306,7 @@ func (pm *PrometheusMetric) InitVec() error { ) pm.counterMap = make(map[string]uint64) prometheus.MustRegister(pm.counterVec) - case HISTOGRAM_TYPE: + case histogramType: bkts := pm.Buckets if len(bkts) == 0 { bkts = buckets @@ -325,7 +334,7 @@ func (pm *PrometheusMetric) InitVec() error { // EnsureLabels ensure the data validity and consistency of the metric labels func (pm *PrometheusMetric) ensureLabels() { // for histograms we need to be sure that type was added - if pm.MetricType == HISTOGRAM_TYPE { + if pm.MetricType == histogramType { // remove all references to `type` var i int for _, label := range pm.Labels { @@ -377,7 +386,7 @@ func (pm *PrometheusMetric) GetLabelsValues(decoded analytics.AnalyticsRecord) [ // Inc is going to fill counterMap and histogramMap with the data from record. 
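As an editorial aside, here is a minimal, self-contained sketch of the label-keying convention that Inc and Expose rely on: label values are joined with "--" into a map key while data is buffered, then split back into ordered label values at expose time. It is illustrative only, not part of the committed diff, and the variable names and values are made up.

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Buffer hits per unique label-value combination between flushes, keyed by the
	// label values joined with "--" (mirroring PrometheusMetric.Inc below).
	counterMap := map[string]uint64{}

	counterMap[strings.Join([]string{"200", "api-1"}, "--")]++
	counterMap[strings.Join([]string{"200", "api-1"}, "--")]++
	counterMap[strings.Join([]string{"500", "api-1"}, "--")]++

	// At expose time the key is split back into label values (mirroring Expose, which
	// calls counterVec.WithLabelValues(labelsValue...).Add(float64(value))).
	for key, count := range counterMap {
		labels := strings.Split(key, "--")
		fmt.Printf("labels=%v count=%d\n", labels, count)
	}
}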
func (pm *PrometheusMetric) Inc(values ...string) error { switch pm.MetricType { - case COUNTER_TYPE: + case counterType: pm.counterMap[strings.Join(values, "--")] += 1 default: return errors.New("invalid metric type:" + pm.MetricType) @@ -389,7 +398,7 @@ func (pm *PrometheusMetric) Inc(values ...string) error { // Observe will fill hitogramMap with the sum of totalRequest and hits per label value if aggregate_observations is true. If aggregate_observations is set to false (default) it will execute prometheus Observe directly. func (pm *PrometheusMetric) Observe(requestTime int64, values ...string) error { switch pm.MetricType { - case HISTOGRAM_TYPE: + case histogramType: labelValues := []string{"total"} labelValues = append(labelValues, values...) if pm.aggregatedObservations { @@ -416,19 +425,19 @@ func (pm *PrometheusMetric) Observe(requestTime int64, values ...string) error { } // Expose executes prometheus library functions using the counter/histogram vector from the PrometheusMetric struct. -// If the PrometheusMetric is COUNTER_TYPE, it will execute prometheus client Add function to add the counters from counterMap to the labels value metric -// If the PrometheusMetric is HISTOGRAM_TYPE and aggregate_observations config is true, it will calculate the average value of the metrics in the histogramMap and execute prometheus Observe. +// If the PrometheusMetric is counterType, it will execute prometheus client Add function to add the counters from counterMap to the labels value metric +// If the PrometheusMetric is histogramType and aggregate_observations config is true, it will calculate the average value of the metrics in the histogramMap and execute prometheus Observe. // If aggregate_observations is false, it won't do anything since it means that we already exposed the metric. 
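As an editorial aside, a minimal runnable sketch (not part of the committed diff) of what "aggregated observations" means here: per label set the pump buffers the sum of request times and the hit count, then at expose time emits a single averaged observation, matching the integer division in histogramCounter.getAverageRequestTime. The local struct is a simplified stand-in and the numbers are made up.

package main

import "fmt"

// simplified local stand-in for the pump's unexported histogramCounter
type histogramCounter struct {
	totalRequestTime uint64 // sum of request times seen since the last flush
	hits             uint64 // number of requests seen since the last flush
}

func main() {
	// Keys mirror Observe: "total" plus the label values, joined with "--".
	histogramMap := map[string]histogramCounter{}

	for _, requestTime := range []uint64{10, 20, 60} {
		c := histogramMap["total--api-1"]
		c.totalRequestTime += requestTime
		c.hits++
		histogramMap["total--api-1"] = c
	}

	// At expose time a single Observe call per label set carries the average,
	// i.e. float64(totalRequestTime / hits) with integer division.
	c := histogramMap["total--api-1"]
	fmt.Println("averaged observation:", float64(c.totalRequestTime/c.hits)) // 30
}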
func (pm *PrometheusMetric) Expose() error { switch pm.MetricType { - case COUNTER_TYPE: + case counterType: for key, value := range pm.counterMap { labelsValue := strings.Split(key, "--") pm.counterVec.WithLabelValues(labelsValue...).Add(float64(value)) } pm.counterMap = make(map[string]uint64) - case HISTOGRAM_TYPE: + case histogramType: if pm.aggregatedObservations { for key, value := range pm.histogramMap { labelsValue := strings.Split(key, "--") diff --git a/pumps/prometheus_test.go b/pumps/prometheus_test.go index 87214d55a..4d89dc2a0 100644 --- a/pumps/prometheus_test.go +++ b/pumps/prometheus_test.go @@ -3,6 +3,7 @@ package pumps import ( "errors" "io" + "os" "testing" "github.com/TykTechnologies/tyk-pump/analytics" @@ -22,7 +23,7 @@ func TestPrometheusInitVec(t *testing.T) { testName: "Counter metric", customMetric: PrometheusMetric{ Name: "testCounterMetric", - MetricType: COUNTER_TYPE, + MetricType: counterType, Labels: []string{"response_code", "api_id"}, }, expectedErr: nil, @@ -32,7 +33,7 @@ func TestPrometheusInitVec(t *testing.T) { testName: "Histogram metric", customMetric: PrometheusMetric{ Name: "testHistogramMetric", - MetricType: HISTOGRAM_TYPE, + MetricType: histogramType, Labels: []string{"type", "api_id"}, }, expectedErr: nil, @@ -42,7 +43,7 @@ func TestPrometheusInitVec(t *testing.T) { testName: "Histogram metric without type label set", customMetric: PrometheusMetric{ Name: "testHistogramMetricWithoutTypeSet", - MetricType: HISTOGRAM_TYPE, + MetricType: histogramType, Labels: []string{"api_id"}, }, expectedErr: nil, @@ -70,16 +71,15 @@ func TestPrometheusInitVec(t *testing.T) { assert.Equal(t, tc.isEnabled, tc.isEnabled) - if tc.customMetric.MetricType == COUNTER_TYPE { + if tc.customMetric.MetricType == counterType { assert.NotNil(t, tc.customMetric.counterVec) assert.Equal(t, tc.isEnabled, prometheus.Unregister(tc.customMetric.counterVec)) - } else if tc.customMetric.MetricType == HISTOGRAM_TYPE { + } else if tc.customMetric.MetricType == histogramType { assert.NotNil(t, tc.customMetric.histogramVec) assert.Equal(t, tc.isEnabled, prometheus.Unregister(tc.customMetric.histogramVec)) assert.Equal(t, tc.customMetric.Labels[0], "type") } - }) } } @@ -100,7 +100,7 @@ func TestPrometheusInitCustomMetrics(t *testing.T) { metrics: []PrometheusMetric{ { Name: "test", - MetricType: COUNTER_TYPE, + MetricType: counterType, Labels: []string{"api_name"}, }, }, @@ -111,12 +111,12 @@ func TestPrometheusInitCustomMetrics(t *testing.T) { metrics: []PrometheusMetric{ { Name: "test", - MetricType: COUNTER_TYPE, + MetricType: counterType, Labels: []string{"api_name"}, }, { Name: "other_test", - MetricType: COUNTER_TYPE, + MetricType: counterType, Labels: []string{"api_name", "api_key"}, }, }, @@ -127,17 +127,17 @@ func TestPrometheusInitCustomMetrics(t *testing.T) { metrics: []PrometheusMetric{ { Name: "test", - MetricType: COUNTER_TYPE, + MetricType: counterType, Labels: []string{"api_name"}, }, { Name: "other_test", - MetricType: COUNTER_TYPE, + MetricType: counterType, Labels: []string{"api_name", "api_key"}, }, { Name: "histogram_test", - MetricType: HISTOGRAM_TYPE, + MetricType: histogramType, Labels: []string{"api_name", "api_key"}, }, }, @@ -153,7 +153,7 @@ func TestPrometheusInitCustomMetrics(t *testing.T) { }, { Name: "other_test", - MetricType: COUNTER_TYPE, + MetricType: counterType, Labels: []string{"api_name", "api_key"}, }, }, @@ -169,18 +169,114 @@ func TestPrometheusInitCustomMetrics(t *testing.T) { p.conf.CustomMetrics = tc.metrics p.InitCustomMetrics() - //this 
function do the unregistering for the metrics in the prometheus lib. + // this function do the unregistering for the metrics in the prometheus lib. defer func() { for i := range tc.metrics { - if tc.metrics[i].MetricType == COUNTER_TYPE { + if tc.metrics[i].MetricType == counterType { prometheus.Unregister(tc.metrics[i].counterVec) - } else if tc.metrics[i].MetricType == HISTOGRAM_TYPE { + } else if tc.metrics[i].MetricType == histogramType { prometheus.Unregister(tc.metrics[i].histogramVec) } } }() assert.Equal(t, tc.expectedAllMetricsLen, len(p.allMetrics)) + }) + } +} +func TestInitCustomMetricsEnv(t *testing.T) { + tcs := []struct { + testName string + envKey string + envValue string + envPrefix string + expectedMetrics CustomMetrics + }{ + { + testName: "valid custom - coutner metric", + envPrefix: "TYK_PMP_PUMPS_PROMETHEUS_META", + envKey: "TYK_PMP_PUMPS_PROMETHEUS_META_CUSTOMMETRICS", + envValue: `[{"name":"tyk_http_requests_total","help":"Total of API requests","metric_type":"counter","labels":["response_code","api_name"]}]`, + expectedMetrics: CustomMetrics{ + PrometheusMetric{ + Name: "tyk_http_requests_total", + Help: "Total of API requests", + MetricType: counterType, + Labels: []string{"response_code", "api_name"}, + }, + }, + }, + { + testName: "valid customs - counter metric", + envPrefix: "TYK_PMP_PUMPS_PROMETHEUS_META", + envKey: "TYK_PMP_PUMPS_PROMETHEUS_META_CUSTOMMETRICS", + envValue: `[{"name":"tyk_http_requests_total","help":"Total of API requests","metric_type":"counter","labels":["response_code","api_name"]},{"name":"tyk_http_requests_total_two","help":"Total Two of API requests","metric_type":"counter","labels":["response_code","api_name"]}]`, + expectedMetrics: CustomMetrics{ + PrometheusMetric{ + Name: "tyk_http_requests_total", + Help: "Total of API requests", + MetricType: counterType, + Labels: []string{"response_code", "api_name"}, + }, + PrometheusMetric{ + Name: "tyk_http_requests_total_two", + Help: "Total Two of API requests", + MetricType: counterType, + Labels: []string{"response_code", "api_name"}, + }, + }, + }, + { + testName: "valid customs - histogram metric", + envPrefix: "TYK_PMP_PUMPS_PROMETHEUS_META", + envKey: "TYK_PMP_PUMPS_PROMETHEUS_META_CUSTOMMETRICS", + envValue: `[{"name":"tyk_http_requests_total","help":"Total of API requests","metric_type":"histogram","buckets":[100,200],"labels":["response_code","api_name"]}]`, + expectedMetrics: CustomMetrics{ + PrometheusMetric{ + Name: "tyk_http_requests_total", + Help: "Total of API requests", + MetricType: histogramType, + Buckets: []float64{100, 200}, + Labels: []string{"response_code", "api_name"}, + }, + }, + }, + { + testName: "invalid custom metric format", + envPrefix: "TYK_PMP_PUMPS_PROMETHEUS_META", + envKey: "TYK_PMP_PUMPS_PROMETHEUS_META_CUSTOMMETRICS", + envValue: `["name":"tyk_http_requests_total","help":"Total of API requests","metric_type":"histogram","buckets":[100,200],"labels":["response_code","api_name"]]`, + expectedMetrics: CustomMetrics(nil), + }, + { + testName: "invalid custom metric input", + envPrefix: "TYK_PMP_PUMPS_PROMETHEUS_META", + envKey: "TYK_PMP_PUMPS_PROMETHEUS_META_CUSTOMMETRICS", + envValue: `invalid-input`, + expectedMetrics: CustomMetrics(nil), + }, + { + testName: "empty custom metric input", + envPrefix: "TYK_PMP_PUMPS_PROMETHEUS_META", + envKey: "TYK_PMP_PUMPS_PROMETHEUS_META_CUSTOMMETRICS", + envValue: ``, + expectedMetrics: CustomMetrics(nil), + }, + } + for _, tc := range tcs { + t.Run(tc.testName, func(t *testing.T) { + err := os.Setenv(tc.envKey, 
tc.envValue) + assert.Nil(t, err) + defer os.Unsetenv(tc.envKey) + + pmp := &PrometheusPump{} + + pmp.log = log.WithField("prefix", prometheusPrefix) + pmp.conf = &PrometheusConf{} + pmp.conf.EnvPrefix = tc.envPrefix + processPumpEnvVars(pmp, pmp.log, pmp.conf, prometheusDefaultENV) + + assert.Equal(t, tc.expectedMetrics, pmp.conf.CustomMetrics) }) } } @@ -196,7 +292,7 @@ func TestPrometheusGetLabelsValues(t *testing.T) { testName: "tree valid labels", customMetric: PrometheusMetric{ Name: "testCounterMetric", - MetricType: COUNTER_TYPE, + MetricType: counterType, Labels: []string{"response_code", "api_id", "api_key"}, }, record: analytics.AnalyticsRecord{ @@ -210,7 +306,7 @@ func TestPrometheusGetLabelsValues(t *testing.T) { testName: "two valid labels - one wrong", customMetric: PrometheusMetric{ Name: "testCounterMetric", - MetricType: COUNTER_TYPE, + MetricType: counterType, Labels: []string{"host", "method", "randomLabel"}, }, record: analytics.AnalyticsRecord{ @@ -226,7 +322,7 @@ func TestPrometheusGetLabelsValues(t *testing.T) { testName: "situational labels names ", customMetric: PrometheusMetric{ Name: "testCounterMetric", - MetricType: COUNTER_TYPE, + MetricType: counterType, Labels: []string{"code", "api", "key"}, }, record: analytics.AnalyticsRecord{ @@ -249,7 +345,6 @@ func TestPrometheusGetLabelsValues(t *testing.T) { } func TestPrometheusCounterMetric(t *testing.T) { - tcs := []struct { testName string @@ -263,7 +358,7 @@ func TestPrometheusCounterMetric(t *testing.T) { metric: &PrometheusMetric{ Name: "tyk_http_status", Help: "HTTP status codes per API", - MetricType: COUNTER_TYPE, + MetricType: counterType, Labels: []string{"code", "api"}, }, analyticsRecords: []analytics.AnalyticsRecord{ @@ -284,7 +379,7 @@ func TestPrometheusCounterMetric(t *testing.T) { metric: &PrometheusMetric{ Name: "tyk_http_status_per_path", Help: "HTTP status codes per API path and method", - MetricType: COUNTER_TYPE, + MetricType: counterType, Labels: []string{"code", "api", "path", "method"}, }, analyticsRecords: []analytics.AnalyticsRecord{ @@ -309,7 +404,7 @@ func TestPrometheusCounterMetric(t *testing.T) { metric: &PrometheusMetric{ Name: "tyk_http_status_per_key", Help: "HTTP status codes per API key", - MetricType: COUNTER_TYPE, + MetricType: counterType, Labels: []string{"code", "key"}, }, analyticsRecords: []analytics.AnalyticsRecord{ @@ -331,7 +426,7 @@ func TestPrometheusCounterMetric(t *testing.T) { metric: &PrometheusMetric{ Name: "tyk_http_status_per_oauth_client", Help: "HTTP status codes per oAuth client id", - MetricType: COUNTER_TYPE, + MetricType: counterType, Labels: []string{"code", "client_id"}, }, analyticsRecords: []analytics.AnalyticsRecord{ @@ -353,7 +448,7 @@ func TestPrometheusCounterMetric(t *testing.T) { metric: &PrometheusMetric{ Name: "tyk_http_status_per_api_key_alias", Help: "HTTP status codes per api name and key alias", - MetricType: COUNTER_TYPE, + MetricType: counterType, Labels: []string{"code", "api", "alias"}, }, analyticsRecords: []analytics.AnalyticsRecord{ @@ -398,7 +493,6 @@ func TestPrometheusCounterMetric(t *testing.T) { } func TestPrometheusHistogramMetric(t *testing.T) { - tcs := []struct { testName string @@ -413,7 +507,7 @@ func TestPrometheusHistogramMetric(t *testing.T) { metric: &PrometheusMetric{ Name: "tyk_latency_per_api", Help: "Latency added by Tyk, Total Latency, and upstream latency per API", - MetricType: HISTOGRAM_TYPE, + MetricType: histogramType, Buckets: buckets, Labels: []string{"type", "api"}, aggregatedObservations: true, @@ 
-439,7 +533,7 @@ func TestPrometheusHistogramMetric(t *testing.T) { metric: &PrometheusMetric{ Name: "tyk_latency_per_api_2", Help: "Latency added by Tyk, Total Latency, and upstream latency per API", - MetricType: HISTOGRAM_TYPE, + MetricType: histogramType, Buckets: buckets, Labels: []string{"type", "api"}, aggregatedObservations: false, @@ -459,7 +553,7 @@ func TestPrometheusHistogramMetric(t *testing.T) { metric: &PrometheusMetric{ Name: "tyk_latency_per_api_method_path", Help: "Latency added by Tyk, Total Latency, and upstream latency per API_ID, Method and Path", - MetricType: HISTOGRAM_TYPE, + MetricType: histogramType, Buckets: buckets, Labels: []string{"type", "api_id", "method", "path"}, aggregatedObservations: true, @@ -536,9 +630,8 @@ func TestPrometheusCreateBasicMetrics(t *testing.T) { assert.EqualValues(t, actualMetricsNames, []string{"tyk_http_status", "tyk_http_status_per_path", "tyk_http_status_per_key", "tyk_http_status_per_oauth_client", "tyk_latency"}) - assert.Equal(t, 4, actualMetricTypeCounter[COUNTER_TYPE]) - assert.Equal(t, 1, actualMetricTypeCounter[HISTOGRAM_TYPE]) - + assert.Equal(t, 4, actualMetricTypeCounter[counterType]) + assert.Equal(t, 1, actualMetricTypeCounter[histogramType]) } func TestPrometheusEnsureLabels(t *testing.T) { @@ -551,25 +644,25 @@ func TestPrometheusEnsureLabels(t *testing.T) { { name: "histogram type, type label should be added if not exist", labels: []string{"response_code", "api_name", "method", "api_key", "alias", "path"}, - metricType: HISTOGRAM_TYPE, + metricType: histogramType, typeLabelShouldExist: true, }, { name: "counter type, type label should not be added", labels: []string{"response_code", "api_name", "method", "api_key", "alias", "path"}, - metricType: COUNTER_TYPE, + metricType: counterType, typeLabelShouldExist: false, }, { name: "histogram type, type label should not be repeated and in the 1st position", labels: []string{"type", "response_code", "api_name", "method", "api_key", "alias", "path"}, - metricType: HISTOGRAM_TYPE, + metricType: histogramType, typeLabelShouldExist: true, }, { name: "histogram type, type label should not be repeated (even if user repeated it), and always in the 1st position", labels: []string{"response_code", "api_name", "type", "method", "api_key", "alias", "path", "type"}, - metricType: HISTOGRAM_TYPE, + metricType: histogramType, typeLabelShouldExist: true, }, } @@ -618,9 +711,9 @@ func TestPrometheusDisablingMetrics(t *testing.T) { defer func() { for i := range newPump.allMetrics { - if newPump.allMetrics[i].MetricType == COUNTER_TYPE { + if newPump.allMetrics[i].MetricType == counterType { prometheus.Unregister(newPump.allMetrics[i].counterVec) - } else if newPump.allMetrics[i].MetricType == HISTOGRAM_TYPE { + } else if newPump.allMetrics[i].MetricType == histogramType { prometheus.Unregister(newPump.allMetrics[i].histogramVec) } } From d93d7794a35fe23acb53212ed32443f460183b0f Mon Sep 17 00:00:00 2001 From: Sredny M Date: Thu, 27 Oct 2022 12:39:16 -0500 Subject: [PATCH 010/102] ignore the field ApiSchema in SQL (#506) * ignore ApiSchema on sql migration --- analytics/analytics.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/analytics/analytics.go b/analytics/analytics.go index 7edbd29f3..d4e277cfb 100644 --- a/analytics/analytics.go +++ b/analytics/analytics.go @@ -69,7 +69,7 @@ type AnalyticsRecord struct { Alias string `json:"alias"` TrackPath bool `json:"track_path" gorm:"column:trackpath"` ExpireAt time.Time `bson:"expireAt" json:"expireAt"` - ApiSchema string 
`json:"api_schema" bson:"-" gorm:"-"` + ApiSchema string `json:"api_schema" bson:"-" gorm:"-:all"` } func (a *AnalyticsRecord) TableName() string { From a4631e82c276f2911eb5d2a397227cd7d6bd4c7c Mon Sep 17 00:00:00 2001 From: Tomas Buchaillot Date: Wed, 2 Nov 2022 12:58:13 +0100 Subject: [PATCH 011/102] adding prometheus default env var postfix (#509) --- pumps/prometheus.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pumps/prometheus.go b/pumps/prometheus.go index 20b54b51d..3e140b7e8 100644 --- a/pumps/prometheus.go +++ b/pumps/prometheus.go @@ -91,7 +91,7 @@ const ( var ( prometheusPrefix = "prometheus-pump" - prometheusDefaultENV = PUMPS_ENV_PREFIX + "_PROMETHEUS" + prometheusDefaultENV = PUMPS_ENV_PREFIX + "_PROMETHEUS" + PUMPS_ENV_META_PREFIX ) var buckets = []float64{1, 2, 5, 7, 10, 15, 20, 25, 30, 40, 50, 60, 70, 80, 90, 100, 200, 300, 400, 500, 1000, 2000, 5000, 10000, 30000, 60000} From 84f385566923cf6d95735629475d5ca62049f869 Mon Sep 17 00:00:00 2001 From: Matias <83959431+mativm02@users.noreply.github.com> Date: Wed, 2 Nov 2022 09:02:01 -0300 Subject: [PATCH 012/102] TT-3067 - Add ability to use ssl_insecure_skip_verify with Elasticsearch Pump (#502) * adding SSL fields to ElasticsearchConf struct * refactoring code * running go fmt * fixing linter error * adding unit tests * adding documentation * applying suggestions * converting function to method * linting * adding more unit tests --- pumps/elasticsearch.go | 49 ++++++++ pumps/elasticsearch_test.go | 216 ++++++++++++++++++++++++++++++++++++ 2 files changed, 265 insertions(+) create mode 100644 pumps/elasticsearch_test.go diff --git a/pumps/elasticsearch.go b/pumps/elasticsearch.go index 31bd75426..8132b6899 100644 --- a/pumps/elasticsearch.go +++ b/pumps/elasticsearch.go @@ -2,6 +2,7 @@ package pumps import ( "context" + "crypto/tls" "encoding/base64" "errors" "fmt" @@ -71,6 +72,14 @@ type ElasticsearchConf struct { Username string `json:"auth_basic_username" mapstructure:"auth_basic_username"` // Basic auth password. It's send to ES in the Authorization header as username:password encoded in base64. Password string `json:"auth_basic_password" mapstructure:"auth_basic_password"` + // Enables SSL connection. + UseSSL bool `json:"use_ssl" mapstructure:"use_ssl"` + // Controls whether the pump client verifies the Elastic Search server's certificate chain and hostname. + SSLInsecureSkipVerify bool `json:"ssl_insecure_skip_verify" mapstructure:"ssl_insecure_skip_verify"` + // Can be used to set custom certificate file for authentication with Elastic Search. + SSLCertFile string `json:"ssl_cert_file" mapstructure:"ssl_cert_file"` + // Can be used to set custom key file for authentication with Elastic Search. 
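As an editorial aside, a short hedged sketch of how the new SSL options are intended to combine. It is not part of the committed diff, it assumes the GetTLSConfig helper introduced further down in this patch, and the certificate paths are placeholders.

package pumps

func exampleElasticsearchTLS() error {
	pump := ElasticsearchPump{
		esConf: &ElasticsearchConf{
			UseSSL:                true,
			SSLInsecureSkipVerify: false,              // keep verifying the server's certificate chain and hostname
			SSLCertFile:           "/path/client.crt", // placeholder: set both cert and key for mTLS, or neither
			SSLKeyFile:            "/path/client.key", // placeholder
		},
	}

	// GetTLSConfig returns an error if only one of SSLCertFile / SSLKeyFile is set.
	tlsConf, err := pump.GetTLSConfig()
	if err != nil {
		return err
	}
	_ = tlsConf // would be wired into the http.Client transport used by the Elasticsearch operator when UseSSL is true
	return nil
}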
+ SSLKeyFile string `json:"ssl_key_file" mapstructure:"ssl_key_file"` } type ElasticsearchBulkConfig struct { @@ -143,6 +152,15 @@ func (e *ElasticsearchPump) getOperator() (ElasticsearchOperator, error) { httpClient = &http.Client{Transport: &ApiKeyTransport{APIKey: conf.AuthAPIKey, APIKeyID: conf.AuthAPIKeyID}} } + if conf.UseSSL { + tlsConf, err := e.GetTLSConfig() + if err != nil { + e.log.WithError(err).Error("Failed to get TLS config") + return nil, err + } + httpClient = &http.Client{Transport: &http.Transport{TLSClientConfig: tlsConf}} + } + switch conf.Version { case "3": op := new(Elasticsearch3Operator) @@ -361,6 +379,37 @@ func (e *ElasticsearchPump) Init(config interface{}) error { return nil } +// GetTLSConfig sets the TLS config for the pump +func (e *ElasticsearchPump) GetTLSConfig() (*tls.Config, error) { + var tlsConfig *tls.Config + // If the user has not specified a CA file nor a key file, we'll use a tls config with no certs + if e.esConf.SSLCertFile == "" && e.esConf.SSLKeyFile == "" { + // #nosec G402 + tlsConfig = &tls.Config{ + InsecureSkipVerify: e.esConf.SSLInsecureSkipVerify, + } + return tlsConfig, nil + } + + // If the user has specified both a SSL cert file and a key file, we'll use them to create a tls config + if e.esConf.SSLCertFile != "" && e.esConf.SSLKeyFile != "" { + cert, err := tls.LoadX509KeyPair(e.esConf.SSLCertFile, e.esConf.SSLKeyFile) + if err != nil { + return tlsConfig, err + } + // #nosec G402 + tlsConfig = &tls.Config{ + Certificates: []tls.Certificate{cert}, + InsecureSkipVerify: e.esConf.SSLInsecureSkipVerify, + } + return tlsConfig, nil + } + + // If the user has specified a SSL cert file or a key file, but not both, we'll return an error + err := errors.New("only one of ssl_cert_file and ssl_cert_key configuration option is setted, you should set both to enable mTLS") + return tlsConfig, err +} + func (e *ElasticsearchPump) connect() { var err error diff --git a/pumps/elasticsearch_test.go b/pumps/elasticsearch_test.go new file mode 100644 index 000000000..be62c715c --- /dev/null +++ b/pumps/elasticsearch_test.go @@ -0,0 +1,216 @@ +package pumps + +import ( + "bytes" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/rsa" + "crypto/tls" + "crypto/x509" + "crypto/x509/pkix" + "encoding/pem" + "fmt" + "math/big" + "os" + "reflect" + "testing" + "time" +) + +func Test_getTLSConfig(t *testing.T) { + certFile, keyFile, err := createSelfSignedCertificate() + if err != nil { + log.Fatal(err) + } + defer os.Remove(certFile.Name()) + defer os.Remove(keyFile.Name()) + + tests := []struct { + args *ElasticsearchConf + want *tls.Config + name string + wantErr bool + }{ + { + name: "SSLCertFile, SSLKeyfile are set and InsecureSkipVerify = true", + args: &ElasticsearchConf{ + SSLCertFile: certFile.Name(), + SSLKeyFile: keyFile.Name(), + SSLInsecureSkipVerify: true, + }, + // #nosec G402 + want: &tls.Config{ + Certificates: getCertificate(certFile.Name(), keyFile.Name()), + InsecureSkipVerify: true, + }, + wantErr: false, + }, + { + // No error expected. 
It should fail when sending data to ES, because we're using a self-signed certificate + // and InsecureSkipVerify is false + name: "SSLCertFile, SSLKeyfile are set and InsecureSkipVerify = false", + args: &ElasticsearchConf{ + SSLCertFile: certFile.Name(), + SSLKeyFile: keyFile.Name(), + SSLInsecureSkipVerify: true, + }, + // #nosec G402 + want: &tls.Config{ + Certificates: getCertificate(certFile.Name(), keyFile.Name()), + InsecureSkipVerify: true, + }, + wantErr: false, + }, + { + name: "SSLKeyFile not set -> error expected because CertFile is set", + args: &ElasticsearchConf{ + SSLCertFile: certFile.Name(), + SSLKeyFile: "", + SSLInsecureSkipVerify: true, + }, + want: nil, + wantErr: true, + }, + { + name: "CertFile not set -> error expected because KeyFile is set", + args: &ElasticsearchConf{ + SSLCertFile: "", + SSLKeyFile: keyFile.Name(), + SSLInsecureSkipVerify: true, + }, + want: nil, + wantErr: true, + }, + { + name: "CertFile and KeyFile not set -> no error expected. It must return a tls.Config with InsecureSkipVerify = true", + args: &ElasticsearchConf{ + SSLCertFile: "", + SSLKeyFile: "", + SSLInsecureSkipVerify: true, + }, + // #nosec G402 + want: &tls.Config{ + InsecureSkipVerify: true, + }, + wantErr: false, + }, + { + name: "Invalid CertFile -> error expected.", + args: &ElasticsearchConf{ + SSLCertFile: "invalid.cert", + SSLKeyFile: "invalid.key", + SSLInsecureSkipVerify: true, + }, + // #nosec G402 + want: nil, + wantErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + pump := ElasticsearchPump{ + esConf: tt.args, + } + got, err := pump.GetTLSConfig() + if (err != nil) != tt.wantErr { + t.Errorf("GetTLSConfig() error = %v, wantErr %v", err, tt.wantErr) + return + } + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("GetTLSConfig() = %v, want %v", got, tt.want) + } + }) + } +} + +func createSelfSignedCertificate() (*os.File, *os.File, error) { + priv, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + if err != nil { + return nil, nil, err + } + template := x509.Certificate{ + SerialNumber: big.NewInt(1), + Subject: pkix.Name{ + Organization: []string{"Testing Co"}, + }, + NotBefore: time.Now(), + NotAfter: time.Now().Add(time.Hour * 24 * 180), + + KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, + BasicConstraintsValid: true, + } + + derBytes, err := x509.CreateCertificate(rand.Reader, &template, &template, publicKey(priv), priv) + if err != nil { + return nil, nil, err + } + out := &bytes.Buffer{} + err = pem.Encode(out, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes}) + if err != nil { + return nil, nil, err + } + certFile, err := os.CreateTemp("", "test.*.crt") + if err != nil { + return nil, nil, err + } + _, err = certFile.Write(out.Bytes()) + if err != nil { + return nil, nil, err + } + + out.Reset() + block, err := pemBlockForKey(priv) + if err != nil { + return nil, nil, err + } + err = pem.Encode(out, block) + if err != nil { + return nil, nil, err + } + keyFile, err := os.CreateTemp("", "test.*.key") + if err != nil { + return nil, nil, err + } + _, err = keyFile.Write(out.Bytes()) + if err != nil { + return nil, nil, err + } + return certFile, keyFile, nil +} + +func publicKey(priv interface{}) interface{} { + switch k := priv.(type) { + case *rsa.PrivateKey: + return &k.PublicKey + case *ecdsa.PrivateKey: + return &k.PublicKey + default: + return nil + } +} + +func pemBlockForKey(priv interface{}) (*pem.Block, error) { + switch k := 
priv.(type) { + case *rsa.PrivateKey: + return &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(k)}, nil + case *ecdsa.PrivateKey: + b, err := x509.MarshalECPrivateKey(k) + if err != nil { + return nil, fmt.Errorf("Unable to marshal ECDSA private key: %v", err) + } + return &pem.Block{Type: "EC PRIVATE KEY", Bytes: b}, nil + default: + return nil, fmt.Errorf("unknown private key type") + } +} + +func getCertificate(certFile, keyFile string) []tls.Certificate { + cert, err := tls.LoadX509KeyPair(certFile, keyFile) + if err != nil { + log.Fatal(err) + } + + return []tls.Certificate{cert} +} From d13b62eb0160003896880dffee93e246ec92e36e Mon Sep 17 00:00:00 2001 From: Matias <83959431+mativm02@users.noreply.github.com> Date: Wed, 2 Nov 2022 14:10:58 -0300 Subject: [PATCH 013/102] [TT-506] - Self-Healing when hitting 16mb + making aggregation per time setting configurable (#486) * allowing multiple storedAnalyticsPerMinute options * handling max size error * refactor: removing unnecessary if sentence * refactoring changes * adding unit tests * refactoring code based on golangci-lint * making new conf compatible with old confs * linter refactoring * applying reviews * applying suggested changes * mutex + DocDB support * Revert "mutex + DocDB support" This reverts commit c317f759c8f21ac7d435bb20d90e192d7b5a7dd6. * mutex + DocDB support * fixing linter errors * fixing linter error * adding unit tests * fixing linter error * adding enable_aggregate_self_healing to readme * adding documentation * reverting changes in external file * deleting empty file * modifying readme * adding unit tests * linting unit tests * WriteData unit tests * increasing test coverage * linting --- README.md | 5 + analytics/aggregate.go | 63 +++++- analytics/aggregate_test.go | 45 +++- pumps/hybrid.go | 27 ++- pumps/mongo_aggregate.go | 97 +++++++- pumps/mongo_aggregate_test.go | 404 +++++++++++++++++++++++++++++++++- pumps/sql_aggregate.go | 12 +- 7 files changed, 626 insertions(+), 27 deletions(-) diff --git a/README.md b/README.md index 75affc42c..8695b4ba0 100644 --- a/README.md +++ b/README.md @@ -320,6 +320,8 @@ Available Mongo instances are: Standard Mongo, DocumentDB (AWS), CosmosDB (Azure "mongo_url": "mongodb://username:password@{hostname:port},{hostname:port}/{db_name}", "use_mixed_collection": true, "store_analytics_per_minute": false, + "aggregation_time": 50, + "enable_aggregate_self_healing": true, "track_all_paths": false }, "mongo-pump-selective": { @@ -343,6 +345,9 @@ TYK_PMP_PUMPS_MONGO_META_MAXDOCUMENTSIZEBYTES=20112 TYK_PMP_PUMPS_MONGOAGG_TYPE=mongo-pump-aggregate TYK_PMP_PUMPS_MONGOAGG_META_USEMIXEDCOLLECTION=true +TYK_PMP_PUMPS_MONGOAGG_META_STOREANALYTICSPERMINUTE=false +TYK_PMP_PUMPS_MONGOAGG_META_AGGREGATIONTIME=50 +TYK_PMP_PUMPS_MONGOAGG_META_ENABLESELFHEALING=true ``` ###### Mongo Graph Pump diff --git a/analytics/aggregate.go b/analytics/aggregate.go index a2f82e37d..97dfbb353 100644 --- a/analytics/aggregate.go +++ b/analytics/aggregate.go @@ -6,6 +6,7 @@ import ( "fmt" "strconv" "strings" + "sync" "time" "github.com/fatih/structs" @@ -20,6 +21,12 @@ const ( AggregateSQLTable = "tyk_aggregated" ) +// lastDocumentTimestamp is a map to store the last document timestamps of different Mongo Aggregators +var lastDocumentTimestamp = make(map[string]time.Time) + +// mutex is used to prevent concurrent writes to the same key +var mutex sync.RWMutex + type ErrorData struct { Code string Count int @@ -537,7 +544,7 @@ func replaceUnsupportedChars(path string) string { } // 
AggregateData calculates aggregated data, returns map orgID => aggregated analytics data -func AggregateData(data []interface{}, trackAllPaths bool, ignoreTagPrefixList []string, storeAnalyticPerMinute, ignoreGraphData bool) map[string]AnalyticsRecordAggregate { +func AggregateData(data []interface{}, trackAllPaths bool, ignoreTagPrefixList []string, dbIdentifier string, aggregationTime int, ignoreGraphData bool) map[string]AnalyticsRecordAggregate { analyticsPerOrg := make(map[string]AnalyticsRecordAggregate) for _, v := range data { thisV := v.(AnalyticsRecord) @@ -559,11 +566,7 @@ func AggregateData(data []interface{}, trackAllPaths bool, ignoreTagPrefixList [ // Set the hourly timestamp & expiry asTime := thisV.TimeStamp - if storeAnalyticPerMinute { - thisAggregate.TimeStamp = time.Date(asTime.Year(), asTime.Month(), asTime.Day(), asTime.Hour(), asTime.Minute(), 0, 0, asTime.Location()) - } else { - thisAggregate.TimeStamp = time.Date(asTime.Year(), asTime.Month(), asTime.Day(), asTime.Hour(), 0, 0, 0, asTime.Location()) - } + thisAggregate.TimeStamp = setAggregateTimestamp(dbIdentifier, asTime, aggregationTime) thisAggregate.ExpireAt = thisV.ExpireAt thisAggregate.TimeID.Year = asTime.Year() thisAggregate.TimeID.Month = int(asTime.Month()) @@ -844,8 +847,50 @@ func AggregateData(data []interface{}, trackAllPaths bool, ignoreTagPrefixList [ func TrimTag(thisTag string) string { trimmedTag := strings.TrimSpace(thisTag) - if strings.Contains(trimmedTag, ".") { - trimmedTag = strings.Replace(trimmedTag, ".", "", -1) - } + trimmedTag = strings.ReplaceAll(trimmedTag, ".", "") return trimmedTag } + +// SetlastTimestampAgggregateRecord sets the last timestamp for the aggregate record +func SetlastTimestampAgggregateRecord(id string, date time.Time) { + mutex.Lock() + defer mutex.Unlock() + lastDocumentTimestamp[id] = date +} + +// getLastDocumentTimestamp gets the last timestamp for the aggregate record +func getLastDocumentTimestamp(id string) (time.Time, bool) { + mutex.RLock() + defer mutex.RUnlock() + ts, ok := lastDocumentTimestamp[id] + return ts, ok +} + +func setAggregateTimestamp(dbIdentifier string, asTime time.Time, aggregationTime int) time.Time { + // if aggregationTime is set to 60, use asTime.Hour() and group every record by hour + if aggregationTime == 60 { + return time.Date(asTime.Year(), asTime.Month(), asTime.Day(), asTime.Hour(), 0, 0, 0, asTime.Location()) + } + + // get the last document timestamp + lastDocumentTS, ok := getLastDocumentTimestamp(dbIdentifier) + emptyTime := time.Time{} + if lastDocumentTS == emptyTime || !ok { + // if it's not set, or it's empty, just set it to the current time + lastDocumentTS = time.Date(asTime.Year(), asTime.Month(), asTime.Day(), asTime.Hour(), asTime.Minute(), 0, 0, asTime.Location()) + SetlastTimestampAgggregateRecord(dbIdentifier, lastDocumentTS) + } + if dbIdentifier != "" { + // if aggregationTime != 60 and the database is Mongo (because we have an identifier): + if lastDocumentTS.Add(time.Minute * time.Duration(aggregationTime)).After(asTime) { + // if the last record timestamp + aggregationTime setting is after the current time, just add the new record to the current document + return lastDocumentTS + } + // if last record timestamp + amount of minutes set is before current time, just create a new record + newTime := time.Date(asTime.Year(), asTime.Month(), asTime.Day(), asTime.Hour(), asTime.Minute(), 0, 0, asTime.Location()) + SetlastTimestampAgggregateRecord(dbIdentifier, newTime) + return newTime + } + // if 
aggregationTime is set to 1 and DB is not Mongo, use asTime.Minute() and group every record by minute + return time.Date(asTime.Year(), asTime.Month(), asTime.Day(), asTime.Hour(), asTime.Minute(), 0, 0, asTime.Location()) +} diff --git a/analytics/aggregate_test.go b/analytics/aggregate_test.go index 6e92b6f97..eab26d9fd 100644 --- a/analytics/aggregate_test.go +++ b/analytics/aggregate_test.go @@ -2,6 +2,7 @@ package analytics import ( "testing" + "time" "github.com/stretchr/testify/assert" ) @@ -56,7 +57,7 @@ func TestAggregate_Tags(t *testing.T) { } func runTestAggregatedTags(t *testing.T, name string, records []interface{}) { - aggregations := AggregateData(records, false, []string{}, false, false) + aggregations := AggregateData(records, false, []string{}, "", 60, false) t.Run(name, func(t *testing.T) { for _, aggregation := range aggregations { @@ -79,7 +80,7 @@ func TestAggregateData_SkipGraphRecords(t *testing.T) { for i := range records { data[i] = records[i] } - aggregatedData := AggregateData(data, true, nil, true, true) + aggregatedData := AggregateData(data, true, nil, "", 1, true) assert.Equal(t, expectedAggregatedRecordCount, len(aggregatedData)) for _, expectedExistingOrgKey := range expectedExistingOrgKeys { _, exists := aggregatedData[expectedExistingOrgKey] @@ -129,3 +130,43 @@ func TestAggregateData_SkipGraphRecords(t *testing.T) { []string{"777-graph", "555-graph"}, )) } + +func TestSetAggregateTimestamp(t *testing.T) { + asTime := time.Now() + + tests := []struct { + ExpectedTime time.Time + testName string + DBIdentifier string + AggregationTime int + }{ + { + testName: "AggregationTime is 60", + AggregationTime: 60, + DBIdentifier: "testing-mongo", + ExpectedTime: time.Date(asTime.Year(), asTime.Month(), asTime.Day(), asTime.Hour(), 0, 0, 0, asTime.Location()), + }, + { + testName: "AggregationTime is 1", + AggregationTime: 1, + DBIdentifier: "testing-mongo", + ExpectedTime: time.Date(asTime.Year(), asTime.Month(), asTime.Day(), asTime.Hour(), asTime.Minute(), 0, 0, asTime.Location()), + }, + { + testName: "AggregationTime is 40", + AggregationTime: 40, + DBIdentifier: "testing-mongo", + ExpectedTime: time.Date(asTime.Year(), asTime.Month(), asTime.Day(), asTime.Hour(), asTime.Minute(), 0, 0, asTime.Location()), + }, + } + for _, test := range tests { + t.Run(test.testName, func(t *testing.T) { + ts := setAggregateTimestamp(test.DBIdentifier, asTime, test.AggregationTime) + assert.Equal(t, test.ExpectedTime, ts) + }) + } + + SetlastTimestampAgggregateRecord("testing-setLastTimestamp", time.Now().Add(-time.Minute*10)) + ts := setAggregateTimestamp("testing-setLastTimestamp", asTime, 7) + assert.Equal(t, time.Date(asTime.Year(), asTime.Month(), asTime.Day(), asTime.Hour(), asTime.Minute(), 0, 0, asTime.Location()), ts) +} diff --git a/pumps/hybrid.go b/pumps/hybrid.go index b12c6eaa7..196e2946c 100644 --- a/pumps/hybrid.go +++ b/pumps/hybrid.go @@ -43,10 +43,12 @@ var ( // HybridPump allows to send analytics to MDCB over RPC type HybridPump struct { - aggregated bool - trackAllPaths bool - storeAnalyticPerMinute bool - ignoreTagPrefixList []string + aggregated bool + trackAllPaths bool + storeAnalyticsPerMinute bool + aggregationTime int + enableAggregateSelfHealing bool + ignoreTagPrefixList []string CommonPumpConfig rpcConfig rpc.Config } @@ -128,8 +130,19 @@ func (p *HybridPump) Init(config interface{}) error { p.trackAllPaths = trackAllPaths.(bool) } - if storeAnalyticPerMinute, ok := meta["store_analytics_per_minute"]; ok { - p.storeAnalyticPerMinute = 
storeAnalyticPerMinute.(bool) + if storeAnalyticsPerMinute, ok := meta["store_analytics_per_minute"].(bool); ok { + p.storeAnalyticsPerMinute = storeAnalyticsPerMinute + p.aggregationTime = 1 + } else { + aggregationTime, ok := meta["aggregation_time"].(int) + if !ok || aggregationTime > 60 || aggregationTime < 1 { + p.log.Warnf("aggregation_time should be between 1 and 60, Found: %v. The default value will be used (60 minutes)", aggregationTime) + p.aggregationTime = 60 + } + } + + if enableAggregateSelfHealing, ok := meta["enable_aggregate_self_healing"].(bool); ok { + p.enableAggregateSelfHealing = enableAggregateSelfHealing } if list, ok := meta["ignore_tag_prefix_list"]; ok { @@ -199,7 +212,7 @@ func (p *HybridPump) WriteData(ctx context.Context, data []interface{}) error { } } else { // send aggregated data // calculate aggregates - aggregates := analytics.AggregateData(data, p.trackAllPaths, p.ignoreTagPrefixList, p.storeAnalyticPerMinute, false) + aggregates := analytics.AggregateData(data, p.trackAllPaths, p.ignoreTagPrefixList, p.rpcConfig.ConnectionString, p.aggregationTime, false) // turn map with analytics aggregates into JSON payload jsonData, err := json.Marshal(aggregates) diff --git a/pumps/mongo_aggregate.go b/pumps/mongo_aggregate.go index 683251eaa..697fee734 100644 --- a/pumps/mongo_aggregate.go +++ b/pumps/mongo_aggregate.go @@ -48,8 +48,15 @@ type MongoAggregateConf struct { // it will print an alert. // Defaults to 1000. ThresholdLenTagList int `json:"threshold_len_tag_list" mapstructure:"threshold_len_tag_list"` - // Determines if the aggregations should be made per minute instead of per hour. + // Determines if the aggregations should be made per minute (true) or per hour (false). StoreAnalyticsPerMinute bool `json:"store_analytics_per_minute" mapstructure:"store_analytics_per_minute"` + // Determines the amount of time the aggregations should be made (in minutes). It defaults to the max value is 60 and the minimum is 1. + // If StoreAnalyticsPerMinute is set to true, this field will be skipped. + AggregationTime int `json:"aggregation_time" mapstructure:"aggregation_time"` + // Determines if the self healing will be activated or not. + // Self Healing allows pump to handle Mongo document's max-size errors by creating a new document when the max-size is reached. + // It also divide by 2 the AggregationTime field to avoid the same error in the future. + EnableAggregateSelfHealing bool `json:"enable_aggregate_self_healing" mapstructure:"enable_aggregate_self_healing"` // This list determines which aggregations are going to be dropped and not stored in the collection. // Posible values are: "APIID","errors","versions","apikeys","oauthids","geo","tags","endpoints","keyendpoints", // "oauthendpoints", and "apiendpoints". 
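As an editorial aside, a hedged sketch (not part of the committed diff) of how the new aggregation_time and enable_aggregate_self_healing options fit together, using the same map-based configuration style as the tests later in this patch; the Mongo URL is a placeholder.

package pumps

func exampleAggregationConfig() error {
	cfg := make(map[string]interface{})
	cfg["mongo_url"] = "mongodb://localhost:27017/tyk_analytics" // placeholder connection string
	cfg["use_mixed_collection"] = true

	// Start a new aggregate document every 30 minutes instead of the 60-minute default.
	// If store_analytics_per_minute were true, this value would be forced to 1.
	cfg["aggregation_time"] = 30

	// On Mongo document max-size errors, self-healing creates a new document and halves
	// aggregation_time instead of failing the write.
	cfg["enable_aggregate_self_healing"] = true

	pump := MongoAggregatePump{}
	return pump.Init(cfg)
}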
@@ -170,12 +177,23 @@ func (m *MongoAggregatePump) Init(config interface{}) error { if m.dbConf.ThresholdLenTagList == 0 { m.dbConf.ThresholdLenTagList = THRESHOLD_LEN_TAG_LIST } + m.SetAggregationTime() m.connect() m.log.Debug("MongoDB DB CS: ", m.dbConf.GetBlurredURL()) m.log.Info(m.GetName() + " Initialized") + // look for the last record timestamp stored in the collection + lastTimestampAgggregateRecord, err := getLastDocumentTimestamp(m.dbSession, analytics.AgggregateMixedCollectionName) + + // we will set it to the lastDocumentTimestamp map to track the timestamp of different documents of different Mongo Aggregators + if err != nil { + m.log.Debug("Last document timestamp not found:", err) + } else { + analytics.SetlastTimestampAgggregateRecord(m.dbConf.MongoURL, lastTimestampAgggregateRecord) + } + return nil } @@ -261,12 +279,20 @@ func (m *MongoAggregatePump) WriteData(ctx context.Context, data []interface{}) m.WriteData(ctx, data) } else { // calculate aggregates - analyticsPerOrg := analytics.AggregateData(data, m.dbConf.TrackAllPaths, m.dbConf.IgnoreTagPrefixList, m.dbConf.StoreAnalyticsPerMinute, true) - + analyticsPerOrg := analytics.AggregateData(data, m.dbConf.TrackAllPaths, m.dbConf.IgnoreTagPrefixList, m.dbConf.MongoURL, m.dbConf.AggregationTime, true) // put aggregated data into MongoDB for orgID, filteredData := range analyticsPerOrg { err := m.DoAggregatedWriting(ctx, orgID, filteredData) if err != nil { + // checking if the error is related to the document size and AggregateSelfHealing is enabled + if shouldSelfHeal := m.ShouldSelfHeal(err); shouldSelfHeal { + // executing the function again with the new AggregationTime setting + newErr := m.WriteData(ctx, data) + if newErr == nil { + m.log.Info("Self-healing successful") + } + return newErr + } return err } @@ -325,7 +351,6 @@ func (m *MongoAggregatePump) DoAggregatedWriting(ctx context.Context, orgID stri m.log.Info("No OrgID for AnalyticsRecord, skipping") return nil } - thisSession := m.dbSession.Copy() defer thisSession.Close() @@ -355,7 +380,6 @@ func (m *MongoAggregatePump) DoAggregatedWriting(ctx context.Context, orgID stri doc := analytics.AnalyticsRecordAggregate{} _, err := analyticsCollection.Find(query).Apply(change, &doc) - if err != nil { m.log.WithField("query", query).Error("UPSERT Failure: ", err) return m.HandleWriteErr(err) @@ -416,3 +440,66 @@ func (m *MongoAggregatePump) collectionExists(name string) (bool, error) { func (m *MongoAggregatePump) WriteUptimeData(data []interface{}) { m.log.Warning("Mongo Aggregate should not be writing uptime data!") } + +// getLastDocumentTimestamp will return the timestamp of the last document in the collection +func getLastDocumentTimestamp(session *mgo.Session, collectionName string) (time.Time, error) { + var doc bson.M + err := session.DB("").C(collectionName).Find(nil).Sort("-$natural").One(&doc) + if err != nil { + return time.Time{}, err + } + if ts, ok := doc["timestamp"].(time.Time); ok { + return ts, nil + } + return time.Time{}, errors.New("timestamp of type: time.Time not found in bson map") +} + +// divideAggregationTime divides by two the analytics stored per minute setting +func (m *MongoAggregatePump) divideAggregationTime() { + if m.dbConf.AggregationTime == 1 { + m.log.Debug("Analytics Stored Per Minute is set to 1, unable to divide") + return + } + oldAggTime := m.dbConf.AggregationTime + m.dbConf.AggregationTime /= 2 + m.log.Warn("Analytics Stored Per Minute dicreased from ", oldAggTime, " to ", m.dbConf.AggregationTime) +} + +// 
ShouldSelfHeal returns true if the pump should self heal +func (m *MongoAggregatePump) ShouldSelfHeal(err error) bool { + const StandardMongoSizeError = "Size must be between 0 and" + const CosmosSizeError = "Request size is too large" + const DocDBSizeError = "Resulting document after update is larger than" + + if m.dbConf.EnableAggregateSelfHealing { + if strings.Contains(err.Error(), StandardMongoSizeError) || strings.Contains(err.Error(), CosmosSizeError) || strings.Contains(err.Error(), DocDBSizeError) { + // if the AggregationTime setting is already set to 1, we can't do anything else + if m.dbConf.AggregationTime == 1 { + m.log.Warning("AggregationTime is equal to 1 minute, unable to reduce it further. Skipping self-healing.") + return false + } + m.log.Warning("Detected document size failure, attempting to create a new document and reduce the number of analytics stored per minute") + // dividing the AggregationTime by 2 to reduce the number of analytics stored per minute + m.divideAggregationTime() + // resetting the lastDocumentTimestamp, this will create a new document with the current timestamp + analytics.SetlastTimestampAgggregateRecord(m.dbConf.MongoURL, time.Time{}) + return true + } + } + return false +} + +// SetAggregationTime sets the aggregation time for the pump +func (m *MongoAggregatePump) SetAggregationTime() { + // if StoreAnalyticsPerMinute is set to true, the aggregation time will be set to 1. + // if not, the aggregation time will be set to the value of the field AggregationTime. + // if there is no value for AggregationTime, it will be set to 60. + + if m.dbConf.StoreAnalyticsPerMinute { + m.log.Info("StoreAnalyticsPerMinute is set to true. Pump will aggregate data every minute.") + m.dbConf.AggregationTime = 1 + } else if m.dbConf.AggregationTime < 1 || m.dbConf.AggregationTime > 60 { + m.log.Warn("AggregationTime is not set or is not between 1 and 60. 
Defaulting to 60") + m.dbConf.AggregationTime = 60 + } +} diff --git a/pumps/mongo_aggregate_test.go b/pumps/mongo_aggregate_test.go index bed4629d6..cb9a3a5ab 100644 --- a/pumps/mongo_aggregate_test.go +++ b/pumps/mongo_aggregate_test.go @@ -2,10 +2,14 @@ package pumps import ( "context" + "errors" + "strings" "testing" "time" "github.com/TykTechnologies/tyk-pump/analytics" + "github.com/TykTechnologies/tyk-pump/analytics/demo" + "github.com/sirupsen/logrus" "github.com/stretchr/testify/assert" "gopkg.in/mgo.v2/bson" ) @@ -15,10 +19,12 @@ func TestDoAggregatedWritingWithIgnoredAggregations(t *testing.T) { cfgPump1["mongo_url"] = "mongodb://localhost:27017/tyk_analytics" cfgPump1["ignore_aggregations"] = []string{"apikeys"} cfgPump1["use_mixed_collection"] = true + cfgPump1["store_analytics_per_minute"] = false cfgPump2 := make(map[string]interface{}) cfgPump2["mongo_url"] = "mongodb://localhost:27017/tyk_analytics" cfgPump2["use_mixed_collection"] = true + cfgPump2["store_analytics_per_minute"] = false pmp1 := MongoAggregatePump{} pmp2 := MongoAggregatePump{} @@ -92,7 +98,6 @@ func TestDoAggregatedWritingWithIgnoredAggregations(t *testing.T) { collectionName, collErr = pmp1.GetCollectionName("123") assert.Nil(t, collErr) } - thisSession := pmp1.dbSession.Copy() defer thisSession.Close() @@ -125,3 +130,400 @@ func TestDoAggregatedWritingWithIgnoredAggregations(t *testing.T) { }) } } + +func TestAggregationTime(t *testing.T) { + cfgPump1 := make(map[string]interface{}) + cfgPump1["mongo_url"] = "mongodb://localhost:27017/tyk_analytics" + cfgPump1["ignore_aggregations"] = []string{"apikeys"} + cfgPump1["use_mixed_collection"] = true + + pmp1 := MongoAggregatePump{} + + timeNow := time.Now() + keys := make([]interface{}, 1) + keys[0] = analytics.AnalyticsRecord{APIID: "api1", OrgID: "123", TimeStamp: timeNow, APIKey: "apikey1"} + + tests := []struct { + testName string + AggregationTime int + WantedNumberOfRecords int + }{ + { + testName: "create record every 60 minutes - 180 minutes hitting the API", + AggregationTime: 60, + WantedNumberOfRecords: 3, + }, + { + testName: "create new record every 30 minutes - 120 minutes hitting the API", + AggregationTime: 30, + WantedNumberOfRecords: 4, + }, + { + testName: "create new record every 15 minutes - 90 minutes hitting the API", + AggregationTime: 15, + WantedNumberOfRecords: 6, + }, + { + testName: "create new record every 7 minutes - 28 minutes hitting the API", + AggregationTime: 7, + WantedNumberOfRecords: 4, + }, + { + testName: "create new record every 3 minutes - 24 minutes hitting the API", + AggregationTime: 3, + WantedNumberOfRecords: 8, + }, + { + testName: "create new record every minute - 10 minutes hitting the API", + AggregationTime: 1, + WantedNumberOfRecords: 10, + }, + } + for _, test := range tests { + t.Run(test.testName, func(t *testing.T) { + cfgPump1["aggregation_time"] = test.AggregationTime + errInit1 := pmp1.Init(cfgPump1) + if errInit1 != nil { + t.Error(errInit1) + return + } + + defer func() { + // we clean the db after we finish every test case + sess := pmp1.dbSession.Copy() + defer sess.Close() + + if err := sess.DB("").DropDatabase(); err != nil { + panic(err) + } + }() + + ctx := context.TODO() + for i := 0; i < test.WantedNumberOfRecords; i++ { + for index := 0; index < test.AggregationTime; index++ { + errWrite := pmp1.WriteData(ctx, keys) + if errWrite != nil { + t.Fatal("Mongo Aggregate Pump couldn't write records with err:", errWrite) + } + } + timeNow = timeNow.Add(time.Minute * 
time.Duration(test.AggregationTime)) + keys[0] = analytics.AnalyticsRecord{APIID: "api1", OrgID: "123", TimeStamp: timeNow, APIKey: "apikey1"} + } + + collectionName := analytics.AgggregateMixedCollectionName + + thisSession := pmp1.dbSession.Copy() + defer thisSession.Close() + + analyticsCollection := thisSession.DB("").C(collectionName) + + query := bson.M{ + "orgid": "123", + } + + results := []analytics.AnalyticsRecordAggregate{} + // fetch the results + errFind := analyticsCollection.Find(query).All(&results) + assert.Nil(t, errFind) + + // double check that the res is not nil + assert.NotNil(t, results) + + // checking if we have the correct number of records + assert.Len(t, results, test.WantedNumberOfRecords) + + // validate totals + for _, res := range results { + assert.NotNil(t, res.Total) + } + }) + } +} + +func TestMongoAggregatePump_divideAggregationTime(t *testing.T) { + tests := []struct { + name string + currentAggregationTime int + newAggregationTime int + }{ + { + name: "divide 60 minutes (even number)", + currentAggregationTime: 60, + newAggregationTime: 30, + }, + { + name: "divide 15 minutes (odd number)", + currentAggregationTime: 15, + newAggregationTime: 7, + }, + { + name: "divide 1 minute (must return 1)", + currentAggregationTime: 1, + newAggregationTime: 1, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + dbConf := &MongoAggregateConf{ + AggregationTime: tt.currentAggregationTime, + } + + commonPumpConfig := CommonPumpConfig{ + log: logrus.NewEntry(logrus.New()), + } + + m := &MongoAggregatePump{ + dbConf: dbConf, + CommonPumpConfig: commonPumpConfig, + } + m.divideAggregationTime() + + assert.Equal(t, tt.newAggregationTime, m.dbConf.AggregationTime) + }) + } +} + +func TestMongoAggregatePump_SelfHealing(t *testing.T) { + cfgPump1 := make(map[string]interface{}) + cfgPump1["mongo_url"] = "mongodb://localhost:27017/tyk_analytics" + cfgPump1["ignore_aggregations"] = []string{"apikeys"} + cfgPump1["use_mixed_collection"] = true + cfgPump1["aggregation_time"] = 60 + cfgPump1["enable_aggregate_self_healing"] = true + + pmp1 := MongoAggregatePump{} + + errInit1 := pmp1.Init(cfgPump1) + if errInit1 != nil { + t.Error(errInit1) + return + } + + defer func() { + // we clean the db after we finish the test + // we use pmp1 session since it should be the same + sess := pmp1.dbSession.Copy() + defer sess.Close() + + if err := sess.DB("").DropDatabase(); err != nil { + panic(err) + } + }() + + var count int + var set []interface{} + for { + count++ + record := demo.GenerateRandomAnalyticRecord("org123") + set = append(set, record) + if count == 1000 { + err := pmp1.WriteData(context.TODO(), set) + if err != nil { + // checking if the error is related to the size of the document (standard Mongo) + contains := strings.Contains(err.Error(), "Size must be between 0 and") + assert.True(t, contains) + // If we get an error, is because aggregation time is equal to 1, and self healing can't divide it + assert.Equal(t, 1, pmp1.dbConf.AggregationTime) + + // checking lastDocumentTimestamp + ts, err := getLastDocumentTimestamp(pmp1.dbSession, "tyk_analytics_aggregates") + assert.Nil(t, err) + assert.NotNil(t, ts) + break + } + count = 0 + } + } +} + +func TestMongoAggregatePump_ShouldSelfHeal(t *testing.T) { + type fields struct { + dbConf *MongoAggregateConf + CommonPumpConfig CommonPumpConfig + } + + // dbConf - EnableAggregateSelfHealing / AggregationTime / MongoURL / Log + + tests := []struct { + fields fields + inputErr error + name string + want 
bool + }{ + { + name: "random error", + fields: fields{ + dbConf: &MongoAggregateConf{ + EnableAggregateSelfHealing: true, + AggregationTime: 60, + BaseMongoConf: BaseMongoConf{ + MongoURL: "mongodb://localhost:27017", + }, + }, + CommonPumpConfig: CommonPumpConfig{ + log: logrus.NewEntry(logrus.New()), + }, + }, + inputErr: errors.New("random error"), + want: false, + }, + { + name: "CosmosSizeError error", + fields: fields{ + dbConf: &MongoAggregateConf{ + EnableAggregateSelfHealing: true, + AggregationTime: 60, + BaseMongoConf: BaseMongoConf{ + MongoURL: "mongodb://localhost:27017", + }, + }, + CommonPumpConfig: CommonPumpConfig{ + log: logrus.NewEntry(logrus.New()), + }, + }, + inputErr: errors.New("Request size is too large"), + want: true, + }, + { + name: "StandardMongoSizeError error", + fields: fields{ + dbConf: &MongoAggregateConf{ + EnableAggregateSelfHealing: true, + AggregationTime: 60, + BaseMongoConf: BaseMongoConf{ + MongoURL: "mongodb://localhost:27017", + }, + }, + CommonPumpConfig: CommonPumpConfig{ + log: logrus.NewEntry(logrus.New()), + }, + }, + inputErr: errors.New("Size must be between 0 and"), + want: true, + }, + { + name: "DocDBSizeError error", + fields: fields{ + dbConf: &MongoAggregateConf{ + EnableAggregateSelfHealing: true, + AggregationTime: 60, + BaseMongoConf: BaseMongoConf{ + MongoURL: "mongodb://localhost:27017", + }, + }, + CommonPumpConfig: CommonPumpConfig{ + log: logrus.NewEntry(logrus.New()), + }, + }, + inputErr: errors.New("Resulting document after update is larger than"), + want: true, + }, + { + name: "StandardMongoSizeError error but self healing disabled", + fields: fields{ + dbConf: &MongoAggregateConf{ + EnableAggregateSelfHealing: false, + AggregationTime: 60, + BaseMongoConf: BaseMongoConf{ + MongoURL: "mongodb://localhost:27017", + }, + }, + CommonPumpConfig: CommonPumpConfig{ + log: logrus.NewEntry(logrus.New()), + }, + }, + inputErr: errors.New("Size must be between 0 and"), + want: false, + }, + { + name: "StandardMongoSizeError error but aggregation time is 1", + fields: fields{ + dbConf: &MongoAggregateConf{ + EnableAggregateSelfHealing: true, + AggregationTime: 1, + BaseMongoConf: BaseMongoConf{ + MongoURL: "mongodb://localhost:27017", + }, + }, + CommonPumpConfig: CommonPumpConfig{ + log: logrus.NewEntry(logrus.New()), + }, + }, + inputErr: errors.New("Size must be between 0 and"), + want: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + m := &MongoAggregatePump{ + dbConf: tt.fields.dbConf, + CommonPumpConfig: tt.fields.CommonPumpConfig, + } + if got := m.ShouldSelfHeal(tt.inputErr); got != tt.want { + t.Errorf("MongoAggregatePump.ShouldSelfHeal() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestMongoAggregatePump_HandleWriteErr(t *testing.T) { + cfgPump1 := make(map[string]interface{}) + cfgPump1["mongo_url"] = "mongodb://localhost:27017/tyk_analytics" + cfgPump1["ignore_aggregations"] = []string{"apikeys"} + cfgPump1["use_mixed_collection"] = true + cfgPump1["store_analytics_per_minute"] = false + pmp1 := MongoAggregatePump{} + + errInit1 := pmp1.Init(cfgPump1) + if errInit1 != nil { + t.Error(errInit1) + return + } + + tests := []struct { + inputErr error + name string + wantErr bool + }{ + { + name: "nil error", + inputErr: nil, + wantErr: false, + }, + { + name: "random error", + inputErr: errors.New("random error"), + wantErr: true, + }, + { + name: "EOF error", + inputErr: errors.New("EOF"), + wantErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t 
*testing.T) { + if err := pmp1.HandleWriteErr(tt.inputErr); (err != nil) != tt.wantErr { + t.Errorf("MongoAggregatePump.HandleWriteErr() error = %v, wantErr %v", err, tt.wantErr) + } + }) + } +} + +func TestMongoAggregatePump_StoreAnalyticsPerMinute(t *testing.T) { + cfgPump1 := make(map[string]interface{}) + cfgPump1["mongo_url"] = "mongodb://localhost:27017/tyk_analytics" + cfgPump1["ignore_aggregations"] = []string{"apikeys"} + cfgPump1["use_mixed_collection"] = true + cfgPump1["store_analytics_per_minute"] = true + cfgPump1["aggregation_time"] = 45 + pmp1 := MongoAggregatePump{} + + errInit1 := pmp1.Init(cfgPump1) + if errInit1 != nil { + t.Error(errInit1) + return + } + // Checking if the aggregation time is set to 1. Doesn't matter if aggregation_time is equal to 45 or 1, the result should be always 1. + assert.True(t, pmp1.dbConf.AggregationTime == 1) +} diff --git a/pumps/sql_aggregate.go b/pumps/sql_aggregate.go index 65837bb08..b34d98a78 100644 --- a/pumps/sql_aggregate.go +++ b/pumps/sql_aggregate.go @@ -33,9 +33,7 @@ type SQLAggregatePumpConf struct { type SQLAggregatePump struct { CommonPumpConfig - SQLConf *SQLAggregatePumpConf - db *gorm.DB dbType string dialect gorm.Dialector @@ -152,7 +150,15 @@ func (c *SQLAggregatePump) WriteData(ctx context.Context, data []interface{}) er table = analytics.AggregateSQLTable } - analyticsPerOrg := analytics.AggregateData(data[startIndex:endIndex], c.SQLConf.TrackAllPaths, c.SQLConf.IgnoreTagPrefixList, c.SQLConf.StoreAnalyticsPerMinute, false) + // if StoreAnalyticsPerMinute is set to true, we will create new documents with records every 1 minute + var aggregationTime int + if c.SQLConf.StoreAnalyticsPerMinute { + aggregationTime = 1 + } else { + aggregationTime = 60 + } + + analyticsPerOrg := analytics.AggregateData(data[startIndex:endIndex], c.SQLConf.TrackAllPaths, c.SQLConf.IgnoreTagPrefixList, "", aggregationTime, false) for orgID, ag := range analyticsPerOrg { From 8e42170ee1993ddb94802bbca26b1714ae1c49e8 Mon Sep 17 00:00:00 2001 From: Kofo Okesola Date: Thu, 3 Nov 2022 12:56:51 +0100 Subject: [PATCH 014/102] [TT-6012] fix edge case where the query is to an unresolved subgraph schema (#511) * fix edge case where the query is to an unresolved subgraph schema * fixed tests * added tests for new record logic --- analytics/graph_record.go | 4 ++ analytics/graph_record_test.go | 67 ++++++++++++++++++++++++++++++++++ pumps/graph_mongo.go | 21 ++++++++--- pumps/graph_mongo_test.go | 17 ++++++++- 4 files changed, 102 insertions(+), 7 deletions(-) diff --git a/analytics/graph_record.go b/analytics/graph_record.go index ddfb7500f..1dcc5de16 100644 --- a/analytics/graph_record.go +++ b/analytics/graph_record.go @@ -85,6 +85,10 @@ func (a *AnalyticsRecord) ToGraphRecord() (GraphRecord, error) { } typesToFieldsMap := make(map[string][]string) for fieldRef, typeDefRef := range fieldTypeList { + if typeDefRef == ast.InvalidRef { + err = errors.New("invalid selection set field type") + return record, err + } extractTypesAndFields(fieldRef, typeDefRef, typesToFieldsMap, request, schema) } record.Types = typesToFieldsMap diff --git a/analytics/graph_record_test.go b/analytics/graph_record_test.go index a2788320c..5f68a376b 100644 --- a/analytics/graph_record_test.go +++ b/analytics/graph_record_test.go @@ -19,6 +19,55 @@ const ( responseTemplate = "HTTP/0.0 200 OK\r\nContent-Length: %d\r\nConnection: close\r\nContent-Type: application/json\r\n\r\n%s" ) +const subgraphSchema = ` +directive @extends on OBJECT + +directive @external on FIELD_DEFINITION 
+ +directive @key(fields: _FieldSet!) on OBJECT | INTERFACE + +directive @provides(fields: _FieldSet!) on FIELD_DEFINITION + +directive @requires(fields: _FieldSet!) on FIELD_DEFINITION + +type Entity { + findProductByUpc(upc: String!): Product! + findUserByID(id: ID!): User! +} + +type Product { + upc: String! + reviews: [Review] +} + +type Query { + _entities(representations: [_Any!]!): [_Entity]! + _service: _Service! +} + +type Review { + body: String! + author: User! + product: Product! +} + +type User { + id: ID! + username: String! + reviews: [Review] +} + +scalar _Any + +union _Entity = Product | User + +scalar _FieldSet + +type _Service { + sdl: String +} +` + const sampleSchema = ` type Query { characters(filter: FilterCharacter, page: Int): Characters @@ -115,6 +164,24 @@ func TestAnalyticsRecord_ToGraphRecord(t *testing.T) { return g }, }, + { + title: "error field type", + request: `{"query":"query($representations: [_Any!]!){_entities(representations: $representations){... on User {reviews {body}}}}","variables":{"representations":[{"id":"1234","__typename":"User"}]}}`, + response: `{"data":{"_entities":[{"reviews":[{"body":"A highly effective form of birth control."},{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits."}]}]}}`, + expected: func(s, s2 string) GraphRecord { + variables := `{"representations":[{"id":"1234","__typename":"User"}]}` + g := graphRecordSample + g.OperationType = "Query" + g.Variables = base64.StdEncoding.EncodeToString([]byte(variables)) + g.Types = nil + return g + }, + expectedErr: "invalid selection set field type", + modifyRecord: func(a AnalyticsRecord) AnalyticsRecord { + a.ApiSchema = base64.StdEncoding.EncodeToString([]byte(subgraphSchema)) + return a + }, + }, { title: "no error mutation", request: `{"query":"mutation{\n changeCharacter()\n}"}`, diff --git a/pumps/graph_mongo.go b/pumps/graph_mongo.go index 13bf40103..11ecd282f 100644 --- a/pumps/graph_mongo.go +++ b/pumps/graph_mongo.go @@ -103,12 +103,23 @@ func (g *GraphMongoPump) WriteData(ctx context.Context, data []interface{}) erro if !ok { continue } - gr, err := r.ToGraphRecord() - if err != nil { - errCh <- err - g.log.WithError(err).Warn("error converting 1 record to graph record") - continue + + var ( + gr analytics.GraphRecord + err error + ) + if r.RawRequest == "" || r.RawResponse == "" || r.ApiSchema == "" { + g.log.Warn("skipping record parsing") + gr = analytics.GraphRecord{AnalyticsRecord: r} + } else { + gr, err = r.ToGraphRecord() + if err != nil { + errCh <- err + g.log.WithError(err).Warn("error converting 1 record to graph record") + continue + } } + finalSet = append(finalSet, gr) } diff --git a/pumps/graph_mongo_test.go b/pumps/graph_mongo_test.go index 3e3a93f8c..a86a7c157 100644 --- a/pumps/graph_mongo_test.go +++ b/pumps/graph_mongo_test.go @@ -255,7 +255,7 @@ func TestGraphMongoPump_WriteData(t *testing.T) { }, }, { - name: "should error on empty request response", + name: "should be empty on empty request response", records: []customRecord{ { rawRequest: "", @@ -275,7 +275,20 @@ func TestGraphMongoPump_WriteData(t *testing.T) { tags: []string{analytics.PredefinedTagGraphAnalytics}, }, }, - expectedError: "error generating documents", + expectedGraphRecords: []analytics.GraphRecord{ + { + Types: map[string][]string{}, + Errors: []analytics.GraphError{}, + }, + { + Types: map[string][]string{}, + Errors: []analytics.GraphError{}, + }, + { + Types: map[string][]string{}, + Errors: []analytics.GraphError{}, + 
}, + }, }, } From 1f0bc24934cd6d09829f5c0f6d6fe2e243f6b7ac Mon Sep 17 00:00:00 2001 From: Matias <83959431+mativm02@users.noreply.github.com> Date: Wed, 9 Nov 2022 14:20:26 -0300 Subject: [PATCH 015/102] [TT-5426] - Improve Tyk Pump demo mode (#513) * adding info about self-healing * adding more features to demo mode * improving variable names * unit tests + linting * fixing tests * adding docs * fixing tests * removing extra documentation * small change in readme * slight details in readme --- README.md | 9 +++ analytics/demo/demo.go | 27 +++++--- analytics/demo/demo_test.go | 121 ++++++++++++++++++++++++++++++++++ main.go | 6 +- pumps/mongo_aggregate_test.go | 2 +- serializer/serializer_test.go | 40 +++++------ 6 files changed, 172 insertions(+), 33 deletions(-) create mode 100644 analytics/demo/demo_test.go diff --git a/README.md b/README.md index 8695b4ba0..551a717c6 100644 --- a/README.md +++ b/README.md @@ -1223,3 +1223,12 @@ go build -v ./... ``` go test -v ./... ``` + +## Demo Mode +You can run Tyk Pump in demo mode, which will generate fake analytics data and send it to the configured pumps. This is useful for testing and development. To enable demo mode, use the following flags: + +- `--demo=` - Enables demo mode and sets the organization ID to use for the demo data. **This is required to enable Demo Mode**. +- `--demo-api=` - Configure the value to be recorded as the `API_ID` in the demo transactions. If this option is not set, the Pump Demo mode will use a random `API_ID`. Note that the same `API_ID` will be used for all transaction logs. +- `--demo-days=` - Sets the number of days of demo data to generate. Defaults to 30. +- `--demo-records-per-hour=` - Sets the number of records to generate per hour. The default value is a random number between 300 and 500. +- `--demo-track-path` - Enables tracking of the request path in the demo data. Defaults to false (disabled). Note that setting `track_all_paths` to `true` in your Pump configuration will override this option. 
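The demo flags above map onto the reworked `GenerateDemoData(days, recordsPerHour, orgID, trackPath, writer)` entry point changed in the diff below. As a minimal sketch of driving it programmatically, outside the CLI flow in `main.go` (assuming the import paths used in `analytics/demo/demo_test.go`; the record-counting writer is purely illustrative):

```
package main

import (
	"fmt"
	"time"

	"github.com/TykTechnologies/tyk-pump/analytics/demo"
	"github.com/gocraft/health"
)

func main() {
	total := 0
	// The writer callback receives each hourly batch; here it only counts the generated records.
	writer := func(data []interface{}, job *health.Job, ts time.Time, n int) {
		total += len(data)
	}

	// 2 days of data, 10 records per hour, for org "org123", with path tracking enabled.
	demo.GenerateDemoData(2, 10, "org123", true, writer)

	fmt.Printf("generated %d demo records\n", total) // expected: 2 days * 24 hours * 10 = 480
}
```

When running the built binary instead, the equivalent is the `--demo`, `--demo-days`, `--demo-records-per-hour` and `--demo-track-path` flags documented above.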
diff --git a/analytics/demo/demo.go b/analytics/demo/demo.go index b0975e444..083b131b6 100644 --- a/analytics/demo/demo.go +++ b/analytics/demo/demo.go @@ -149,17 +149,24 @@ func country() string { return codes[rand.Intn(len(codes))] } -func GenerateDemoData(start time.Time, days int, orgId string, writer func([]interface{}, *health.Job, time.Time, int)) { +func GenerateDemoData(days, recordsPerHour int, orgID string, trackPath bool, writer func([]interface{}, *health.Job, time.Time, int)) { + t := time.Now() + start := time.Date(t.Year(), t.Month(), t.Day(), 0, 0, 0, 0, t.Location()) count := 0 - for d := 30; d >= 0; d-- { - for h := 0; h < 23; h++ { + for d := 0; d < days; d++ { + for h := 0; h < 24; h++ { set := []interface{}{} ts := start.AddDate(0, 0, d) ts = ts.Add(time.Duration(h) * time.Hour) // Generate daily entries - volume := randomInRange(300, 500) + var volume int + if recordsPerHour > 0 { + volume = recordsPerHour + } else { + volume = randomInRange(300, 500) + } for i := 0; i < volume; i++ { - r := GenerateRandomAnalyticRecord(orgId) + r := GenerateRandomAnalyticRecord(orgID, trackPath) r.Day = ts.Day() r.Month = ts.Month() r.Year = ts.Year() @@ -175,7 +182,7 @@ func GenerateDemoData(start time.Time, days int, orgId string, writer func([]int } } -func GenerateRandomAnalyticRecord(orgId string) analytics.AnalyticsRecord { +func GenerateRandomAnalyticRecord(orgID string, trackPath bool) analytics.AnalyticsRecord { p := randomPath() api, apiID := randomAPI() ts := time.Now() @@ -190,20 +197,20 @@ func GenerateRandomAnalyticRecord(orgId string) analytics.AnalyticsRecord { Year: ts.Year(), Hour: ts.Hour(), ResponseCode: responseCode(), - APIKey: getRandomKey(orgId), + APIKey: getRandomKey(orgID), TimeStamp: ts, APIVersion: apiVersion, APIName: api, APIID: apiID, - OrgID: orgId, + OrgID: orgID, OauthID: "", RequestTime: int64(randomInRange(0, 10)), RawRequest: "Qk9EWSBEQVRB", RawResponse: "UkVTUE9OU0UgREFUQQ==", IPAddress: "118.93.55.103", - Tags: []string{"orgid-" + orgId, "apiid-" + apiID}, + Tags: []string{"orgid-" + orgID, "apiid-" + apiID}, Alias: "", - TrackPath: true, + TrackPath: trackPath, ExpireAt: time.Now().Add(time.Hour * 8760), } diff --git a/analytics/demo/demo_test.go b/analytics/demo/demo_test.go new file mode 100644 index 000000000..1573ad9f1 --- /dev/null +++ b/analytics/demo/demo_test.go @@ -0,0 +1,121 @@ +package demo + +import ( + "testing" + "time" + + "github.com/TykTechnologies/tyk-pump/analytics" + "github.com/gocraft/health" + "github.com/stretchr/testify/assert" +) + +func TestGenerateDemoData(t *testing.T) { + type args struct { + writer func([]interface{}, *health.Job, time.Time, int) + orgID string + days int + recordsPerHour int + trackPath bool + } + + tests := []struct { + name string + args args + }{ + { + name: "generating demo data for 1 day, 1 record per hour -> 24 records", + args: args{ + days: 1, + recordsPerHour: 1, + orgID: "test", + trackPath: false, + writer: func(data []interface{}, job *health.Job, ts time.Time, n int) { + }, + }, + }, + { + name: "generating demo data for 2 days, 1 record per hour -> 48 records", + args: args{ + days: 2, + recordsPerHour: 1, + orgID: "test", + trackPath: true, + writer: func([]interface{}, *health.Job, time.Time, int) {}, + }, + }, + { + name: "generating demo data for 1 day, 2 records per hour -> 48 records", + args: args{ + days: 1, + recordsPerHour: 2, + orgID: "test", + trackPath: false, + writer: func([]interface{}, *health.Job, time.Time, int) {}, + }, + }, + { + name: "generating demo data 
for 2 days, 2 records per hour -> 96 records", + args: args{ + days: 2, + recordsPerHour: 2, + orgID: "test", + trackPath: true, + writer: func([]interface{}, *health.Job, time.Time, int) {}, + }, + }, + { + name: "generating demo data for 0 days, 100 records per hour -> 0 records", + args: args{ + days: 0, + recordsPerHour: 100, + orgID: "test", + trackPath: false, + writer: func([]interface{}, *health.Job, time.Time, int) {}, + }, + }, + { + name: "generating demo data for 1 day, 0 records per hour -> 0 records", + args: args{ + days: 1, + recordsPerHour: 0, + orgID: "test", + trackPath: true, + writer: func([]interface{}, *health.Job, time.Time, int) {}, + }, + }, + { + name: "generating demo data for 10 days, from 300 to 500 records per hour", + args: args{ + days: 10, + recordsPerHour: 0, + orgID: "test", + trackPath: false, + writer: func([]interface{}, *health.Job, time.Time, int) {}, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + counter := 0 + tt.args.writer = func(data []interface{}, job *health.Job, ts time.Time, n int) { + counter += len(data) + for _, d := range data { + analyticsRecord, ok := d.(analytics.AnalyticsRecord) + if !ok { + t.Errorf("unexpected type: %T", d) + } + assert.Equal(t, tt.args.trackPath, analyticsRecord.TrackPath) + } + } + + GenerateDemoData(tt.args.days, tt.args.recordsPerHour, tt.args.orgID, tt.args.trackPath, tt.args.writer) + if tt.args.recordsPerHour == 0 { + isValid := counter >= 300*tt.args.days || counter <= 500*tt.args.days + assert.True(t, isValid) + return + } + assert.Equal(t, tt.args.days*24*tt.args.recordsPerHour, counter) + }) + } +} diff --git a/main.go b/main.go index 0c52e3296..7c0ae6cb9 100644 --- a/main.go +++ b/main.go @@ -40,6 +40,9 @@ var ( demoMode = kingpin.Flag("demo", "pass orgID string to generate demo data").Default("").String() demoApiMode = kingpin.Flag("demo-api", "pass apiID string to generate demo data").Default("").String() demoApiVersionMode = kingpin.Flag("demo-api-version", "pass apiID string to generate demo data").Default("").String() + demoTrackPath = kingpin.Flag("demo-track-path", "enable track path in analytics records").Default("false").Bool() + demoDays = kingpin.Flag("demo-days", "flag that determines the number of days for the analytics records").Default("30").Int() + demoRecordsPerHour = kingpin.Flag("demo-records-per-hour", "flag that determines the number of records per hour for the analytics records").Default("0").Int() debugMode = kingpin.Flag("debug", "enable debug mode").Bool() version = kingpin.Version(pumps.VERSION) ) @@ -408,8 +411,7 @@ func main() { log.Info("BUILDING DEMO DATA AND EXITING...") log.Warning("Starting from date: ", time.Now().AddDate(0, 0, -30)) demo.DemoInit(*demoMode, *demoApiMode, *demoApiVersionMode) - demo.GenerateDemoData(time.Now().AddDate(0, 0, -30), 30, *demoMode, writeToPumps) - + demo.GenerateDemoData(*demoDays, *demoRecordsPerHour, *demoMode, *demoTrackPath, writeToPumps) return } diff --git a/pumps/mongo_aggregate_test.go b/pumps/mongo_aggregate_test.go index cb9a3a5ab..0da40c36b 100644 --- a/pumps/mongo_aggregate_test.go +++ b/pumps/mongo_aggregate_test.go @@ -314,7 +314,7 @@ func TestMongoAggregatePump_SelfHealing(t *testing.T) { var set []interface{} for { count++ - record := demo.GenerateRandomAnalyticRecord("org123") + record := demo.GenerateRandomAnalyticRecord("org123", true) set = append(set, record) if count == 1000 { err := pmp1.WriteData(context.TODO(), set) diff --git a/serializer/serializer_test.go 
b/serializer/serializer_test.go index 99bcd9c9d..7eb6a600c 100644 --- a/serializer/serializer_test.go +++ b/serializer/serializer_test.go @@ -108,16 +108,16 @@ func TestSerializer_GetSuffix(t *testing.T) { func BenchmarkProtobufEncoding(b *testing.B) { serializer := NewAnalyticsSerializer(PROTOBUF_SERIALIZER) records := []analytics.AnalyticsRecord{ - demo.GenerateRandomAnalyticRecord("org_1"), - demo.GenerateRandomAnalyticRecord("org_1"), - demo.GenerateRandomAnalyticRecord("org_1"), - demo.GenerateRandomAnalyticRecord("org_1"), - demo.GenerateRandomAnalyticRecord("org_1"), - demo.GenerateRandomAnalyticRecord("org_2"), - demo.GenerateRandomAnalyticRecord("org_2"), - demo.GenerateRandomAnalyticRecord("org_2"), - demo.GenerateRandomAnalyticRecord("org_2"), - demo.GenerateRandomAnalyticRecord("org_2"), + demo.GenerateRandomAnalyticRecord("org_1", true), + demo.GenerateRandomAnalyticRecord("org_1", true), + demo.GenerateRandomAnalyticRecord("org_1", true), + demo.GenerateRandomAnalyticRecord("org_1", true), + demo.GenerateRandomAnalyticRecord("org_1", true), + demo.GenerateRandomAnalyticRecord("org_2", true), + demo.GenerateRandomAnalyticRecord("org_2", true), + demo.GenerateRandomAnalyticRecord("org_2", true), + demo.GenerateRandomAnalyticRecord("org_2", true), + demo.GenerateRandomAnalyticRecord("org_2", true), } b.Helper() b.ReportAllocs() @@ -134,16 +134,16 @@ func BenchmarkProtobufEncoding(b *testing.B) { func BenchmarkMsgpEncoding(b *testing.B) { serializer := NewAnalyticsSerializer(MSGP_SERIALIZER) records := []analytics.AnalyticsRecord{ - demo.GenerateRandomAnalyticRecord("org_1"), - demo.GenerateRandomAnalyticRecord("org_1"), - demo.GenerateRandomAnalyticRecord("org_1"), - demo.GenerateRandomAnalyticRecord("org_1"), - demo.GenerateRandomAnalyticRecord("org_1"), - demo.GenerateRandomAnalyticRecord("org_2"), - demo.GenerateRandomAnalyticRecord("org_2"), - demo.GenerateRandomAnalyticRecord("org_2"), - demo.GenerateRandomAnalyticRecord("org_2"), - demo.GenerateRandomAnalyticRecord("org_2"), + demo.GenerateRandomAnalyticRecord("org_1", true), + demo.GenerateRandomAnalyticRecord("org_1", true), + demo.GenerateRandomAnalyticRecord("org_1", true), + demo.GenerateRandomAnalyticRecord("org_1", true), + demo.GenerateRandomAnalyticRecord("org_1", true), + demo.GenerateRandomAnalyticRecord("org_2", true), + demo.GenerateRandomAnalyticRecord("org_2", true), + demo.GenerateRandomAnalyticRecord("org_2", true), + demo.GenerateRandomAnalyticRecord("org_2", true), + demo.GenerateRandomAnalyticRecord("org_2", true), } b.Helper() b.ReportAllocs() From de9ced0f42acc499ab5890e5dc4e3bb33a4efe40 Mon Sep 17 00:00:00 2001 From: Matias <83959431+mativm02@users.noreply.github.com> Date: Mon, 14 Nov 2022 12:28:44 -0300 Subject: [PATCH 016/102] [TT-2485] Support MongoDB Connection argument "readPreference" (#517) * adding mongo_session_consistency to pump * avoiding repeated lines * reverting changes out of ticket's scope * comment small change --- pumps/mongo.go | 15 +++++++++++ pumps/mongo_aggregate.go | 2 ++ pumps/mongo_aggregate_test.go | 51 +++++++++++++++++++++++++++++++++++ pumps/mongo_selective.go | 2 ++ pumps/mongo_selective_test.go | 42 +++++++++++++++++++++++++++++ pumps/mongo_test.go | 45 +++++++++++++++++++++++++++++++ 6 files changed, 157 insertions(+) diff --git a/pumps/mongo.go b/pumps/mongo.go index 4560bfc47..6b080ef6e 100644 --- a/pumps/mongo.go +++ b/pumps/mongo.go @@ -79,6 +79,8 @@ type BaseMongoConf struct { MongoDBType MongoType `json:"mongo_db_type" mapstructure:"mongo_db_type"` // Set to 
true to disable the default tyk index creation. OmitIndexCreation bool `json:"omit_index_creation" mapstructure:"omit_index_creation"` + // Set the consistency mode for the session, it defaults to `Strong`. The valid values are: strong, monotonic, eventual. + MongoSessionConsistency string `json:"mongo_session_consistency" mapstructure:"mongo_session_consistency"` } func (b *BaseMongoConf) GetBlurredURL() string { @@ -91,6 +93,17 @@ func (b *BaseMongoConf) GetBlurredURL() string { return blurredUrl } +func (b *BaseMongoConf) SetMongoConsistency(session *mgo.Session) { + switch b.MongoSessionConsistency { + case "eventual": + session.SetMode(mgo.Eventual, true) + case "monotonic": + session.SetMode(mgo.Monotonic, true) + default: + session.SetMode(mgo.Strong, true) + } +} + // @PumpConf Mongo type MongoConf struct { // TYKCONFIGEXPAND @@ -480,6 +493,8 @@ func (m *MongoPump) connect() { if err == nil && m.dbConf.MongoDBType == 0 { m.dbConf.MongoDBType = mongoType(m.dbSession) } + + m.dbConf.SetMongoConsistency(m.dbSession) } func (m *MongoPump) WriteData(ctx context.Context, data []interface{}) error { diff --git a/pumps/mongo_aggregate.go b/pumps/mongo_aggregate.go index 697fee734..1444b50c0 100644 --- a/pumps/mongo_aggregate.go +++ b/pumps/mongo_aggregate.go @@ -221,6 +221,8 @@ func (m *MongoAggregatePump) connect() { if err == nil && m.dbConf.MongoDBType == 0 { m.dbConf.MongoDBType = mongoType(m.dbSession) } + + m.dbConf.SetMongoConsistency(m.dbSession) } func (m *MongoAggregatePump) ensureIndexes(c *mgo.Collection) error { diff --git a/pumps/mongo_aggregate_test.go b/pumps/mongo_aggregate_test.go index 0da40c36b..57175be30 100644 --- a/pumps/mongo_aggregate_test.go +++ b/pumps/mongo_aggregate_test.go @@ -11,6 +11,7 @@ import ( "github.com/TykTechnologies/tyk-pump/analytics/demo" "github.com/sirupsen/logrus" "github.com/stretchr/testify/assert" + "gopkg.in/mgo.v2" "gopkg.in/mgo.v2/bson" ) @@ -527,3 +528,53 @@ func TestMongoAggregatePump_StoreAnalyticsPerMinute(t *testing.T) { // Checking if the aggregation time is set to 1. Doesn't matter if aggregation_time is equal to 45 or 1, the result should be always 1. 
assert.True(t, pmp1.dbConf.AggregationTime == 1) } + +func TestMongoAggregatePump_SessionConsistency(t *testing.T) { + cfgPump1 := make(map[string]interface{}) + cfgPump1["mongo_url"] = "mongodb://localhost:27017/tyk_analytics" + cfgPump1["ignore_aggregations"] = []string{"apikeys"} + cfgPump1["use_mixed_collection"] = true + cfgPump1["store_analytics_per_minute"] = false + + pmp1 := MongoAggregatePump{} + + tests := []struct { + testName string + sessionConsistency string + expectedSessionMode mgo.Mode + }{ + { + testName: "should set session mode to strong", + sessionConsistency: "strong", + expectedSessionMode: mgo.Strong, + }, + { + testName: "should set session mode to monotonic", + sessionConsistency: "monotonic", + expectedSessionMode: mgo.Monotonic, + }, + { + testName: "should set session mode to eventual", + sessionConsistency: "eventual", + expectedSessionMode: mgo.Eventual, + }, + { + testName: "should set session mode to strong by default", + sessionConsistency: "", + expectedSessionMode: mgo.Strong, + }, + } + + for _, test := range tests { + t.Run(test.testName, func(t *testing.T) { + cfgPump1["mongo_session_consistency"] = test.sessionConsistency + errInit1 := pmp1.Init(cfgPump1) + if errInit1 != nil { + t.Error(errInit1) + return + } + + assert.Equal(t, test.expectedSessionMode, pmp1.dbSession.Mode()) + }) + } +} diff --git a/pumps/mongo_selective.go b/pumps/mongo_selective.go index d55d6a29b..1d2f3a5df 100644 --- a/pumps/mongo_selective.go +++ b/pumps/mongo_selective.go @@ -120,6 +120,8 @@ func (m *MongoSelectivePump) connect() { if err == nil && m.dbConf.MongoDBType == 0 { m.dbConf.MongoDBType = mongoType(m.dbSession) } + + m.dbConf.SetMongoConsistency(m.dbSession) } func (m *MongoSelectivePump) ensureIndexes(c *mgo.Collection) error { diff --git a/pumps/mongo_selective_test.go b/pumps/mongo_selective_test.go index bad1d8952..a86bc2d18 100644 --- a/pumps/mongo_selective_test.go +++ b/pumps/mongo_selective_test.go @@ -5,6 +5,7 @@ import ( "github.com/TykTechnologies/tyk-pump/analytics" "github.com/stretchr/testify/assert" + "gopkg.in/mgo.v2" ) func TestMongoSelectivePump_AccumulateSet(t *testing.T) { @@ -96,3 +97,44 @@ func TestMongoSelectivePump_AccumulateSet(t *testing.T) { 1024, )) } + +func TestMongoSelectivePump_SessionConsistency(t *testing.T) { + mPump := MongoSelectivePump{} + conf := defaultSelectiveConf() + mPump.dbConf = &conf + + tests := []struct { + testName string + sessionConsistency string + expectedSessionMode mgo.Mode + }{ + { + testName: "should set session mode to strong", + sessionConsistency: "strong", + expectedSessionMode: mgo.Strong, + }, + { + testName: "should set session mode to monotonic", + sessionConsistency: "monotonic", + expectedSessionMode: mgo.Monotonic, + }, + { + testName: "should set session mode to eventual", + sessionConsistency: "eventual", + expectedSessionMode: mgo.Eventual, + }, + { + testName: "should set session mode to strong by default", + sessionConsistency: "", + expectedSessionMode: mgo.Strong, + }, + } + + for _, test := range tests { + t.Run(test.testName, func(t *testing.T) { + mPump.dbConf.MongoSessionConsistency = test.sessionConsistency + mPump.connect() + assert.Equal(t, test.expectedSessionMode, mPump.dbSession.Mode()) + }) + } +} diff --git a/pumps/mongo_test.go b/pumps/mongo_test.go index b3eea0a1c..e7eebd95e 100644 --- a/pumps/mongo_test.go +++ b/pumps/mongo_test.go @@ -7,6 +7,7 @@ import ( "testing" "github.com/stretchr/testify/assert" + "gopkg.in/mgo.v2" "github.com/TykTechnologies/tyk-pump/analytics" ) @@ 
-374,3 +375,47 @@ func TestMongoPump_AccumulateSetIgnoreDocSize(t *testing.T) { } } } + +func TestMongoPump_SessionConsistency(t *testing.T) { + pump := newPump() + conf := defaultConf() + + mPump, ok := pump.(*MongoPump) + assert.True(t, ok) + mPump.dbConf = &conf + + tests := []struct { + testName string + sessionConsistency string + expectedSessionMode mgo.Mode + }{ + { + testName: "should set session mode to strong", + sessionConsistency: "strong", + expectedSessionMode: mgo.Strong, + }, + { + testName: "should set session mode to monotonic", + sessionConsistency: "monotonic", + expectedSessionMode: mgo.Monotonic, + }, + { + testName: "should set session mode to eventual", + sessionConsistency: "eventual", + expectedSessionMode: mgo.Eventual, + }, + { + testName: "should set session mode to strong by default", + sessionConsistency: "", + expectedSessionMode: mgo.Strong, + }, + } + + for _, test := range tests { + t.Run(test.testName, func(t *testing.T) { + mPump.dbConf.MongoSessionConsistency = test.sessionConsistency + mPump.connect() + assert.Equal(t, test.expectedSessionMode, mPump.dbSession.Mode()) + }) + } +} From 776b9fa0cccfb477a926a6d265fb2ff131e4e99a Mon Sep 17 00:00:00 2001 From: Tomas Buchaillot Date: Tue, 15 Nov 2022 11:37:09 +0100 Subject: [PATCH 017/102] TT-6278 Mongo blur URL (#518) * changing mongo blurURL regex to a more flex one * Adding another tc --- pumps/mongo.go | 4 +-- pumps/mongo_test.go | 67 +++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 69 insertions(+), 2 deletions(-) diff --git a/pumps/mongo.go b/pumps/mongo.go index 6b080ef6e..58e5daa2b 100644 --- a/pumps/mongo.go +++ b/pumps/mongo.go @@ -85,8 +85,8 @@ type BaseMongoConf struct { func (b *BaseMongoConf) GetBlurredURL() string { // mongo uri match with regex ^(mongodb:(?:\/{2})?)((\w+?):(\w+?)@|:?@?)(\S+?):(\d+)(\/(\S+?))?(\?replicaSet=(\S+?))?$ - // but we need only a segment, so regex explanation: https://regex101.com/r/E34wQO/1 - regex := `^(mongodb:(?:\/{2})?)((\w+?):(\w+?)@|:?@?)` + // but we need only a segment, so regex explanation: https://regex101.com/r/8Uzwtw/1 + regex := `^(mongodb:(?:\/{2})?)((...+?):(...+?)@)` var re = regexp.MustCompile(regex) blurredUrl := re.ReplaceAllString(b.MongoURL, "***:***@") diff --git a/pumps/mongo_test.go b/pumps/mongo_test.go index e7eebd95e..be60ab197 100644 --- a/pumps/mongo_test.go +++ b/pumps/mongo_test.go @@ -376,6 +376,73 @@ func TestMongoPump_AccumulateSetIgnoreDocSize(t *testing.T) { } } +func TestGetBlurredURL(t *testing.T) { + tcs := []struct { + testName string + givenURL string + expectedBlurredURL string + }{ + { + testName: "mongodb:username:password@", + givenURL: "mongodb:username:password@localhost:27107/mydatabasename", + expectedBlurredURL: "***:***@localhost:27107/mydatabasename", + }, + { + testName: "no user or password", + givenURL: "mongodb://localhost:27017/test", + expectedBlurredURL: "mongodb://localhost:27017/test", + }, + { + testName: "no mongodb:// but user and password", + givenURL: "mongodb:username:password@localhost:27107/mydatabasename", + expectedBlurredURL: "***:***@localhost:27107/mydatabasename", + }, + + { + testName: "complex url", + givenURL: "mongodb://user:pass@mongo-HZNP-0.j.com,mongo-HZNP-1.j.com,mongo-HZNP-2.j.com/tyk?replicaSet=RS1", + expectedBlurredURL: "***:***@mongo-HZNP-0.j.com,mongo-HZNP-1.j.com,mongo-HZNP-2.j.com/tyk?replicaSet=RS1", + }, + { + testName: "complex password username", + givenURL: "mongodb://myDBReader:D1fficultP%40ssw0rd@mongodb0.example.com:27017/?authSource=admin", + 
expectedBlurredURL: "***:***@mongodb0.example.com:27017/?authSource=admin", + }, + + { + testName: "cluster", + givenURL: "mongodb://mongos0.example.com:27017,mongos1.example.com:27017,mongos2.example.com:27017", + expectedBlurredURL: "mongodb://mongos0.example.com:27017,mongos1.example.com:27017,mongos2.example.com:27017", + }, + + { + testName: "cluster+complex password username", + givenURL: "mongodb://us3r-n4m!:p4_ssw:0rd@mongo-HZNP-0.j.com,mongo-HZNP-1.j.com,mongo-HZNP-2.j.com/tyk?replicaSet=RS1", + expectedBlurredURL: "***:***@mongo-HZNP-0.j.com,mongo-HZNP-1.j.com,mongo-HZNP-2.j.com/tyk?replicaSet=RS1", + }, + { + testName: "CosmoDB", + givenURL: "mongodb://contoso123:0Fc3IolnL12312asdfawejunASDFasdfYXX2t8a97kghVcUzcDv98hawelufhawefafnoQRGwNj2nMPL1Y9qsIr9Srdw==@contoso123.documents.azure.com:10255/mydatabase?ssl=true", + expectedBlurredURL: "***:***@contoso123.documents.azure.com:10255/mydatabase?ssl=true", + }, + { + testName: "DocDB", + givenURL: "mongodb://UserName:Password@sample-cluster-instance.cluster-corlsfccjozr.us-east-1.docdb.amazonaws.com:27017?replicaSet=rs0&ssl_ca_certs=rds-combined-ca-bundle.pem", + expectedBlurredURL: "***:***@sample-cluster-instance.cluster-corlsfccjozr.us-east-1.docdb.amazonaws.com:27017?replicaSet=rs0&ssl_ca_certs=rds-combined-ca-bundle.pem", + }, + } + + for _, tc := range tcs { + t.Run(tc.testName, func(t *testing.T) { + conf := BaseMongoConf{ + MongoURL: tc.givenURL, + } + actualBlurredURL := conf.GetBlurredURL() + assert.Equal(t, tc.expectedBlurredURL, actualBlurredURL) + }) + } +} + func TestMongoPump_SessionConsistency(t *testing.T) { pump := newPump() conf := defaultConf() From 6cb1edcf596d43aca3950f09400492f51b2dfd2c Mon Sep 17 00:00:00 2001 From: Tomas Buchaillot Date: Tue, 15 Nov 2022 16:06:53 +0100 Subject: [PATCH 018/102] TT-6834 Timeout documentation (#520) * changing timeout documentation * removing pmp_type from example * changing timeout wording * changing wording * changing wording --- config.go | 30 ++++++++++++------------------ 1 file changed, 12 insertions(+), 18 deletions(-) diff --git a/config.go b/config.go index 31eff46b2..77f3ef4d2 100644 --- a/config.go +++ b/config.go @@ -54,31 +54,25 @@ type PumpConfig struct { // } // ``` Filters analytics.AnalyticsFilters `json:"filters"` - // You can configure a different timeout for each pump with the configuration option `timeout`. - // Its default value is `0` seconds, which means that the pump will wait for the writing - // operation forever. + // By default, a pump will wait forever for each write operation to complete; you can configure an optional timeout by setting the configuration option `timeout`. + // If you have deployed multiple pumps, then you can configure each timeout independently. The timeout is in seconds and defaults to 0. // - // An example of this configuration would be: + // The timeout is configured within the main pump config as shown here; note that this example would configure a 5 second timeout: // ```{.json} - // "mongo": { - // "type": "mongo", + // "pump_name": { + // ... 
// "timeout":5, - // "meta": { - // "collection_name": "tyk_analytics", - // "mongo_url": "mongodb://username:password@{hostname:port},{hostname:port}/{db_name}" - // } + // "meta": {...} // } // ``` // - // In case that any pump doesn't have a configured timeout, and it takes more seconds to write - // than the value configured for the purge loop in the `purge_delay` config option, you will - // see the following warning message: `Pump PMP_NAME is taking more time than the value - // configured of purge_delay. You should try to set a timeout for this pump.`. + // Tyk will inform you if the pump's write operation is taking longer than the purging loop (configured via `purge_delay`) as this will mean that data is purged before being written to the target data sink. + // + // If there is no timeout configured and pump's write operation is taking longer than the purging loop, the following warning log will be generated: + // `Pump {pump_name} is taking more time than the value configured of purge_delay. You should try to set a timeout for this pump.` // - // In case that you have a configured timeout, but it still takes more seconds to write than - // the value configured for the purge loop in the `purge_delay` config option, you will see the - // following warning message: `Pump PMP_NAME is taking more time than the value configured of - // purge_delay. You should try lowering the timeout configured for this pump.`. + // If there is a timeout configured, but pump's write operation is still taking longer than the purging loop, the following warning log will be generated: + // `Pump {pump_name} is taking more time than the value configured of purge_delay. You should try lowering the timeout configured for this pump.`. Timeout int `json:"timeout"` // Setting this to true will avoid writing raw_request and raw_response fields for each request // in pumps. Defaults to `false`. From 7b67cd378d36bcbf3a2d4b45e8c8fc1e66d52c98 Mon Sep 17 00:00:00 2001 From: Long Le Date: Thu, 17 Nov 2022 12:07:42 -0500 Subject: [PATCH 019/102] Added table for tyk analytics record schema (#527) * Added table for tyk analytics record schema Provide more context for each key-pair value fields in our tyk_analytics record * Cleaned up formatting and modified description Reordered the items to make the flow more fluid and modified apikey description and example --- README.md | 43 ++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 40 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index 551a717c6..29a7abf15 100644 --- a/README.md +++ b/README.md @@ -6,10 +6,47 @@ Tyk Pump is a pluggable analytics purger to move Analytics generated by your Tyk # Table of Contents -- 1. [Base Configuration](#configuration) +- 1. [Tyk Analytics Schema](#tyk-analytics-schema) - 2. [Supported Pumps](#pumps--back-ends-supported) -- 3. [Base Pump Configurations](#base-pump-configurations) -- 4. [Developer & Contributer Guide](#compiling--testing) +- 3. [Base Configuration](#configuration) +- 4. [Base Pump Configurations](#base-pump-configurations) +- 5. [Developer & Contributer Guide](#compiling--testing) + +# Tyk Analytics Schema + +The table below provides details on the fields within each `tyk_analytics` record. + +| Analytics Data Field | Description | Remarks | Example | +| :--- | :--- | :--- | :--- | +| `Method` | Request method | | `GET`, `POST` | +| `Host` | Request `Host` header | Includes host and optional port number of the server to which the request was sent. 
| `tyk.io`, or `tyk.io:8080` if port is included | +| `Path` | Request path | Displayed in decoded form. | `/foo/bar` for `/foo%2Fbar` or `/foo/bar` | +| `RawPath` | Request path | Same value as `Path`. Does not provide the raw encoded path. | `/foo/bar` for `/foo%2Fbar` or `/foo/bar` | +| `ContentLength` | Request `Content-Length` header | The number of bytes in the request body. | `10` for request body `0123456789` | +| `UserAgent` | Request `User-Agent` header | | `curl/7.86.0` | +| `Day` | Request day | Based on `TimeStamp` field. | `16` for `2022-11-16T03:01:54Z` | +| `Month` | Request month | Based on `TimeStamp` field. | `11` for `2022-11-16T03:01:54Z` | +| `Year` | Request year | Based on `TimeStamp` field. | `2022` for `2022-11-16T03:01:54Z` | +| `Hour` | Request hour | Based on `TimeStamp` field. | `3` for `2022-11-16T03:01:54Z` | +| `ResponseCode` | Response code | Only contains the integer element of the response code. Can be generated by either the gateway or upstream server, depending on how the request is handled. | `200` for `200 OK` | +| `APIKey` | `Request authentication key` | Authentication key, as provided in request. If no API key is provided then gateway will substitute a default value. | Unhashed `auth_key`, hashed `6129dc1e8b64c6b4`, or `00000000` if no authentication provided. | +| `TimeStamp` | Request timestamp | Generated by the gateway, based on the time it receives the request from the client. | `2022-11-16T03:01:54.648+00:00` | +| `APIVersion` | Version of API Definition requested | Based on version configuration of context API definition. If API is unversioned then value is "Not Versioned". | Could be an alphanumeric value such as `1` or `b`. Is `Not Versioned` if not versioned. | +| `APIName` | Name of API Definition requested | | `Foo API` | +| `APIID` | Id of API Definition requested | | `727dad853a8a45f64ab981154d1ffdad` | +| `OrgID` | Organisation Id of API Definition requested | | `5e9d9544a1dcd60001d0ed20` | +| `OauthID` | Id of OAuth client | Value is empty string if not using OAuth, or OAuth client not present. | `my-oauth-client-id` | +| `RequestTime` | Duration of upstream roundtrip | Equal to value of `Latency.Total` field. | `3` for a 3ms roundtrip | +| `RawRequest` | Raw HTTP request | Base64 encoded copy of the request sent from the gateway to the upstream server. | `R0VUIC9nZXQgSFRUUC8xLjEKSG9zdDogdHlrLmlv` | +| `RawResponse` | Raw HTTP response | Base64 encoded copy of the response sent from the gateway to the client. | `SFRUUC8xLjEgMjAwIE9LCkNvbnRlbnQtTGVuZ3RoOiAxOQpEYXRlOiBXZWQsIDE2IE5vdiAyMDIyIDA2OjIxOjE2IEdNVApTZXJ2ZXI6IGd1bmljb3JuLzE5LjkuMAoKewogICJmb28iOiAiYmFyIgp9Cg==` | +| `IPAddress` | Client IP address | Taken from either `X-Real-IP` or `X-Forwarded-For` request headers, if set. Otherwise, determined by gateway based on request. | `172.18.0.1` | +| `Geo` | Client geolocation data | Calculated using MaxMind database, based on client IP address. | `{"country":{"isocode":"SG"},"city":{"geonameid":0,"names":{}},"location":{"latitude":0,"longitude":0,"timezone":""}}` | +| `Network` | Network statistics | Not currently used. | N/A | +| `Latency` | Latency statistics | Contains two fields; `upstream` is the roundtrip duration between the gateway sending the request to the upstream server and it receiving a response. `total` is the `upstream` value plus additional gateway-side functionality such as processing analytics data. 
| `{"total":3,"upstream":3}` | +| `Tags` | Session context tags | Can contain many tags which refer to many things, such as the gateway, API key, organisation, API definition etc. | `["key-00000000","org-5e9d9544a1dcd60001d0ed20","api-accbdd1b89e84ec97f4f16d4e3197d5c"]` | +| `Alias` | Session alias | Alias of the context authenticated identity. Blank if no alias set or request is unauthenticated. | `my-key-alias` | +| `TrackPath` | Tracked endpoint flag | Value is `true` if the requested endpoint is configured to be tracked, otherwise `false`. | `true` or `false` | +| `ExpireAt` | Future expiry date | Can be used to implement automated data expiry, if supported by storage. | `2022-11-23T07:26:25.762+00:00` | # Pumps / Back ends supported: From 5a25a2e6718a9499424a88df56baa8af9e2b8896 Mon Sep 17 00:00:00 2001 From: Matias <83959431+mativm02@users.noreply.github.com> Date: Fri, 18 Nov 2022 11:04:05 -0300 Subject: [PATCH 020/102] [TT-5429] Tyk Pump Ignore Fields (#519) * allowing users to ignore fields * docs + linting * refactoring: from reflect library to structs library * avoiding unncessary double error message --- README.md | 15 ++++++++++ analytics/analytics.go | 22 ++++++++++++++ analytics/analytics_test.go | 60 +++++++++++++++++++++++++++++++++++++ config.go | 5 ++++ main.go | 8 +++-- main_test.go | 49 ++++++++++++++++++++++++++++++ pumps/common.go | 9 ++++++ pumps/pump.go | 2 ++ 8 files changed, 168 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 29a7abf15..77ab9f592 100644 --- a/README.md +++ b/README.md @@ -1241,6 +1241,21 @@ This can also be set at a pump level. For example: } ``` +### Ignore Fields + +`ignore_fields` defines a list of analytics fields that will be ignored when writing to the pump. This can be used to avoid writing sensitive information to the Database, or data that you don't really need to have. +Fields must be written using JSON tags. For example: + +```{.json} +"csv": { + "type": "csv", + "ignore_fields":["api_id","api_version"], + "meta": { + "csv_dir": "./bar" + } +} +``` + ## Compiling & Testing 1. 
Download dependent packages: diff --git a/analytics/analytics.go b/analytics/analytics.go index d4e277cfb..60acc6b28 100644 --- a/analytics/analytics.go +++ b/analytics/analytics.go @@ -10,6 +10,7 @@ import ( "sync/atomic" "time" + "github.com/fatih/structs" "github.com/oschwald/maxminddb-golang" "google.golang.org/protobuf/types/known/timestamppb" @@ -348,3 +349,24 @@ func (a *AnalyticsRecord) IsGraphRecord() bool { return false } + +func (a *AnalyticsRecord) RemoveIgnoredFields(ignoreFields []string) { + for _, fieldToIgnore := range ignoreFields { + found := false + for _, field := range structs.Fields(a) { + fieldTag := field.Tag("json") + if fieldTag == fieldToIgnore { + // setting field to default value + err := field.Zero() + if err != nil { + log.Error("Unable to ignore "+field.Name()+" field: ", err) + } + found = true + continue + } + } + if !found { + log.Error("Error looking for field + ", fieldToIgnore+" in AnalyticsRecord struct: not found.") + } + } +} diff --git a/analytics/analytics_test.go b/analytics/analytics_test.go index 68941824e..86b49a79a 100644 --- a/analytics/analytics_test.go +++ b/analytics/analytics_test.go @@ -26,3 +26,63 @@ func TestAnalyticsRecord_IsGraphRecord(t *testing.T) { assert.True(t, record.IsGraphRecord()) }) } + +func TestAnalyticsRecord_RemoveIgnoredFields(t *testing.T) { + defaultRecord := AnalyticsRecord{ + APIID: "api123", + APIKey: "api_key_123", + OrgID: "org_123", + APIName: "api_name_123", + APIVersion: "v1", + } + + recordWithoutAPIID := defaultRecord + recordWithoutAPIID.APIID = "" + + recordWithoutAPIKeyAndAPIID := defaultRecord + recordWithoutAPIKeyAndAPIID.APIKey = "" + recordWithoutAPIKeyAndAPIID.APIID = "" + + type args struct { + ignoreFields []string + } + tests := []struct { + name string + record AnalyticsRecord + expectedRecord AnalyticsRecord + args args + }{ + { + name: "should remove ignored APIID field", + record: defaultRecord, + expectedRecord: recordWithoutAPIID, + args: args{ + ignoreFields: []string{"api_id"}, + }, + }, + { + name: "should remove ignored APIID and APIKey fields", + record: defaultRecord, + expectedRecord: recordWithoutAPIKeyAndAPIID, + args: args{ + ignoreFields: []string{"api_id", "api_key"}, + }, + }, + { + name: "should remove valid fields and ignore invalid fields", + record: defaultRecord, + expectedRecord: recordWithoutAPIKeyAndAPIID, + args: args{ + ignoreFields: []string{"api_id", "api_key", "invalid_field"}, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + tt.record.RemoveIgnoredFields(tt.args.ignoreFields) + + assert.Equal(t, tt.expectedRecord, tt.record) + }) + } +} diff --git a/config.go b/config.go index 77f3ef4d2..c1b7da78f 100644 --- a/config.go +++ b/config.go @@ -91,6 +91,11 @@ type PumpConfig struct { // ``` MaxRecordSize int `json:"max_record_size"` Meta map[string]interface{} `json:"meta"` // TODO: convert this to json.RawMessage and use regular json.Unmarshal + // IgnoreFields defines a list of analytics fields that will be ignored when writing to the pump. + // This can be used to avoid writing sensitive information to the Database, or data that you don't really need to have. + // The field names must be the same as the JSON tags of the analytics record fields. + // For example: `["api_key", "api_version"]`. 
+ IgnoreFields []string `json:"ignore_fields"` } type UptimeConf struct { diff --git a/main.go b/main.go index 7c0ae6cb9..2e29f89bb 100644 --- a/main.go +++ b/main.go @@ -145,6 +145,7 @@ func initialisePumps() { thisPmp.SetTimeout(pmp.Timeout) thisPmp.SetOmitDetailedRecording(pmp.OmitDetailedRecording) thisPmp.SetMaxRecordSize(pmp.MaxRecordSize) + thisPmp.SetIgnoreFields(pmp.IgnoreFields) initErr := thisPmp.Init(pmp.Meta) if initErr != nil { log.WithField("pump", thisPmp.GetName()).Error("Pump init error (skipping): ", initErr) @@ -297,7 +298,8 @@ func filterData(pump pumps.Pump, keys []interface{}) []interface{} { shouldTrim := SystemConfig.MaxRecordSize != 0 || pump.GetMaxRecordSize() != 0 filters := pump.GetFilters() - if !filters.HasFilter() && !pump.GetOmitDetailedRecording() && !shouldTrim { + ignoreFields := pump.GetIgnoreFields() + if !filters.HasFilter() && !pump.GetOmitDetailedRecording() && !shouldTrim && len(ignoreFields) == 0 { return keys } @@ -323,6 +325,9 @@ func filterData(pump pumps.Pump, keys []interface{}) []interface{} { if filters.ShouldFilter(decoded) { continue } + if len(ignoreFields) > 0 { + decoded.RemoveIgnoredFields(ignoreFields) + } filteredKeys[newLenght] = decoded newLenght++ } @@ -365,7 +370,6 @@ func execPumpWriting(wg *sync.WaitGroup, pmp pumps.Pump, keys *[]interface{}, pu go func(ch chan error, ctx context.Context, pmp pumps.Pump, keys *[]interface{}) { filteredKeys := filterData(pmp, *keys) - ch <- pmp.WriteData(ctx, filteredKeys) }(ch, ctx, pmp, keys) diff --git a/main_test.go b/main_test.go index 43c1c4fa6..da0f22699 100644 --- a/main_test.go +++ b/main_test.go @@ -272,3 +272,52 @@ func TestShutdown(t *testing.T) { t.Fatal("MockedPump should have turned off") } } + +func TestIgnoreFieldsFilterData(t *testing.T) { + keys := make([]interface{}, 1) + record := analytics.AnalyticsRecord{APIID: "api111", RawResponse: "test", RawRequest: "test", OrgID: "321", ResponseCode: 200, RequestTime: 123} + keys[0] = record + + recordWithoutAPIID := record + recordWithoutAPIID.APIID = "" + + recordWithoutAPIIDAndAPIName := record + recordWithoutAPIIDAndAPIName.APIID = "" + + tcs := []struct { + expectedRecord analytics.AnalyticsRecord + testName string + ignoreFields []string + }{ + { + testName: "ignore 1 field", + ignoreFields: []string{"api_id"}, + expectedRecord: recordWithoutAPIID, + }, + { + testName: "ignore 2 fields", + ignoreFields: []string{"api_id", "api_name"}, + expectedRecord: recordWithoutAPIIDAndAPIName, + }, + { + testName: "invalid field - log error must be shown", + ignoreFields: []string{"api_id", "api_name", "invalid_field"}, + expectedRecord: recordWithoutAPIIDAndAPIName, + }, + } + + for _, tc := range tcs { + t.Run(tc.testName, func(t *testing.T) { + mockedPump := &MockedPump{} + mockedPump.SetIgnoreFields(tc.ignoreFields) + + filteredKeys := filterData(mockedPump, keys) + + for _, key := range filteredKeys { + record, ok := key.(analytics.AnalyticsRecord) + assert.True(t, ok) + assert.Equal(t, tc.expectedRecord, record) + } + }) + } +} diff --git a/pumps/common.go b/pumps/common.go index fbc5c6f9f..e3cd96514 100644 --- a/pumps/common.go +++ b/pumps/common.go @@ -11,6 +11,7 @@ type CommonPumpConfig struct { maxRecordSize int OmitDetailedRecording bool log *logrus.Entry + ignoreFields []string } func (p *CommonPumpConfig) SetFilters(filters analytics.AnalyticsFilters) { @@ -53,3 +54,11 @@ func (p *CommonPumpConfig) GetMaxRecordSize() int { func (p *CommonPumpConfig) SetLogLevel(level logrus.Level) { p.log.Level = level } + +func (p 
*CommonPumpConfig) SetIgnoreFields(fields []string) { + p.ignoreFields = fields +} + +func (p *CommonPumpConfig) GetIgnoreFields() []string { + return p.ignoreFields +} diff --git a/pumps/pump.go b/pumps/pump.go index 31cf38d51..d18e8d624 100644 --- a/pumps/pump.go +++ b/pumps/pump.go @@ -29,6 +29,8 @@ type Pump interface { SetMaxRecordSize(size int) GetMaxRecordSize() int SetLogLevel(logrus.Level) + SetIgnoreFields([]string) + GetIgnoreFields() []string } type UptimePump interface { From d5e9afb5e39bbe1793c5772e550cf7448528a536 Mon Sep 17 00:00:00 2001 From: Sredny M Date: Fri, 18 Nov 2022 10:21:19 -0500 Subject: [PATCH 021/102] updted version of TykTechnologies/gorm (#528) --- go.mod | 4 ++-- go.sum | 10 ++++++---- 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/go.mod b/go.mod index 7991791da..430e6d277 100644 --- a/go.mod +++ b/go.mod @@ -59,8 +59,8 @@ require ( gorm.io/driver/mysql v1.0.3 gorm.io/driver/postgres v1.0.5 gorm.io/driver/sqlite v1.1.3 - gorm.io/gorm v1.20.12 + gorm.io/gorm v1.21.10 ) //replace gorm.io/gorm => ../gorm -replace gorm.io/gorm => github.com/TykTechnologies/gorm v1.20.7-0.20210409171139-b5c340f85ed0 +replace gorm.io/gorm => github.com/TykTechnologies/gorm v1.20.7-0.20210910090358-06148e82dc85 diff --git a/go.sum b/go.sum index 21b20d942..80e4e01f2 100644 --- a/go.sum +++ b/go.sum @@ -49,8 +49,8 @@ github.com/TykTechnologies/drl v0.0.0-20190905191955-cc541aa8e3e1/go.mod h1:dLW6 github.com/TykTechnologies/goautosocket v0.0.0-20190430121222-97bfa5e7e481/go.mod h1:CtF8OunV123VfKa8Z9kKcIPHgcd67hSAwFMLlS7FvS4= github.com/TykTechnologies/gojsonschema v0.0.0-20170222154038-dcb3e4bb7990 h1:CJRTgg13M3vJG9S7k7kpnvDRMGMywm5OsN6eUE8VwJE= github.com/TykTechnologies/gojsonschema v0.0.0-20170222154038-dcb3e4bb7990/go.mod h1:SQT0NBrY4/pMikBgwFIrWCjcHBxg015Y8is0kAnMtug= -github.com/TykTechnologies/gorm v1.20.7-0.20210409171139-b5c340f85ed0 h1:Q08OD/xvO1D3rzdtlwCMsPB4XOKeHMVw8FifVWEYHiM= -github.com/TykTechnologies/gorm v1.20.7-0.20210409171139-b5c340f85ed0/go.mod h1:l/HFwXrJOl2N+sth1mqa2cd0Gx1Cqb1FRYBLhY1TIJw= +github.com/TykTechnologies/gorm v1.20.7-0.20210910090358-06148e82dc85 h1:16hcEoY9Av84ykdGGAXdVZo7kY5r00247jHlxcnLP60= +github.com/TykTechnologies/gorm v1.20.7-0.20210910090358-06148e82dc85/go.mod h1:hz0d/E0QBTYarOnYtdcNnBWN/NYxVMP7nZNDT6E/fFM= github.com/TykTechnologies/gorpc v0.0.0-20190515174534-b9c10befc5f4 h1:hTjM5Uubg3w9VjNc8WjrDrLiGX14Ih8/ItyXEn2tNUs= github.com/TykTechnologies/gorpc v0.0.0-20190515174534-b9c10befc5f4/go.mod h1:vqhQRhIHefD4jdFo55j+m0vD5NMjx2liq/ubnshQpaY= github.com/TykTechnologies/goverify v0.0.0-20160822133757-7ccc57452ade/go.mod h1:mkS8jKcz8otdfEXhJs1QQ/DKoIY1NFFsRPKS0RwQENI= @@ -388,6 +388,8 @@ github.com/hashicorp/vault/api v1.0.4/go.mod h1:gDcqh3WGcR1cpF5AJz/B1UFheUEneMoI github.com/hashicorp/vault/sdk v0.1.13/go.mod h1:B+hVj7TpuQY1Y/GPbCpffmgd+tSEwvhkWnjtSYCaS2M= github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= +github.com/helloeave/json v1.15.3 h1:roUxUEGhsSvhuhi80c4qmLiW633d5uf0mkzUGzBMfX8= +github.com/helloeave/json v1.15.3/go.mod h1:uTHhuUsgnrpm9cc7Gi3tfIUwgf1dq/7+uLfpUFLBFEQ= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/huandu/xstrings v1.2.0/go.mod h1:DvyZB1rfVYsBIigL8HwpZgxHwXozlTgGqn63UyNX5k4= github.com/huandu/xstrings v1.2.1 h1:v6IdmkCnDhJG/S0ivr58PeIfg+tyhqQYy4YsCsQ0Pdc= @@ -482,8 
+484,8 @@ github.com/jensneuse/pipeline v0.0.0-20200117120358-9fb4de085cd6/go.mod h1:Usfza github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E= github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc= -github.com/jinzhu/now v1.1.1 h1:g39TucaRWyV3dwDO++eEc6qf8TVIQ/Da48WmqjZ3i7E= -github.com/jinzhu/now v1.1.1/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8= +github.com/jinzhu/now v1.1.2 h1:eVKgfIdy9b6zbWBMgFpfDPoAMifwSZagU9HmEU6zgiI= +github.com/jinzhu/now v1.1.2/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= From ca449213bb1e81246714614a415fefd0849aebc7 Mon Sep 17 00:00:00 2001 From: Matias <83959431+mativm02@users.noreply.github.com> Date: Wed, 23 Nov 2022 10:22:07 -0300 Subject: [PATCH 022/102] [TT-5426] Updating timestamp of every record in Demo Mode (#529) * updating timestamp of every record * fixing tests not working * modifying ts to nextTimestamp --- analytics/demo/demo.go | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/analytics/demo/demo.go b/analytics/demo/demo.go index 083b131b6..1be3b91e3 100644 --- a/analytics/demo/demo.go +++ b/analytics/demo/demo.go @@ -165,12 +165,16 @@ func GenerateDemoData(days, recordsPerHour int, orgID string, trackPath bool, wr } else { volume = randomInRange(300, 500) } + timeDifference := 3600 / volume // this is the difference in seconds between each record + nextTimestamp := ts // this is the timestamp of the next record for i := 0; i < volume; i++ { r := GenerateRandomAnalyticRecord(orgID, trackPath) - r.Day = ts.Day() - r.Month = ts.Month() - r.Year = ts.Year() - r.Hour = ts.Hour() + r.Day = nextTimestamp.Day() + r.Month = nextTimestamp.Month() + r.Year = nextTimestamp.Year() + r.Hour = nextTimestamp.Hour() + r.TimeStamp = nextTimestamp + nextTimestamp = nextTimestamp.Add(time.Second * time.Duration(timeDifference)) set = append(set, r) } From 665b72a014f09ccbe4693c17cc2cc6b17090ec59 Mon Sep 17 00:00:00 2001 From: Matias <83959431+mativm02@users.noreply.github.com> Date: Thu, 24 Nov 2022 13:40:05 -0300 Subject: [PATCH 023/102] changing from user's location to UTC by default (#534) --- analytics/demo/demo.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/analytics/demo/demo.go b/analytics/demo/demo.go index 1be3b91e3..e4bd08721 100644 --- a/analytics/demo/demo.go +++ b/analytics/demo/demo.go @@ -151,7 +151,7 @@ func country() string { func GenerateDemoData(days, recordsPerHour int, orgID string, trackPath bool, writer func([]interface{}, *health.Job, time.Time, int)) { t := time.Now() - start := time.Date(t.Year(), t.Month(), t.Day(), 0, 0, 0, 0, t.Location()) + start := time.Date(t.Year(), t.Month(), t.Day(), 0, 0, 0, 0, time.UTC) count := 0 for d := 0; d < days; d++ { for h := 0; h < 24; h++ { From 5fd437e3c71937421cd81ce82687d30a4ebb9720 Mon Sep 17 00:00:00 2001 From: Tomas Buchaillot Date: Thu, 24 Nov 2022 18:25:14 +0100 Subject: [PATCH 024/102] Fix typo in timestream pump docs (#524) * typo in timestream pump * fixing NewTimestreamWriter func name --- pumps/timestream.go | 6 +++--- 1 file changed, 3 
insertions(+), 3 deletions(-) diff --git a/pumps/timestream.go b/pumps/timestream.go index 055015003..7e85bcdb0 100644 --- a/pumps/timestream.go +++ b/pumps/timestream.go @@ -42,7 +42,7 @@ const ( timestreamMaxRecordsCount = 100 //https://docs.aws.amazon.com/timestream/latest/developerguide/API_WriteRecords.html ) -// @PumpConf Timesteram +// @PumpConf Timestream type TimestreamPumpConf struct { EnvPrefix string `mapstructure:"meta_env_prefix"` //The aws region that contains the timestream database @@ -102,7 +102,7 @@ func (t *TimestreamPump) Init(config interface{}) error { return errors.New("missing \"measures\" or \"dimensions\" in pump configuration") } - t.client, err = t.NewTimesteramWriter() + t.client, err = t.NewTimestreamWriter() if err != nil { t.log.Fatal("Failed to create timestream client: ", err) return err @@ -409,7 +409,7 @@ func (t *TimestreamPump) GetAnalyticsRecordDimensions(decoded *analytics.Analyti return dimensions } -func (t *TimestreamPump) NewTimesteramWriter() (c *timestreamwrite.Client, err error) { +func (t *TimestreamPump) NewTimestreamWriter() (c *timestreamwrite.Client, err error) { timeout := t.CommonPumpConfig.timeout * int(time.Second) if timeout <= 0 { timeout = 30 * int(time.Second) From 92be2bd9847abc95cbb71a1002ae1e79303a8fe5 Mon Sep 17 00:00:00 2001 From: Esteban Ricardo Mirizio Date: Thu, 24 Nov 2022 17:49:36 -0300 Subject: [PATCH 025/102] Update upgradefrom attribute (#532) * Update upgradefrom attribute * Update upgradefrom attribute * Update upgradefrom attribute Co-authored-by: Gromit --- .github/dependabot.yml | 2 +- .github/workflows/del-env.yml | 2 +- .github/workflows/release.yml | 6 +++--- ci/Dockerfile.std | 2 +- ci/aws/byol.pkr.hcl | 2 +- ci/goreleaser/goreleaser-el7.yml | 2 +- ci/goreleaser/goreleaser.yml | 2 +- ci/install/before_install.sh | 2 +- ci/install/post_install.sh | 2 +- ci/install/post_remove.sh | 2 +- ci/install/post_trans.sh | 2 +- ci/terraform/outputs.tf | 2 +- 12 files changed, 14 insertions(+), 14 deletions(-) diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 04c636919..89f3bb512 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -1,6 +1,6 @@ # Generated by: gromit policy -# Generated on: Mon Oct 10 21:30:09 UTC 2022 +# Generated on: Wed Nov 23 11:38:15 UTC 2022 version: 2 updates: diff --git a/.github/workflows/del-env.yml b/.github/workflows/del-env.yml index b7f57244b..ea00f4d4d 100644 --- a/.github/workflows/del-env.yml +++ b/.github/workflows/del-env.yml @@ -1,6 +1,6 @@ # Generated by: gromit policy -# Generated on: Mon Oct 10 21:30:09 UTC 2022 +# Generated on: Wed Nov 23 11:38:15 UTC 2022 name: Retiring dev env diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 9db6234d9..30d435418 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -1,6 +1,6 @@ # Generated by: gromit policy -# Generated on: Mon Oct 10 21:30:09 UTC 2022 +# Generated on: Wed Nov 23 11:38:15 UTC 2022 # Distribution channels covered by this workflow @@ -257,7 +257,7 @@ jobs: ARG TARGETARCH COPY tyk-pump*_${TARGETARCH}.deb /tyk-pump.deb RUN apt-get update && apt-get install -y curl - RUN curl -fsSL https://packagecloud.io/install/repositories/tyk/tyk-pump/script.deb.sh | bash && apt-get install -y tyk-pump=1.4.0 + RUN curl -fsSL https://packagecloud.io/install/repositories/tyk/tyk-pump/script.deb.sh | bash && apt-get install -y tyk-pump=1.6.0 RUN dpkg -i tyk-pump.deb ' > Dockerfile @@ -295,7 +295,7 @@ jobs: echo 'FROM registry.access.redhat.com/${{ 
matrix.distro }} COPY tyk-pump*.x86_64.rpm /tyk-pump.rpm RUN yum install -y curl - RUN curl -fsSL https://packagecloud.io/install/repositories/tyk/tyk-pump/script.rpm.sh | bash && yum install -y tyk-pump-1.4.0-1 + RUN curl -fsSL https://packagecloud.io/install/repositories/tyk/tyk-pump/script.rpm.sh | bash && yum install -y tyk-pump-1.6.0-1 RUN curl https://keyserver.tyk.io/tyk.io.rpm.signing.key.2020 -o tyk-pump.key && rpm --import tyk-pump.key RUN rpm --checksig tyk-pump.rpm RUN rpm -Uvh --force tyk-pump.rpm diff --git a/ci/Dockerfile.std b/ci/Dockerfile.std index ca2e28612..3c4d73d1a 100644 --- a/ci/Dockerfile.std +++ b/ci/Dockerfile.std @@ -1,6 +1,6 @@ # Generated by: gromit policy -# Generated on: Mon Oct 10 21:30:09 UTC 2022 +# Generated on: Wed Nov 23 11:38:15 UTC 2022 FROM debian:bullseye-slim ARG TARGETARCH diff --git a/ci/aws/byol.pkr.hcl b/ci/aws/byol.pkr.hcl index e54a55435..7596e90c1 100644 --- a/ci/aws/byol.pkr.hcl +++ b/ci/aws/byol.pkr.hcl @@ -1,6 +1,6 @@ # Generated by: gromit policy -# Generated on: Mon Oct 10 21:30:09 UTC 2022 +# Generated on: Wed Nov 23 11:38:15 UTC 2022 packer { required_plugins { diff --git a/ci/goreleaser/goreleaser-el7.yml b/ci/goreleaser/goreleaser-el7.yml index edae1c741..4be51887c 100644 --- a/ci/goreleaser/goreleaser-el7.yml +++ b/ci/goreleaser/goreleaser-el7.yml @@ -1,5 +1,5 @@ # Generated by: gromit policy -# Generated on: Mon Oct 10 21:30:09 UTC 2022 +# Generated on: Wed Nov 23 11:38:15 UTC 2022 # Check the documentation at http://goreleaser.com # This project needs CGO_ENABLED=1 and the cross-compiler toolchains for diff --git a/ci/goreleaser/goreleaser.yml b/ci/goreleaser/goreleaser.yml index 0927e9ccc..25835d254 100644 --- a/ci/goreleaser/goreleaser.yml +++ b/ci/goreleaser/goreleaser.yml @@ -1,5 +1,5 @@ # Generated by: gromit policy -# Generated on: Mon Oct 10 21:30:09 UTC 2022 +# Generated on: Wed Nov 23 11:38:15 UTC 2022 # Check the documentation at http://goreleaser.com # This project needs CGO_ENABLED=1 and the cross-compiler toolchains for diff --git a/ci/install/before_install.sh b/ci/install/before_install.sh index c48789750..865d62272 100755 --- a/ci/install/before_install.sh +++ b/ci/install/before_install.sh @@ -1,7 +1,7 @@ #!/bin/bash # Generated by: gromit policy -# Generated on: Mon Oct 10 21:30:09 UTC 2022 +# Generated on: Wed Nov 23 11:38:15 UTC 2022 echo "Creating user and group..." GROUPNAME="tyk" diff --git a/ci/install/post_install.sh b/ci/install/post_install.sh index 5f1a0cb8f..9b7388010 100755 --- a/ci/install/post_install.sh +++ b/ci/install/post_install.sh @@ -2,7 +2,7 @@ # Generated by: gromit policy -# Generated on: Mon Oct 10 21:30:09 UTC 2022 +# Generated on: Wed Nov 23 11:38:15 UTC 2022 # If "True" the install directory ownership will be changed to "tyk:tyk" change_ownership="True" diff --git a/ci/install/post_remove.sh b/ci/install/post_remove.sh index a57d5195d..7dcbb617a 100755 --- a/ci/install/post_remove.sh +++ b/ci/install/post_remove.sh @@ -1,7 +1,7 @@ #!/bin/sh # Generated by: gromit policy -# Generated on: Mon Oct 10 21:30:09 UTC 2022 +# Generated on: Wed Nov 23 11:38:15 UTC 2022 cleanRemove() { diff --git a/ci/install/post_trans.sh b/ci/install/post_trans.sh index b3adcd737..ae06c992c 100644 --- a/ci/install/post_trans.sh +++ b/ci/install/post_trans.sh @@ -1,7 +1,7 @@ #!/bin/sh # Generated by: gromit policy -# Generated on: Mon Oct 10 21:30:09 UTC 2022 +# Generated on: Wed Nov 23 11:38:15 UTC 2022 if command -V systemctl >/dev/null 2>&1; then if [ ! 
-f /lib/systemd/system/tyk-pump.service ]; then diff --git a/ci/terraform/outputs.tf b/ci/terraform/outputs.tf index 2197a6bb2..1fcf86ac6 100644 --- a/ci/terraform/outputs.tf +++ b/ci/terraform/outputs.tf @@ -1,6 +1,6 @@ # Generated by: gromit policy -# Generated on: Mon Oct 10 21:30:09 UTC 2022 +# Generated on: Wed Nov 23 11:38:15 UTC 2022 From 906d54fcd73e4e054d960f5994a273e170c1b804 Mon Sep 17 00:00:00 2001 From: Esteban Ricardo Mirizio Date: Tue, 29 Nov 2022 05:51:55 -0300 Subject: [PATCH 026/102] Fix Sync-Automation (#535) Co-authored-by: Gromit --- .github/workflows/sync-automation.yml | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/.github/workflows/sync-automation.yml b/.github/workflows/sync-automation.yml index 638d29457..9be9b16b1 100644 --- a/.github/workflows/sync-automation.yml +++ b/.github/workflows/sync-automation.yml @@ -1,5 +1,5 @@ # Generated by: gromit policy -# Generated on: Thu Sep 1 10:18:59 UTC 2022 +# Generated on: Mon Nov 28 22:55:51 UTC 2022 name: Sync automation @@ -9,7 +9,6 @@ on: - master paths: - ci/** - - .github/workflows/api-tests.yml - .github/workflows/release.yml - .github/dependabot.yml @@ -21,7 +20,7 @@ jobs: fail-fast: false matrix: branch: - - release-1.6 + - release-1.7 steps: - uses: actions/checkout@v2 @@ -40,8 +39,6 @@ jobs: git checkout -b $prbranch rm -rf ci git restore --source master -- ci - rm -f .github/workflows/api-tests.yml - git restore --source master -- .github/workflows/api-tests.yml rm -f .github/workflows/release.yml git restore --source master -- .github/workflows/release.yml rm -f .github/dependabot.yml From 13cd67f67b72a09ab7d97ee2dce1afad101d6dfa Mon Sep 17 00:00:00 2001 From: Tomas Buchaillot Date: Fri, 2 Dec 2022 12:05:10 +0100 Subject: [PATCH 027/102] using proper semantic for pump version (#537) --- pumps/version.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pumps/version.go b/pumps/version.go index 26c4f5c1f..40601e0cd 100644 --- a/pumps/version.go +++ b/pumps/version.go @@ -1,6 +1,6 @@ package pumps var ( - VERSION = "v1.7" + VERSION = "v1.7.0" builtBy, Commit, buildDate string ) From 94bdb89f9ced5241c400d1bcce9d2c2f1fed18b2 Mon Sep 17 00:00:00 2001 From: Asutosh <1187055+asutosh@users.noreply.github.com> Date: Mon, 5 Dec 2022 17:58:59 +0530 Subject: [PATCH 028/102] Enable automerge for sync automation PRs. (#536) Add an additional step in the sync automation workflow to enable automerge for the created PR. 
--- .github/workflows/sync-automation.yml | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/.github/workflows/sync-automation.yml b/.github/workflows/sync-automation.yml index 9be9b16b1..3da8a4047 100644 --- a/.github/workflows/sync-automation.yml +++ b/.github/workflows/sync-automation.yml @@ -54,6 +54,7 @@ jobs: uses: actions/github-script@v6 with: github-token: ${{ secrets.ORG_GH_TOKEN }} + result-encoding: string script: | const response = await github.rest.pulls.create({ title: '[CI] Sync automation: Syncing commits from master', @@ -76,3 +77,14 @@ jobs: repo: context.repo.repo, issue_number: response.data.number, labels: ['sync-automation'] }); + return response.data.number + + + - name: Enable automerge for the created PR + id: enable-automerge + run: | + gh pr merge $PULL --auto --squash --subject "[CI] Sync automation: Syncing commits from master" --body "Picking CI changes from the commit $COMMIT" + env: + GITHUB_TOKEN: ${{ secrets.ORG_GH_TOKEN }} + PULL: ${{ steps.create-pr.outputs.result }} + COMMIT: ${{ github.sha }} From 5f1ef168a53b36bd2f07d6eb6f4cfdbec4506b29 Mon Sep 17 00:00:00 2001 From: Matias <83959431+mativm02@users.noreply.github.com> Date: Tue, 6 Dec 2022 09:00:59 -0300 Subject: [PATCH 029/102] TT-506/2 - Adding info about self-healing (#507) * adding info about self-healing * rewritting readme * changing variable name --- README.md | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/README.md b/README.md index 77ab9f592..6750cdf4b 100644 --- a/README.md +++ b/README.md @@ -387,6 +387,23 @@ TYK_PMP_PUMPS_MONGOAGG_META_AGGREGATIONTIME=50 TYK_PMP_PUMPS_MONGOAGG_META_ENABLESELFHEALING=true ``` +###### Self Healing +By default, the maximum size of a document in MongoDB is 16MB. If we try to update a document that has grown to this size, an error is received. + +The Mongo Aggregate pump creates a new document in the database for each "aggregation period"; the length of that period is defined by `aggregation_time`. If, during that period (in minutes) the document grows beyond 16MB, the error will be received and no more records will be recorded until the end of the aggregation period (when a new document will be created). + +The Self Healing option in the Mongo Aggregate Pump avoids this data loss by monitoring the size of the current document. When this hits the 16MB limit, Pump will automatically create a new document with the current timestamp and start writing to that instead. + +When it does this, the Pump will halve the `aggregation_time` so that it aggregates records for half the time period originally configured before creating a new document, reducing the risk of repeatedly hitting the maximum document size. + +This self healing is repeatable, however, such that if the document size does reach maximum (16MB) even with the new shorter aggregation period, a new document will be created and the `aggregation_time` will be halved again. + +The minimum value for `aggregation_time` is 1; Self Healing cannot reduce it beyond this value. + +For example, if the `aggregation_time` is configured as 50 (minutes) but the document hits the maximum size (16MB), a new document will be started and the `aggregation_time` will be set to 25 (minutes). + +Note that `store_analytics_per_minute` takes precedence over `aggregation_time` so if `store_analytics_per_minute` is equal to true, the value of `aggregation_time` will be equal to 1 and self healing will not operate. 
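As an illustration of the back-off described above, here is a minimal sketch (an assumed helper for illustration only, not the actual pump implementation — the function name and the integer halving are assumptions) of how the aggregation window shrinks each time a document reaches the 16MB cap, never dropping below 1 minute:

```go
package main

import "fmt"

// nextAggregationTime is a hypothetical helper (not the real pump code) that
// models the self-healing back-off described above: every time the current
// aggregate document hits the 16MB cap, the aggregation window is halved,
// but it is never reduced below 1 minute.
func nextAggregationTime(currentMinutes int) int {
	next := currentMinutes / 2
	if next < 1 {
		next = 1 // aggregation_time cannot be reduced below 1
	}
	return next
}

func main() {
	fmt.Println(nextAggregationTime(50)) // prints 25, matching the example above
}
```

With the documented example, an `aggregation_time` of 50 becomes 25 after the first trigger; repeated triggers keep halving the window until the floor of 1 minute is reached.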
+ ###### Mongo Graph Pump As of Pump 1.7+, a new mongo is available called the `mongo_graph` pump. This pump is specifically for parsing GraphQL and UDG requests, tracking information like types requested, fields requested, specific graphql body errors etc. From 6392fe3d7a476283285509f8694fe4089d73c942 Mon Sep 17 00:00:00 2001 From: Asutosh <1187055+asutosh@users.noreply.github.com> Date: Tue, 6 Dec 2022 17:50:40 +0530 Subject: [PATCH 030/102] Releng sync: test automerge (#542) Tests if sync automation can enable automerge for created PR. Co-authored-by: Gromit --- .github/dependabot.yml | 2 +- .github/workflows/del-env.yml | 2 +- .github/workflows/release.yml | 2 +- ci/Dockerfile.std | 2 +- ci/aws/byol.pkr.hcl | 2 +- ci/goreleaser/goreleaser-el7.yml | 2 +- ci/goreleaser/goreleaser.yml | 2 +- ci/install/before_install.sh | 2 +- ci/install/post_install.sh | 2 +- ci/install/post_remove.sh | 2 +- ci/install/post_trans.sh | 2 +- ci/terraform/outputs.tf | 2 +- 12 files changed, 12 insertions(+), 12 deletions(-) diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 89f3bb512..b4eea1c57 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -1,6 +1,6 @@ # Generated by: gromit policy -# Generated on: Wed Nov 23 11:38:15 UTC 2022 +# Generated on: Tue Dec 6 05:57:19 UTC 2022 version: 2 updates: diff --git a/.github/workflows/del-env.yml b/.github/workflows/del-env.yml index ea00f4d4d..e4162008a 100644 --- a/.github/workflows/del-env.yml +++ b/.github/workflows/del-env.yml @@ -1,6 +1,6 @@ # Generated by: gromit policy -# Generated on: Wed Nov 23 11:38:15 UTC 2022 +# Generated on: Tue Dec 6 05:57:19 UTC 2022 name: Retiring dev env diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 30d435418..f1ee4a827 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -1,6 +1,6 @@ # Generated by: gromit policy -# Generated on: Wed Nov 23 11:38:15 UTC 2022 +# Generated on: Tue Dec 6 05:57:19 UTC 2022 # Distribution channels covered by this workflow diff --git a/ci/Dockerfile.std b/ci/Dockerfile.std index 3c4d73d1a..7968b35c0 100644 --- a/ci/Dockerfile.std +++ b/ci/Dockerfile.std @@ -1,6 +1,6 @@ # Generated by: gromit policy -# Generated on: Wed Nov 23 11:38:15 UTC 2022 +# Generated on: Tue Dec 6 05:57:19 UTC 2022 FROM debian:bullseye-slim ARG TARGETARCH diff --git a/ci/aws/byol.pkr.hcl b/ci/aws/byol.pkr.hcl index 7596e90c1..606bb6c4c 100644 --- a/ci/aws/byol.pkr.hcl +++ b/ci/aws/byol.pkr.hcl @@ -1,6 +1,6 @@ # Generated by: gromit policy -# Generated on: Wed Nov 23 11:38:15 UTC 2022 +# Generated on: Tue Dec 6 05:57:19 UTC 2022 packer { required_plugins { diff --git a/ci/goreleaser/goreleaser-el7.yml b/ci/goreleaser/goreleaser-el7.yml index 4be51887c..50d5afa59 100644 --- a/ci/goreleaser/goreleaser-el7.yml +++ b/ci/goreleaser/goreleaser-el7.yml @@ -1,5 +1,5 @@ # Generated by: gromit policy -# Generated on: Wed Nov 23 11:38:15 UTC 2022 +# Generated on: Tue Dec 6 05:57:19 UTC 2022 # Check the documentation at http://goreleaser.com # This project needs CGO_ENABLED=1 and the cross-compiler toolchains for diff --git a/ci/goreleaser/goreleaser.yml b/ci/goreleaser/goreleaser.yml index 25835d254..6ca909a78 100644 --- a/ci/goreleaser/goreleaser.yml +++ b/ci/goreleaser/goreleaser.yml @@ -1,5 +1,5 @@ # Generated by: gromit policy -# Generated on: Wed Nov 23 11:38:15 UTC 2022 +# Generated on: Tue Dec 6 05:57:19 UTC 2022 # Check the documentation at http://goreleaser.com # This project needs CGO_ENABLED=1 and the cross-compiler toolchains for diff 
--git a/ci/install/before_install.sh b/ci/install/before_install.sh index 865d62272..be153d1e8 100755 --- a/ci/install/before_install.sh +++ b/ci/install/before_install.sh @@ -1,7 +1,7 @@ #!/bin/bash # Generated by: gromit policy -# Generated on: Wed Nov 23 11:38:15 UTC 2022 +# Generated on: Tue Dec 6 05:57:19 UTC 2022 echo "Creating user and group..." GROUPNAME="tyk" diff --git a/ci/install/post_install.sh b/ci/install/post_install.sh index 9b7388010..3953dfe46 100755 --- a/ci/install/post_install.sh +++ b/ci/install/post_install.sh @@ -2,7 +2,7 @@ # Generated by: gromit policy -# Generated on: Wed Nov 23 11:38:15 UTC 2022 +# Generated on: Tue Dec 6 05:57:19 UTC 2022 # If "True" the install directory ownership will be changed to "tyk:tyk" change_ownership="True" diff --git a/ci/install/post_remove.sh b/ci/install/post_remove.sh index 7dcbb617a..81fc3c46e 100755 --- a/ci/install/post_remove.sh +++ b/ci/install/post_remove.sh @@ -1,7 +1,7 @@ #!/bin/sh # Generated by: gromit policy -# Generated on: Wed Nov 23 11:38:15 UTC 2022 +# Generated on: Tue Dec 6 05:57:19 UTC 2022 cleanRemove() { diff --git a/ci/install/post_trans.sh b/ci/install/post_trans.sh index ae06c992c..84a2c9b98 100644 --- a/ci/install/post_trans.sh +++ b/ci/install/post_trans.sh @@ -1,7 +1,7 @@ #!/bin/sh # Generated by: gromit policy -# Generated on: Wed Nov 23 11:38:15 UTC 2022 +# Generated on: Tue Dec 6 05:57:19 UTC 2022 if command -V systemctl >/dev/null 2>&1; then if [ ! -f /lib/systemd/system/tyk-pump.service ]; then diff --git a/ci/terraform/outputs.tf b/ci/terraform/outputs.tf index 1fcf86ac6..87dae6c10 100644 --- a/ci/terraform/outputs.tf +++ b/ci/terraform/outputs.tf @@ -1,6 +1,6 @@ # Generated by: gromit policy -# Generated on: Wed Nov 23 11:38:15 UTC 2022 +# Generated on: Tue Dec 6 05:57:19 UTC 2022 From c98c681d7de3be0cb16c17177e29f2bfb740f310 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 7 Dec 2022 10:51:37 +0100 Subject: [PATCH 031/102] Bump actions/checkout from 2 to 3.1.0 (#494) Bumps [actions/checkout](https://github.com/actions/checkout) from 2 to 3.1.0. - [Release notes](https://github.com/actions/checkout/releases) - [Changelog](https://github.com/actions/checkout/blob/main/CHANGELOG.md) - [Commits](https://github.com/actions/checkout/compare/v2...v3.1.0) --- updated-dependencies: - dependency-name: actions/checkout dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/ci-test.yml | 2 +- .github/workflows/codeql-analysis.yml | 2 +- .github/workflows/sync-automation.yml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/ci-test.yml b/.github/workflows/ci-test.yml index ee0caa890..f7ceb46aa 100644 --- a/.github/workflows/ci-test.yml +++ b/.github/workflows/ci-test.yml @@ -25,7 +25,7 @@ jobs: steps: - name: Checkout Tyk Pump - uses: actions/checkout@v2 + uses: actions/checkout@v3 with: fetch-depth: 2 diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index f767bda4f..55c6ad15c 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -30,7 +30,7 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@v2 + uses: actions/checkout@v3 with: # We must fetch at least the immediate parents so that if this is # a pull request then we can checkout the head. diff --git a/.github/workflows/sync-automation.yml b/.github/workflows/sync-automation.yml index 3da8a4047..41e0d82fe 100644 --- a/.github/workflows/sync-automation.yml +++ b/.github/workflows/sync-automation.yml @@ -23,7 +23,7 @@ jobs: - release-1.7 steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 with: ref: ${{matrix.branch}} token: ${{ secrets.ORG_GH_TOKEN }} From 03f498f2f28495cbd97a63d1badc747c3a9e65a8 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 7 Dec 2022 10:52:13 +0100 Subject: [PATCH 032/102] Bump actions/cache from 2 to 3 (#521) Bumps [actions/cache](https://github.com/actions/cache) from 2 to 3. - [Release notes](https://github.com/actions/cache/releases) - [Changelog](https://github.com/actions/cache/blob/main/RELEASES.md) - [Commits](https://github.com/actions/cache/compare/v2...v3) --- updated-dependencies: - dependency-name: actions/cache dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/ci-test.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci-test.yml b/.github/workflows/ci-test.yml index f7ceb46aa..664a1f831 100644 --- a/.github/workflows/ci-test.yml +++ b/.github/workflows/ci-test.yml @@ -54,7 +54,7 @@ jobs: mongodb-version: '${{ matrix.mongodb-version }}' - name: Cache - uses: actions/cache@v2 + uses: actions/cache@v3 with: path: ~/go/pkg/mod key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }} From a00434bb663ceca6d7ca64118754a58428de7926 Mon Sep 17 00:00:00 2001 From: Asutosh <1187055+asutosh@users.noreply.github.com> Date: Thu, 15 Dec 2022 18:15:12 +0530 Subject: [PATCH 033/102] [TD-1348]: Enabling automerge for sync automation PRs (#552) Co-authored-by: Gromit --- .github/workflows/sync-automation.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/sync-automation.yml b/.github/workflows/sync-automation.yml index 41e0d82fe..76e57cf5c 100644 --- a/.github/workflows/sync-automation.yml +++ b/.github/workflows/sync-automation.yml @@ -1,5 +1,5 @@ # Generated by: gromit policy -# Generated on: Mon Nov 28 22:55:51 UTC 2022 +# Generated on: Thu Dec 15 07:12:58 UTC 2022 name: Sync automation From 06f324ab6358ef9c194383a3120a6937ec80e5c0 Mon Sep 17 00:00:00 2001 From: Matias <83959431+mativm02@users.noreply.github.com> Date: Mon, 19 Dec 2022 10:25:51 -0300 Subject: [PATCH 034/102] [TT-7345] Adding the possibility to create historical and future data in Pump's Demo Mode (#549) * adding demo-future-data flag * linting * using logger instead of fmt package --- README.md | 1 + analytics/demo/demo.go | 74 +++++++++++++++++++++++-------------- analytics/demo/demo_test.go | 14 ++++++- main.go | 3 +- 4 files changed, 62 insertions(+), 30 deletions(-) diff --git a/README.md b/README.md index 6750cdf4b..d82af2c0d 100644 --- a/README.md +++ b/README.md @@ -1301,3 +1301,4 @@ You can run Tyk Pump in demo mode, which will generate fake analytics data and s - `--demo-days=` - Sets the number of days of demo data to generate. Defaults to 30. - `--demo-records-per-hour=` - Sets the number of records to generate per hour. The default value is a random number between 300 and 500. - `--demo-track-path` - Enables tracking of the request path in the demo data. Defaults to false (disabled). Note that setting `track_all_paths` to `true` in your Pump configuration will override this option. +- `--demo-future-data` - By default, the demo data is generated for the past X days (configured in `demo-days` flag). This option will generate data for the next X days. Defaults to false (disabled). 
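As a reading aid for the `analytics/demo/demo.go` changes that follow, the sketch below (a hypothetical `demoWindow` helper, not the shipped code) summarises the date window each mode covers: by default records span the previous N days up to yesterday, while `--demo-future-data` generates records from today forward.

```go
package main

import (
	"fmt"
	"time"
)

// demoWindow is a hypothetical helper (not the shipped code) mirroring the
// windowing logic in the diff below: generation always starts from today's
// midnight UTC and walks either backwards over the past `days` days (the
// default) or forwards from today when future data is requested.
func demoWindow(days int, future bool) (first, last time.Time) {
	t := time.Now()
	start := time.Date(t.Year(), t.Month(), t.Day(), 0, 0, 0, 0, time.UTC)
	if future {
		// today through the next days-1 days
		return start, start.AddDate(0, 0, days-1)
	}
	// the previous `days` days, up to and including yesterday
	return start.AddDate(0, 0, -days), start.AddDate(0, 0, -1)
}

func main() {
	first, last := demoWindow(30, false)
	fmt.Println(first.Format("2006-01-02"), "to", last.Format("2006-01-02"))
}
```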
\ No newline at end of file diff --git a/analytics/demo/demo.go b/analytics/demo/demo.go index e4bd08721..0ac9423c0 100644 --- a/analytics/demo/demo.go +++ b/analytics/demo/demo.go @@ -1,12 +1,12 @@ package demo import ( - "fmt" "math/rand" "strings" "time" "github.com/TykTechnologies/tyk-pump/analytics" + "github.com/TykTechnologies/tyk-pump/logger" "github.com/gocraft/health" uuid "github.com/satori/go.uuid" @@ -16,6 +16,7 @@ var ( apiKeys []string apiID string apiVersion string + log = logger.GetLogger() ) func DemoInit(orgId, apiId, version string) { @@ -149,43 +150,60 @@ func country() string { return codes[rand.Intn(len(codes))] } -func GenerateDemoData(days, recordsPerHour int, orgID string, trackPath bool, writer func([]interface{}, *health.Job, time.Time, int)) { +func GenerateDemoData(days, recordsPerHour int, orgID string, demoFutureData, trackPath bool, writer func([]interface{}, *health.Job, time.Time, int)) { t := time.Now() start := time.Date(t.Year(), t.Month(), t.Day(), 0, 0, 0, 0, time.UTC) count := 0 - for d := 0; d < days; d++ { - for h := 0; h < 24; h++ { - set := []interface{}{} - ts := start.AddDate(0, 0, d) - ts = ts.Add(time.Duration(h) * time.Hour) - // Generate daily entries - var volume int - if recordsPerHour > 0 { - volume = recordsPerHour - } else { - volume = randomInRange(300, 500) - } - timeDifference := 3600 / volume // this is the difference in seconds between each record - nextTimestamp := ts // this is the timestamp of the next record - for i := 0; i < volume; i++ { - r := GenerateRandomAnalyticRecord(orgID, trackPath) - r.Day = nextTimestamp.Day() - r.Month = nextTimestamp.Month() - r.Year = nextTimestamp.Year() - r.Hour = nextTimestamp.Hour() - r.TimeStamp = nextTimestamp - nextTimestamp = nextTimestamp.Add(time.Second * time.Duration(timeDifference)) - - set = append(set, r) + // If we are generating future data, we want to start at the current date and create data for the next X days + if demoFutureData { + for d := 0; d < days; d++ { + for h := 0; h < 24; h++ { + WriteDemoData(start, d, h, recordsPerHour, orgID, trackPath, writer) } + count++ + log.Infof("Finished %d of %d\n", count, days) + } + return + } - writer(set, nil, time.Now(), 10) + // Otherwise, we want to start at the (current date - X days) and create data until yesterday's date + for d := days; d > 0; d-- { + for h := 0; h < 24; h++ { + WriteDemoData(start, -d, h, recordsPerHour, orgID, trackPath, writer) } count++ - fmt.Printf("Finished %d of %d\n", count, days) + log.Infof("Finished %d of %d\n", count, days) } } +func WriteDemoData(start time.Time, d, h, recordsPerHour int, orgID string, trackPath bool, writer func([]interface{}, *health.Job, time.Time, int)) { + set := []interface{}{} + ts := start.AddDate(0, 0, d) + ts = ts.Add(time.Duration(h) * time.Hour) + // Generate daily entries + var volume int + if recordsPerHour > 0 { + volume = recordsPerHour + } else { + volume = randomInRange(300, 500) + } + timeDifference := 3600 / volume // this is the difference in seconds between each record + nextTimestamp := ts // this is the timestamp of the next record + for i := 0; i < volume; i++ { + r := GenerateRandomAnalyticRecord(orgID, trackPath) + r.Day = nextTimestamp.Day() + r.Month = nextTimestamp.Month() + r.Year = nextTimestamp.Year() + r.Hour = nextTimestamp.Hour() + r.TimeStamp = nextTimestamp + nextTimestamp = nextTimestamp.Add(time.Second * time.Duration(timeDifference)) + + set = append(set, r) + } + + writer(set, nil, time.Now(), 10) +} + func 
GenerateRandomAnalyticRecord(orgID string, trackPath bool) analytics.AnalyticsRecord { p := randomPath() api, apiID := randomAPI() diff --git a/analytics/demo/demo_test.go b/analytics/demo/demo_test.go index 1573ad9f1..2abd835ce 100644 --- a/analytics/demo/demo_test.go +++ b/analytics/demo/demo_test.go @@ -16,6 +16,7 @@ func TestGenerateDemoData(t *testing.T) { days int recordsPerHour int trackPath bool + futureData bool } tests := []struct { @@ -29,6 +30,7 @@ func TestGenerateDemoData(t *testing.T) { recordsPerHour: 1, orgID: "test", trackPath: false, + futureData: true, writer: func(data []interface{}, job *health.Job, ts time.Time, n int) { }, }, @@ -105,11 +107,21 @@ func TestGenerateDemoData(t *testing.T) { if !ok { t.Errorf("unexpected type: %T", d) } + // checking timestamp: + // if futureData is true, then timestamp should be in the present and future + // if futureData is false, then timestamp should be in the past + ts := time.Now() + if tt.args.futureData { + val := analyticsRecord.TimeStamp.After(time.Date(ts.Year(), ts.Month(), ts.Day(), 0, 0, 0, 0, time.UTC)) || analyticsRecord.TimeStamp.Equal(time.Date(ts.Year(), ts.Month(), ts.Day(), 0, 0, 0, 0, time.UTC)) + assert.True(t, val) + } else { + assert.True(t, analyticsRecord.TimeStamp.Before(time.Date(ts.Year(), ts.Month(), ts.Day(), 0, 0, 0, 0, time.UTC))) + } assert.Equal(t, tt.args.trackPath, analyticsRecord.TrackPath) } } - GenerateDemoData(tt.args.days, tt.args.recordsPerHour, tt.args.orgID, tt.args.trackPath, tt.args.writer) + GenerateDemoData(tt.args.days, tt.args.recordsPerHour, tt.args.orgID, tt.args.futureData, tt.args.trackPath, tt.args.writer) if tt.args.recordsPerHour == 0 { isValid := counter >= 300*tt.args.days || counter <= 500*tt.args.days assert.True(t, isValid) diff --git a/main.go b/main.go index 2e29f89bb..eb93a340f 100644 --- a/main.go +++ b/main.go @@ -43,6 +43,7 @@ var ( demoTrackPath = kingpin.Flag("demo-track-path", "enable track path in analytics records").Default("false").Bool() demoDays = kingpin.Flag("demo-days", "flag that determines the number of days for the analytics records").Default("30").Int() demoRecordsPerHour = kingpin.Flag("demo-records-per-hour", "flag that determines the number of records per hour for the analytics records").Default("0").Int() + demoFutureData = kingpin.Flag("demo-future-data", "flag that determines if the demo data should be in the future").Default("false").Bool() debugMode = kingpin.Flag("debug", "enable debug mode").Bool() version = kingpin.Version(pumps.VERSION) ) @@ -415,7 +416,7 @@ func main() { log.Info("BUILDING DEMO DATA AND EXITING...") log.Warning("Starting from date: ", time.Now().AddDate(0, 0, -30)) demo.DemoInit(*demoMode, *demoApiMode, *demoApiVersionMode) - demo.GenerateDemoData(*demoDays, *demoRecordsPerHour, *demoMode, *demoTrackPath, writeToPumps) + demo.GenerateDemoData(*demoDays, *demoRecordsPerHour, *demoMode, *demoFutureData, *demoTrackPath, writeToPumps) return } From 911f6afa8101c02dfc78588daba6930bcb7b7f6f Mon Sep 17 00:00:00 2001 From: Esteban Ricardo Mirizio Date: Mon, 19 Dec 2022 13:09:53 -0300 Subject: [PATCH 035/102] Codeowners (#550) * Codeowners * Codeowners * Update CODEOWNERS Co-authored-by: Gromit --- .github/CODEOWNERS | 3 +++ .github/dependabot.yml | 2 +- .github/workflows/del-env.yml | 2 +- .github/workflows/release.yml | 2 +- ci/Dockerfile.std | 2 +- ci/aws/byol.pkr.hcl | 2 +- ci/goreleaser/goreleaser-el7.yml | 2 +- ci/goreleaser/goreleaser.yml | 2 +- ci/install/before_install.sh | 2 +- ci/install/post_install.sh | 2 +- 
ci/install/post_remove.sh | 2 +- ci/install/post_trans.sh | 2 +- ci/terraform/outputs.tf | 2 +- 13 files changed, 15 insertions(+), 12 deletions(-) create mode 100644 .github/CODEOWNERS diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS new file mode 100644 index 000000000..100465216 --- /dev/null +++ b/.github/CODEOWNERS @@ -0,0 +1,3 @@ +/ci/ @TykTechnologies/devops +.github/workflows/release.yml @TykTechnologies/devops +.github/workflows/sync-automation.yml @TykTechnologies/devops diff --git a/.github/dependabot.yml b/.github/dependabot.yml index b4eea1c57..f820c41c3 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -1,6 +1,6 @@ # Generated by: gromit policy -# Generated on: Tue Dec 6 05:57:19 UTC 2022 +# Generated on: Wed Dec 14 23:43:05 UTC 2022 version: 2 updates: diff --git a/.github/workflows/del-env.yml b/.github/workflows/del-env.yml index e4162008a..e3f39f6f8 100644 --- a/.github/workflows/del-env.yml +++ b/.github/workflows/del-env.yml @@ -1,6 +1,6 @@ # Generated by: gromit policy -# Generated on: Tue Dec 6 05:57:19 UTC 2022 +# Generated on: Wed Dec 14 23:43:05 UTC 2022 name: Retiring dev env diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index f1ee4a827..fd6475a42 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -1,6 +1,6 @@ # Generated by: gromit policy -# Generated on: Tue Dec 6 05:57:19 UTC 2022 +# Generated on: Wed Dec 14 23:43:05 UTC 2022 # Distribution channels covered by this workflow diff --git a/ci/Dockerfile.std b/ci/Dockerfile.std index 7968b35c0..29246abc6 100644 --- a/ci/Dockerfile.std +++ b/ci/Dockerfile.std @@ -1,6 +1,6 @@ # Generated by: gromit policy -# Generated on: Tue Dec 6 05:57:19 UTC 2022 +# Generated on: Wed Dec 14 23:43:05 UTC 2022 FROM debian:bullseye-slim ARG TARGETARCH diff --git a/ci/aws/byol.pkr.hcl b/ci/aws/byol.pkr.hcl index 606bb6c4c..b08dd7972 100644 --- a/ci/aws/byol.pkr.hcl +++ b/ci/aws/byol.pkr.hcl @@ -1,6 +1,6 @@ # Generated by: gromit policy -# Generated on: Tue Dec 6 05:57:19 UTC 2022 +# Generated on: Wed Dec 14 23:43:05 UTC 2022 packer { required_plugins { diff --git a/ci/goreleaser/goreleaser-el7.yml b/ci/goreleaser/goreleaser-el7.yml index 50d5afa59..a1783a3e1 100644 --- a/ci/goreleaser/goreleaser-el7.yml +++ b/ci/goreleaser/goreleaser-el7.yml @@ -1,5 +1,5 @@ # Generated by: gromit policy -# Generated on: Tue Dec 6 05:57:19 UTC 2022 +# Generated on: Wed Dec 14 23:43:05 UTC 2022 # Check the documentation at http://goreleaser.com # This project needs CGO_ENABLED=1 and the cross-compiler toolchains for diff --git a/ci/goreleaser/goreleaser.yml b/ci/goreleaser/goreleaser.yml index 6ca909a78..007c251c2 100644 --- a/ci/goreleaser/goreleaser.yml +++ b/ci/goreleaser/goreleaser.yml @@ -1,5 +1,5 @@ # Generated by: gromit policy -# Generated on: Tue Dec 6 05:57:19 UTC 2022 +# Generated on: Wed Dec 14 23:43:05 UTC 2022 # Check the documentation at http://goreleaser.com # This project needs CGO_ENABLED=1 and the cross-compiler toolchains for diff --git a/ci/install/before_install.sh b/ci/install/before_install.sh index be153d1e8..8dc19105a 100755 --- a/ci/install/before_install.sh +++ b/ci/install/before_install.sh @@ -1,7 +1,7 @@ #!/bin/bash # Generated by: gromit policy -# Generated on: Tue Dec 6 05:57:19 UTC 2022 +# Generated on: Wed Dec 14 23:43:05 UTC 2022 echo "Creating user and group..." 
GROUPNAME="tyk" diff --git a/ci/install/post_install.sh b/ci/install/post_install.sh index 3953dfe46..da8368ee9 100755 --- a/ci/install/post_install.sh +++ b/ci/install/post_install.sh @@ -2,7 +2,7 @@ # Generated by: gromit policy -# Generated on: Tue Dec 6 05:57:19 UTC 2022 +# Generated on: Wed Dec 14 23:43:05 UTC 2022 # If "True" the install directory ownership will be changed to "tyk:tyk" change_ownership="True" diff --git a/ci/install/post_remove.sh b/ci/install/post_remove.sh index 81fc3c46e..e6d7ca3f4 100755 --- a/ci/install/post_remove.sh +++ b/ci/install/post_remove.sh @@ -1,7 +1,7 @@ #!/bin/sh # Generated by: gromit policy -# Generated on: Tue Dec 6 05:57:19 UTC 2022 +# Generated on: Wed Dec 14 23:43:05 UTC 2022 cleanRemove() { diff --git a/ci/install/post_trans.sh b/ci/install/post_trans.sh index 84a2c9b98..94235be3d 100644 --- a/ci/install/post_trans.sh +++ b/ci/install/post_trans.sh @@ -1,7 +1,7 @@ #!/bin/sh # Generated by: gromit policy -# Generated on: Tue Dec 6 05:57:19 UTC 2022 +# Generated on: Wed Dec 14 23:43:05 UTC 2022 if command -V systemctl >/dev/null 2>&1; then if [ ! -f /lib/systemd/system/tyk-pump.service ]; then diff --git a/ci/terraform/outputs.tf b/ci/terraform/outputs.tf index 87dae6c10..b4bfe7e6f 100644 --- a/ci/terraform/outputs.tf +++ b/ci/terraform/outputs.tf @@ -1,6 +1,6 @@ # Generated by: gromit policy -# Generated on: Tue Dec 6 05:57:19 UTC 2022 +# Generated on: Wed Dec 14 23:43:05 UTC 2022 From 81be91581d0458748c4b2c192057c0d319670b4c Mon Sep 17 00:00:00 2001 From: Esteban Ricardo Mirizio Date: Wed, 21 Dec 2022 12:06:11 -0300 Subject: [PATCH 036/102] Deprecate set-output directive (#556) --- .github/workflows/ci-test.yml | 2 +- .github/workflows/release.yml | 8 ++++---- .github/workflows/sync-automation.yml | 2 +- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/.github/workflows/ci-test.yml b/.github/workflows/ci-test.yml index 664a1f831..d9b6d96d6 100644 --- a/.github/workflows/ci-test.yml +++ b/.github/workflows/ci-test.yml @@ -66,7 +66,7 @@ jobs: run: | ./bin/ci-test.sh 2>&1 | tee test.log result_code=${PIPESTATUS[0]} - echo "::set-output name=log::$(sed -ze 's/%/%25/g;s/\n/%0A/g' test.log)" + echo "log=$(sed -ze 's/%/%25/g;s/\n/%0A/g' test.log)" >> $GITHUB_OUTPUT exit $result_code - name: Download golangci-lint diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index fd6475a42..9415a6713 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -87,7 +87,7 @@ jobs: run: | ci/bin/unlock-agent.sh current_tag=${GITHUB_REF##*/} - echo "::set-output name=tag::${current_tag}" + echo "tag=${current_tag}" >> $GITHUB_OUTPUT - name: Delete old release assets if: startsWith(github.ref, 'refs/tags') @@ -160,9 +160,9 @@ jobs: eval $(terraform output -json tyk-pump | jq -r 'to_entries[] | [.key,.value] | join("=")') region=$(terraform output region | xargs) [ -z "$key" -o -z "$secret" -o -z "$region" ] && exit 1 - echo "::set-output name=secret::$secret" - echo "::set-output name=key::$key" - echo "::set-output name=region::$region" + echo "secret=$secret" >> $GITHUB_OUTPUT + echo "key=$key" >> $GITHUB_OUTPUT + echo "region=$region" >> $GITHUB_OUTPUT - name: Configure AWS credentials for use uses: aws-actions/configure-aws-credentials@v1 diff --git a/.github/workflows/sync-automation.yml b/.github/workflows/sync-automation.yml index 76e57cf5c..8e355f72d 100644 --- a/.github/workflows/sync-automation.yml +++ b/.github/workflows/sync-automation.yml @@ -45,7 +45,7 @@ jobs: git restore --source master -- 
.github/dependabot.yml git add -A && git commit -m "[CI]: Syncing CI changes to ${{ matrix.branch }}" git push origin $prbranch - echo "::set-output name=prbranch::$prbranch" + echo "prbranch=$prbranch" >> $GITHUB_OUTPUT echo "::debug::Commit ${{ github.sha }} syncd for ${{matrix.branch}}" exit 0 From 9caf2d74e650ac040fdd652ba05de766d4f5b4d3 Mon Sep 17 00:00:00 2001 From: Matias <83959431+mativm02@users.noreply.github.com> Date: Thu, 22 Dec 2022 13:59:15 -0300 Subject: [PATCH 037/102] Adding unit tests to CSV pump (#539) * adding unit tests to csv file * handling invalid data received, avoiding panics * improving WriteData test * linting --- pumps/csv.go | 5 +- pumps/csv_test.go | 255 ++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 259 insertions(+), 1 deletion(-) create mode 100644 pumps/csv_test.go diff --git a/pumps/csv.go b/pumps/csv.go index 6d00e68d2..2ce8cf529 100644 --- a/pumps/csv.go +++ b/pumps/csv.go @@ -103,7 +103,10 @@ func (c *CSVPump) WriteData(ctx context.Context, data []interface{}) error { } for _, v := range data { - decoded := v.(analytics.AnalyticsRecord) + decoded, ok := v.(analytics.AnalyticsRecord) + if !ok { + return fmt.Errorf("couldn't convert %v to analytics.AnalyticsRecord", v) + } toWrite := decoded.GetLineValues() // toWrite := []string{ diff --git a/pumps/csv_test.go b/pumps/csv_test.go new file mode 100644 index 000000000..2fefa5462 --- /dev/null +++ b/pumps/csv_test.go @@ -0,0 +1,255 @@ +package pumps + +import ( + "context" + "encoding/csv" + "fmt" + "os" + "reflect" + "testing" + "time" + + "github.com/TykTechnologies/tyk-pump/analytics/demo" + "github.com/stretchr/testify/assert" +) + +func TestCSVPump_New(t *testing.T) { + type fields struct { + csvConf *CSVConf + CommonPumpConfig CommonPumpConfig + wroteHeaders bool + } + tests := []struct { + want Pump + name string + fields fields + }{ + { + name: "TestCSVPump_New", + fields: fields{ + csvConf: &CSVConf{}, + CommonPumpConfig: CommonPumpConfig{ + log: log.WithField("prefix", csvPrefix), + }, + }, + want: &CSVPump{}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + c := &CSVPump{ + csvConf: tt.fields.csvConf, + wroteHeaders: tt.fields.wroteHeaders, + CommonPumpConfig: tt.fields.CommonPumpConfig, + } + if got := c.New(); !reflect.DeepEqual(got, tt.want) { + t.Errorf("CSVPump.New() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestCSVPump_GetName(t *testing.T) { + type fields struct { + csvConf *CSVConf + CommonPumpConfig CommonPumpConfig + wroteHeaders bool + } + tests := []struct { + name string + want string + fields fields + }{ + { + name: "TestCSVPump_GetName", + fields: fields{ + csvConf: &CSVConf{}, + CommonPumpConfig: CommonPumpConfig{ + log: log.WithField("prefix", csvPrefix), + }, + }, + want: "CSV Pump", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + c := &CSVPump{ + csvConf: tt.fields.csvConf, + wroteHeaders: tt.fields.wroteHeaders, + CommonPumpConfig: tt.fields.CommonPumpConfig, + } + if got := c.GetName(); got != tt.want { + t.Errorf("CSVPump.GetName() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestCSVPump_Init(t *testing.T) { + type fields struct { + csvConf *CSVConf + CommonPumpConfig CommonPumpConfig + wroteHeaders bool + } + type args struct { + conf interface{} + } + tests := []struct { + args args + name string + fields fields + wantErr bool + }{ + { + name: "TestCSVPump_Init", + args: args{ + conf: &CSVConf{ + CSVDir: "testingDirectory", + }, + }, + wantErr: false, + }, + } + + for _, tt 
:= range tests { + t.Run(tt.name, func(t *testing.T) { + c := &CSVPump{ + csvConf: tt.fields.csvConf, + wroteHeaders: tt.fields.wroteHeaders, + CommonPumpConfig: tt.fields.CommonPumpConfig, + } + if err := c.Init(tt.args.conf); (err != nil) != tt.wantErr { + t.Errorf("CSVPump.Init() error = %v, wantErr %v", err, tt.wantErr) + } + + if tt.wantErr { + return + } + + defer os.Remove(c.csvConf.CSVDir) + + _, err := os.Stat(c.csvConf.CSVDir) + if err != nil { + t.Errorf("CSVPump.Init() error = %v, wantErr %v", err, tt.wantErr) + } + }) + } +} + +func TestCSVPump_WriteData(t *testing.T) { + type fields struct { + csvConf *CSVConf + wroteHeaders bool + } + type args struct { + numberOfRecords int + } + tests := []struct { + name string + fields fields + args args + wantErr bool + }{ + { + name: "writing 1 record", + fields: fields{ + csvConf: &CSVConf{ + CSVDir: "testingDirectory", + }, + }, + args: args{ + numberOfRecords: 1, + }, + }, + { + name: "writing 10 records", + fields: fields{ + csvConf: &CSVConf{ + CSVDir: "testingDirectory", + }, + }, + args: args{ + numberOfRecords: 10, + }, + }, + { + name: "trying to write invalid records", + fields: fields{ + csvConf: &CSVConf{ + CSVDir: "testingDirectory", + }, + }, + args: args{ + numberOfRecords: 0, + }, + wantErr: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // creating and initializing csv pump + c := &CSVPump{ + csvConf: tt.fields.csvConf, + wroteHeaders: tt.fields.wroteHeaders, + } + err := c.Init(tt.fields.csvConf) + assert.Nil(t, err) + // when init, a directory is created, so we need to remove it after the test + defer os.RemoveAll(c.csvConf.CSVDir) + // generating random data to write in the csv file + var records []interface{} + if !tt.wantErr { + for i := 0; i < tt.args.numberOfRecords; i++ { + records = append(records, demo.GenerateRandomAnalyticRecord("orgid", false)) + } + } else { + records = append(records, "invalid record") + } + // writing data + if err := c.WriteData(context.Background(), records); (err != nil) != tt.wantErr { + t.Errorf("CSVPump.WriteData() error = %v, wantErr %v", err, tt.wantErr) + } + + // getting the file name + curtime := time.Now() + fname := fmt.Sprintf("%d-%s-%d-%d.csv", curtime.Year(), curtime.Month().String(), curtime.Day(), curtime.Hour()) + file, totalRows, err := GetFileAndRows(fname) + assert.Nil(t, err) + defer file.Close() + + if tt.wantErr { + assert.Equal(t, tt.args.numberOfRecords, totalRows) + return + } + assert.Equal(t, tt.args.numberOfRecords+1, totalRows) + + // trying to append data to an existing file + if err := c.WriteData(context.Background(), records); (err != nil) != tt.wantErr { + t.Errorf("CSVPump.WriteData() error = %v, wantErr %v", err, tt.wantErr) + } + + file, totalRows, err = GetFileAndRows(fname) + assert.Nil(t, err) + defer file.Close() + assert.Equal(t, tt.args.numberOfRecords*2+1, totalRows) + }) + } +} + +func GetFileAndRows(fname string) (*os.File, int, error) { + // checking if the file exists + openfile, err := os.Open("./testingDirectory/" + fname) + if err != nil { + return nil, 0, err + } + defer openfile.Close() + filedata, err := csv.NewReader(openfile).ReadAll() + if err != nil { + return nil, 0, err + } + + // checking if the file contains the right number of records (number of records +1 because of the header) + totalRows := len(filedata) + return openfile, totalRows, nil +} From e2f277a6116a0d62ec564e05acc6bf126751e564 Mon Sep 17 00:00:00 2001 From: Firas Aboushamalah Date: Thu, 12 Jan 2023 11:27:52 -0500 Subject: 
[PATCH 038/102] TT-7216 Decode Option For Raw Request/Response (#558) * Created tests for decoding64, added decode method * Updated test file * Updated tests * Test passed, editing into prod * Created config for response and request + tests * Added config for req/response to PumpConf * added raw request/response option in pump.conf * Updated test to reflect both resp/req equal * Enhanced test cases * Added to pump.config * Added error catches in main.go * Fixed linting * Added common.go tests * Removed debug line, added comment to conditional Co-authored-by: tbuchaillot --- config.go | 14 +++++++++++ main.go | 24 ++++++++++++++++--- main_test.go | 57 ++++++++++++++++++++++++++++++++++++++++++++ pumps/common.go | 18 ++++++++++++++ pumps/common_test.go | 23 ++++++++++++++++++ pumps/pump.go | 4 ++++ 6 files changed, 137 insertions(+), 3 deletions(-) create mode 100644 pumps/common_test.go diff --git a/config.go b/config.go index c1b7da78f..ab347ffaf 100644 --- a/config.go +++ b/config.go @@ -96,6 +96,13 @@ type PumpConfig struct { // The field names must be the same as the JSON tags of the analytics record fields. // For example: `["api_key", "api_version"]`. IgnoreFields []string `json:"ignore_fields"` + + // Setting this to True allows the Raw Request to be decoded from base 64 + // for all pumps. This is set to false by default. + DecodeRawRequest bool `json:"raw_request_decoded"` + + // Setting this to True allows the Raw Response to be decoded from base 64 for all pumps. This is set to False by default. + DecodeRawResponse bool `json:"raw_response_decoded"` } type UptimeConf struct { @@ -223,6 +230,13 @@ type TykPumpConfiguration struct { OmitConfigFile bool `json:"omit_config_file"` // Enable debugging of Tyk Pump by exposing profiling information, the same as the gateway https://tyk.io/docs/troubleshooting/tyk-gateway/profiling/ HTTPProfile bool `json:"enable_http_profiler"` + + // Setting this to True allows the Raw Request to be decoded from base 64 + // for all pumps. This is set to false by default. + DecodeRawRequest bool `json:"raw_request_decoded"` + + // Setting this to True allows the Raw Response to be decoded from base 64 for all pumps. This is set to False by default. 
+ DecodeRawResponse bool `json:"raw_response_decoded"` } func LoadConfig(filePath *string, configStruct *TykPumpConfiguration) { diff --git a/main.go b/main.go index eb93a340f..72f6b4643 100644 --- a/main.go +++ b/main.go @@ -2,15 +2,15 @@ package main import ( "context" + "encoding/base64" "fmt" + "os" "os/signal" "strings" "sync" "syscall" "time" - "os" - "github.com/TykTechnologies/tyk-pump/analytics" "github.com/TykTechnologies/tyk-pump/analytics/demo" logger "github.com/TykTechnologies/tyk-pump/logger" @@ -147,6 +147,8 @@ func initialisePumps() { thisPmp.SetOmitDetailedRecording(pmp.OmitDetailedRecording) thisPmp.SetMaxRecordSize(pmp.MaxRecordSize) thisPmp.SetIgnoreFields(pmp.IgnoreFields) + thisPmp.SetDecodingRequest(pmp.DecodeRawRequest) + thisPmp.SetDecodingResponse(pmp.DecodeRawResponse) initErr := thisPmp.Init(pmp.Meta) if initErr != nil { log.WithField("pump", thisPmp.GetName()).Error("Pump init error (skipping): ", initErr) @@ -300,7 +302,10 @@ func filterData(pump pumps.Pump, keys []interface{}) []interface{} { shouldTrim := SystemConfig.MaxRecordSize != 0 || pump.GetMaxRecordSize() != 0 filters := pump.GetFilters() ignoreFields := pump.GetIgnoreFields() - if !filters.HasFilter() && !pump.GetOmitDetailedRecording() && !shouldTrim && len(ignoreFields) == 0 { + getDecodingResponse := pump.GetDecodedResponse() + getDecodingRequest := pump.GetDecodedRequest() + // Checking to see if all the config options are empty/false + if !getDecodingRequest && !getDecodingResponse && !filters.HasFilter() && !pump.GetOmitDetailedRecording() && !shouldTrim && len(ignoreFields) == 0 { return keys } @@ -329,6 +334,19 @@ func filterData(pump pumps.Pump, keys []interface{}) []interface{} { if len(ignoreFields) > 0 { decoded.RemoveIgnoredFields(ignoreFields) } + // DECODING RAW REQUEST AND RESPONSE FROM BASE 64 + if getDecodingRequest { + rawRequest, err := base64.StdEncoding.DecodeString(decoded.RawRequest) + if err == nil { + decoded.RawRequest = string(rawRequest) + } + } + if getDecodingResponse { + rawResponse, err := base64.StdEncoding.DecodeString(decoded.RawResponse) + if err == nil { + decoded.RawResponse = string(rawResponse) + } + } filteredKeys[newLenght] = decoded newLenght++ } diff --git a/main_test.go b/main_test.go index da0f22699..4697c79d9 100644 --- a/main_test.go +++ b/main_test.go @@ -321,3 +321,60 @@ func TestIgnoreFieldsFilterData(t *testing.T) { }) } } + +func TestDecodedKey(t *testing.T) { + keys := make([]interface{}, 1) + record := analytics.AnalyticsRecord{APIID: "api111", RawResponse: "RGVjb2RlZFJlc3BvbnNl", RawRequest: "RGVjb2RlZFJlcXVlc3Q="} + keys[0] = record + + tcs := []struct { + expectedRawResponse string + expectedRawRequest string + testName string + decodeResponse bool + decodeRequest bool + }{ + { + testName: "Decode RESPONSE & REQUEST", + expectedRawResponse: "DecodedResponse", + expectedRawRequest: "DecodedRequest", + decodeResponse: true, + decodeRequest: true, + }, + { + testName: "Decode RESPONSE", + expectedRawResponse: "DecodedResponse", + expectedRawRequest: "RGVjb2RlZFJlcXVlc3Q=", + decodeResponse: true, + decodeRequest: false, + }, + { + testName: "Decode REQUEST", + expectedRawResponse: "RGVjb2RlZFJlc3BvbnNl", + expectedRawRequest: "DecodedRequest", + decodeResponse: false, + decodeRequest: true, + }, + { + testName: "Decode NONE", + expectedRawResponse: "RGVjb2RlZFJlc3BvbnNl", + expectedRawRequest: "RGVjb2RlZFJlcXVlc3Q=", + decodeResponse: false, + decodeRequest: false, + }, + } + + for _, tc := range tcs { + t.Run(tc.testName, func(t *testing.T) { 
+ mockedPump := &MockedPump{} + mockedPump.SetDecodingRequest(tc.decodeRequest) + mockedPump.SetDecodingResponse(tc.decodeResponse) + filteredKeys := filterData(mockedPump, keys) + assert.Len(t, filteredKeys, 1) + record1, ok := filteredKeys[0].(analytics.AnalyticsRecord) + assert.True(t, ok) + assert.Equal(t, tc.expectedRawResponse, record1.RawResponse) + assert.Equal(t, tc.expectedRawRequest, record1.RawRequest) + }) + } +} diff --git a/pumps/common.go b/pumps/common.go index e3cd96514..75eae84bc 100644 --- a/pumps/common.go +++ b/pumps/common.go @@ -12,6 +12,8 @@ type CommonPumpConfig struct { OmitDetailedRecording bool log *logrus.Entry ignoreFields []string + decodeResponseBase64 bool + decodeRequestBase64 bool } func (p *CommonPumpConfig) SetFilters(filters analytics.AnalyticsFilters) { @@ -62,3 +64,19 @@ func (p *CommonPumpConfig) SetIgnoreFields(fields []string) { func (p *CommonPumpConfig) GetIgnoreFields() []string { return p.ignoreFields } + +func (p *CommonPumpConfig) SetDecodingResponse(decoding bool) { + p.decodeResponseBase64 = decoding +} + +func (p *CommonPumpConfig) SetDecodingRequest(decoding bool) { + p.decodeRequestBase64 = decoding +} + +func (p *CommonPumpConfig) GetDecodedRequest() bool { + return p.decodeRequestBase64 +} + +func (p *CommonPumpConfig) GetDecodedResponse() bool { + return p.decodeResponseBase64 +} diff --git a/pumps/common_test.go b/pumps/common_test.go new file mode 100644 index 000000000..bbdf72254 --- /dev/null +++ b/pumps/common_test.go @@ -0,0 +1,23 @@ +package pumps + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestDecodingRequest(t *testing.T) { + pump := &CommonPumpConfig{} + pump.SetDecodingRequest(true) + actualValue := pump.GetDecodedRequest() + assert.Equal(t, actualValue, pump.decodeRequestBase64) + assert.True(t, actualValue) +} + +func TestSetDecodingResponse(t *testing.T) { + pump := &CommonPumpConfig{} + pump.SetDecodingResponse(true) + actualValue := pump.GetDecodedResponse() + assert.Equal(t, actualValue, pump.decodeResponseBase64) + assert.True(t, actualValue) +} diff --git a/pumps/pump.go b/pumps/pump.go index d18e8d624..41f783fee 100644 --- a/pumps/pump.go +++ b/pumps/pump.go @@ -31,6 +31,10 @@ type Pump interface { SetLogLevel(logrus.Level) SetIgnoreFields([]string) GetIgnoreFields() []string + SetDecodingResponse(bool) + GetDecodedResponse() bool + SetDecodingRequest(bool) + GetDecodedRequest() bool } type UptimePump interface { From ee07435d1926ed6b2c1512a128317e0d3d827ebe Mon Sep 17 00:00:00 2001 From: Kofo Okesola Date: Mon, 23 Jan 2023 18:23:42 +0100 Subject: [PATCH 039/102] [TT-7482] feat: implement graph sql pump (#562) * added graph sql pump * added init tests * add write data tests * added tests for nonsharded * sharded db tests * linting fixes * fix broken tests * pr changes * add test for env configuration * moved conf * add gorm tags * fix tests * updated readme * small code documentation --- README.md | 29 ++- analytics/graph_record.go | 65 +++--- analytics/graph_record_test.go | 2 +- pumps/graph_sql.go | 201 +++++++++++++++++++ pumps/graph_sql_test.go | 354 +++++++++++++++++++++++++++++++++ pumps/init.go | 1 + 6 files changed, 617 insertions(+), 35 deletions(-) create mode 100644 pumps/graph_sql.go create mode 100644 pumps/graph_sql_test.go diff --git a/README.md b/README.md index d82af2c0d..8c2dfbae1 100644 --- a/README.md +++ b/README.md @@ -404,16 +404,14 @@ For example, if the `aggregation_time` is configured as 50 (minutes) but the doc Note that `store_analytics_per_minute` takes 
precedence over `aggregation_time` so if `store_analytics_per_minute` is equal to true, the value of `aggregation_time` will be equal to 1 and self healing will not operate. -###### Mongo Graph Pump +## Mongo Graph Pump As of Pump 1.7+, a new mongo is available called the `mongo_graph` pump. This pump is specifically for parsing GraphQL and UDG requests, tracking information like types requested, fields requested, specific graphql body errors etc. A sample config looks like this: -```.json +```json { - ... "pumps": { - ... "mongo-graph": { "type": "mongo-graph", "meta": { @@ -424,6 +422,29 @@ A sample config looks like this: } ``` +## SQL Graph Pump +Similar to the Mongo graph pump, the `sql-graph` pump is a specialized pump for parsing and recording granular analytics for GraphQL and UDG requests. +The difference, like the name says is this pump uses sql type databases as its storage db. Supported SQL databases are `sqlite`, `postgres`, `mysql`. + +A sample config looks like this: +```json +{ + "pumps": { + "sql-graph": { + "meta": { + "type": "postgres", + "table_name": "graph-records", + "connection_string": "host=localhost user=postgres password=password dbname=postgres", + "table_sharding": false + } + } + } +} +``` + +`table_sharding` - This determines how the sql tables are created, if this is set to true, a new table is created for each day of records for the graph data. +The name format for each table is _. Defaults to false. + ## Elasticsearch Config `"index_name"` - The name of the index that all the analytics data will be placed in. Defaults to "tyk_analytics" diff --git a/analytics/graph_record.go b/analytics/graph_record.go index 1dcc5de16..13b92fdd6 100644 --- a/analytics/graph_record.go +++ b/analytics/graph_record.go @@ -19,14 +19,14 @@ import ( ) type GraphRecord struct { - Types map[string][]string + Types map[string][]string `gorm:"types"` - AnalyticsRecord `bson:",inline"` + AnalyticsRecord AnalyticsRecord `bson:",inline" gorm:"embedded;embeddedPrefix:analytics_"` - OperationType string - Variables string - Errors []GraphError - HasErrors bool + OperationType string `gorm:"column:operation_type"` + Variables string `gorm:"variables"` + Errors []GraphError `gorm:"errors"` + HasErrors bool `gorm:"has_errors"` } func (a *AnalyticsRecord) ToGraphRecord() (GraphRecord, error) { @@ -94,33 +94,38 @@ func (a *AnalyticsRecord) ToGraphRecord() (GraphRecord, error) { record.Types = typesToFieldsMap // get response and check to see errors - responseDecoded, err := base64.StdEncoding.DecodeString(a.RawResponse) - if err != nil { - return record, nil - } - resp, err := http.ReadResponse(bufio.NewReader(bytes.NewReader(responseDecoded)), nil) - if err != nil { - log.WithError(err).Error("error reading raw response") - return record, err - } - defer resp.Body.Close() + if a.RawResponse != "" { + responseDecoded, err := base64.StdEncoding.DecodeString(a.RawResponse) + if err != nil { + return record, nil + } + resp, err := http.ReadResponse(bufio.NewReader(bytes.NewReader(responseDecoded)), nil) + if err != nil { + log.WithError(err).Error("error reading raw response") + return record, err + } + defer resp.Body.Close() - dat, err := ioutil.ReadAll(resp.Body) - if err != nil { - log.WithError(err).Error("error reading response body") - return record, err - } - errBytes, t, _, err := jsonparser.Get(dat, "errors") - if err != nil && err != jsonparser.KeyPathNotFoundError { - log.WithError(err).Error("error getting response errors") - return record, err - } - if t != jsonparser.NotExist { 
- if err := json.Unmarshal(errBytes, &record.Errors); err != nil { - log.WithError(err).Error("error parsing graph errors") + dat, err := ioutil.ReadAll(resp.Body) + if err != nil { + log.WithError(err).Error("error reading response body") return record, err } - record.HasErrors = true + errBytes, t, _, err := jsonparser.Get(dat, "errors") + // check if the errors key exists in the response + if err != nil && err != jsonparser.KeyPathNotFoundError { + // we got an unexpected error parsing te response + log.WithError(err).Error("error getting response errors") + return record, err + } + if t != jsonparser.NotExist { + // errors key exists so unmarshal it + if err := json.Unmarshal(errBytes, &record.Errors); err != nil { + log.WithError(err).Error("error parsing graph errors") + return record, err + } + record.HasErrors = true + } } return record, nil diff --git a/analytics/graph_record_test.go b/analytics/graph_record_test.go index 5f68a376b..d08f1e2b5 100644 --- a/analytics/graph_record_test.go +++ b/analytics/graph_record_test.go @@ -347,7 +347,7 @@ func TestAnalyticsRecord_ToGraphRecord(t *testing.T) { return } assert.NoError(t, err) - if diff := cmp.Diff(expected, gotten, cmpopts.IgnoreFields(GraphRecord{}, "RawRequest", "RawResponse")); diff != "" { + if diff := cmp.Diff(expected, gotten, cmpopts.IgnoreFields(AnalyticsRecord{}, "RawRequest", "RawResponse")); diff != "" { t.Fatal(diff) } }) diff --git a/pumps/graph_sql.go b/pumps/graph_sql.go new file mode 100644 index 000000000..adabd8a24 --- /dev/null +++ b/pumps/graph_sql.go @@ -0,0 +1,201 @@ +package pumps + +import ( + "context" + "fmt" + + "github.com/TykTechnologies/tyk-pump/analytics" + "github.com/mitchellh/mapstructure" + "github.com/sirupsen/logrus" + "gorm.io/gorm" + gorm_logger "gorm.io/gorm/logger" +) + +const ( + GraphSQLPrefix = "GraphSQL-Pump" + GraphSQLTable = "tyk_analytics_graph" +) + +var GraphSQLDefaultENV = PUMPS_ENV_PREFIX + "_GRAPH_SQL" + PUMPS_ENV_META_PREFIX + +type GraphSQLConf struct { + // TableName is a configuration field unique to the sql-graph pump, this field specifies + // the name of the sql table to be created/used for the pump in the cases of non-sharding + // in the case of sharding, it specifies the table prefix + TableName string `json:"table_name" mapstructure:"table_name"` + + SQLConf `mapstructure:",squash"` +} +type GraphSQLPump struct { + db *gorm.DB + Conf *GraphSQLConf + tableName string + CommonPumpConfig +} + +func (g *GraphSQLPump) GetName() string { + return "Graph SQL Pump" +} + +func (g *GraphSQLPump) New() Pump { + return &GraphSQLPump{} +} + +func (g *GraphSQLPump) Init(conf interface{}) error { + g.log = log.WithField("prefix", GraphSQLPrefix) + + if err := mapstructure.Decode(conf, &g.Conf); err != nil { + g.log.WithError(err).Error("error decoding conf") + return fmt.Errorf("error decoding conf: %w", err) + } + + processPumpEnvVars(g, g.log, g.Conf, GraphSQLDefaultENV) + + logLevel := gorm_logger.Silent + + switch g.Conf.LogLevel { + case "debug": + logLevel = gorm_logger.Info + case "info": + logLevel = gorm_logger.Warn + case "warning": + logLevel = gorm_logger.Error + } + + dialect, errDialect := Dialect(&g.Conf.SQLConf) + if errDialect != nil { + g.log.Error(errDialect) + return errDialect + } + + db, err := gorm.Open(dialect, &gorm.Config{ + AutoEmbedd: true, + UseJSONTags: true, + Logger: gorm_logger.Default.LogMode(logLevel), + }) + if err != nil { + g.log.WithError(err).Error("error opening gorm connection") + return err + } + g.db = db + + if g.Conf.BatchSize == 0 { + 
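For a sense of how this Init path is exercised, a minimal sketch of driving the new sql-graph pump from a plain map, the same shape the unit tests further down use. It assumes the pumps package is imported directly and that type `sqlite` with an empty connection string yields a local SQLite database, as in those tests.

```go
// Minimal sketch: configure the sql-graph pump from a map, which Init decodes
// into GraphSQLConf via mapstructure. Keys mirror pumps/graph_sql_test.go.
package main

import (
	"fmt"

	"github.com/TykTechnologies/tyk-pump/pumps"
)

func main() {
	pump := &pumps.GraphSQLPump{}
	conf := map[string]interface{}{
		"type":           "sqlite", // empty connection string: local SQLite, as in the tests
		"table_sharding": false,    // single table; table name defaults to tyk_analytics_graph
	}
	if err := pump.Init(conf); err != nil {
		fmt.Println("init failed:", err)
		return
	}
	fmt.Println(pump.GetName(), "initialized")
}
```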
g.Conf.BatchSize = SQLDefaultQueryBatchSize + } + + g.tableName = GraphSQLTable + if name := g.Conf.TableName; name != "" { + g.tableName = name + } + if !g.Conf.TableSharding { + if err := g.db.Table(g.tableName).AutoMigrate(&analytics.GraphRecord{}); err != nil { + g.log.WithError(err).Error("error migrating graph analytics table") + return err + } + } + g.db = g.db.Table(g.tableName) + + if g.db.Error != nil { + g.log.WithError(err).Error("error initializing pump") + return err + } + + g.log.Debug("pump initialized and table set up") + return nil +} + +func (g *GraphSQLPump) getGraphRecords(data []interface{}) []*analytics.GraphRecord { + var graphRecords []*analytics.GraphRecord + for _, r := range data { + if r != nil { + var ( + rec analytics.AnalyticsRecord + ok bool + ) + if rec, ok = r.(analytics.AnalyticsRecord); !ok || !rec.IsGraphRecord() { + continue + } + gr, err := rec.ToGraphRecord() + if err != nil { + g.log.Warnf("error converting 1 record") + g.log.WithError(err).Debug("error converting record") + continue + } + graphRecords = append(graphRecords, &gr) + } + } + return graphRecords +} + +func (g *GraphSQLPump) GetEnvPrefix() string { + return g.Conf.EnvPrefix +} + +func (g *GraphSQLPump) WriteData(ctx context.Context, data []interface{}) error { + g.log.Debug("Attempting to write ", len(data), " records...") + + graphRecords := g.getGraphRecords(data) + dataLen := len(graphRecords) + + startIndex := 0 + endIndex := dataLen + // We iterate dataLen +1 times since we're writing the data after the date change on sharding_table:true + if dataLen == 0 { + g.log.Debug("no graphql records") + return nil + } + for i := 0; i <= dataLen; i++ { + if g.Conf.TableSharding { + recDate := graphRecords[startIndex].AnalyticsRecord.TimeStamp.Format("20060102") + var nextRecDate string + // if we're on i == dataLen iteration, it means that we're out of index range. We're going to use the last record date. + if i == dataLen { + nextRecDate = graphRecords[dataLen-1].AnalyticsRecord.TimeStamp.Format("20060102") + recDate = nextRecDate + } else { + nextRecDate = graphRecords[i].AnalyticsRecord.TimeStamp.Format("20060102") + + // if both dates are equal, we shouldn't write in the table yet. 
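As a quick illustration of the naming this sharded path relies on: each day's records land in their own table, built from the configured table name plus the record's day in Go's 20060102 layout, mirroring the `g.tableName + "_" + recDate` concatenation used in the loop.

```go
// Sketch of the per-day table naming used when table_sharding is enabled.
package main

import (
	"fmt"
	"time"
)

// shardedTableName mirrors the table name built in WriteData:
// <configured table name>_<record day formatted as 20060102>.
func shardedTableName(baseTable string, ts time.Time) string {
	return baseTable + "_" + ts.Format("20060102")
}

func main() {
	ts := time.Date(2023, time.January, 2, 15, 4, 5, 0, time.UTC)
	fmt.Println(shardedTableName("tyk_analytics_graph", ts)) // tyk_analytics_graph_20230102
}
```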
+ if recDate == nextRecDate { + continue + } + } + + endIndex = i + + table := g.tableName + "_" + recDate + g.db = g.db.Table(table) + if !g.db.Migrator().HasTable(table) { + if err := g.db.AutoMigrate(&analytics.GraphRecord{}); err != nil { + g.log.Error("error creating table for record") + g.log.WithError(err).Debug("error creating table for record") + } + } + } else { + i = dataLen // write all records at once for non-sharded case, stop for loop after 1 iteration + } + + recs := graphRecords[startIndex:endIndex] + + for ri := 0; ri < len(recs); ri += g.Conf.BatchSize { + ends := ri + g.Conf.BatchSize + if ends > len(recs) { + ends = len(recs) + } + tx := g.db.WithContext(ctx).Create(recs[ri:ends]) + if tx.Error != nil { + g.log.Error(tx.Error) + } + } + + startIndex = i // next day start index, necessary for sharded case + } + + g.log.Info("Purged ", dataLen, " records...") + + return nil +} + +func (g *GraphSQLPump) SetLogLevel(level logrus.Level) { + g.log.Level = level +} diff --git a/pumps/graph_sql_test.go b/pumps/graph_sql_test.go new file mode 100644 index 000000000..25451025b --- /dev/null +++ b/pumps/graph_sql_test.go @@ -0,0 +1,354 @@ +package pumps + +import ( + "context" + "encoding/base64" + "fmt" + "os" + "testing" + "time" + + "github.com/TykTechnologies/tyk-pump/analytics" + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestGraphSQLPump_Init(t *testing.T) { + r := require.New(t) + pump := &GraphSQLPump{} + t.Run("successful", func(t *testing.T) { + conf := GraphSQLConf{ + SQLConf: SQLConf{ + Type: "sqlite", + ConnectionString: "", + }, + TableName: "rand-table", + } + assert.NoError(t, pump.Init(conf)) + t.Cleanup(func() { + if err := pump.db.Migrator().DropTable(conf.TableName); err != nil { + t.Errorf("error cleaning up table: %v", err) + } + }) + assert.True(t, pump.db.Migrator().HasTable(conf.TableName)) + }) + + t.Run("invalid connection details", func(t *testing.T) { + conf := SQLConf{ + Type: "postgres", + ConnectionString: "host=localhost user=gorm password=gorm DB.name=gorm port=9920 sslmode=disable", + } + assert.Error(t, pump.Init(conf)) + }) + + t.Run("should fail", func(t *testing.T) { + conf := SQLConf{ConnectionString: "random"} + assert.ErrorContains(t, pump.Init(conf), "Unsupported `config_storage.type` value:") + }) + + t.Run("invalid config", func(t *testing.T) { + conf := map[string]interface{}{ + "table_name": 1, + } + assert.ErrorContains(t, pump.Init(conf), "error decoding con") + }) + + t.Run("decode from map", func(t *testing.T) { + conf := map[string]interface{}{ + "table_name": "test_table", + "type": "sqlite", + "table_sharding": true, + } + r.NoError(pump.Init(conf)) + assert.Equal(t, "test_table", pump.Conf.TableName) + assert.Equal(t, "sqlite", pump.Conf.Type) + assert.Equal(t, true, pump.Conf.TableSharding) + }) + + t.Run("sharded table", func(t *testing.T) { + conf := GraphSQLConf{ + SQLConf: SQLConf{ + Type: "sqlite", + ConnectionString: "", + TableSharding: true, + }, + TableName: "test-table", + } + assert.NoError(t, pump.Init(conf)) + assert.False(t, pump.db.Migrator().HasTable(conf.TableName)) + }) + + t.Run("init from env", func(t *testing.T) { + envPrefix := fmt.Sprintf("%s_GRAPH_SQL%s", PUMPS_ENV_PREFIX, PUMPS_ENV_META_PREFIX) + "_%s" + r := require.New(t) + envKeyVal := map[string]string{ + "TYPE": "sqlite", + "TABLENAME": "test-table", + "TABLESHARDING": "true", + } + for key, val := range envKeyVal { + newKey := 
fmt.Sprintf(envPrefix, key) + r.NoError(os.Setenv(newKey, val)) + } + t.Cleanup(func() { + for k := range envKeyVal { + r.NoError(os.Unsetenv(fmt.Sprintf(envPrefix, k))) + } + }) + + conf := GraphSQLConf{ + SQLConf: SQLConf{ + Type: "postgres", + ConnectionString: "", + TableSharding: false, + }, + TableName: "wrong-name", + } + r.NoError(pump.Init(conf)) + assert.Equal(t, "sqlite", pump.Conf.Type) + assert.Equal(t, "test-table", pump.Conf.TableName) + assert.Equal(t, true, pump.Conf.TableSharding) + }) +} + +func convToBase64(raw string) string { + return base64.StdEncoding.EncodeToString([]byte(raw)) +} + +func TestGraphSQLPump_WriteData(t *testing.T) { + r := require.New(t) + conf := GraphSQLConf{ + SQLConf: SQLConf{ + Type: "sqlite", + ConnectionString: "", + }, + TableName: "test-table", + } + + type customRecord struct { + response string + tags []string + responseCode int + isHTTP bool + } + type customResponses struct { + types map[string][]string + operationType string + expectedErr []analytics.GraphError + } + + testCases := []struct { + name string + records []customRecord + responses []customResponses + hasError bool + }{ + { + name: "default case", + records: []customRecord{ + { + isHTTP: false, + tags: []string{analytics.PredefinedTagGraphAnalytics}, + responseCode: 200, + response: rawGQLResponse, + }, + { + isHTTP: false, + tags: []string{analytics.PredefinedTagGraphAnalytics}, + responseCode: 200, + response: rawGQLResponseWithError, + }, + { + isHTTP: false, + tags: []string{analytics.PredefinedTagGraphAnalytics}, + responseCode: 500, + response: "", + }, + }, + responses: []customResponses{ + { + types: map[string][]string{ + "Country": {"code"}, + }, + operationType: "Query", + }, + { + types: map[string][]string{ + "Country": {"code"}, + }, + operationType: "Query", + expectedErr: []analytics.GraphError{ + { + Message: "test error", + Path: []interface{}{}, + }, + }, + }, + { + types: map[string][]string{ + "Country": {"code"}, + }, + operationType: "Query", + expectedErr: []analytics.GraphError{}, + }, + }, + hasError: false, + }, + { + name: "skip record", + records: []customRecord{ + { + isHTTP: false, + tags: []string{analytics.PredefinedTagGraphAnalytics}, + responseCode: 200, + response: rawGQLResponse, + }, + { + isHTTP: true, + responseCode: 200, + response: rawHTTPResponse, + }, + { + isHTTP: false, + responseCode: 200, + response: rawGQLResponse, + }, + }, + responses: []customResponses{ + { + types: map[string][]string{ + "Country": {"code"}, + }, + operationType: "Query", + }, + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + pump := &GraphSQLPump{} + assert.NoError(t, pump.Init(conf)) + + t.Cleanup(func() { + if err := pump.db.Migrator().DropTable(conf.TableName); err != nil { + fmt.Printf("test %s, error: %v\n", tc.name, err) + } + }) + records := make([]interface{}, 0) + expectedResponses := make([]analytics.GraphRecord, 0) + // create the records to passed to the pump + for _, item := range tc.records { + r := analytics.AnalyticsRecord{ + APIName: "Test API", + Path: "POST", + Tags: item.tags, + } + if !item.isHTTP { + r.RawRequest = convToBase64(rawGQLRequest) + r.ApiSchema = convToBase64(schema) + } else { + r.RawRequest = convToBase64(rawHTTPReq) + r.RawResponse = convToBase64(rawHTTPResponse) + } + r.RawResponse = convToBase64(item.response) + if item.responseCode != 0 { + r.ResponseCode = item.responseCode + } + records = append(records, r) + } + + // create the responses to be expected from the db + for _, 
item := range tc.responses { + r := analytics.GraphRecord{ + Types: item.types, + OperationType: item.operationType, + Errors: []analytics.GraphError{}, + } + if item.expectedErr == nil { + r.Errors = []analytics.GraphError{} + } else { + r.Errors = item.expectedErr + r.HasErrors = true + } + expectedResponses = append(expectedResponses, r) + } + + err := pump.WriteData(context.Background(), records) + if !tc.hasError { + r.NoError(err) + } else { + r.Error(err) + } + + var resultRecords []analytics.GraphRecord + tx := pump.db.Table(conf.TableName).Find(&resultRecords) + r.NoError(tx.Error) + r.Equalf(len(tc.responses), len(resultRecords), "responses count do no match") + if diff := cmp.Diff(expectedResponses, resultRecords, cmpopts.IgnoreFields(analytics.GraphRecord{}, "AnalyticsRecord")); diff != "" { + t.Error(diff) + } + }) + } +} + +func TestGraphSQLPump_Sharded(t *testing.T) { + r := require.New(t) + conf := GraphSQLConf{ + SQLConf: SQLConf{ + Type: "sqlite", + ConnectionString: "", + TableSharding: true, + }, + TableName: "graph-record", + } + pump := &GraphSQLPump{} + assert.NoError(t, pump.Init(conf)) + + baseRecord := analytics.AnalyticsRecord{ + APIID: "test-api", + Path: "/test-api", + RawRequest: convToBase64(rawGQLRequest), + RawResponse: convToBase64(rawGQLResponse), + ApiSchema: convToBase64(schema), + Tags: []string{analytics.PredefinedTagGraphAnalytics}, + APIName: "test-api", + ResponseCode: 200, + Method: "POST", + } + + expectedTables := make([]string, 0) + records := make([]interface{}, 0) + for i := 1; i <= 3; i++ { + day := i + timestamp := time.Date(2023, time.January, day, 0, 1, 0, 0, time.UTC) + rec := baseRecord + rec.TimeStamp = timestamp + rec.Month = timestamp.Month() + rec.Day = timestamp.Day() + rec.Year = timestamp.Year() + records = append(records, rec) + expectedTables = append(expectedTables, fmt.Sprintf("%s_%s", conf.TableName, timestamp.Format("20060102"))) + } + + // cleanup after + t.Cleanup(func() { + for _, i := range expectedTables { + if err := pump.db.Migrator().DropTable(i); err != nil { + t.Error(err) + } + } + }) + + r.NoError(pump.WriteData(context.Background(), records)) + // check tables + for _, item := range expectedTables { + r.Truef(pump.db.Migrator().HasTable(item), "table %s does not exist", item) + recs := make([]analytics.GraphRecord, 0) + q := pump.db.Table(item).Find(&recs) + r.NoError(q.Error) + assert.Equalf(t, 1, len(recs), "expected one record for %s table, instead got %d", item, len(recs)) + } +} diff --git a/pumps/init.go b/pumps/init.go index 7cf49f39a..74bf7de48 100644 --- a/pumps/init.go +++ b/pumps/init.go @@ -33,4 +33,5 @@ func init() { AvailablePumps["stdout"] = &StdOutPump{} AvailablePumps["timestream"] = &TimestreamPump{} AvailablePumps["mongo-graph"] = &GraphMongoPump{} + AvailablePumps["sql-graph"] = &GraphSQLPump{} } From 43630896fd3f46366cd1c0e091a5ab42ff521d14 Mon Sep 17 00:00:00 2001 From: Matias <83959431+mativm02@users.noreply.github.com> Date: Tue, 24 Jan 2023 09:26:48 -0300 Subject: [PATCH 040/102] Increasing test coverage of logger package (#563) * increasing test coverage of logger package * unsetting env var after running each test Co-authored-by: Tomas Buchaillot --- logger/init.go | 5 ----- logger/init_test.go | 47 ++++++++++++++++++++++++++++++++++++++++++++- 2 files changed, 46 insertions(+), 6 deletions(-) diff --git a/logger/init.go b/logger/init.go index 010b22d63..e07fe0037 100644 --- a/logger/init.go +++ b/logger/init.go @@ -24,17 +24,12 @@ func GetFormatterWithForcedPrefix() 
*prefixed.TextFormatter { func GetLogger() *logrus.Logger { level := os.Getenv("TYK_LOGLEVEL") - if level == "" { - level = "info" - } switch strings.ToLower(level) { case "error": log.Level = logrus.ErrorLevel case "warn": log.Level = logrus.WarnLevel - case "info": - log.Level = logrus.InfoLevel case "debug": log.Level = logrus.DebugLevel default: diff --git a/logger/init_test.go b/logger/init_test.go index 832dbc7a1..918e443a9 100644 --- a/logger/init_test.go +++ b/logger/init_test.go @@ -9,7 +9,7 @@ import ( "github.com/sirupsen/logrus" ) -//TestFormatterWithForcedPrefixFileOutput check if the prefix is stored in not TTY outputs +// TestFormatterWithForcedPrefixFileOutput check if the prefix is stored in not TTY outputs func TestFormatterWithForcedPrefixFileOutput(t *testing.T) { outputFile := "test.log" @@ -51,3 +51,48 @@ func TestFormatterWithForcedPrefixFileOutput(t *testing.T) { t.Error("Error removing test logs file:" + err.Error()) } } + +func Test_GetLooger(t *testing.T) { + tests := []struct { + name string + env string + expectedLevel logrus.Level + }{ + { + name: "default", + env: "", + expectedLevel: logrus.InfoLevel, + }, + { + name: "error", + env: "error", + expectedLevel: logrus.ErrorLevel, + }, + { + name: "warn", + env: "warn", + expectedLevel: logrus.WarnLevel, + }, + { + name: "info", + env: "info", + expectedLevel: logrus.InfoLevel, + }, + { + name: "debug", + env: "debug", + expectedLevel: logrus.DebugLevel, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + os.Setenv("TYK_LOGLEVEL", tt.env) + defer os.Unsetenv("TYK_LOGLEVEL") + logger := GetLogger() + if logger.Level != tt.expectedLevel { + t.Errorf("Expected level %v, got %v", tt.expectedLevel, logger.Level) + } + }) + } +} From 2a478d831b77c1b1d57364d61510c90966acea40 Mon Sep 17 00:00:00 2001 From: Tit Petric Date: Thu, 9 Feb 2023 17:39:12 +0100 Subject: [PATCH 041/102] Remove logrus-prefixed-formatter (#567) * Remove logrus-prefixed-formatter * Fix tests to avoid env --------- Co-authored-by: Tit Petric --- go.mod | 1 - logger/init.go | 34 +++++++++++++++++----------------- logger/init_test.go | 11 ++++------- 3 files changed, 21 insertions(+), 25 deletions(-) diff --git a/go.mod b/go.mod index 430e6d277..c39a289d6 100644 --- a/go.mod +++ b/go.mod @@ -45,7 +45,6 @@ require ( github.com/sirupsen/logrus v1.8.1 github.com/stretchr/testify v1.8.0 github.com/syndtr/goleveldb v0.0.0-20190318030020-c3a204f8e965 // indirect - github.com/x-cray/logrus-prefixed-formatter v0.5.2 github.com/xtgo/uuid v0.0.0-20140804021211-a0b114877d4c // indirect golang.org/x/lint v0.0.0-20200302205851-738671d3881b // indirect golang.org/x/net v0.0.0-20210614182718-04defd469f4e diff --git a/logger/init.go b/logger/init.go index e07fe0037..e35edf30b 100644 --- a/logger/init.go +++ b/logger/init.go @@ -5,36 +5,36 @@ import ( "strings" "github.com/sirupsen/logrus" - prefixed "github.com/x-cray/logrus-prefixed-formatter" ) var log = logrus.New() func init() { - log.Formatter = GetFormatterWithForcedPrefix() + log.Level = level(os.Getenv("TYK_LOGLEVEL")) + log.Formatter = formatter() } -func GetFormatterWithForcedPrefix() *prefixed.TextFormatter { - formatter := new(prefixed.TextFormatter) - formatter.TimestampFormat = `Jan 02 15:04:05` - formatter.FullTimestamp = true - - return formatter -} - -func GetLogger() *logrus.Logger { - level := os.Getenv("TYK_LOGLEVEL") - +func level(level string) logrus.Level { switch strings.ToLower(level) { case "error": - log.Level = logrus.ErrorLevel + return logrus.ErrorLevel case 
"warn": - log.Level = logrus.WarnLevel + return logrus.WarnLevel case "debug": - log.Level = logrus.DebugLevel + return logrus.DebugLevel default: - log.Level = logrus.InfoLevel + return logrus.InfoLevel } +} +func formatter() *logrus.TextFormatter { + formatter := new(logrus.TextFormatter) + formatter.TimestampFormat = `Jan 02 15:04:05` + formatter.FullTimestamp = true + formatter.DisableColors = true + return formatter +} + +func GetLogger() *logrus.Logger { return log } diff --git a/logger/init_test.go b/logger/init_test.go index 918e443a9..61fa5f578 100644 --- a/logger/init_test.go +++ b/logger/init_test.go @@ -11,7 +11,6 @@ import ( // TestFormatterWithForcedPrefixFileOutput check if the prefix is stored in not TTY outputs func TestFormatterWithForcedPrefixFileOutput(t *testing.T) { - outputFile := "test.log" var f *os.File var err error @@ -52,7 +51,7 @@ func TestFormatterWithForcedPrefixFileOutput(t *testing.T) { } } -func Test_GetLooger(t *testing.T) { +func Test_GetLogger(t *testing.T) { tests := []struct { name string env string @@ -87,11 +86,9 @@ func Test_GetLooger(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - os.Setenv("TYK_LOGLEVEL", tt.env) - defer os.Unsetenv("TYK_LOGLEVEL") - logger := GetLogger() - if logger.Level != tt.expectedLevel { - t.Errorf("Expected level %v, got %v", tt.expectedLevel, logger.Level) + logLevel := level(tt.env) + if logLevel != tt.expectedLevel { + t.Errorf("Expected level %v, got %v", tt.expectedLevel, logLevel) } }) } From 5e649dbc75847f78b7fb0acdfde3abb7c8ddd012 Mon Sep 17 00:00:00 2001 From: Tyk-ITS Account <92926870+Tyk-ITS@users.noreply.github.com> Date: Wed, 15 Feb 2023 09:37:45 +0100 Subject: [PATCH 042/102] Add gpac files (#565) Github policy as code is managed in ci/repo-policy and manages the github repository settings for automation needs. 
Co-authored-by: Esteban Ricardo Mirizio --- .github/workflows/pac.yml | 45 +++++++++++ .gitignore | 1 + ci/repo-policy/main.tf | 44 +++++++++++ ci/repo-policy/modules/github-repos/repo.tf | 74 ++++++++++++++++++ .../modules/github-repos/variables.tf | 78 +++++++++++++++++++ 5 files changed, 242 insertions(+) create mode 100644 .github/workflows/pac.yml create mode 100644 ci/repo-policy/main.tf create mode 100644 ci/repo-policy/modules/github-repos/repo.tf create mode 100644 ci/repo-policy/modules/github-repos/variables.tf diff --git a/.github/workflows/pac.yml b/.github/workflows/pac.yml new file mode 100644 index 000000000..79740bb80 --- /dev/null +++ b/.github/workflows/pac.yml @@ -0,0 +1,45 @@ +name: Policy as Code + +on: + pull_request: + paths: + - ci/repo-policy/** + +env: + TERRAFORM_DIR: "./ci/repo-policy" + GITHUB_TOKEN: ${{ secrets.ITS_GH_TOKEN }} + +jobs: + terraform: + runs-on: ubuntu-latest + permissions: + id-token: write + + steps: + - name: Checkout + uses: actions/checkout@v3 + with: + fetch-depth: 1 + + - uses: aws-actions/configure-aws-credentials@v1 + with: + role-to-assume: arn:aws:iam::754489498669:role/gromit-ci + role-session-name: gromitci + aws-region: eu-central-1 + + - uses: hashicorp/setup-terraform@v2 + with: + terraform_version: 1.3.0 + + - name: Terraform Init + working-directory: ${{ env.TERRAFORM_DIR }} + id: init + run: terraform init -input=false + + - name: Terraform Plan + working-directory: ${{ env.TERRAFORM_DIR }} + id: plan + run: | + echo "::group::Terraform Plan" + terraform validate && terraform plan + echo "::endgroup::" diff --git a/.gitignore b/.gitignore index c688632bb..c68a98853 100644 --- a/.gitignore +++ b/.gitignore @@ -19,3 +19,4 @@ pumps/chunk.go .DS_Store migrate.js utils/release_rc.sh +.terraform** diff --git a/ci/repo-policy/main.tf b/ci/repo-policy/main.tf new file mode 100644 index 000000000..a06571254 --- /dev/null +++ b/ci/repo-policy/main.tf @@ -0,0 +1,44 @@ +terraform { + + #Being used until TFCloud can be used + backend "s3" { + bucket = "terraform-state-devenv" + key = "github-policy/tyk-pump" + region = "eu-central-1" + dynamodb_table = "terraform-state-locks" + } + + required_providers { + github = { + source = "integrations/github" + version = "5.16.0" + } + } +} + +provider "github" { + owner = "TykTechnologies" +} + +module "tyk-pump" { + source = "./modules/github-repos" + repo = "tyk-pump" + description = "Tyk Analytics Pump to move analytics data from Redis to any supported back end (multiple back ends can be written to at once)." 
+ default_branch = "master" + topics = [] + visibility = "public" + wiki = false + vulnerability_alerts = true + squash_merge_commit_message = "COMMIT_MESSAGES" + squash_merge_commit_title = "COMMIT_OR_PR_TITLE" + release_branches = [ +{ branch = "master", + reviewers = "2", + convos = "false", + required_tests = ["1.15,Go 1.16 tests"]}, +{ branch = "release-1.7", + reviewers = "2", + convos = "false", + required_tests = ["1.15,Go 1.16 tests"]}, +] +} \ No newline at end of file diff --git a/ci/repo-policy/modules/github-repos/repo.tf b/ci/repo-policy/modules/github-repos/repo.tf new file mode 100644 index 000000000..d3b9250ff --- /dev/null +++ b/ci/repo-policy/modules/github-repos/repo.tf @@ -0,0 +1,74 @@ +terraform { + required_providers { + github = { + source = "integrations/github" + version = "5.16.0" + } + } + +} + +resource "github_repository" "repository" { + name = var.repo + description = var.description + visibility = var.visibility + allow_rebase_merge = var.rebase_merge + allow_squash_merge = true + squash_merge_commit_message = var.squash_merge_commit_message + squash_merge_commit_title = var.squash_merge_commit_title + allow_merge_commit = var.merge_commit + allow_auto_merge = true + delete_branch_on_merge = var.delete_branch_on_merge + vulnerability_alerts = var.vulnerability_alerts + has_downloads = true + has_issues = true + has_wiki = var.wiki + has_projects = true + topics = var.topics +} + +resource "github_branch" "default" { + repository = github_repository.repository.name + branch = var.default_branch +} + +resource "github_branch" "release_branches" { + for_each = { for i, b in var.release_branches : + b.branch => b } + repository = github_repository.repository.name + branch = each.value.branch + source_branch = each.value.source_branch +} + +resource "github_branch_default" "default" { + repository = github_repository.repository.name + branch = github_branch.default.branch +} + + +resource "github_branch_protection" "automerge" { + for_each = { for i, b in var.release_branches : + b.branch => b } + + repository_id = github_repository.repository.node_id + pattern = each.value.branch + + #checks for automerge + require_signed_commits = false + require_conversation_resolution = each.value.convos + required_linear_history = false + enforce_admins = false + allows_deletions = false + allows_force_pushes = false + + required_status_checks { + strict = true + contexts = each.value.required_tests + } + + required_pull_request_reviews { + require_code_owner_reviews = false + required_approving_review_count = each.value.reviewers + + } +} diff --git a/ci/repo-policy/modules/github-repos/variables.tf b/ci/repo-policy/modules/github-repos/variables.tf new file mode 100644 index 000000000..c7e304a79 --- /dev/null +++ b/ci/repo-policy/modules/github-repos/variables.tf @@ -0,0 +1,78 @@ +variable "repo" { + type = string + description = "Repository name" +} + +variable "description" { + type = string + description = "Repository description" +} + +variable "visibility" { + type = string + description = "Repository visibility , private or public" + default = "public" +} + +variable "wiki" { + type = bool + description = "Repository has wiki enabled or not" + default = true +} + +variable "topics" { + type = list(string) + description = "Github topics" +} + +variable "default_branch" { + type = string + description = "Repository default branch name" +} + +variable "merge_commit" { + type = bool + description = "Set to false to disable merge commits on the repository" + default = 
false +} + +variable "rebase_merge" { + type = bool + description = "Set to false to disable rebase merges on the repository" + default = false +} + +variable "delete_branch_on_merge" { + type = bool + description = "Automatically delete head branch after a pull request is merged" + default = true +} + +variable "vulnerability_alerts" { + type = bool + description = "Set to true to enable security alerts for vulnerable dependencies. Enabling requires alerts to be enabled on the owner level. (Note for importing: GitHub enables the alerts on public repos but disables them on private repos by default.)" + default = true +} + +variable "squash_merge_commit_message" { + type = string + description = "Can be PR_BODY, COMMIT_MESSAGES, or BLANK for a default squash merge commit message." + default = "COMMIT_MESSAGES" +} + +variable "squash_merge_commit_title" { + type = string + description = "Can be PR_TITLE or COMMIT_OR_PR_TITLE for a default squash merge commit title." + default = "COMMIT_OR_PR_TITLE" +} + +variable "release_branches" { + type = list(object({ + branch = string # Name of the branch + source_branch = optional(string) # Source of the branch, needed when creating it + reviewers = number # Min number of reviews needed + required_tests = list(string) # Workflows that need to pass before merging + convos = bool # Should conversations be resolved before merging + })) + description = "List of branches managed by terraform" +} From 5ccf526b4595723dd2328b223593eebc599c1db0 Mon Sep 17 00:00:00 2001 From: Kofo Okesola Date: Mon, 20 Feb 2023 06:47:58 +0100 Subject: [PATCH 043/102] [TT-7820]Feat: sql aggregate pump (#566) * Squashed commits to remove binary file fix lint fixed lint errors fixed ci-lint lint fix remove duplicate code added sharded tests fix linter issues removed old files tests written added sql pump modifying tests implementing write data can count errors aggregate types and fields separated aggregate function * remove earlier change * add comment for graph errors --- analytics/aggregate.go | 602 ++++++++++++++++++------------ analytics/aggregate_test.go | 249 +++++++++++- pumps/graph_sql_aggregate.go | 186 +++++++++ pumps/graph_sql_aggregate_test.go | 452 ++++++++++++++++++++++ pumps/hybrid.go | 2 +- pumps/init.go | 1 + pumps/mongo_aggregate.go | 2 +- pumps/sql_aggregate.go | 8 +- 8 files changed, 1249 insertions(+), 253 deletions(-) create mode 100644 pumps/graph_sql_aggregate.go create mode 100644 pumps/graph_sql_aggregate_test.go diff --git a/analytics/aggregate.go b/analytics/aggregate.go index 97dfbb353..15dedd44c 100644 --- a/analytics/aggregate.go +++ b/analytics/aggregate.go @@ -16,9 +16,11 @@ import ( ) const ( - AgggregateMixedCollectionName = "tyk_analytics_aggregates" - MongoAggregatePrefix = "mongo-pump-aggregate" - AggregateSQLTable = "tyk_aggregated" + AgggregateMixedCollectionName = "tyk_analytics_aggregates" + GraphAggregateMixedCollectionName = "tyk_graph_analytics_aggregate" + MongoAggregatePrefix = "mongo-pump-aggregate" + AggregateSQLTable = "tyk_aggregated" + AggregateGraphSQLTable = "tyk_graph_aggregated" ) // lastDocumentTimestamp is a map to store the last document timestamps of different Mongo Aggregators @@ -59,6 +61,14 @@ type Counter struct { ErrorList []ErrorData `json:"error_list" sql:"-"` } +type GraphRecordAggregate struct { + AnalyticsRecordAggregate + + Types map[string]*Counter + Fields map[string]*Counter + Operation map[string]*Counter +} + type AnalyticsRecordAggregate struct { TimeStamp time.Time OrgID string @@ -214,6 +224,17 @@ func 
OnConflictAssignments(tableName string, tempTable string) map[string]interf return assignments } +func NewGraphRecordAggregate() GraphRecordAggregate { + analyticsAggregate := AnalyticsRecordAggregate{}.New() + + return GraphRecordAggregate{ + AnalyticsRecordAggregate: analyticsAggregate, + Types: make(map[string]*Counter), + Fields: make(map[string]*Counter), + Operation: make(map[string]*Counter), + } +} + func (f AnalyticsRecordAggregate) New() AnalyticsRecordAggregate { thisF := AnalyticsRecordAggregate{} thisF.APIID = make(map[string]*Counter) @@ -302,15 +323,32 @@ type Dimension struct { Counter *Counter } -func (f *AnalyticsRecordAggregate) Dimensions() (dimensions []Dimension) { - fnLatencySetter := func(counter *Counter) *Counter { - if counter.Hits > 0 { - counter.Latency = float64(counter.TotalLatency) / float64(counter.Hits) - counter.UpstreamLatency = float64(counter.TotalUpstreamLatency) / float64(counter.Hits) - } - return counter +func fnLatencySetter(counter *Counter) *Counter { + if counter.Hits > 0 { + counter.Latency = float64(counter.TotalLatency) / float64(counter.Hits) + counter.UpstreamLatency = float64(counter.TotalUpstreamLatency) / float64(counter.Hits) + } + return counter +} + +func (g *GraphRecordAggregate) Dimensions() []Dimension { + dimensions := g.AnalyticsRecordAggregate.Dimensions() + for key, inc := range g.Types { + dimensions = append(dimensions, Dimension{Name: "types", Value: key, Counter: fnLatencySetter(inc)}) } + for key, inc := range g.Fields { + dimensions = append(dimensions, Dimension{Name: "fields", Value: key, Counter: fnLatencySetter(inc)}) + } + + for key, inc := range g.Operation { + dimensions = append(dimensions, Dimension{Name: "operation", Value: key, Counter: fnLatencySetter(inc)}) + } + + return dimensions +} + +func (f *AnalyticsRecordAggregate) Dimensions() (dimensions []Dimension) { for key, inc := range f.APIID { dimensions = append(dimensions, Dimension{"apiid", key, fnLatencySetter(inc)}) } @@ -543,8 +581,75 @@ func replaceUnsupportedChars(path string) string { return result } +func AggregateGraphData(data []interface{}, dbIdentifier string, aggregationTime int) map[string]GraphRecordAggregate { + aggregateMap := make(map[string]GraphRecordAggregate) + + for _, item := range data { + record, ok := item.(AnalyticsRecord) + if !ok { + continue + } + + if !record.IsGraphRecord() { + continue + } + + graphRec, err := record.ToGraphRecord() + if err != nil { + log.WithError(err).Debug("error converting record to graph record") + continue + } + + aggregate, found := aggregateMap[record.OrgID] + if !found { + aggregate = NewGraphRecordAggregate() + + // Set the hourly timestamp & expiry + asTime := record.TimeStamp + aggregate.TimeStamp = setAggregateTimestamp(dbIdentifier, asTime, aggregationTime) + aggregate.ExpireAt = record.ExpireAt + aggregate.TimeID.Year = asTime.Year() + aggregate.TimeID.Month = int(asTime.Month()) + aggregate.TimeID.Day = asTime.Day() + aggregate.TimeID.Hour = asTime.Hour() + aggregate.OrgID = record.OrgID + aggregate.LastTime = record.TimeStamp + aggregate.Total.ErrorMap = make(map[string]int) + } + + var counter Counter + aggregate.AnalyticsRecordAggregate, counter = incrementAggregate(&aggregate.AnalyticsRecordAggregate, &graphRec.AnalyticsRecord, false, nil) + // graph errors are different from http status errors and can occur even if a response is gotten. 
+ // check for graph errors and increment the error count if there are indeed graph errors + if graphRec.HasErrors && counter.ErrorTotal < 1 { + counter.ErrorTotal++ + counter.Success-- + } + c := incrementOrSetUnit(&counter, aggregate.Operation[graphRec.OperationType]) + aggregate.Operation[graphRec.OperationType] = c + aggregate.Operation[graphRec.OperationType].Identifier = graphRec.OperationType + aggregate.Operation[graphRec.OperationType].HumanIdentifier = graphRec.OperationType + + for t, fields := range graphRec.Types { + c = incrementOrSetUnit(&counter, aggregate.Types[t]) + aggregate.Types[t] = c + aggregate.Types[t].Identifier = t + aggregate.Types[t].HumanIdentifier = t + for _, f := range fields { + label := fmt.Sprintf("%s_%s", t, f) + c := incrementOrSetUnit(&counter, aggregate.Fields[label]) + aggregate.Fields[label] = c + aggregate.Fields[label].Identifier = label + aggregate.Fields[label].HumanIdentifier = label + } + } + aggregateMap[record.OrgID] = aggregate + } + return aggregateMap +} + // AggregateData calculates aggregated data, returns map orgID => aggregated analytics data -func AggregateData(data []interface{}, trackAllPaths bool, ignoreTagPrefixList []string, dbIdentifier string, aggregationTime int, ignoreGraphData bool) map[string]AnalyticsRecordAggregate { +func AggregateData(data []interface{}, trackAllPaths bool, ignoreTagPrefixList []string, dbIdentifier string, aggregationTime int) map[string]AnalyticsRecordAggregate { analyticsPerOrg := make(map[string]AnalyticsRecordAggregate) for _, v := range data { thisV := v.(AnalyticsRecord) @@ -555,7 +660,7 @@ func AggregateData(data []interface{}, trackAllPaths bool, ignoreTagPrefixList [ } // We don't want to aggregate Graph Data with REST data - there is a different type for that. 
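The graph records skipped here are handled by AggregateGraphData above. As a compact sketch of its counting rule: every requested type and every "Type_field" label is bumped once per record, and GraphQL errors count against the error total even when the HTTP status is 200. The counter type below is a simplified stand-in for analytics.Counter.

```go
// Simplified illustration of how AggregateGraphData bumps per-type and
// per-field counters; hasGraphErrors models GraphRecord.HasErrors.
package main

import "fmt"

type counter struct{ Hits, Success, ErrorTotal int }

func bump(m map[string]*counter, key string, hasGraphErrors bool) {
	c, ok := m[key]
	if !ok {
		c = &counter{}
		m[key] = c
	}
	c.Hits++
	if hasGraphErrors {
		c.ErrorTotal++ // graph errors count as failures even on HTTP 200
	} else {
		c.Success++
	}
}

// aggregateTypes walks the requested types/fields of one record, using the
// "<Type>_<field>" label convention from AggregateGraphData.
func aggregateTypes(requested map[string][]string, hasGraphErrors bool, types, fields map[string]*counter) {
	for typeName, fieldNames := range requested {
		bump(types, typeName, hasGraphErrors)
		for _, f := range fieldNames {
			bump(fields, typeName+"_"+f, hasGraphErrors)
		}
	}
}

func main() {
	types, fields := map[string]*counter{}, map[string]*counter{}
	aggregateTypes(map[string][]string{"Characters": {"info"}}, false, types, fields) // clean record
	aggregateTypes(map[string][]string{"Characters": {"info"}}, true, types, fields)  // record with graph errors
	fmt.Printf("%+v %+v\n", types["Characters"], fields["Characters_info"])
	// &{Hits:2 Success:1 ErrorTotal:1} &{Hits:2 Success:1 ErrorTotal:1}
}
```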
- if ignoreGraphData && thisV.IsGraphRecord() { + if thisV.IsGraphRecord() { continue } @@ -576,272 +681,279 @@ func AggregateData(data []interface{}, trackAllPaths bool, ignoreTagPrefixList [ thisAggregate.LastTime = thisV.TimeStamp thisAggregate.Total.ErrorMap = make(map[string]int) } + thisAggregate, _ = incrementAggregate(&thisAggregate, &thisV, trackAllPaths, ignoreTagPrefixList) + analyticsPerOrg[orgID] = thisAggregate + } - // Always update the last timestamp - thisAggregate.LastTime = thisV.TimeStamp - thisAggregate.Total.LastTime = thisV.TimeStamp + return analyticsPerOrg +} - // Create the counter for this record - var thisCounter Counter - if thisV.ResponseCode == -1 { - thisCounter = Counter{ - LastTime: thisV.TimeStamp, - OpenConnections: thisV.Network.OpenConnections, - ClosedConnections: thisV.Network.ClosedConnection, - BytesIn: thisV.Network.BytesIn, - BytesOut: thisV.Network.BytesOut, - } - thisAggregate.Total.OpenConnections += thisCounter.OpenConnections - thisAggregate.Total.ClosedConnections += thisCounter.ClosedConnections - thisAggregate.Total.BytesIn += thisCounter.BytesIn - thisAggregate.Total.BytesOut += thisCounter.BytesOut - if thisV.APIID != "" { - c := thisAggregate.APIID[thisV.APIID] - if c == nil { - c = &Counter{ - Identifier: thisV.APIID, - HumanIdentifier: thisV.APIName, - } - thisAggregate.APIID[thisV.APIID] = c +// incrementAggregate increments the analytic record aggregate fields using the analytics record +func incrementAggregate(aggregate *AnalyticsRecordAggregate, record *AnalyticsRecord, trackAllPaths bool, ignoreTagPrefixList []string) (AnalyticsRecordAggregate, Counter) { + // Always update the last timestamp + aggregate.LastTime = record.TimeStamp + aggregate.Total.LastTime = record.TimeStamp + + // Create the counter for this record + var thisCounter Counter + if record.ResponseCode == -1 { + thisCounter = Counter{ + LastTime: record.TimeStamp, + OpenConnections: record.Network.OpenConnections, + ClosedConnections: record.Network.ClosedConnection, + BytesIn: record.Network.BytesIn, + BytesOut: record.Network.BytesOut, + } + aggregate.Total.OpenConnections += thisCounter.OpenConnections + aggregate.Total.ClosedConnections += thisCounter.ClosedConnections + aggregate.Total.BytesIn += thisCounter.BytesIn + aggregate.Total.BytesOut += thisCounter.BytesOut + if record.APIID != "" { + c := aggregate.APIID[record.APIID] + if c == nil { + c = &Counter{ + Identifier: record.APIID, + HumanIdentifier: record.APIName, } - c.BytesIn += thisCounter.BytesIn - c.BytesOut += thisCounter.BytesOut - } - } else { - thisCounter = Counter{ - Hits: 1, - Success: 0, - ErrorTotal: 0, - RequestTime: float64(thisV.RequestTime), - TotalRequestTime: float64(thisV.RequestTime), - LastTime: thisV.TimeStamp, - - MaxUpstreamLatency: thisV.Latency.Upstream, - MinUpstreamLatency: thisV.Latency.Upstream, - TotalUpstreamLatency: thisV.Latency.Upstream, - MaxLatency: thisV.Latency.Total, - MinLatency: thisV.Latency.Total, - TotalLatency: thisV.Latency.Total, - ErrorMap: make(map[string]int), - } - thisAggregate.Total.Hits++ - thisAggregate.Total.TotalRequestTime += float64(thisV.RequestTime) - - // We need an initial value - thisAggregate.Total.RequestTime = thisAggregate.Total.TotalRequestTime / float64(thisAggregate.Total.Hits) - if thisV.ResponseCode >= 400 { - thisCounter.ErrorTotal = 1 - thisCounter.ErrorMap[strconv.Itoa(thisV.ResponseCode)]++ - thisAggregate.Total.ErrorTotal++ - thisAggregate.Total.ErrorMap[strconv.Itoa(thisV.ResponseCode)]++ + aggregate.APIID[record.APIID] 
= c } + c.BytesIn += thisCounter.BytesIn + c.BytesOut += thisCounter.BytesOut + } + } else { + thisCounter = Counter{ + Hits: 1, + Success: 0, + ErrorTotal: 0, + RequestTime: float64(record.RequestTime), + TotalRequestTime: float64(record.RequestTime), + LastTime: record.TimeStamp, + + MaxUpstreamLatency: record.Latency.Upstream, + MinUpstreamLatency: record.Latency.Upstream, + TotalUpstreamLatency: record.Latency.Upstream, + MaxLatency: record.Latency.Total, + MinLatency: record.Latency.Total, + TotalLatency: record.Latency.Total, + ErrorMap: make(map[string]int), + } + aggregate.Total.Hits++ + aggregate.Total.TotalRequestTime += float64(record.RequestTime) + + // We need an initial value + aggregate.Total.RequestTime = aggregate.Total.TotalRequestTime / float64(aggregate.Total.Hits) + if record.ResponseCode >= 400 { + thisCounter.ErrorTotal = 1 + thisCounter.ErrorMap[strconv.Itoa(record.ResponseCode)]++ + aggregate.Total.ErrorTotal++ + aggregate.Total.ErrorMap[strconv.Itoa(record.ResponseCode)]++ + } - if (thisV.ResponseCode < 300) && (thisV.ResponseCode >= 200) { - thisCounter.Success = 1 - thisAggregate.Total.Success++ - } + if (record.ResponseCode < 300) && (record.ResponseCode >= 200) { + thisCounter.Success = 1 + aggregate.Total.Success++ + } - thisAggregate.Total.TotalLatency += thisV.Latency.Total - thisAggregate.Total.TotalUpstreamLatency += thisV.Latency.Upstream + aggregate.Total.TotalLatency += record.Latency.Total + aggregate.Total.TotalUpstreamLatency += record.Latency.Upstream - if thisAggregate.Total.MaxLatency < thisV.Latency.Total { - thisAggregate.Total.MaxLatency = thisV.Latency.Total - } + if aggregate.Total.MaxLatency < record.Latency.Total { + aggregate.Total.MaxLatency = record.Latency.Total + } - if thisAggregate.Total.MaxUpstreamLatency < thisV.Latency.Upstream { - thisAggregate.Total.MaxUpstreamLatency = thisV.Latency.Upstream - } + if aggregate.Total.MaxUpstreamLatency < record.Latency.Upstream { + aggregate.Total.MaxUpstreamLatency = record.Latency.Upstream + } - // by default, min_total_latency will have 0 value - // it should not be set to 0 always - if thisAggregate.Total.Hits == 1 { - thisAggregate.Total.MinLatency = thisV.Latency.Total - thisAggregate.Total.MinUpstreamLatency = thisV.Latency.Upstream - } else { - // Don't update min latency in case of error - if thisAggregate.Total.MinLatency > thisV.Latency.Total && (thisV.ResponseCode < 300) && (thisV.ResponseCode >= 200) { - thisAggregate.Total.MinLatency = thisV.Latency.Total - } - // Don't update min latency in case of error - if thisAggregate.Total.MinUpstreamLatency > thisV.Latency.Upstream && (thisV.ResponseCode < 300) && (thisV.ResponseCode >= 200) { - thisAggregate.Total.MinUpstreamLatency = thisV.Latency.Upstream - } + // by default, min_total_latency will have 0 value + // it should not be set to 0 always + if aggregate.Total.Hits == 1 { + aggregate.Total.MinLatency = record.Latency.Total + aggregate.Total.MinUpstreamLatency = record.Latency.Upstream + } else { + // Don't update min latency in case of error + if aggregate.Total.MinLatency > record.Latency.Total && (record.ResponseCode < 300) && (record.ResponseCode >= 200) { + aggregate.Total.MinLatency = record.Latency.Total } - - if trackAllPaths { - thisV.TrackPath = true + // Don't update min latency in case of error + if aggregate.Total.MinUpstreamLatency > record.Latency.Upstream && (record.ResponseCode < 300) && (record.ResponseCode >= 200) { + aggregate.Total.MinUpstreamLatency = record.Latency.Upstream } + } - // Convert to a map (for 
easy iteration) - vAsMap := structs.Map(thisV) - for key, value := range vAsMap { - - // Mini function to handle incrementing a specific counter in our object - IncrementOrSetUnit := func(c *Counter) *Counter { - if c == nil { - newCounter := thisCounter - newCounter.ErrorMap = make(map[string]int) - for k, v := range thisCounter.ErrorMap { - newCounter.ErrorMap[k] = v - } - c = &newCounter - } else { - c.Hits += thisCounter.Hits - c.Success += thisCounter.Success - c.ErrorTotal += thisCounter.ErrorTotal - for k, v := range thisCounter.ErrorMap { - c.ErrorMap[k] += v - } - c.TotalRequestTime += thisCounter.TotalRequestTime - c.RequestTime = c.TotalRequestTime / float64(c.Hits) - - if c.MaxLatency < thisCounter.MaxLatency { - c.MaxLatency = thisCounter.MaxLatency - } - - // don't update min latency in case of errors - if c.MinLatency > thisCounter.MinLatency && thisCounter.ErrorTotal == 0 { - c.MinLatency = thisCounter.MinLatency - } - - if c.MaxUpstreamLatency < thisCounter.MaxUpstreamLatency { - c.MaxUpstreamLatency = thisCounter.MaxUpstreamLatency - } + if trackAllPaths { + record.TrackPath = true + } - // don't update min latency in case of errors - if c.MinUpstreamLatency > thisCounter.MinUpstreamLatency && thisCounter.ErrorTotal == 0 { - c.MinUpstreamLatency = thisCounter.MinUpstreamLatency + // Convert to a map (for easy iteration) + vAsMap := structs.Map(record) + for key, value := range vAsMap { + switch key { + case "APIID": + val, ok := value.(string) + c := incrementOrSetUnit(&thisCounter, aggregate.APIID[val]) + if val != "" && ok { + aggregate.APIID[val] = c + aggregate.APIID[val].Identifier = record.APIID + aggregate.APIID[val].HumanIdentifier = record.APIName + } + case "ResponseCode": + val, ok := value.(int) + if !ok { + break + } + errAsStr := strconv.Itoa(val) + if errAsStr != "" { + c := incrementOrSetUnit(&thisCounter, aggregate.Errors[errAsStr]) + if c.ErrorTotal > 0 { + aggregate.Errors[errAsStr] = c + aggregate.Errors[errAsStr].Identifier = errAsStr + } + } + case "APIVersion": + val, ok := value.(string) + versionStr := doHash(record.APIID + ":" + val) + c := incrementOrSetUnit(&thisCounter, aggregate.Versions[versionStr]) + if val != "" && ok { + aggregate.Versions[versionStr] = c + aggregate.Versions[versionStr].Identifier = val + aggregate.Versions[versionStr].HumanIdentifier = val + } + case "APIKey": + val, ok := value.(string) + if val != "" && ok { + c := incrementOrSetUnit(&thisCounter, aggregate.APIKeys[val]) + aggregate.APIKeys[val] = c + aggregate.APIKeys[val].Identifier = val + aggregate.APIKeys[val].HumanIdentifier = record.Alias + + if record.TrackPath { + keyStr := doHash(record.APIID + ":" + record.Path) + data := aggregate.KeyEndpoint[val] + + if data == nil { + data = make(map[string]*Counter) } - c.TotalLatency += thisCounter.TotalLatency - c.TotalUpstreamLatency += thisCounter.TotalUpstreamLatency + c = incrementOrSetUnit(&thisCounter, data[keyStr]) + c.Identifier = keyStr + c.HumanIdentifier = keyStr + data[keyStr] = c + aggregate.KeyEndpoint[val] = data } - - return c } - - switch key { - case "APIID": - c := IncrementOrSetUnit(thisAggregate.APIID[value.(string)]) - if value.(string) != "" { - thisAggregate.APIID[value.(string)] = c - thisAggregate.APIID[value.(string)].Identifier = thisV.APIID - thisAggregate.APIID[value.(string)].HumanIdentifier = thisV.APIName - } - break - case "ResponseCode": - errAsStr := strconv.Itoa(value.(int)) - if errAsStr != "" { - c := IncrementOrSetUnit(thisAggregate.Errors[errAsStr]) - if c.ErrorTotal > 0 { - 
thisAggregate.Errors[errAsStr] = c - thisAggregate.Errors[errAsStr].Identifier = errAsStr + case "OauthID": + val, ok := value.(string) + if val != "" && ok { + c := incrementOrSetUnit(&thisCounter, aggregate.OauthIDs[val]) + aggregate.OauthIDs[val] = c + aggregate.OauthIDs[val].Identifier = val + + if record.TrackPath { + keyStr := doHash(record.APIID + ":" + record.Path) + data := aggregate.OauthEndpoint[val] + + if data == nil { + data = make(map[string]*Counter) } - } - break - case "APIVersion": - versionStr := doHash(thisV.APIID + ":" + value.(string)) - c := IncrementOrSetUnit(thisAggregate.Versions[versionStr]) - if value.(string) != "" { - thisAggregate.Versions[versionStr] = c - thisAggregate.Versions[versionStr].Identifier = value.(string) - thisAggregate.Versions[versionStr].HumanIdentifier = value.(string) - } - break - case "APIKey": - if value.(string) != "" { - c := IncrementOrSetUnit(thisAggregate.APIKeys[value.(string)]) - thisAggregate.APIKeys[value.(string)] = c - thisAggregate.APIKeys[value.(string)].Identifier = value.(string) - thisAggregate.APIKeys[value.(string)].HumanIdentifier = thisV.Alias - - if thisV.TrackPath { - keyStr := doHash(thisV.APIID + ":" + thisV.Path) - data := thisAggregate.KeyEndpoint[value.(string)] - - if data == nil { - data = make(map[string]*Counter) - } - - c = IncrementOrSetUnit(data[keyStr]) - c.Identifier = keyStr - c.HumanIdentifier = keyStr - data[keyStr] = c - thisAggregate.KeyEndpoint[value.(string)] = data - } - } - break - case "OauthID": - if value.(string) != "" { - c := IncrementOrSetUnit(thisAggregate.OauthIDs[value.(string)]) - thisAggregate.OauthIDs[value.(string)] = c - thisAggregate.OauthIDs[value.(string)].Identifier = value.(string) - - if thisV.TrackPath { - keyStr := doHash(thisV.APIID + ":" + thisV.Path) - data := thisAggregate.OauthEndpoint[value.(string)] - - if data == nil { - data = make(map[string]*Counter) - } - - c = IncrementOrSetUnit(data[keyStr]) - c.Identifier = keyStr - c.HumanIdentifier = keyStr - data[keyStr] = c - thisAggregate.OauthEndpoint[value.(string)] = data - } + c = incrementOrSetUnit(&thisCounter, data[keyStr]) + c.Identifier = keyStr + c.HumanIdentifier = keyStr + data[keyStr] = c + aggregate.OauthEndpoint[val] = data } - break - case "Geo": - c := IncrementOrSetUnit(thisAggregate.Geo[thisV.Geo.Country.ISOCode]) - if thisV.Geo.Country.ISOCode != "" { - thisAggregate.Geo[thisV.Geo.Country.ISOCode] = c - thisAggregate.Geo[thisV.Geo.Country.ISOCode].Identifier = thisV.Geo.Country.ISOCode - thisAggregate.Geo[thisV.Geo.Country.ISOCode].HumanIdentifier = thisV.Geo.Country.ISOCode - } - break + } + case "Geo": + c := incrementOrSetUnit(&thisCounter, aggregate.Geo[record.Geo.Country.ISOCode]) + if record.Geo.Country.ISOCode != "" { + aggregate.Geo[record.Geo.Country.ISOCode] = c + aggregate.Geo[record.Geo.Country.ISOCode].Identifier = record.Geo.Country.ISOCode + aggregate.Geo[record.Geo.Country.ISOCode].HumanIdentifier = record.Geo.Country.ISOCode + } - case "Tags": - for _, thisTag := range thisV.Tags { - trimmedTag := TrimTag(thisTag) + case "Tags": + for _, thisTag := range record.Tags { + trimmedTag := TrimTag(thisTag) - if trimmedTag != "" && !ignoreTag(thisTag, ignoreTagPrefixList) { - c := IncrementOrSetUnit(thisAggregate.Tags[trimmedTag]) - thisAggregate.Tags[trimmedTag] = c - thisAggregate.Tags[trimmedTag].Identifier = trimmedTag - thisAggregate.Tags[trimmedTag].HumanIdentifier = trimmedTag - } + if trimmedTag != "" && !ignoreTag(thisTag, ignoreTagPrefixList) { + c := 
incrementOrSetUnit(&thisCounter, aggregate.Tags[trimmedTag]) + aggregate.Tags[trimmedTag] = c + aggregate.Tags[trimmedTag].Identifier = trimmedTag + aggregate.Tags[trimmedTag].HumanIdentifier = trimmedTag } - break + } - case "TrackPath": - log.Debug("TrackPath=", value.(bool)) - if value.(bool) { - fixedPath := replaceUnsupportedChars(thisV.Path) - c := IncrementOrSetUnit(thisAggregate.Endpoints[fixedPath]) - thisAggregate.Endpoints[fixedPath] = c - thisAggregate.Endpoints[fixedPath].Identifier = thisV.Path - thisAggregate.Endpoints[fixedPath].HumanIdentifier = thisV.Path - - keyStr := hex.EncodeToString([]byte(thisV.APIID + ":" + thisV.APIVersion + ":" + thisV.Path)) - c = IncrementOrSetUnit(thisAggregate.ApiEndpoint[keyStr]) - thisAggregate.ApiEndpoint[keyStr] = c - thisAggregate.ApiEndpoint[keyStr].Identifier = keyStr - thisAggregate.ApiEndpoint[keyStr].HumanIdentifier = thisV.Path - } + case "TrackPath": + val, ok := value.(bool) + if !ok { break } + log.Debug("TrackPath=", val) + if val { + fixedPath := replaceUnsupportedChars(record.Path) + c := incrementOrSetUnit(&thisCounter, aggregate.Endpoints[fixedPath]) + aggregate.Endpoints[fixedPath] = c + aggregate.Endpoints[fixedPath].Identifier = record.Path + aggregate.Endpoints[fixedPath].HumanIdentifier = record.Path + + keyStr := hex.EncodeToString([]byte(record.APIID + ":" + record.APIVersion + ":" + record.Path)) + c = incrementOrSetUnit(&thisCounter, aggregate.ApiEndpoint[keyStr]) + aggregate.ApiEndpoint[keyStr] = c + aggregate.ApiEndpoint[keyStr].Identifier = keyStr + aggregate.ApiEndpoint[keyStr].HumanIdentifier = record.Path + } } + } + } + return *aggregate, thisCounter +} +// incrementOrSetUnit is a Mini function to handle incrementing a specific counter in our object +func incrementOrSetUnit(b, c *Counter) *Counter { + base := *b + if c == nil { + newCounter := base + newCounter.ErrorMap = make(map[string]int) + for k, v := range base.ErrorMap { + newCounter.ErrorMap[k] = v + } + c = &newCounter + } else { + c.Hits += base.Hits + c.Success += base.Success + c.ErrorTotal += base.ErrorTotal + for k, v := range base.ErrorMap { + c.ErrorMap[k] += v + } + c.TotalRequestTime += base.TotalRequestTime + c.RequestTime = c.TotalRequestTime / float64(c.Hits) + + if c.MaxLatency < base.MaxLatency { + c.MaxLatency = base.MaxLatency } - analyticsPerOrg[orgID] = thisAggregate + + // don't update min latency in case of errors + if c.MinLatency > base.MinLatency && base.ErrorTotal == 0 { + c.MinLatency = base.MinLatency + } + + if c.MaxUpstreamLatency < base.MaxUpstreamLatency { + c.MaxUpstreamLatency = base.MaxUpstreamLatency + } + + // don't update min latency in case of errors + if c.MinUpstreamLatency > base.MinUpstreamLatency && base.ErrorTotal == 0 { + c.MinUpstreamLatency = base.MinUpstreamLatency + } + + c.TotalLatency += base.TotalLatency + c.TotalUpstreamLatency += base.TotalUpstreamLatency } - return analyticsPerOrg + return c } func TrimTag(thisTag string) string { diff --git a/analytics/aggregate_test.go b/analytics/aggregate_test.go index eab26d9fd..8a1de8c93 100644 --- a/analytics/aggregate_test.go +++ b/analytics/aggregate_test.go @@ -1,12 +1,26 @@ package analytics import ( + "encoding/base64" + "fmt" "testing" "time" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/assert" ) +const graphErrorResponse = `{ + "errors": [ + { + "message": "Name for character with ID 1002 could not be fetched.", + "locations": [{ "line": 6, "column": 7 }], + "path": ["hero", "heroFriends", 1, "name"] + } + ] +}` + func 
TestCode_ProcessStatusCodes(t *testing.T) { errorMap := map[string]int{ "400": 4, @@ -57,7 +71,7 @@ func TestAggregate_Tags(t *testing.T) { } func runTestAggregatedTags(t *testing.T, name string, records []interface{}) { - aggregations := AggregateData(records, false, []string{}, "", 60, false) + aggregations := AggregateData(records, false, []string{}, "", 60) t.Run(name, func(t *testing.T) { for _, aggregation := range aggregations { @@ -73,6 +87,237 @@ func TestTrimTag(t *testing.T) { assert.Equal(t, "hello world", TrimTag(" hello world ")) } +func TestAggregateGraphData(t *testing.T) { + sampleRecord := AnalyticsRecord{ + TimeStamp: time.Date(2022, 1, 1, 0, 0, 0, 0, time.UTC), + Method: "POST", + Host: "localhost:8281", + Path: "/", + RawPath: "/", + APIName: "test-api", + APIID: "test-api", + ApiSchema: base64.StdEncoding.EncodeToString([]byte(sampleSchema)), + Tags: []string{PredefinedTagGraphAnalytics}, + ResponseCode: 200, + Day: 1, + Month: 1, + Year: 2022, + Hour: 0, + OrgID: "test-org", + APIKey: "test-key", + TrackPath: true, + OauthID: "test-id", + } + + compareFields := func(r *require.Assertions, expected, actual map[string]*Counter) { + r.Equal(len(expected), len(actual), "field map not equal") + for k, expectedVal := range expected { + actualVal, ok := actual[k] + r.True(ok) + r.Equal(expectedVal.Hits, actualVal.Hits, "hits not matching for %s", k) + r.Equal(expectedVal.ErrorTotal, actualVal.ErrorTotal, "error total not matching for %s", k) + r.Equal(expectedVal.Success, actualVal.Success, "success not matching for %s", k) + } + } + + testCases := []struct { + expectedAggregate map[string]GraphRecordAggregate + recordGenerator func() []interface{} + name string + }{ + { + name: "default", + recordGenerator: func() []interface{} { + records := make([]interface{}, 3) + for i := range records { + record := sampleRecord + query := `{"query":"query{\n characters(filter: {\n \n }){\n info{\n count\n }\n }\n}"}` + response := `{"data":{"characters":{"info":{"count":758}}}}` + record.RawRequest = base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf(requestTemplate, len(query), query))) + record.RawResponse = base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf(responseTemplate, len(response), response))) + records[i] = record + } + return records + }, + expectedAggregate: map[string]GraphRecordAggregate{ + "test-org": { + Types: map[string]*Counter{ + "Characters": {Hits: 3, ErrorTotal: 0, Success: 3}, + "Info": {Hits: 3, ErrorTotal: 0, Success: 3}, + }, + Fields: map[string]*Counter{ + "Characters_info": {Hits: 3, ErrorTotal: 0, Success: 3}, + "Info_count": {Hits: 3, ErrorTotal: 0, Success: 3}, + }, + }, + }, + }, + { + name: "skip non graph records", + recordGenerator: func() []interface{} { + records := make([]interface{}, 3) + for i := range records { + record := sampleRecord + query := `{"query":"query{\n characters(filter: {\n \n }){\n info{\n count\n }\n }\n}"}` + response := `{"data":{"characters":{"info":{"count":758}}}}` + record.RawRequest = base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf(requestTemplate, len(query), query))) + record.RawResponse = base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf(responseTemplate, len(response), response))) + if i == 1 { + record.Tags = []string{} + } + records[i] = record + } + return records + }, + expectedAggregate: map[string]GraphRecordAggregate{ + "test-org": { + Types: map[string]*Counter{ + "Characters": {Hits: 2, ErrorTotal: 0, Success: 2}, + "Info": {Hits: 2, ErrorTotal: 0, Success: 2}, + }, + Fields: 
map[string]*Counter{ + "Characters_info": {Hits: 2, ErrorTotal: 0, Success: 2}, + "Info_count": {Hits: 2, ErrorTotal: 0, Success: 2}, + }, + }, + }, + }, + { + name: "has errors", + recordGenerator: func() []interface{} { + records := make([]interface{}, 3) + for i := range records { + record := sampleRecord + query := `{"query":"query{\n characters(filter: {\n \n }){\n info{\n count\n }\n }\n}"}` + response := `{"data":{"characters":{"info":{"count":758}}}}` + if i == 1 { + response = graphErrorResponse + } + record.RawRequest = base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf(requestTemplate, len(query), query))) + record.RawResponse = base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf(responseTemplate, len(response), response))) + records[i] = record + } + return records + }, + expectedAggregate: map[string]GraphRecordAggregate{ + "test-org": { + Types: map[string]*Counter{ + "Characters": {Hits: 3, ErrorTotal: 1, Success: 2}, + "Info": {Hits: 3, ErrorTotal: 1, Success: 2}, + }, + Fields: map[string]*Counter{ + "Characters_info": {Hits: 3, ErrorTotal: 1, Success: 2}, + "Info_count": {Hits: 3, ErrorTotal: 1, Success: 2}, + }, + }, + }, + }, + { + name: "error response code", + recordGenerator: func() []interface{} { + records := make([]interface{}, 5) + for i := range records { + record := sampleRecord + query := `{"query":"query{\n characters(filter: {\n \n }){\n info{\n count\n }\n }\n}"}` + response := `{"data":{"characters":{"info":{"count":758}}}}` + record.RawRequest = base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf(requestTemplate, len(query), query))) + record.RawResponse = base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf(responseTemplate, len(response), response))) + if i == 2 || i == 4 { + record.ResponseCode = 500 + } + records[i] = record + } + return records + }, + expectedAggregate: map[string]GraphRecordAggregate{ + "test-org": { + Types: map[string]*Counter{ + "Characters": {Hits: 5, ErrorTotal: 2, Success: 3}, + "Info": {Hits: 5, ErrorTotal: 2, Success: 3}, + }, + Fields: map[string]*Counter{ + "Characters_info": {Hits: 5, ErrorTotal: 2, Success: 3}, + "Info_count": {Hits: 5, ErrorTotal: 2, Success: 3}, + }, + }, + }, + }, + } + + for _, c := range testCases { + t.Run(c.name, func(t *testing.T) { + r := require.New(t) + records := c.recordGenerator() + aggregated := AggregateGraphData(records, "", 0) + r.Len(aggregated, len(c.expectedAggregate)) + for key, expectedAggregate := range c.expectedAggregate { + actualAggregate, ok := aggregated[key] + r.True(ok) + // check types and fields + compareFields(r, expectedAggregate.Types, actualAggregate.Types) + compareFields(r, expectedAggregate.Fields, actualAggregate.Fields) + } + }) + } +} + +func TestAggregateGraphData_Dimension(t *testing.T) { + sampleRecord := AnalyticsRecord{ + TimeStamp: time.Date(2022, 1, 1, 0, 0, 0, 0, time.UTC), + Method: "POST", + Host: "localhost:8281", + Path: "/", + RawPath: "/", + APIName: "test-api", + APIID: "test-api", + ApiSchema: base64.StdEncoding.EncodeToString([]byte(sampleSchema)), + Tags: []string{PredefinedTagGraphAnalytics}, + ResponseCode: 200, + Day: 1, + Month: 1, + Year: 2022, + Hour: 0, + OrgID: "test-org", + } + + records := make([]interface{}, 3) + for i := range records { + record := sampleRecord + query := `{"query":"query{\n characters(filter: {\n \n }){\n info{\n count\n }\n }\n}"}` + response := `{"data":{"characters":{"info":{"count":758}}}}` + record.RawRequest = base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf(requestTemplate, len(query), query))) + 
record.RawResponse = base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf(responseTemplate, len(response), response))) + records[i] = record + } + + responsesCheck := map[string][]string{ + "types": { + "Characters", + "Info", + }, + "fields": { + "Characters_info", + "Info_count", + }, + } + + r := require.New(t) + aggregated := AggregateGraphData(records, "", 1) + r.Len(aggregated, 1) + aggre := aggregated["test-org"] + dimensions := aggre.Dimensions() + fmt.Println(dimensions) + for d, values := range responsesCheck { + for _, v := range values { + for _, dimension := range dimensions { + if d != dimension.Name && v != dimension.Value && dimension.Counter.Hits != 3 { + t.Errorf("item missing from dimensions: NameL %s, Value: %s, Hits:3", d, v) + } + } + } + } +} + func TestAggregateData_SkipGraphRecords(t *testing.T) { run := func(records []AnalyticsRecord, expectedAggregatedRecordCount int, expectedExistingOrgKeys []string, expectedNonExistingOrgKeys []string) func(t *testing.T) { return func(t *testing.T) { @@ -80,7 +325,7 @@ func TestAggregateData_SkipGraphRecords(t *testing.T) { for i := range records { data[i] = records[i] } - aggregatedData := AggregateData(data, true, nil, "", 1, true) + aggregatedData := AggregateData(data, true, nil, "", 1) assert.Equal(t, expectedAggregatedRecordCount, len(aggregatedData)) for _, expectedExistingOrgKey := range expectedExistingOrgKeys { _, exists := aggregatedData[expectedExistingOrgKey] diff --git a/pumps/graph_sql_aggregate.go b/pumps/graph_sql_aggregate.go new file mode 100644 index 000000000..050fc766d --- /dev/null +++ b/pumps/graph_sql_aggregate.go @@ -0,0 +1,186 @@ +package pumps + +import ( + "context" + "encoding/hex" + "fmt" + + "github.com/TykTechnologies/tyk-pump/analytics" + "github.com/mitchellh/mapstructure" + "gorm.io/gorm" + "gorm.io/gorm/clause" + gorm_logger "gorm.io/gorm/logger" +) + +var SQLGraphAggregateDefaultENV = PUMPS_ENV_PREFIX + "_SQLGRAPHAGGREGATE" + PUMPS_ENV_META_PREFIX + +type GraphSQLAggregatePump struct { + SQLConf *SQLAggregatePumpConf + db *gorm.DB + + CommonPumpConfig +} + +func (s *GraphSQLAggregatePump) GetName() string { + return "Sql Graph Aggregate Pump" +} + +func (s *GraphSQLAggregatePump) New() Pump { + return &GraphSQLAggregatePump{} +} + +func (s *GraphSQLAggregatePump) Init(conf interface{}) error { + s.SQLConf = &SQLAggregatePumpConf{} + s.log = log.WithField("prefix", SQLAggregatePumpPrefix) + + err := mapstructure.Decode(conf, &s.SQLConf) + if err != nil { + s.log.Error("Failed to decode configuration: ", err) + return err + } + + processPumpEnvVars(s, s.log, s.SQLConf, SQLGraphAggregateDefaultENV) + + logLevel := gorm_logger.Silent + + switch s.SQLConf.LogLevel { + case "debug": + logLevel = gorm_logger.Info + case "info": + logLevel = gorm_logger.Warn + case "warning": + logLevel = gorm_logger.Error + } + + dialect, errDialect := Dialect(&s.SQLConf.SQLConf) + if errDialect != nil { + s.log.Error(errDialect) + return errDialect + } + db, err := gorm.Open(dialect, &gorm.Config{ + AutoEmbedd: true, + UseJSONTags: true, + Logger: gorm_logger.Default.LogMode(logLevel), + }) + if err != nil { + s.log.Error(err) + return err + } + s.db = db + if !s.SQLConf.TableSharding { + if err := s.db.Table(analytics.AggregateGraphSQLTable).AutoMigrate(&analytics.SQLAnalyticsRecordAggregate{}); err != nil { + s.log.WithError(err).Warn("error migrating table") + } + } + + if s.SQLConf.BatchSize == 0 { + s.SQLConf.BatchSize = SQLDefaultQueryBatchSize + } + + s.log.Debug("SQLAggregate Initialized") + return nil +} 
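A minimal sketch of how the new pump can be exercised once initialised, mirroring the sqlite-backed tests added further down in this patch; it assumes it sits in package pumps next to those tests (so context, time and the analytics package are in scope) and all literal values are placeholders, not a recommended configuration.

// Illustrative sketch only, based on the test setup below.
func ExampleGraphSQLAggregatePump() {
	pump := &GraphSQLAggregatePump{}
	conf := SQLAggregatePumpConf{SQLConf: SQLConf{Type: "sqlite", ConnectionString: ""}}
	if err := pump.Init(conf); err != nil {
		panic(err)
	}
	// Only records tagged with analytics.PredefinedTagGraphAnalytics are picked up
	// by AggregateGraphData; untagged records are skipped.
	rec := analytics.AnalyticsRecord{
		OrgID:     "test-org",
		TimeStamp: time.Now(),
		Tags:      []string{analytics.PredefinedTagGraphAnalytics},
	}
	_ = pump.WriteData(context.Background(), []interface{}{rec})
}

As with the existing SQL aggregate pump, the resulting aggregates are grouped per OrgID and upserted into analytics.AggregateGraphSQLTable, or into a per-day shard of that table when table_sharding is enabled.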
+ +func (s *GraphSQLAggregatePump) WriteData(ctx context.Context, data []interface{}) error { + dataLen := len(data) + s.log.Debug("Attempting to write ", dataLen, " records...") + + if dataLen == 0 { + return nil + } + + startIndex := 0 + endIndex := dataLen + table := "" + + for i := 0; i <= dataLen; i++ { + if s.SQLConf.TableSharding { + recDate := data[startIndex].(analytics.AnalyticsRecord).TimeStamp.Format("20060102") + var nextRecDate string + // if we're on i == dataLen iteration, it means that we're out of index range. We're going to use the last record date. + if i == dataLen { + nextRecDate = data[dataLen-1].(analytics.AnalyticsRecord).TimeStamp.Format("20060102") + recDate = nextRecDate + } else { + nextRecDate = data[i].(analytics.AnalyticsRecord).TimeStamp.Format("20060102") + + // if both dates are equal, we shouldn't write in the table yet. + if recDate == nextRecDate { + continue + } + } + + endIndex = i + + table = analytics.AggregateGraphSQLTable + "_" + recDate + s.db = s.db.Table(table) + if !s.db.Migrator().HasTable(table) { + if err := s.db.AutoMigrate(&analytics.SQLAnalyticsRecordAggregate{}); err != nil { + s.log.WithError(err).Warn("error running auto migration") + } + } + } else { + i = dataLen // write all records at once for non-sharded case, stop for loop after 1 iteration + table = analytics.AggregateGraphSQLTable + } + } + + // if StoreAnalyticsPerMinute is set to true, we will create new documents with records every 1 minute + var aggregationTime int + if s.SQLConf.StoreAnalyticsPerMinute { + aggregationTime = 1 + } else { + aggregationTime = 60 + } + + aggregates := analytics.AggregateGraphData(data[startIndex:endIndex], "", aggregationTime) + + for orgID := range aggregates { + aggr := aggregates[orgID] + if err := s.DoAggregatedWriting(ctx, table, orgID, &aggr); err != nil { + s.log.WithError(err).Error("error writing record") + return err + } + } + + return nil +} + +func (s *GraphSQLAggregatePump) DoAggregatedWriting(ctx context.Context, table, orgID string, ag *analytics.GraphRecordAggregate) error { + recs := []analytics.SQLAnalyticsRecordAggregate{} + + dimensions := ag.Dimensions() + for _, d := range dimensions { + rec := analytics.SQLAnalyticsRecordAggregate{ + ID: hex.EncodeToString([]byte(fmt.Sprintf("%v", ag.TimeStamp.Unix()) + orgID + d.Name + d.Value)), + OrgID: orgID, + TimeStamp: ag.TimeStamp.Unix(), + Counter: *d.Counter, + Dimension: d.Name, + DimensionValue: d.Value, + } + rec.ProcessStatusCodes(rec.Counter.ErrorMap) + rec.Counter.ErrorList = nil + rec.Counter.ErrorMap = nil + recs = append(recs, rec) + } + + for i := 0; i < len(recs); i += s.SQLConf.BatchSize { + ends := i + s.SQLConf.BatchSize + if ends > len(recs) { + ends = len(recs) + } + + // we use excluded as temp table since it's supported by our SQL storages https://www.postgresql.org/docs/9.5/sql-insert.html#SQL-ON-CONFLICT https://www.sqlite.org/lang_UPSERT.html + tx := s.db.WithContext(ctx).Table(analytics.AggregateGraphSQLTable).Clauses(clause.OnConflict{ + Columns: []clause.Column{{Name: "id"}}, + DoUpdates: clause.Assignments(analytics.OnConflictAssignments(table, "excluded")), + }).Create(recs[i:ends]) + if tx.Error != nil { + s.log.Error("error writing aggregated records into "+table+":", tx.Error) + return tx.Error + } + } + + return nil +} diff --git a/pumps/graph_sql_aggregate_test.go b/pumps/graph_sql_aggregate_test.go new file mode 100644 index 000000000..e261e9311 --- /dev/null +++ b/pumps/graph_sql_aggregate_test.go @@ -0,0 +1,452 @@ +package pumps + +import ( + 
"context" + "encoding/base64" + "fmt" + "os" + "testing" + "time" + + "github.com/TykTechnologies/tyk-pump/analytics" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +const ( + requestTemplate = "POST / HTTP/1.1\r\nHost: localhost:8281\r\nUser-Agent: test-agent\r\nContent-Length: %d\r\n\r\n%s" + responseTemplate = "HTTP/0.0 200 OK\r\nContent-Length: %d\r\nConnection: close\r\nContent-Type: application/json\r\n\r\n%s" +) + +const graphErrorResponse = `{ + "errors": [ + { + "message": "Name for character with ID 1002 could not be fetched.", + "locations": [{ "line": 6, "column": 7 }], + "path": ["hero", "heroFriends", 1, "name"] + } + ] +}` + +const sampleSchema = ` +type Query { + characters(filter: FilterCharacter, page: Int): Characters + listCharacters(): [Characters]! +} + +type Mutation { + changeCharacter(): String +} + +type Subscription { + listenCharacter(): Characters +} +input FilterCharacter { + name: String + status: String + species: String + type: String + gender: String! = "M" +} +type Characters { + info: Info + secondInfo: String + results: [Character] +} +type Info { + count: Int + next: Int + pages: Int + prev: Int +} +type Character { + gender: String + id: ID + name: String +} + +type EmptyType{ +}` + +const ( + sampleQuery = `{"query":"query{\n characters(filter: {\n \n }){\n info{\n count\n }\n }\n}"}` + sampleResponse = `{"data":{"characters":{"info":{"count":758}}}}` +) + +func TestSqlGraphAggregatePump_Init(t *testing.T) { + tableName := analytics.AggregateGraphSQLTable + r := require.New(t) + pump := &GraphSQLAggregatePump{} + t.Run("successful", func(t *testing.T) { + conf := SQLAggregatePumpConf{ + SQLConf: SQLConf{ + Type: "sqlite", + ConnectionString: "", + }, + } + assert.NoError(t, pump.Init(conf)) + t.Cleanup(func() { + if err := pump.db.Migrator().DropTable(tableName); err != nil { + t.Errorf("error cleaning up table: %v", err) + } + }) + assert.True(t, pump.db.Migrator().HasTable(tableName)) + }) + + t.Run("invalid connection details", func(t *testing.T) { + conf := SQLConf{ + Type: "postgres", + ConnectionString: "host=localhost user=gorm password=gorm DB.name=gorm port=9920 sslmode=disable", + } + assert.Error(t, pump.Init(conf)) + }) + + t.Run("should fail", func(t *testing.T) { + conf := SQLConf{ConnectionString: "random"} + assert.ErrorContains(t, pump.Init(conf), "Unsupported `config_storage.type` value:") + }) + + t.Run("invalid config", func(t *testing.T) { + conf := map[string]interface{}{ + "connection_string": 1, + } + assert.ErrorContains(t, pump.Init(conf), "expected type") + }) + + t.Run("decode from map", func(t *testing.T) { + conf := map[string]interface{}{ + "type": "sqlite", + "table_sharding": true, + } + r.NoError(pump.Init(conf)) + assert.Equal(t, "sqlite", pump.SQLConf.Type) + assert.Equal(t, true, pump.SQLConf.TableSharding) + }) + + t.Run("sharded table", func(t *testing.T) { + conf := SQLAggregatePumpConf{ + SQLConf: SQLConf{ + Type: "sqlite", + ConnectionString: "", + TableSharding: true, + }, + } + assert.NoError(t, pump.Init(conf)) + assert.False(t, pump.db.Migrator().HasTable(tableName)) + }) + + t.Run("init from env", func(t *testing.T) { + envPrefix := fmt.Sprintf("%s_SQLGRAPHAGGREGATE%s", PUMPS_ENV_PREFIX, PUMPS_ENV_META_PREFIX) + "_%s" + r := require.New(t) + envKeyVal := map[string]string{ + "TYPE": "sqlite", + "TABLESHARDING": "true", + } + for key, val := range envKeyVal { + newKey := fmt.Sprintf(envPrefix, key) + r.NoError(os.Setenv(newKey, val)) + } + t.Cleanup(func() { + for k := 
range envKeyVal { + r.NoError(os.Unsetenv(fmt.Sprintf(envPrefix, k))) + } + }) + + conf := SQLAggregatePumpConf{ + SQLConf: SQLConf{ + Type: "postgres", + ConnectionString: "", + TableSharding: false, + }, + } + r.NoError(pump.Init(conf)) + assert.Equal(t, "sqlite", pump.SQLConf.Type) + assert.Equal(t, true, pump.SQLConf.TableSharding) + }) +} + +func TestSqlGraphAggregatePump_WriteData(t *testing.T) { + r := require.New(t) + conf := SQLConf{ + Type: "sqlite", + ConnectionString: "", + } + pump := GraphSQLAggregatePump{} + r.NoError(pump.Init(conf)) + t.Cleanup(func() { + if err := pump.db.Migrator().DropTable(analytics.AggregateGraphSQLTable); err != nil { + t.Errorf("error cleaning up table: %v", err) + } + }) + + sampleRecord := analytics.AnalyticsRecord{ + TimeStamp: time.Date(2022, 1, 1, 0, 0, 0, 0, time.UTC), + Method: "POST", + Host: "localhost:8281", + Path: "/", + RawPath: "/", + APIName: "test-api", + APIID: "test-api", + ApiSchema: base64.StdEncoding.EncodeToString([]byte(sampleSchema)), + Tags: []string{analytics.PredefinedTagGraphAnalytics}, + ResponseCode: 200, + Day: 1, + Month: 1, + Year: 2022, + Hour: 0, + OrgID: "test-org", + } + + type expectedResponseCheck struct { + name string + orgID string + dimension string + hits int + success int + error int + } + + testCases := []struct { + name string + recordGenerator func() []interface{} + expectedResults []expectedResponseCheck + }{ + { + name: "default", + recordGenerator: func() []interface{} { + records := make([]interface{}, 3) + for i := range records { + record := sampleRecord + query := `{"query":"query{\n characters(filter: {\n \n }){\n info{\n count\n }\n }\n}"}` + response := `{"data":{"characters":{"info":{"count":758}}}}` + record.RawRequest = base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf(requestTemplate, len(query), query))) + record.RawResponse = base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf(responseTemplate, len(response), response))) + records[i] = record + } + return records + }, + expectedResults: []expectedResponseCheck{ + { + orgID: "test-org", + dimension: "types", + name: "Characters", + hits: 3, + error: 0, + success: 3, + }, + { + orgID: "test-org", + dimension: "types", + name: "Info", + hits: 3, + error: 0, + success: 3, + }, + { + orgID: "test-org", + dimension: "fields", + name: "Characters_info", + hits: 3, + error: 0, + success: 3, + }, + { + orgID: "test-org", + dimension: "fields", + name: "Info_count", + hits: 3, + error: 0, + success: 3, + }, + }, + }, + { + name: "skip non graph records", + recordGenerator: func() []interface{} { + records := make([]interface{}, 3) + for i := range records { + record := sampleRecord + query := `{"query":"query{\n characters(filter: {\n \n }){\n info{\n count\n }\n }\n}"}` + response := `{"data":{"characters":{"info":{"count":758}}}}` + record.RawRequest = base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf(requestTemplate, len(query), query))) + record.RawResponse = base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf(responseTemplate, len(response), response))) + if i == 1 { + record.Tags = []string{} + } + records[i] = record + } + return records + }, + expectedResults: []expectedResponseCheck{ + { + orgID: "test-org", + dimension: "types", + name: "Characters", + hits: 2, + error: 0, + success: 2, + }, + { + orgID: "test-org", + dimension: "types", + name: "Info", + hits: 2, + error: 0, + success: 2, + }, + { + orgID: "test-org", + dimension: "fields", + name: "Characters_info", + hits: 2, + error: 0, + success: 2, + }, + { + orgID: 
"test-org", + dimension: "fields", + name: "Info_count", + hits: 2, + error: 0, + success: 2, + }, + }, + }, + { + name: "has errors", + recordGenerator: func() []interface{} { + records := make([]interface{}, 3) + for i := range records { + record := sampleRecord + query := `{"query":"query{\n characters(filter: {\n \n }){\n info{\n count\n }\n }\n}"}` + response := `{"data":{"characters":{"info":{"count":758}}}}` + if i == 1 { + response = graphErrorResponse + } + record.RawRequest = base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf(requestTemplate, len(query), query))) + record.RawResponse = base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf(responseTemplate, len(response), response))) + records[i] = record + } + return records + }, + expectedResults: []expectedResponseCheck{ + { + orgID: "test-org", + dimension: "types", + name: "Characters", + hits: 3, + error: 1, + success: 2, + }, + { + orgID: "test-org", + dimension: "types", + name: "Info", + hits: 3, + error: 1, + success: 2, + }, + { + orgID: "test-org", + dimension: "fields", + name: "Characters_info", + hits: 3, + error: 1, + success: 2, + }, + { + orgID: "test-org", + dimension: "fields", + name: "Info_count", + hits: 3, + error: 1, + success: 2, + }, + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + r := require.New(t) + records := tc.recordGenerator() + r.NoError(pump.WriteData(context.Background(), records)) + t.Cleanup(func() { + // use DELETE FROM table; since it is sqlite + if tx := pump.db.Exec(fmt.Sprintf("DELETE FROM %s", analytics.AggregateGraphSQLTable)); tx.Error != nil { + t.Error(tx.Error) + } + }) + + for _, expected := range tc.expectedResults { + resp := make([]analytics.SQLAnalyticsRecordAggregate, 0) + tx := pump.db.Table(analytics.AggregateGraphSQLTable).Where( + "org_id = ? AND dimension = ? AND dimension_value = ? AND counter_hits = ? AND counter_success = ? 
AND counter_error = ?", + expected.orgID, expected.dimension, expected.name, expected.hits, expected.success, expected.error, + ).Find(&resp) + r.NoError(tx.Error) + if len(resp) < 1 { + t.Errorf( + "couldn't find record with fields: org_id: %s, dimension: %s, dimension_value: %s, counter_hits: %d, counter_success: %d, counter_error: %d", + expected.orgID, expected.dimension, expected.name, expected.hits, expected.success, expected.error, + ) + } + } + // assert the responses + }) + } +} + +func TestGraphSQLAggregatePump_WriteData_Sharded(t *testing.T) { + pump := GraphSQLAggregatePump{} + + sampleRecord := analytics.AnalyticsRecord{ + TimeStamp: time.Date(2022, 1, 1, 0, 0, 0, 0, time.UTC), + Method: "POST", + Host: "localhost:8281", + Path: "/", + RawPath: "/", + APIName: "test-api", + APIID: "test-api", + ApiSchema: base64.StdEncoding.EncodeToString([]byte(sampleSchema)), + Tags: []string{analytics.PredefinedTagGraphAnalytics}, + ResponseCode: 200, + Day: 1, + Month: 1, + Year: 2022, + Hour: 0, + OrgID: "test-org", + RawRequest: fmt.Sprintf(requestTemplate, len(sampleQuery), sampleQuery), + RawResponse: fmt.Sprintf(responseTemplate, len(sampleResponse), sampleResponse), + } + + t.Run("should shard successfully", func(t *testing.T) { + r := require.New(t) + r.NoError(pump.Init(SQLAggregatePumpConf{ + SQLConf: SQLConf{ + Type: "sqlite", + TableSharding: true, + }, + })) + assert.False(t, pump.db.Migrator().HasTable(analytics.AggregateGraphSQLTable)) + r.NoError(pump.WriteData(context.Background(), []interface{}{sampleRecord})) + assert.True(t, pump.db.Migrator().HasTable(analytics.AggregateGraphSQLTable+"_20220101")) + }) + + t.Run("shard multiple tables", func(t *testing.T) { + r := require.New(t) + r.NoError(pump.Init(SQLAggregatePumpConf{ + SQLConf: SQLConf{ + Type: "sqlite", + TableSharding: true, + }, + })) + record := sampleRecord + secondRecord := sampleRecord + secondRecord.TimeStamp = time.Date(2023, 1, 2, 0, 0, 0, 0, time.UTC) + assert.False(t, pump.db.Migrator().HasTable(analytics.AggregateGraphSQLTable)) + r.NoError(pump.WriteData(context.Background(), []interface{}{record, secondRecord})) + assert.True(t, pump.db.Migrator().HasTable(analytics.AggregateGraphSQLTable+"_20220101")) + assert.True(t, pump.db.Migrator().HasTable(analytics.AggregateGraphSQLTable+"_20230102")) + }) +} diff --git a/pumps/hybrid.go b/pumps/hybrid.go index 196e2946c..cd887af81 100644 --- a/pumps/hybrid.go +++ b/pumps/hybrid.go @@ -212,7 +212,7 @@ func (p *HybridPump) WriteData(ctx context.Context, data []interface{}) error { } } else { // send aggregated data // calculate aggregates - aggregates := analytics.AggregateData(data, p.trackAllPaths, p.ignoreTagPrefixList, p.rpcConfig.ConnectionString, p.aggregationTime, false) + aggregates := analytics.AggregateData(data, p.trackAllPaths, p.ignoreTagPrefixList, p.rpcConfig.ConnectionString, p.aggregationTime) // turn map with analytics aggregates into JSON payload jsonData, err := json.Marshal(aggregates) diff --git a/pumps/init.go b/pumps/init.go index 74bf7de48..1bdaacdf8 100644 --- a/pumps/init.go +++ b/pumps/init.go @@ -34,4 +34,5 @@ func init() { AvailablePumps["timestream"] = &TimestreamPump{} AvailablePumps["mongo-graph"] = &GraphMongoPump{} AvailablePumps["sql-graph"] = &GraphSQLPump{} + AvailablePumps["sql-graph-aggregate"] = &GraphSQLAggregatePump{} } diff --git a/pumps/mongo_aggregate.go b/pumps/mongo_aggregate.go index 1444b50c0..a63eece4b 100644 --- a/pumps/mongo_aggregate.go +++ b/pumps/mongo_aggregate.go @@ -281,7 +281,7 @@ func (m 
*MongoAggregatePump) WriteData(ctx context.Context, data []interface{}) m.WriteData(ctx, data) } else { // calculate aggregates - analyticsPerOrg := analytics.AggregateData(data, m.dbConf.TrackAllPaths, m.dbConf.IgnoreTagPrefixList, m.dbConf.MongoURL, m.dbConf.AggregationTime, true) + analyticsPerOrg := analytics.AggregateData(data, m.dbConf.TrackAllPaths, m.dbConf.IgnoreTagPrefixList, m.dbConf.MongoURL, m.dbConf.AggregationTime) // put aggregated data into MongoDB for orgID, filteredData := range analyticsPerOrg { err := m.DoAggregatedWriting(ctx, orgID, filteredData) diff --git a/pumps/sql_aggregate.go b/pumps/sql_aggregate.go index b34d98a78..56001a76d 100644 --- a/pumps/sql_aggregate.go +++ b/pumps/sql_aggregate.go @@ -158,7 +158,7 @@ func (c *SQLAggregatePump) WriteData(ctx context.Context, data []interface{}) er aggregationTime = 60 } - analyticsPerOrg := analytics.AggregateData(data[startIndex:endIndex], c.SQLConf.TrackAllPaths, c.SQLConf.IgnoreTagPrefixList, "", aggregationTime, false) + analyticsPerOrg := analytics.AggregateData(data[startIndex:endIndex], c.SQLConf.TrackAllPaths, c.SQLConf.IgnoreTagPrefixList, "", aggregationTime) for orgID, ag := range analyticsPerOrg { @@ -179,9 +179,9 @@ func (c *SQLAggregatePump) WriteData(ctx context.Context, data []interface{}) er func (c *SQLAggregatePump) DoAggregatedWriting(ctx context.Context, table, orgID string, ag analytics.AnalyticsRecordAggregate) error { recs := []analytics.SQLAnalyticsRecordAggregate{} - for _, d := range ag.Dimensions() { - id := fmt.Sprintf("%v", ag.TimeStamp.Unix()) + orgID + d.Name + d.Value - uID := hex.EncodeToString([]byte(id)) + dimensions := ag.Dimensions() + for _, d := range dimensions { + uID := hex.EncodeToString([]byte(fmt.Sprintf("%v", ag.TimeStamp.Unix()) + orgID + d.Name + d.Value)) rec := analytics.SQLAnalyticsRecordAggregate{ ID: uID, OrgID: orgID, From 4088d900e03e1eee0bd8adf23e67f675afe02054 Mon Sep 17 00:00:00 2001 From: Tyk-ITS Account <92926870+Tyk-ITS@users.noreply.github.com> Date: Mon, 20 Feb 2023 15:23:21 +0100 Subject: [PATCH 044/102] Add remote backend to tf cloud (#572) Co-authored-by: Esteban Ricardo Mirizio --- .github/workflows/pac.yml | 44 ++++++++++++++++++++- ci/repo-policy/main.tf | 15 +++---- ci/repo-policy/modules/github-repos/repo.tf | 1 + 3 files changed, 51 insertions(+), 9 deletions(-) diff --git a/.github/workflows/pac.yml b/.github/workflows/pac.yml index 79740bb80..328a23cd7 100644 --- a/.github/workflows/pac.yml +++ b/.github/workflows/pac.yml @@ -7,13 +7,13 @@ on: env: TERRAFORM_DIR: "./ci/repo-policy" - GITHUB_TOKEN: ${{ secrets.ITS_GH_TOKEN }} jobs: terraform: runs-on: ubuntu-latest permissions: id-token: write + pull-requests: write steps: - name: Checkout @@ -30,16 +30,56 @@ jobs: - uses: hashicorp/setup-terraform@v2 with: terraform_version: 1.3.0 + cli_config_credentials_token: ${{ secrets.TF_API_TOKEN }} - name: Terraform Init working-directory: ${{ env.TERRAFORM_DIR }} id: init run: terraform init -input=false + - name: Terraform Validate + id: validate + run: terraform validate -no-color + - name: Terraform Plan working-directory: ${{ env.TERRAFORM_DIR }} id: plan run: | echo "::group::Terraform Plan" - terraform validate && terraform plan + terraform plan -no-color -input=false echo "::endgroup::" + continue-on-error: true + + - name: Update Pull Request + uses: actions/github-script@v6 + if: github.event_name == 'pull_request' + env: + PLAN: "terraform\n${{ steps.plan.outputs.stdout }}" + with: + github-token: ${{ secrets.GITHUB_TOKEN }} + script: | + 
const output = `#### Terraform Format and Style 🖌\`${{ steps.fmt.outcome }}\` + #### Terraform Initialization ⚙️\`${{ steps.init.outcome }}\` + #### Terraform Plan 📖\`${{ steps.plan.outcome }}\` + #### Terraform Validation 🤖\`${{ steps.validate.outcome }}\` + +
<details><summary>Show Plan</summary> + + \`\`\`\n + ${process.env.PLAN} + \`\`\` + +
+ + *Pushed by: @${{ github.actor }}, Action: \`${{ github.event_name }}\`*`; + + github.rest.issues.createComment({ + issue_number: context.issue.number, + owner: context.repo.owner, + repo: context.repo.repo, + body: output + }) + + - name: Terraform Plan Status + if: steps.plan.outcome == 'failure' + run: exit 1 \ No newline at end of file diff --git a/ci/repo-policy/main.tf b/ci/repo-policy/main.tf index a06571254..9fe33adbc 100644 --- a/ci/repo-policy/main.tf +++ b/ci/repo-policy/main.tf @@ -1,11 +1,12 @@ terraform { #Being used until TFCloud can be used - backend "s3" { - bucket = "terraform-state-devenv" - key = "github-policy/tyk-pump" - region = "eu-central-1" - dynamodb_table = "terraform-state-locks" + backend "remote" { + hostname = "app.terraform.io" + organization = "Tyk" + workspaces { + name = "repo-policy-tyk-pump" + } } required_providers { @@ -35,10 +36,10 @@ module "tyk-pump" { { branch = "master", reviewers = "2", convos = "false", - required_tests = ["1.15,Go 1.16 tests"]}, + required_tests = ["1.15","Go 1.16 tests"]}, { branch = "release-1.7", reviewers = "2", convos = "false", - required_tests = ["1.15,Go 1.16 tests"]}, + required_tests = ["1.15","Go 1.16 tests"]}, ] } \ No newline at end of file diff --git a/ci/repo-policy/modules/github-repos/repo.tf b/ci/repo-policy/modules/github-repos/repo.tf index d3b9250ff..d8c273ba0 100644 --- a/ci/repo-policy/modules/github-repos/repo.tf +++ b/ci/repo-policy/modules/github-repos/repo.tf @@ -20,6 +20,7 @@ resource "github_repository" "repository" { allow_auto_merge = true delete_branch_on_merge = var.delete_branch_on_merge vulnerability_alerts = var.vulnerability_alerts + allow_update_branch = true has_downloads = true has_issues = true has_wiki = var.wiki From 17072c081e11ef1895725eba7c15b80a431c5ff1 Mon Sep 17 00:00:00 2001 From: Kofo Okesola Date: Tue, 21 Feb 2023 12:06:29 +0100 Subject: [PATCH 045/102] [TT-7977] fix: include RootFields in graph mongo and sql pumps (#571) * squash commits modified graph sql pump fix graph pump extended tests extract operation names added operations * rename fields * added rootfields to aggregate graph records, and dimensions * add rootfields test to sql aggregate pump --- analytics/aggregate.go | 19 ++++++++++-- analytics/aggregate_test.go | 27 +++++++++++++++-- analytics/graph_record.go | 11 +++++-- analytics/graph_record_test.go | 24 ++++++++++++++++ pumps/graph_mongo_test.go | 19 ++++++++---- pumps/graph_sql_aggregate_test.go | 48 +++++++++++++++++++++++++++++++ pumps/graph_sql_test.go | 11 +++++++ 7 files changed, 145 insertions(+), 14 deletions(-) diff --git a/analytics/aggregate.go b/analytics/aggregate.go index 15dedd44c..4ad7178f0 100644 --- a/analytics/aggregate.go +++ b/analytics/aggregate.go @@ -64,9 +64,10 @@ type Counter struct { type GraphRecordAggregate struct { AnalyticsRecordAggregate - Types map[string]*Counter - Fields map[string]*Counter - Operation map[string]*Counter + Types map[string]*Counter + Fields map[string]*Counter + Operation map[string]*Counter + RootFields map[string]*Counter } type AnalyticsRecordAggregate struct { @@ -232,6 +233,7 @@ func NewGraphRecordAggregate() GraphRecordAggregate { Types: make(map[string]*Counter), Fields: make(map[string]*Counter), Operation: make(map[string]*Counter), + RootFields: make(map[string]*Counter), } } @@ -345,6 +347,10 @@ func (g *GraphRecordAggregate) Dimensions() []Dimension { dimensions = append(dimensions, Dimension{Name: "operation", Value: key, Counter: fnLatencySetter(inc)}) } + for key, inc := range g.RootFields 
{ + dimensions = append(dimensions, Dimension{Name: "rootfields", Value: key, Counter: fnLatencySetter(inc)}) + } + return dimensions } @@ -643,6 +649,13 @@ func AggregateGraphData(data []interface{}, dbIdentifier string, aggregationTime aggregate.Fields[label].HumanIdentifier = label } } + + for _, field := range graphRec.RootFields { + c = incrementOrSetUnit(&counter, aggregate.RootFields[field]) + aggregate.RootFields[field] = c + aggregate.RootFields[field].Identifier = field + aggregate.RootFields[field].HumanIdentifier = field + } aggregateMap[record.OrgID] = aggregate } return aggregateMap diff --git a/analytics/aggregate_test.go b/analytics/aggregate_test.go index 8a1de8c93..1086db310 100644 --- a/analytics/aggregate_test.go +++ b/analytics/aggregate_test.go @@ -149,6 +149,9 @@ func TestAggregateGraphData(t *testing.T) { "Characters_info": {Hits: 3, ErrorTotal: 0, Success: 3}, "Info_count": {Hits: 3, ErrorTotal: 0, Success: 3}, }, + RootFields: map[string]*Counter{ + "characters": {Hits: 3, ErrorTotal: 0, Success: 3}, + }, }, }, }, @@ -179,6 +182,9 @@ func TestAggregateGraphData(t *testing.T) { "Characters_info": {Hits: 2, ErrorTotal: 0, Success: 2}, "Info_count": {Hits: 2, ErrorTotal: 0, Success: 2}, }, + RootFields: map[string]*Counter{ + "characters": {Hits: 2, ErrorTotal: 0, Success: 2}, + }, }, }, }, @@ -209,6 +215,9 @@ func TestAggregateGraphData(t *testing.T) { "Characters_info": {Hits: 3, ErrorTotal: 1, Success: 2}, "Info_count": {Hits: 3, ErrorTotal: 1, Success: 2}, }, + RootFields: map[string]*Counter{ + "characters": {Hits: 3, ErrorTotal: 1, Success: 2}, + }, }, }, }, @@ -239,6 +248,9 @@ func TestAggregateGraphData(t *testing.T) { "Characters_info": {Hits: 5, ErrorTotal: 2, Success: 3}, "Info_count": {Hits: 5, ErrorTotal: 2, Success: 3}, }, + RootFields: map[string]*Counter{ + "characters": {Hits: 5, ErrorTotal: 2, Success: 3}, + }, }, }, }, @@ -256,6 +268,7 @@ func TestAggregateGraphData(t *testing.T) { // check types and fields compareFields(r, expectedAggregate.Types, actualAggregate.Types) compareFields(r, expectedAggregate.Fields, actualAggregate.Fields) + compareFields(r, expectedAggregate.RootFields, actualAggregate.RootFields) } }) } @@ -299,6 +312,12 @@ func TestAggregateGraphData_Dimension(t *testing.T) { "Characters_info", "Info_count", }, + "operation": { + "Query", + }, + "rootfields": { + "characters", + }, } r := require.New(t) @@ -309,11 +328,15 @@ func TestAggregateGraphData_Dimension(t *testing.T) { fmt.Println(dimensions) for d, values := range responsesCheck { for _, v := range values { + found := false for _, dimension := range dimensions { - if d != dimension.Name && v != dimension.Value && dimension.Counter.Hits != 3 { - t.Errorf("item missing from dimensions: NameL %s, Value: %s, Hits:3", d, v) + if dimension.Name == d && dimension.Value == v && dimension.Counter.Hits == 3 { + found = true } } + if !found { + t.Errorf("item missing from dimensions: NameL %s, Value: %s, Hits:3", d, v) + } } } } diff --git a/analytics/graph_record.go b/analytics/graph_record.go index 13b92fdd6..4d611814a 100644 --- a/analytics/graph_record.go +++ b/analytics/graph_record.go @@ -25,6 +25,7 @@ type GraphRecord struct { OperationType string `gorm:"column:operation_type"` Variables string `gorm:"variables"` + RootFields []string `gorm:"root_fields"` Errors []GraphError `gorm:"errors"` HasErrors bool `gorm:"has_errors"` } @@ -32,6 +33,8 @@ type GraphRecord struct { func (a *AnalyticsRecord) ToGraphRecord() (GraphRecord, error) { record := GraphRecord{ AnalyticsRecord: 
*a, + RootFields: make([]string, 0), + Types: make(map[string][]string), } if a.ResponseCode >= 400 { record.HasErrors = true @@ -78,7 +81,7 @@ func (a *AnalyticsRecord) ToGraphRecord() (GraphRecord, error) { } // get the selection set types to start with - fieldTypeList, err := extractTypesOfSelectionSet(operationRef, request, schema) + fieldTypeList, err := extractOperationSelectionSetTypes(operationRef, &record.RootFields, request, schema) if err != nil { log.WithError(err).Error("error extracting selection set types") return record, err @@ -131,9 +134,9 @@ func (a *AnalyticsRecord) ToGraphRecord() (GraphRecord, error) { return record, nil } -// extractTypesOfSelectionSet extracts all type names of the selection sets in the operation +// extractOperationSelectionSetTypes extracts all type names of the selection sets in the operation // it returns a map of the FieldRef in the req to the type Definition in the schema -func extractTypesOfSelectionSet(operationRef int, req, schema *ast.Document) (map[int]int, error) { +func extractOperationSelectionSetTypes(operationRef int, rootFields *[]string, req, schema *ast.Document) (map[int]int, error) { fieldTypeMap := make(map[int]int) operationDef := req.OperationDefinitions[operationRef] if !operationDef.HasSelections { @@ -154,6 +157,8 @@ func extractTypesOfSelectionSet(operationRef int, req, schema *ast.Document) (ma return nil, errors.New("error getting selection set") } + *rootFields = append(*rootFields, req.FieldNameString(sel.Ref)) + typeRef := schema.ResolveUnderlyingType(schema.FieldDefinitions[selFieldDefRef].Type) if schema.TypeIsScalar(typeRef, schema) || schema.TypeIsEnum(typeRef, schema) { continue diff --git a/analytics/graph_record_test.go b/analytics/graph_record_test.go index d08f1e2b5..4df1c4da1 100644 --- a/analytics/graph_record_test.go +++ b/analytics/graph_record_test.go @@ -139,6 +139,7 @@ func TestAnalyticsRecord_ToGraphRecord(t *testing.T) { graphRecordSample := GraphRecord{ AnalyticsRecord: recordSample, Types: make(map[string][]string), + RootFields: make([]string, 0), } testCases := []struct { @@ -161,6 +162,23 @@ func TestAnalyticsRecord_ToGraphRecord(t *testing.T) { "Info": {"count"}, } g.OperationType = "Query" + g.RootFields = []string{"characters"} + return g + }, + }, + { + title: "multiple query operations", + request: `{"query":"query {\r\n characters(filter: {}) {\r\n info {\r\n count\r\n }\r\n }\r\n listCharacters {\r\n info {\r\n count\r\n }\r\n }\r\n}\r\n"}`, + response: `{"data":{"characters":{"info":{"count":758}}}}`, + expected: func(request, response string) GraphRecord { + g := graphRecordSample + g.HasErrors = false + g.Types = map[string][]string{ + "Characters": {"info"}, + "Info": {"count"}, + } + g.OperationType = "Query" + g.RootFields = []string{"characters", "listCharacters"} return g }, }, @@ -190,6 +208,7 @@ func TestAnalyticsRecord_ToGraphRecord(t *testing.T) { g := graphRecordSample g.HasErrors = false g.OperationType = "Mutation" + g.RootFields = []string{"changeCharacter"} return g }, }, @@ -205,6 +224,7 @@ func TestAnalyticsRecord_ToGraphRecord(t *testing.T) { "Info": {"count"}, } g.OperationType = "Subscription" + g.RootFields = []string{"listenCharacter"} return g }, }, @@ -229,6 +249,7 @@ func TestAnalyticsRecord_ToGraphRecord(t *testing.T) { "Info": {"count"}, } g.OperationType = "Query" + g.RootFields = []string{"listCharacters"} return g }, }, @@ -244,6 +265,7 @@ func TestAnalyticsRecord_ToGraphRecord(t *testing.T) { "Info": {"count"}, } g.OperationType = "Query" + g.RootFields = 
[]string{"characters"} g.Variables = base64.StdEncoding.EncodeToString([]byte(`{"a":"test"}`)) return g }, @@ -268,6 +290,7 @@ func TestAnalyticsRecord_ToGraphRecord(t *testing.T) { "Characters": {"info", "secondInfo"}, } g.OperationType = "Query" + g.RootFields = []string{"listCharacters"} return g }, expectedErr: "", @@ -296,6 +319,7 @@ func TestAnalyticsRecord_ToGraphRecord(t *testing.T) { Message: "Name for character with ID 1002 could not be fetched.", Path: []interface{}{"hero", "heroFriends", float64(1), "name"}, }) + g.RootFields = []string{"characters"} return g }, }, diff --git a/pumps/graph_mongo_test.go b/pumps/graph_mongo_test.go index a86a7c157..3493012c6 100644 --- a/pumps/graph_mongo_test.go +++ b/pumps/graph_mongo_test.go @@ -205,6 +205,7 @@ func TestGraphMongoPump_WriteData(t *testing.T) { OperationType: "Query", HasErrors: false, Errors: []analytics.GraphError{}, + RootFields: []string{"country"}, }, { Types: map[string][]string{ @@ -218,6 +219,7 @@ func TestGraphMongoPump_WriteData(t *testing.T) { Path: []interface{}{}, }, }, + RootFields: []string{"country"}, }, { Types: map[string][]string{ @@ -226,6 +228,7 @@ func TestGraphMongoPump_WriteData(t *testing.T) { OperationType: "Query", HasErrors: true, Errors: []analytics.GraphError{}, + RootFields: []string{"country"}, }, }, }, @@ -251,6 +254,7 @@ func TestGraphMongoPump_WriteData(t *testing.T) { OperationType: "Query", HasErrors: false, Errors: []analytics.GraphError{}, + RootFields: []string{"country"}, }, }, }, @@ -277,16 +281,19 @@ func TestGraphMongoPump_WriteData(t *testing.T) { }, expectedGraphRecords: []analytics.GraphRecord{ { - Types: map[string][]string{}, - Errors: []analytics.GraphError{}, + Types: map[string][]string{}, + Errors: []analytics.GraphError{}, + RootFields: []string{}, }, { - Types: map[string][]string{}, - Errors: []analytics.GraphError{}, + Types: map[string][]string{}, + Errors: []analytics.GraphError{}, + RootFields: []string{}, }, { - Types: map[string][]string{}, - Errors: []analytics.GraphError{}, + Types: map[string][]string{}, + Errors: []analytics.GraphError{}, + RootFields: []string{}, }, }, }, diff --git a/pumps/graph_sql_aggregate_test.go b/pumps/graph_sql_aggregate_test.go index e261e9311..ae82b154e 100644 --- a/pumps/graph_sql_aggregate_test.go +++ b/pumps/graph_sql_aggregate_test.go @@ -258,6 +258,22 @@ func TestSqlGraphAggregatePump_WriteData(t *testing.T) { error: 0, success: 3, }, + { + orgID: "test-org", + dimension: "rootfields", + name: "characters", + hits: 3, + error: 0, + success: 3, + }, + { + orgID: "test-org", + dimension: "operation", + name: "Query", + hits: 3, + error: 0, + success: 3, + }, }, }, { @@ -310,6 +326,22 @@ func TestSqlGraphAggregatePump_WriteData(t *testing.T) { error: 0, success: 2, }, + { + orgID: "test-org", + dimension: "rootfields", + name: "characters", + hits: 2, + error: 0, + success: 2, + }, + { + orgID: "test-org", + dimension: "operation", + name: "Query", + hits: 2, + error: 0, + success: 2, + }, }, }, { @@ -362,6 +394,22 @@ func TestSqlGraphAggregatePump_WriteData(t *testing.T) { error: 1, success: 2, }, + { + orgID: "test-org", + dimension: "rootfields", + name: "characters", + hits: 3, + error: 1, + success: 2, + }, + { + orgID: "test-org", + dimension: "operation", + name: "Query", + hits: 3, + error: 1, + success: 2, + }, }, }, } diff --git a/pumps/graph_sql_test.go b/pumps/graph_sql_test.go index 25451025b..6e4eb4ff8 100644 --- a/pumps/graph_sql_test.go +++ b/pumps/graph_sql_test.go @@ -137,6 +137,7 @@ func 
TestGraphSQLPump_WriteData(t *testing.T) { types map[string][]string operationType string expectedErr []analytics.GraphError + operations []string } testCases := []struct { @@ -173,6 +174,7 @@ func TestGraphSQLPump_WriteData(t *testing.T) { "Country": {"code"}, }, operationType: "Query", + operations: []string{"country"}, }, { types: map[string][]string{ @@ -185,6 +187,7 @@ func TestGraphSQLPump_WriteData(t *testing.T) { Path: []interface{}{}, }, }, + operations: []string{"country"}, }, { types: map[string][]string{ @@ -192,6 +195,7 @@ func TestGraphSQLPump_WriteData(t *testing.T) { }, operationType: "Query", expectedErr: []analytics.GraphError{}, + operations: []string{"country"}, }, }, hasError: false, @@ -222,6 +226,7 @@ func TestGraphSQLPump_WriteData(t *testing.T) { "Country": {"code"}, }, operationType: "Query", + operations: []string{"country"}, }, }, }, @@ -273,6 +278,12 @@ func TestGraphSQLPump_WriteData(t *testing.T) { r.Errors = item.expectedErr r.HasErrors = true } + + if item.operations == nil { + r.RootFields = []string{} + } else { + r.RootFields = item.operations + } expectedResponses = append(expectedResponses, r) } From 5ec13c36bfb4348d675e32497fdb1b72792dc93a Mon Sep 17 00:00:00 2001 From: Tomas Buchaillot Date: Thu, 2 Mar 2023 13:45:57 +0100 Subject: [PATCH 046/102] adding time to stdout pump (#574) --- pumps/stdout.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/pumps/stdout.go b/pumps/stdout.go index ba7f448df..f7fd26516 100644 --- a/pumps/stdout.go +++ b/pumps/stdout.go @@ -3,6 +3,7 @@ package pumps import ( "context" "fmt" + "time" "github.com/TykTechnologies/tyk-pump/analytics" "github.com/mitchellh/mapstructure" @@ -84,6 +85,7 @@ func (s *StdOutPump) WriteData(ctx context.Context, data []interface{}) error { formatter := &logrus.JSONFormatter{} entry := log.WithField(s.conf.LogFieldName, decoded) entry.Level = logrus.InfoLevel + entry.Time = time.Now().UTC() data, _ := formatter.Format(entry) fmt.Print(string(data)) } else { From 7fa0754ddec20460687945c94638c27cd172496d Mon Sep 17 00:00:00 2001 From: Kofo Okesola Date: Mon, 6 Mar 2023 11:08:58 +0100 Subject: [PATCH 047/102] [TT-7820] fix: Fix aggregate graph pump sharding and errors (#575) * fix issue with writing sharded * restructured to graph record * fix tests * fix comments --- analytics/aggregate.go | 6 +- analytics/graph_record.go | 139 ++++++++++++++++++------------ analytics/graph_record_test.go | 130 ++++++++++++++++++++-------- pumps/graph_mongo.go | 2 +- pumps/graph_sql.go | 7 +- pumps/graph_sql_aggregate.go | 34 ++++---- pumps/graph_sql_aggregate_test.go | 21 ++++- 7 files changed, 215 insertions(+), 124 deletions(-) diff --git a/analytics/aggregate.go b/analytics/aggregate.go index 4ad7178f0..7080712c6 100644 --- a/analytics/aggregate.go +++ b/analytics/aggregate.go @@ -600,11 +600,7 @@ func AggregateGraphData(data []interface{}, dbIdentifier string, aggregationTime continue } - graphRec, err := record.ToGraphRecord() - if err != nil { - log.WithError(err).Debug("error converting record to graph record") - continue - } + graphRec := record.ToGraphRecord() aggregate, found := aggregateMap[record.OrgID] if !found { diff --git a/analytics/graph_record.go b/analytics/graph_record.go index 4d611814a..0a87e9259 100644 --- a/analytics/graph_record.go +++ b/analytics/graph_record.go @@ -10,12 +10,13 @@ import ( "io/ioutil" "net/http" + "github.com/buger/jsonparser" + "github.com/TykTechnologies/graphql-go-tools/pkg/ast" "github.com/TykTechnologies/graphql-go-tools/pkg/astnormalization" 
"github.com/TykTechnologies/graphql-go-tools/pkg/astparser" gql "github.com/TykTechnologies/graphql-go-tools/pkg/graphql" "github.com/TykTechnologies/graphql-go-tools/pkg/operationreport" - "github.com/buger/jsonparser" ) type GraphRecord struct { @@ -30,31 +31,34 @@ type GraphRecord struct { HasErrors bool `gorm:"has_errors"` } -func (a *AnalyticsRecord) ToGraphRecord() (GraphRecord, error) { - record := GraphRecord{ - AnalyticsRecord: *a, - RootFields: make([]string, 0), - Types: make(map[string][]string), - } - if a.ResponseCode >= 400 { - record.HasErrors = true +// parseRequest reads the raw encoded request and schema, extracting the type information +// operation information and root field operations +// if an error is encountered it simply breaks the operation regardless of how far along it is. +func (g *GraphRecord) parseRequest(encodedRequest, encodedSchema string) { + if encodedRequest == "" || encodedSchema == "" { + log.Warn("empty request/schema") + return } - rawRequest, err := base64.StdEncoding.DecodeString(a.RawRequest) + rawRequest, err := base64.StdEncoding.DecodeString(encodedRequest) if err != nil { - return record, fmt.Errorf("error decoding raw request: %w", err) + log.WithError(err).Error("error decoding raw request") + return } - schemaBody, err := base64.StdEncoding.DecodeString(a.ApiSchema) + schemaBody, err := base64.StdEncoding.DecodeString(encodedSchema) if err != nil { - return record, fmt.Errorf("error decoding schema: %w", err) + log.WithError(err).Error("error decoding schema") + return } request, schema, operationName, err := generateNormalizedDocuments(rawRequest, schemaBody) if err != nil { - return record, fmt.Errorf("error generating documents: %w", err) + log.WithError(err).Error("error generating document") + return } + if len(request.Input.Variables) != 0 && string(request.Input.Variables) != "null" { - record.Variables = base64.StdEncoding.EncodeToString(request.Input.Variables) + g.Variables = base64.StdEncoding.EncodeToString(request.Input.Variables) } // get the operation ref @@ -67,71 +71,96 @@ func (a *AnalyticsRecord) ToGraphRecord() (GraphRecord, error) { } } } else if len(request.OperationDefinitions) > 1 { - return record, errors.New("no operation name specified") + log.Warn("no operation name specified") + return } // get operation type switch request.OperationDefinitions[operationRef].OperationType { case ast.OperationTypeMutation: - record.OperationType = string(ast.DefaultMutationTypeName) + g.OperationType = string(ast.DefaultMutationTypeName) case ast.OperationTypeSubscription: - record.OperationType = string(ast.DefaultSubscriptionTypeName) + g.OperationType = string(ast.DefaultSubscriptionTypeName) case ast.OperationTypeQuery: - record.OperationType = string(ast.DefaultQueryTypeName) + g.OperationType = string(ast.DefaultQueryTypeName) } // get the selection set types to start with - fieldTypeList, err := extractOperationSelectionSetTypes(operationRef, &record.RootFields, request, schema) + fieldTypeList, err := extractOperationSelectionSetTypes(operationRef, &g.RootFields, request, schema) if err != nil { log.WithError(err).Error("error extracting selection set types") - return record, err + return } typesToFieldsMap := make(map[string][]string) for fieldRef, typeDefRef := range fieldTypeList { if typeDefRef == ast.InvalidRef { err = errors.New("invalid selection set field type") - return record, err + log.Warn("invalid type found") + continue } extractTypesAndFields(fieldRef, typeDefRef, typesToFieldsMap, request, schema) } - 
record.Types = typesToFieldsMap + g.Types = typesToFieldsMap +} - // get response and check to see errors - if a.RawResponse != "" { - responseDecoded, err := base64.StdEncoding.DecodeString(a.RawResponse) - if err != nil { - return record, nil - } - resp, err := http.ReadResponse(bufio.NewReader(bytes.NewReader(responseDecoded)), nil) - if err != nil { - log.WithError(err).Error("error reading raw response") - return record, err - } - defer resp.Body.Close() +// parseResponse looks through the encoded response string and parses information like +// the errors +func (g *GraphRecord) parseResponse(encodedResponse string) { + if encodedResponse == "" { + log.Warn("empty response body") + return + } - dat, err := ioutil.ReadAll(resp.Body) - if err != nil { - log.WithError(err).Error("error reading response body") - return record, err - } - errBytes, t, _, err := jsonparser.Get(dat, "errors") - // check if the errors key exists in the response - if err != nil && err != jsonparser.KeyPathNotFoundError { - // we got an unexpected error parsing te response - log.WithError(err).Error("error getting response errors") - return record, err - } - if t != jsonparser.NotExist { - // errors key exists so unmarshal it - if err := json.Unmarshal(errBytes, &record.Errors); err != nil { - log.WithError(err).Error("error parsing graph errors") - return record, err - } - record.HasErrors = true + responseDecoded, err := base64.StdEncoding.DecodeString(encodedResponse) + if err != nil { + log.WithError(err).Error("error decoding response") + return + } + resp, err := http.ReadResponse(bufio.NewReader(bytes.NewReader(responseDecoded)), nil) + if err != nil { + log.WithError(err).Error("error reading raw response") + return + } + defer resp.Body.Close() + + dat, err := ioutil.ReadAll(resp.Body) + if err != nil { + log.WithError(err).Error("error reading response body") + return + } + errBytes, t, _, err := jsonparser.Get(dat, "errors") + // check if the errors key exists in the response + if err != nil && err != jsonparser.KeyPathNotFoundError { + // we got an unexpected error parsing te response + log.WithError(err).Error("error getting response errors") + return + } + if t != jsonparser.NotExist { + // errors key exists so unmarshal it + if err := json.Unmarshal(errBytes, &g.Errors); err != nil { + log.WithError(err).Error("error parsing graph errors") + return } + g.HasErrors = true } +} + +func (a *AnalyticsRecord) ToGraphRecord() GraphRecord { + record := GraphRecord{ + AnalyticsRecord: *a, + RootFields: make([]string, 0), + Types: make(map[string][]string), + Errors: make([]GraphError, 0), + } + if a.ResponseCode >= 400 { + record.HasErrors = true + } + + record.parseRequest(a.RawRequest, a.ApiSchema) + + record.parseResponse(a.RawResponse) - return record, nil + return record } // extractOperationSelectionSetTypes extracts all type names of the selection sets in the operation diff --git a/analytics/graph_record_test.go b/analytics/graph_record_test.go index 4df1c4da1..268d7ab08 100644 --- a/analytics/graph_record_test.go +++ b/analytics/graph_record_test.go @@ -140,21 +140,21 @@ func TestAnalyticsRecord_ToGraphRecord(t *testing.T) { AnalyticsRecord: recordSample, Types: make(map[string][]string), RootFields: make([]string, 0), + Errors: make([]GraphError, 0), } testCases := []struct { - expected func(string, string) GraphRecord + expected func() GraphRecord modifyRecord func(a AnalyticsRecord) AnalyticsRecord title string request string response string - expectedErr string }{ { title: "no error", request: 
`{"query":"query{\n characters(filter: {\n \n }){\n info{\n count\n }\n }\n}"}`, response: `{"data":{"characters":{"info":{"count":758}}}}`, - expected: func(request, response string) GraphRecord { + expected: func() GraphRecord { g := graphRecordSample g.HasErrors = false g.Types = map[string][]string{ @@ -170,7 +170,7 @@ func TestAnalyticsRecord_ToGraphRecord(t *testing.T) { title: "multiple query operations", request: `{"query":"query {\r\n characters(filter: {}) {\r\n info {\r\n count\r\n }\r\n }\r\n listCharacters {\r\n info {\r\n count\r\n }\r\n }\r\n}\r\n"}`, response: `{"data":{"characters":{"info":{"count":758}}}}`, - expected: func(request, response string) GraphRecord { + expected: func() GraphRecord { g := graphRecordSample g.HasErrors = false g.Types = map[string][]string{ @@ -183,18 +183,17 @@ func TestAnalyticsRecord_ToGraphRecord(t *testing.T) { }, }, { - title: "error field type", + title: "subgraph request", request: `{"query":"query($representations: [_Any!]!){_entities(representations: $representations){... on User {reviews {body}}}}","variables":{"representations":[{"id":"1234","__typename":"User"}]}}`, response: `{"data":{"_entities":[{"reviews":[{"body":"A highly effective form of birth control."},{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits."}]}]}}`, - expected: func(s, s2 string) GraphRecord { + expected: func() GraphRecord { variables := `{"representations":[{"id":"1234","__typename":"User"}]}` g := graphRecordSample g.OperationType = "Query" g.Variables = base64.StdEncoding.EncodeToString([]byte(variables)) - g.Types = nil + g.RootFields = []string{"_entities"} return g }, - expectedErr: "invalid selection set field type", modifyRecord: func(a AnalyticsRecord) AnalyticsRecord { a.ApiSchema = base64.StdEncoding.EncodeToString([]byte(subgraphSchema)) return a @@ -204,7 +203,7 @@ func TestAnalyticsRecord_ToGraphRecord(t *testing.T) { title: "no error mutation", request: `{"query":"mutation{\n changeCharacter()\n}"}`, response: `{"data":{"characters":{"info":{"count":758}}}}`, - expected: func(request, response string) GraphRecord { + expected: func() GraphRecord { g := graphRecordSample g.HasErrors = false g.OperationType = "Mutation" @@ -216,7 +215,7 @@ func TestAnalyticsRecord_ToGraphRecord(t *testing.T) { title: "no error subscription", request: `{"query":"subscription{\n listenCharacter(){\n info{\n count\n }\n }\n}"}`, response: `{"data":{"characters":{"info":{"count":758}}}}`, - expected: func(request, response string) GraphRecord { + expected: func() GraphRecord { g := graphRecordSample g.HasErrors = false g.Types = map[string][]string{ @@ -231,17 +230,23 @@ func TestAnalyticsRecord_ToGraphRecord(t *testing.T) { { title: "bad document", request: `{"query":"subscriptiona{\n listenCharacter(){\n info{\n count\n }\n }\n}"}`, - response: `{"data":{"characters":{"info":{"count":758}}}}`, - expected: func(request, response string) GraphRecord { - return GraphRecord{} + response: `{"errors":[{"message":"invalid document error"}]}`, + expected: func() GraphRecord { + doc := graphRecordSample + doc.HasErrors = true + doc.Errors = []GraphError{ + { + Message: "invalid document error", + }, + } + return doc }, - expectedErr: "error generating documents", }, { title: "no error list operation", request: `{"query":"query{\n listCharacters(){\n info{\n count\n }\n }\n}"}`, response: `{"data":{"characters":{"info":{"count":758}}}}`, - expected: func(request, response string) GraphRecord { + expected: func() 
GraphRecord { g := graphRecordSample g.HasErrors = false g.Types = map[string][]string{ @@ -257,7 +262,7 @@ func TestAnalyticsRecord_ToGraphRecord(t *testing.T) { title: "has variables", request: `{"query":"query{\n characters(filter: {\n \n }){\n info{\n count\n }\n }\n}","variables":{"a":"test"}}`, response: `{"data":{"characters":{"info":{"count":758}}}}`, - expected: func(request, response string) GraphRecord { + expected: func() GraphRecord { g := graphRecordSample g.HasErrors = false g.Types = map[string][]string{ @@ -273,17 +278,23 @@ func TestAnalyticsRecord_ToGraphRecord(t *testing.T) { { title: "no operation", request: `{"query":"query main {\ncharacters {\ninfo\n}\n}\n\nquery second {\nlistCharacters{\ninfo\n}\n}","variables":null,"operationName":""}`, - response: `{"data":{"characters":{"info":{"count":758}}}}`, - expected: func(request, response string) GraphRecord { - return GraphRecord{} + response: `{"errors":[{"message":"no operation specified"}]}`, + expected: func() GraphRecord { + doc := graphRecordSample + doc.HasErrors = true + doc.Errors = []GraphError{ + { + Message: "no operation specified", + }, + } + return doc }, - expectedErr: "no operation name specified", }, { title: "operation name specified", request: `{"query":"query main {\ncharacters {\ninfo\n}\n}\n\nquery second {\nlistCharacters{\ninfo\n secondInfo}\n}","variables":null,"operationName":"second"}`, response: `{"data":{"characters":{"info":{"count":758}}}}`, - expected: func(request, response string) GraphRecord { + expected: func() GraphRecord { g := graphRecordSample g.HasErrors = false g.Types = map[string][]string{ @@ -293,7 +304,6 @@ func TestAnalyticsRecord_ToGraphRecord(t *testing.T) { g.RootFields = []string{"listCharacters"} return g }, - expectedErr: "", }, { title: "has errors", @@ -307,7 +317,7 @@ func TestAnalyticsRecord_ToGraphRecord(t *testing.T) { } ] }`, - expected: func(request, response string) GraphRecord { + expected: func() GraphRecord { g := graphRecordSample g.HasErrors = true g.Types = map[string][]string{ @@ -324,25 +334,74 @@ func TestAnalyticsRecord_ToGraphRecord(t *testing.T) { }, }, { - title: "corrupted raw request should error out", + title: "corrupted raw request ", modifyRecord: func(a AnalyticsRecord) AnalyticsRecord { a.RawRequest = "this isn't a base64 is it?" return a }, - expectedErr: "error decoding raw request", - expected: func(s, s2 string) GraphRecord { - return GraphRecord{} + expected: func() GraphRecord { + return graphRecordSample }, }, { - title: "corrupted schema should error out", + title: "corrupted raw response ", + request: `{"query":"query{\n characters(filter: {\n \n }){\n info{\n count\n }\n }\n}"}`, + modifyRecord: func(a AnalyticsRecord) AnalyticsRecord { + a.RawResponse = "this isn't a base64 is it?" 
+ return a + }, + expected: func() GraphRecord { + g := graphRecordSample + g.Types = map[string][]string{ + "Characters": {"info"}, + "Info": {"count"}, + } + g.OperationType = "Query" + g.RootFields = []string{"characters"} + return g + }, + }, + { + title: "invalid response json ", + request: `{"query":"query{\n characters(filter: {\n \n }){\n info{\n count\n }\n }\n}"}`, + response: "invalid json", + expected: func() GraphRecord { + g := graphRecordSample + g.Types = map[string][]string{ + "Characters": {"info"}, + "Info": {"count"}, + } + g.OperationType = "Query" + g.RootFields = []string{"characters"} + return g + }, + }, + { + title: "corrupted schema should error out", + request: `{"query":"query main {\ncharacters {\ninfo\n}\n}\n\nquery second {\nlistCharacters{\ninfo\n}\n}","variables":null,"operationName":""}`, + response: `{"errors":[{"message":"no operation specified"}]}`, modifyRecord: func(a AnalyticsRecord) AnalyticsRecord { a.ApiSchema = "this isn't a base64 is it?" return a }, - expectedErr: "error decoding schema", - expected: func(s, s2 string) GraphRecord { - return GraphRecord{} + expected: func() GraphRecord { + rec := graphRecordSample + rec.Errors = []GraphError{{Message: "no operation specified"}} + rec.HasErrors = true + return rec + }, + }, + { + title: "error in request", + request: `{"query":"query{\n characters(filter: {\n \n }){\n info{\n counts\n }\n }\n}"}`, + response: `{"errors":[{"message":"illegal field"}]}`, + expected: func() GraphRecord { + g := graphRecordSample + g.HasErrors = true + g.Errors = append(g.Errors, GraphError{ + Message: "illegal field", + }) + return g }, }, } @@ -363,14 +422,9 @@ func TestAnalyticsRecord_ToGraphRecord(t *testing.T) { if testCase.modifyRecord != nil { a = testCase.modifyRecord(a) } - expected := testCase.expected(testCase.request, testCase.response) + expected := testCase.expected() expected.AnalyticsRecord = a - gotten, err := a.ToGraphRecord() - if testCase.expectedErr != "" { - assert.ErrorContains(t, err, testCase.expectedErr) - return - } - assert.NoError(t, err) + gotten := a.ToGraphRecord() if diff := cmp.Diff(expected, gotten, cmpopts.IgnoreFields(AnalyticsRecord{}, "RawRequest", "RawResponse")); diff != "" { t.Fatal(diff) } diff --git a/pumps/graph_mongo.go b/pumps/graph_mongo.go index 11ecd282f..754d43ffd 100644 --- a/pumps/graph_mongo.go +++ b/pumps/graph_mongo.go @@ -112,7 +112,7 @@ func (g *GraphMongoPump) WriteData(ctx context.Context, data []interface{}) erro g.log.Warn("skipping record parsing") gr = analytics.GraphRecord{AnalyticsRecord: r} } else { - gr, err = r.ToGraphRecord() + gr = r.ToGraphRecord() if err != nil { errCh <- err g.log.WithError(err).Warn("error converting 1 record to graph record") diff --git a/pumps/graph_sql.go b/pumps/graph_sql.go index adabd8a24..89b415ba2 100644 --- a/pumps/graph_sql.go +++ b/pumps/graph_sql.go @@ -115,12 +115,7 @@ func (g *GraphSQLPump) getGraphRecords(data []interface{}) []*analytics.GraphRec if rec, ok = r.(analytics.AnalyticsRecord); !ok || !rec.IsGraphRecord() { continue } - gr, err := rec.ToGraphRecord() - if err != nil { - g.log.Warnf("error converting 1 record") - g.log.WithError(err).Debug("error converting record") - continue - } + gr := rec.ToGraphRecord() graphRecords = append(graphRecords, &gr) } } diff --git a/pumps/graph_sql_aggregate.go b/pumps/graph_sql_aggregate.go index 050fc766d..cd9ef78f7 100644 --- a/pumps/graph_sql_aggregate.go +++ b/pumps/graph_sql_aggregate.go @@ -123,25 +123,29 @@ func (s *GraphSQLAggregatePump) WriteData(ctx 
context.Context, data []interface{ i = dataLen // write all records at once for non-sharded case, stop for loop after 1 iteration table = analytics.AggregateGraphSQLTable } - } - // if StoreAnalyticsPerMinute is set to true, we will create new documents with records every 1 minute - var aggregationTime int - if s.SQLConf.StoreAnalyticsPerMinute { - aggregationTime = 1 - } else { - aggregationTime = 60 - } + // if StoreAnalyticsPerMinute is set to true, we will create new documents with records every 1 minute + var aggregationTime int + if s.SQLConf.StoreAnalyticsPerMinute { + aggregationTime = 1 + } else { + aggregationTime = 60 + } - aggregates := analytics.AggregateGraphData(data[startIndex:endIndex], "", aggregationTime) + analyticsPerOrg := analytics.AggregateGraphData(data[startIndex:endIndex], "", aggregationTime) - for orgID := range aggregates { - aggr := aggregates[orgID] - if err := s.DoAggregatedWriting(ctx, table, orgID, &aggr); err != nil { - s.log.WithError(err).Error("error writing record") - return err + for orgID := range analyticsPerOrg { + ag := analyticsPerOrg[orgID] + err := s.DoAggregatedWriting(ctx, table, orgID, &ag) + if err != nil { + s.log.WithError(err).Error("error writing record") + return err + } } + + startIndex = i // next day start index, necessary for sharded case } + s.log.Info("Purged ", dataLen, " records...") return nil } @@ -172,7 +176,7 @@ func (s *GraphSQLAggregatePump) DoAggregatedWriting(ctx context.Context, table, } // we use excluded as temp table since it's supported by our SQL storages https://www.postgresql.org/docs/9.5/sql-insert.html#SQL-ON-CONFLICT https://www.sqlite.org/lang_UPSERT.html - tx := s.db.WithContext(ctx).Table(analytics.AggregateGraphSQLTable).Clauses(clause.OnConflict{ + tx := s.db.WithContext(ctx).Table(table).Clauses(clause.OnConflict{ Columns: []clause.Column{{Name: "id"}}, DoUpdates: clause.Assignments(analytics.OnConflictAssignments(table, "excluded")), }).Create(recs[i:ends]) diff --git a/pumps/graph_sql_aggregate_test.go b/pumps/graph_sql_aggregate_test.go index ae82b154e..7ebfd4d2d 100644 --- a/pumps/graph_sql_aggregate_test.go +++ b/pumps/graph_sql_aggregate_test.go @@ -464,8 +464,8 @@ func TestGraphSQLAggregatePump_WriteData_Sharded(t *testing.T) { Year: 2022, Hour: 0, OrgID: "test-org", - RawRequest: fmt.Sprintf(requestTemplate, len(sampleQuery), sampleQuery), - RawResponse: fmt.Sprintf(responseTemplate, len(sampleResponse), sampleResponse), + RawRequest: base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf(requestTemplate, len(sampleQuery), sampleQuery))), + RawResponse: base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf(responseTemplate, len(sampleResponse), sampleResponse))), } t.Run("should shard successfully", func(t *testing.T) { @@ -492,9 +492,22 @@ func TestGraphSQLAggregatePump_WriteData_Sharded(t *testing.T) { record := sampleRecord secondRecord := sampleRecord secondRecord.TimeStamp = time.Date(2023, 1, 2, 0, 0, 0, 0, time.UTC) + secondRecord.Year = 2023 assert.False(t, pump.db.Migrator().HasTable(analytics.AggregateGraphSQLTable)) r.NoError(pump.WriteData(context.Background(), []interface{}{record, secondRecord})) - assert.True(t, pump.db.Migrator().HasTable(analytics.AggregateGraphSQLTable+"_20220101")) - assert.True(t, pump.db.Migrator().HasTable(analytics.AggregateGraphSQLTable+"_20230102")) + firstShardedTable, secondShardedTable := analytics.AggregateGraphSQLTable+"_20220101", analytics.AggregateGraphSQLTable+"_20230102" + assert.True(t, pump.db.Migrator().HasTable(firstShardedTable), "table %s 
does not exist", firstShardedTable) + assert.True(t, pump.db.Migrator().HasTable(secondShardedTable), "table %s does not exist", secondShardedTable) + + // check records + aggr := make([]analytics.SQLAnalyticsRecordAggregate, 0) + res := pump.db.Table(firstShardedTable).Find(&aggr) + assert.NoError(t, res.Error) + assert.NotEmpty(t, aggr, "table %s does not contain records", firstShardedTable) + + aggr = make([]analytics.SQLAnalyticsRecordAggregate, 0) + res = pump.db.Table(secondShardedTable).Find(&aggr) + assert.NoError(t, res.Error) + assert.NotEmpty(t, aggr, "table %s does not contain records", secondShardedTable) }) } From 18b16f1d4a64ee593feacd7c7752dec57ec3f5bd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ram=C3=B3n=20M=C3=A1rquez?= Date: Mon, 13 Mar 2023 12:29:23 +0100 Subject: [PATCH 048/102] Add Resurface backend to pumps (#531) - Populates http.Request and http.Response structs with (preferably raw) data obtained from each Tyk Analytics record. - Supports capture of custom Tyk fields, including API ID, Name, Version, Org ID, and Oauth ID. - Test file and updated README are included. --- README.md | 31 ++++ go.mod | 12 +- go.sum | 178 +++++++++++++++++++--- pump.example.conf | 7 + pumps/init.go | 1 + pumps/resurface.go | 237 +++++++++++++++++++++++++++++ pumps/resurface_test.go | 327 ++++++++++++++++++++++++++++++++++++++++ 7 files changed, 770 insertions(+), 23 deletions(-) create mode 100644 pumps/resurface.go create mode 100644 pumps/resurface_test.go diff --git a/README.md b/README.md index 8c2dfbae1..fc432e57e 100644 --- a/README.md +++ b/README.md @@ -54,6 +54,7 @@ The table below provides details on the fields within each `tyk_analytics` recor - [CSV](#csv-config) - [ElasticSearch (2.0+)](#elasticsearch-config) - [Graylog](#graylog) +- [Resurface.io](#resurfaceio) - InfluxDB - [InfluxDB2](#influx2-config) - [Moesif](#moesif-config) @@ -288,6 +289,36 @@ TYK_PMP_PUMPS_GRAYLOG_META_GRAYLOGPORT=12216 TYK_PMP_PUMPS_GRAYLOG_META_TAGS=method,path,response_code,api_key,api_version,api_name,api_id,org_id,oauth_id,raw_request,request_time,raw_response,ip_address ``` +## Resurface.io +Resurface provides data-driven API security, by making each and every API call a durable transaction inside a purpose-built data lake. Use Resurface for attack and failure triage, root cause, threat and risk identification, and simply just knowing how your APIs are being used (and misused!). By continously scanning your own data lake, Resurface provides retroactive analysis. It identifies what's important in your API data, sending warnings and alerts in real-time for fast action. + +The only two fields necessary in the pump cofiguration are: + + - `capture_url` corresponds to the Resurface database [capture endpoint URL](https://resurface.io/docs/#getting-capture-url). You might need to subsitute `localhost` for the corresponding hostname, if you're not running resurface locally. + - `rules` corresponds to an [active set of rules](https://resurface.io/logging-rules) that control what data is logged and how sensitive data is masked. The example below applies a predefined set of rules (`include debug`), but logging rules are easily customized to meet the needs of any application. 
+ +**Note: Resurface requires Detailed Logging to be enabled in order to capture API call details in full.** + +###### JSON / Conf File Example + +```.json + +"resurface": { + "type": "resurfaceio", + "meta": { + "capture_url": "http://localhost:7701/message", + "rules": "include debug" + } +} +``` + +###### Env Variables +``` +TYK_PMP_PUMPS_RESURFACEIO_TYPE=resurfaceio +TYK_PMP_PUMPS_RESURFACEIO_META_URL=http://localhost:7701/message +TYK_PMP_PUMPS_RESURFACEIO_META_RULES="include debug" +``` + ## StatsD Example of integrating with StatsD: diff --git a/go.mod b/go.mod index c39a289d6..6f79d1d56 100644 --- a/go.mod +++ b/go.mod @@ -18,7 +18,7 @@ require ( github.com/go-ole/go-ole v1.2.4 // indirect github.com/go-redis/redis/v8 v8.3.1 github.com/gocraft/health v0.0.0-20170925182251-8675af27fef0 - github.com/golang/protobuf v1.5.0 + github.com/golang/protobuf v1.5.2 github.com/google/go-cmp v0.5.9 github.com/gorilla/mux v1.8.0 github.com/influxdata/influxdb v1.8.3 @@ -34,8 +34,9 @@ require ( github.com/olivere/elastic/v7 v7.0.28 github.com/oschwald/maxminddb-golang v1.5.0 github.com/pkg/errors v0.9.1 - github.com/prometheus/client_golang v1.11.0 + github.com/prometheus/client_golang v1.13.0 github.com/quipo/statsd v0.0.0-20160923160612-75b7afedf0d2 + github.com/resurfaceio/logger-go/v3 v3.2.1 github.com/robertkowalski/graylog-golang v0.0.0-20151121031040-e5295cfa2827 github.com/satori/go.uuid v1.2.0 github.com/segmentio/analytics-go v0.0.0-20160711225931-bdb0aeca8a99 @@ -43,12 +44,11 @@ require ( github.com/segmentio/kafka-go v0.3.6 github.com/shirou/gopsutil v3.20.11+incompatible // indirect github.com/sirupsen/logrus v1.8.1 - github.com/stretchr/testify v1.8.0 + github.com/stretchr/testify v1.8.1 github.com/syndtr/goleveldb v0.0.0-20190318030020-c3a204f8e965 // indirect github.com/xtgo/uuid v0.0.0-20140804021211-a0b114877d4c // indirect - golang.org/x/lint v0.0.0-20200302205851-738671d3881b // indirect - golang.org/x/net v0.0.0-20210614182718-04defd469f4e - google.golang.org/protobuf v1.27.1 + golang.org/x/net v0.0.0-20220225172249-27dd8689420f + google.golang.org/protobuf v1.28.1 gopkg.in/alecthomas/kingpin.v2 v2.2.6 gopkg.in/mgo.v2 v2.0.0-20190816093944-a6b53ec6cb22 gopkg.in/olivere/elastic.v3 v3.0.56 diff --git a/go.sum b/go.sum index 80e4e01f2..ac2153cf0 100644 --- a/go.sum +++ b/go.sum @@ -8,14 +8,31 @@ cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTj cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= cloud.google.com/go v0.51.0/go.mod h1:hWtGJ6gnXH+KgDv+V0zFGDvpi07n3z8ZNj3T1RW0Gcw= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= +cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= 
+cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= cloud.google.com/go/bigtable v1.2.0/go.mod h1:JcVAOl45lrTmQfLj7T6TxyMzIN/3FGGcFm+2xVAli2o= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= collectd.org v0.3.0/go.mod h1:A/8DzQBkF6abtvrT2j/AU/4tiBgJWYyh0y/oB/4MlWE= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/99designs/gqlgen v0.13.1-0.20210728041543-7e38dd46943c h1:tEDQ6XnvZQ98sZd7iqq5pe4YsstBu7TOS6T5GhNsp2s= @@ -82,6 +99,8 @@ github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hC github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d h1:Byv0BzEl3/e6D5CLfI0j/7hiIEtvGVFPCZ7Ei2oq8iQ= +github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= github.com/aws/aws-sdk-go v1.29.11/go.mod h1:1KvfttTE3SPKMpo8g2c6jL3ZKfXtFvKscTgahTma5Xg= github.com/aws/aws-sdk-go v1.40.32/go.mod h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm+LY1U59Q= github.com/aws/aws-sdk-go-v2 v1.10.0/go.mod h1:U/EyyVvKtzmFeQQcca7eBotKdlpcP2zzU6bXBYcf7CE= @@ -229,13 +248,16 @@ github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31/go.mod h1:Ogl1T github.com/go-chi/chi/v5 v5.0.0/go.mod h1:BBug9lr0cqtdAhsu6R4AAdvufI0/XBzAQSsUqJpoZOs= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= +github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= github.com/go-ldap/ldap 
v3.0.2+incompatible/go.mod h1:qfd9rJvER9Q0/D/Sqn1DfHRoBp40uXYvFoEVrNEPqRc= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= +github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-ole/go-ole v1.2.4 h1:nNBDSCOigTSiarFpYE9J/KtEA1IOW4CNeqT9TQDqCxI= github.com/go-ole/go-ole v1.2.4/go.mod h1:XCwSNxSkXRo4vlyPy93sltvi/qJq0jqQhjqQNIwKuxM= github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= @@ -288,12 +310,16 @@ github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4er github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= -github.com/golang/mock v1.4.1 h1:ocYkMQY5RrXTYgXl7ICpV0IXwlEQGwKIsery4gyXa1U= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.4 h1:l75CXGRSwbaYNpl/Z2X1XIIAMSCquvXgpVZDhwEIJsc= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= @@ -303,8 +329,9 @@ github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvq github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.5.0 h1:LUVKkCeviFUMKqHa4tXIIij/lbhnMbP7Fn5wKdKkRh4= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.3 h1:fHPg5GQYlCeLIPB9BZqMVR5nR9A+IM5zcgeTdjMYmLA= @@ -317,7 +344,9 @@ github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5a github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod 
h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= @@ -329,9 +358,14 @@ github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y= @@ -489,14 +523,17 @@ github.com/jinzhu/now v1.1.2/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/ github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= +github.com/joho/godotenv v1.4.0 h1:3l4+N6zfMWnkbPEXKng2o2/MR5mSwTrBih4ZEkkz1lg= +github.com/joho/godotenv v1.4.0/go.mod h1:f4LDr5Voq0i2e/R5DDNOoa2zzDfwtkZa6DnEwAbqwq4= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.11 h1:uVUAXhF2To8cbw/3xN3pxj6kk7TYKs98NIrTqPlMWAQ= github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.12 
h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/jsternberg/zap-logfmt v1.0.0/go.mod h1:uvPs/4X51zdkcm5jXl5SYoN+4RK21K8mysFmDaM/h+o= @@ -616,8 +653,9 @@ github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/moesif/moesifapi-go v1.0.6 h1:r3ppy6p5jxzdauziRI3lMtcjDpVH/zW2an2rYXLkNWE= github.com/moesif/moesifapi-go v1.0.6/go.mod h1:wRGgVy0QeiCgnjFEiD13HD2Aa7reI8nZXtCnddNnZGs= github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ= @@ -697,8 +735,10 @@ github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndr github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= -github.com/prometheus/client_golang v1.11.0 h1:HNkLOAEQMIDv/K+04rukrLx6ch7msSRwf3/SASFAGtQ= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= +github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= +github.com/prometheus/client_golang v1.13.0 h1:b71QUfeo5M8gq2+evJdTPfZhYMAU0uKPkyPJ7TPsloU= +github.com/prometheus/client_golang v1.13.0/go.mod h1:vTeo+zgvILHsnnj/39Ou/1fPN5nJFOEMgftOUOmlvYQ= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= @@ -707,13 +747,17 @@ github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6T github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= -github.com/prometheus/common v0.26.0 h1:iMAkS2TDoNWnKM+Kopnx/8tnEStIfpYA0ur0xQzzhMQ= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= +github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= +github.com/prometheus/common v0.37.0 h1:ccBbHCgIiT9uSoFY0vX8H3zsNR5eLt17/RQLUvn8pXE= 
+github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= -github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo= +github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4= github.com/qri-io/jsonpointer v0.1.1 h1:prVZBZLL6TW5vsSB9fFHFAMBLI4b0ri5vribQlTJiBA= github.com/qri-io/jsonpointer v0.1.1/go.mod h1:DnJPaYgiKu56EuDp8TU5wFLdZIcAnb/uH9v37ZaMV64= github.com/qri-io/jsonschema v0.2.1 h1:NNFoKms+kut6ABPf6xiKNM5214jzxAhDBrPHCJ97Wg0= @@ -722,6 +766,8 @@ github.com/quipo/statsd v0.0.0-20160923160612-75b7afedf0d2 h1:IvjiJDGCF8L8TjKHQK github.com/quipo/statsd v0.0.0-20160923160612-75b7afedf0d2/go.mod h1:1COUodqytMiv/GkAVUGhc0CA6e8xak5U4551TY7iEe0= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/resurfaceio/logger-go/v3 v3.2.1 h1:tTPvGp+FpH35aaT/nnhP4n/Rh/f1vHe64WoXTDgv0fY= +github.com/resurfaceio/logger-go/v3 v3.2.1/go.mod h1:YPcxFUcloW37F1WQA9MUcGWu2JzlvBxlCfFF5+T3GO8= github.com/retailnext/hllpp v1.0.1-0.20180308014038-101a6d2f8b52/go.mod h1:RDpi1RftBQPUCDRw6SmxeaREsAaRKnOclghuzp/WRzc= github.com/robertkowalski/graylog-golang v0.0.0-20151121031040-e5295cfa2827 h1:D2Xs0bSuqpKnUOOlK4yu6lloeOs4+oD+pjbOfsxgWu0= github.com/robertkowalski/graylog-golang v0.0.0-20151121031040-e5295cfa2827/go.mod h1:jONcYFk83vUF1lv0aERAwaFtDM9wUW4BMGmlnpLJyZY= @@ -789,8 +835,9 @@ github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3 github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= -github.com/stretchr/objx v0.4.0 h1:M2gUjqZET1qApGOWNSnZ49BAIMX4F/1plDv3+l31EJ4= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.2.0/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.1/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= @@ -800,8 +847,9 @@ github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5 github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk= 
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/syndtr/goleveldb v0.0.0-20190318030020-c3a204f8e965 h1:V/AztY/q2oW5ghho7YMgUJQkKvSACHRxpeDyT5DxpIo= github.com/syndtr/goleveldb v0.0.0-20190318030020-c3a204f8e965/go.mod h1:9OrXJhf154huy1nPWmuSrkgjPUtUNhA+Zmy+6AESzuA= @@ -852,13 +900,16 @@ github.com/xlab/treeprint v0.0.0-20180616005107-d6fb6747feb6/go.mod h1:ce1O1j6Ut github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= github.com/xtgo/uuid v0.0.0-20140804021211-a0b114877d4c h1:3lbZUMbMiGUW/LMkfsEABsc5zNT9+b1CvsJx47JzJ8g= github.com/xtgo/uuid v0.0.0-20140804021211-a0b114877d4c/go.mod h1:UrdRz5enIKZ63MEE3IF9l2/ebyx59GyGgPi+tICQdmM= +github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opentelemetry.io/otel v0.13.0 h1:2isEnyzjjJZq6r2EKMsFj4TxiQiexsM04AVhwbR/oBA= go.opentelemetry.io/otel v0.13.0/go.mod h1:dlSNewoRYikTkotEnxdmuBHgzT+k/idJSfDv/FxEnOY= @@ -913,6 +964,9 @@ golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm0 golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= @@ -924,6 +978,7 @@ golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHl golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint 
v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20200302205851-738671d3881b h1:Wh+f8QHJXR411sJR8/vRBTZ7YapZaRvUcLFFJhusH0k= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= @@ -931,6 +986,7 @@ golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCc golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= @@ -949,15 +1005,24 @@ golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191009170851-d66e71096ffb/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net 
v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= @@ -965,23 +1030,31 @@ golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwY golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210614182718-04defd469f4e h1:XpT3nA5TvE525Ne3hInMh6+GETgn27Zfm9dxsThnX2Q= +golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync 
v0.0.0-20220601150217-0de741cfad7f h1:Ax0t5p6N38Ga0dThY21weqDEyz2oklo4IvDkpigvkD8= +golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1007,6 +1080,7 @@ golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1015,12 +1089,23 @@ golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200107162124-548cf772de50/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys 
v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200826173525-f9321e4c35a6/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1032,11 +1117,15 @@ golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211116061358-0a5406a5449c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220405210540-1e041c57c461 h1:kHVeDEnfKn3T238CvrUcz6KeEsFHVaKh4kMTt6Wsysg= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220405210540-1e041c57c461/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a h1:dGzPydgVsqGcTRVwiLJ1jVbufYwmzD3LfVPLKsKg+0k= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 h1:v+OssWQX+hTHEmOBgwxdZxK4zHq3yOs8F9J7mk0PY8E= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1044,8 +1133,9 @@ golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5f golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -1083,11 +1173,29 @@ golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod 
h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200108203644-89082a384178/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a h1:CB3a9Nez8M13wwlr/E2YtwoU+qYHKfC+JrDa45RXXoQ= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1110,12 +1218,22 @@ google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEn google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= 
+google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= -google.golang.org/appengine v1.6.5 h1:tycE03LOZYQNhDpS27tcQdAzLCVMaj7QT2SXxebnpCM= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6 h1:lMO5rYAqUxkmaj76jAkRUvt5JZgFymx/+Q5Mzfivuhc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= @@ -1131,7 +1249,23 @@ google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvx google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20200108215221-bd8f9a0ef82f/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= 
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= @@ -1143,6 +1277,11 @@ google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyac google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= @@ -1152,11 +1291,13 @@ google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzi google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= +google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/Masterminds/sprig.v2 v2.21.0/go.mod h1:DtHmW+kdrJpYMY6Mk6OHFNi/8EBAnNYVRUffwRCNHgA= gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= @@ -1190,8 +1331,9 @@ gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 
v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= @@ -1207,6 +1349,8 @@ honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= nhooyr.io/websocket v1.8.7 h1:usjR2uOr/zjjkVMy0lW+PPohFok7PCow5sDjLgX4P4g= nhooyr.io/websocket v1.8.7/go.mod h1:B70DZP8IakI65RVQ51MsWP/8jndNma26DVA/nFSCgW0= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= diff --git a/pump.example.conf b/pump.example.conf index f6d7fab66..ed4ee6535 100644 --- a/pump.example.conf +++ b/pump.example.conf @@ -62,6 +62,13 @@ "buffered": true, "buffered_max_messages": 32 } + }, + "resurfaceio": { + "type": "resurfaceio", + "meta": { + "capture_url": "http://localhost:7701/message", + "rules": "include debug" + } } }, "uptime_pump_config": { diff --git a/pumps/init.go b/pumps/init.go index 1bdaacdf8..86f9393a6 100644 --- a/pumps/init.go +++ b/pumps/init.go @@ -35,4 +35,5 @@ func init() { AvailablePumps["mongo-graph"] = &GraphMongoPump{} AvailablePumps["sql-graph"] = &GraphSQLPump{} AvailablePumps["sql-graph-aggregate"] = &GraphSQLAggregatePump{} + AvailablePumps["resurfaceio"] = &ResurfacePump{} } diff --git a/pumps/resurface.go b/pumps/resurface.go new file mode 100644 index 000000000..a5d2daca1 --- /dev/null +++ b/pumps/resurface.go @@ -0,0 +1,237 @@ +package pumps + +import ( + "context" + "encoding/base64" + "errors" + "io/ioutil" + "net/http" + "net/url" + "reflect" + "strconv" + "strings" + + "github.com/TykTechnologies/tyk-pump/analytics" + "github.com/mitchellh/mapstructure" + "github.com/resurfaceio/logger-go/v3" +) + +type ResurfacePump struct { + logger *logger.HttpLogger + config *ResurfacePumpConfig + CommonPumpConfig +} + +type ResurfacePumpConfig struct { + EnvPrefix string `mapstructure:"meta_env_prefix"` + URL string `mapstructure:"capture_url"` + Rules string + Queue []string +} + +const ( + resurfacePrefix = "resurface-pump" + resurfacePumpName = "Resurface Pump" + resurfaceDefaultEnv = PUMPS_ENV_PREFIX + "_RESURFACEIO" + PUMPS_ENV_META_PREFIX +) + +func (rp *ResurfacePump) New() Pump { + newPump := ResurfacePump{} + return &newPump +} + +func (rp *ResurfacePump) GetName() string { + return resurfacePumpName +} + +func (rp *ResurfacePump) GetEnvPrefix() string { + return rp.config.EnvPrefix +} + +func (rp *ResurfacePump) Init(config interface{}) error { + rp.config = &ResurfacePumpConfig{} + rp.log = 
log.WithField("prefix", resurfacePrefix) + + err := mapstructure.Decode(config, &rp.config) + if err != nil { + rp.log.Debug("Failed to decode configuration: ", err) + return err + } + + processPumpEnvVars(rp, rp.log, rp.config, resurfaceDefaultEnv) + + opt := logger.Options{ + Rules: rp.config.Rules, + Url: rp.config.URL, + Queue: rp.config.Queue, + } + rp.logger, err = logger.NewHttpLogger(opt) + if err != nil { + rp.log.Error(err) + return err + } + if !rp.logger.Enabled() { + rp.log.Info(rp.GetName() + " Initialized (Logger disabled)") + return errors.New("logger is not enabled") + } + rp.log.Info(rp.GetName() + " Initialized") + return nil +} + +func parseHeaders(headersString string, existingHeaders http.Header) (headers http.Header) { + if existingHeaders != nil { + headers = http.Header.Clone(existingHeaders) + } else { + headers = http.Header{} + } + for _, line := range strings.Split(headersString, "\r\n") { + header := strings.Split(line, ": ") + if len(header) < 2 { + continue + } + headers.Add(header[0], header[1]) + } + return +} + +func mapRawData(rec *analytics.AnalyticsRecord) (httpReq http.Request, httpResp http.Response, customFields map[string]string, err error) { + var req [3]string + var res [3]string + tykFields := [6]string{ + "API-ID", + "API-Key", + "API-Name", + "API-Version", + "Oauth-ID", + "Org-ID", + } + + // Decode raw HTTP transaction from base64 strings + rawBytesReq, err := base64.StdEncoding.DecodeString(rec.RawRequest) + if err != nil { + return + } + rawBytesRes, err := base64.StdEncoding.DecodeString(rec.RawResponse) + if err != nil { + return + } + rawReq := string(rawBytesReq) + rawRes := string(rawBytesRes) + + // Slice first line, headers, body+trailers + copy(req[:2], strings.SplitN(rawReq, "\r\n", 2)) + copy(res[:2], strings.SplitN(rawRes, "\r\n", 2)) + copy(req[1:], strings.SplitN(req[1], "\r\n\r\n", 2)) + copy(res[1:], strings.SplitN(res[1], "\r\n\r\n", 2)) + + // Request method + method := rec.Method + if method == "" { + method = strings.Fields(req[0])[0] + } + + // Request URL + // schema := "http" // TODO - could the AnalyticsRecord struct be modified to include the target URL Schema? 
+ path := rec.RawPath + rawPath := strings.Fields(req[0])[1] + if path == "" { + path = rawPath + } else if idx := strings.Index(rawPath, "?"); idx != -1 { + path += rawPath[idx:] + } + if !strings.Contains(path, "://") && !strings.HasPrefix(path, "/") { + path = "/" + path + } + parsedURL, err := url.Parse(path) + if err != nil { + return + } + + // Request headers + reqHeaders := parseHeaders(req[1], nil) + + // Request address + if reqHeaders.Get("X-FORWARDED-FOR") == "" { + reqHeaders.Add("X-FORWARDED-FOR", rec.IPAddress) + } + + // Request host + host := rec.Host + if host == "" { + host = reqHeaders.Get("Host") + } + + // Custom Tyk fields + customFields = make(map[string]string, len(tykFields)) + for _, field := range tykFields { + key := strings.ReplaceAll(field, "-", "") + if value := reflect.ValueOf(rec).Elem().FieldByName(key).String(); value != "" { + customFields["tyk-"+field] = value + } + } + + // Response Status + status := rec.ResponseCode + if status == 0 { + status, err = strconv.Atoi(strings.Fields(res[0])[1]) + if err != nil { + return + } + } + + // Response Headers + resHeaders := parseHeaders(res[1], nil) + + // Response Trailers + if res[2] != "" && resHeaders.Get("Transfer-Encoding") == "chunked" && resHeaders.Get("Trailer") != "" { + lastChunkIndex := strings.LastIndex(res[2], "0\r\n") + 3 + resHeaders = parseHeaders(res[2][lastChunkIndex:], resHeaders) + res[2] = res[2][:lastChunkIndex] + } + + httpReq = http.Request{ + Method: method, + Host: host, + URL: parsedURL, + Header: reqHeaders, + Body: ioutil.NopCloser(strings.NewReader(req[2])), + } + + if parsedURL.IsAbs() { + httpReq.RequestURI = path + } + + httpResp = http.Response{ + StatusCode: status, + Header: resHeaders, + Body: ioutil.NopCloser(strings.NewReader(res[2])), + } + + return +} + +func (rp *ResurfacePump) WriteData(ctx context.Context, data []interface{}) error { + rp.log.Debug("Writing ", len(data), " records") + + for _, v := range data { + decoded, ok := v.(analytics.AnalyticsRecord) + if !ok { + rp.log.Error("Error decoding analytic record") + continue + } + if len(decoded.RawRequest) == 0 && len(decoded.RawResponse) == 0 { + rp.log.Warn("Record dropped. 
Please enable Detailed Logging.") + continue + } + + req, resp, customFields, err := mapRawData(&decoded) + if err != nil { + rp.log.Error(err) + continue + } + + logger.SendHttpMessage(rp.logger, &resp, &req, decoded.TimeStamp.Unix()*1000, decoded.RequestTime, customFields) + } + + return nil +} diff --git a/pumps/resurface_test.go b/pumps/resurface_test.go new file mode 100644 index 000000000..7e6f8e861 --- /dev/null +++ b/pumps/resurface_test.go @@ -0,0 +1,327 @@ +package pumps + +import ( + "context" + "strings" + "testing" + "time" + + "github.com/TykTechnologies/tyk-pump/analytics" + "github.com/stretchr/testify/assert" +) + +const ( + rawReq string = "R0VUIGdldCBIVFRQLzEuMQ0KSG9zdDogbG9jYWxob3N0OjgwODANClVzZXItQWdlbnQ6IE1vemls" + + "bGEvNS4wIChYMTE7IFVidW50dTsgTGludXggeDg2XzY0OyBydjo5MS4wKSBHZWNrby8yMDEwMDEwMSBGaXJlZm94" + + "LzkxLjANCkFjY2VwdDogdGV4dC9odG1sLGFwcGxpY2F0aW9uL3hodG1sK3htbCxhcHBsaWNhdGlvbi94bWw7cT0w" + + "LjksaW1hZ2Uvd2VicCwqLyo7cT0wLjgNCkFjY2VwdC1FbmNvZGluZzogZ3ppcCwgZGVmbGF0ZQ0KQWNjZXB0LUxh" + + "bmd1YWdlOiBlbi1VUyxlbjtxPTAuNQ0KU2VjLUZldGNoLURlc3Q6IGRvY3VtZW50DQpTZWMtRmV0Y2gtTW9kZTog" + + "bmF2aWdhdGUNClNlYy1GZXRjaC1TaXRlOiBub25lDQo=" + + rawResp string = "SFRUUC8xLjEgMjAwIE9LDQpDb250ZW50LUxlbmd0aDogNDI5DQpBY2Nlc3MtQ29udHJvbC1BbGxv" + + "dy1DcmVkZW50aWFsczogdHJ1ZQ0KQWNjZXNzLUNvbnRyb2wtQWxsb3ctT3JpZ2luOiAqDQpDb250ZW50LVR5cGU6I" + + "GFwcGxpY2F0aW9uL2pzb24NCkRhdGU6IFR1ZSwgMTYgTWF5IDIwMjAgMjA6NDA6NDUgR01UDQpTZXJ2ZXI6IGd1bm" + + "ljb3JuLzE5LjkuMA0KWC1SYXRlbGltaXQtTGltaXQ6IDANClgtUmF0ZWxpbWl0LVJlbWFpbmluZzogMA0KWC1SYXR" + + "lbGltaXQtUmVzZXQ6IDANCg0Kew0KICAic2xpZGVzaG93Ijogew0KICAgICJhdXRob3IiOiAiWW91cnMgVHJ1bHki" + + "LCANCiAgICAiZGF0ZSI6ICJkYXRlIG9mIHB1YmxpY2F0aW9uIiwgDQogICAgInNsaWRlcyI6IFsNCiAgICAgIHsNC" + + "iAgICAgICAgInRpdGxlIjogIldha2UgdXAgdG8gV29uZGVyV2lkZ2V0cyEiLCANCiAgICAgICAgInR5cGUiOiAiYW" + + "xsIg0KICAgICAgfSwgDQogICAgICB7DQogICAgICAgICJpdGVtcyI6IFsNCiAgICAgICAgICAiV2h5IDxlbT5Xb25" + + "kZXJXaWRnZXRzPC9lbT4gYXJlIGdyZWF0IiwgDQogICAgICAgICAgIldobyA8ZW0+YnV5czwvZW0+IFdvbmRlcldp" + + "ZGdldHMiDQogICAgICAgIF0sIA0KICAgICAgICAidGl0bGUiOiAiT3ZlcnZpZXciLCANCiAgICAgICAgInR5cGUiO" + + "iAiYWxsIg0KICAgICAgfQ0KICAgIF0sIA0KICAgICJ0aXRsZSI6ICJTYW1wbGUgU2xpZGUgU2hvdyINCiAgfQ0KfQ" + + "==" + + rawRespOneChunk string = "SFRUUC8xLjEgMjAwIE9LDQpDb250ZW50LVR5cGU6IHRleHQvcGxhaW4NClRyYW5zZmVy" + + "LUVuY29kaW5nOiBjaHVua2VkDQoNCjcNCg0KTW96aWxsYQ0KDQoxMQ0KDQpEZXZlbG9wZXIgTmV0d29yaw0KDQowD" + + "QoNCg0KDQo=" + + rawRespChunks string = "SFRUUC8xLjEgMjAwIE9LDQpUcmFuc2Zlci1FbmNvZGluZzogY2h1bmtlZA0KQ29udGVudC" + + "1UeXBlOiB0ZXh0L2h0bWwNCg0KYw0KPGgxPmdvITwvaDE+DQoNCjFiDQo8aDE+Zmlyc3QgY2h1bmsgbG9hZGVkPC9" + + "oMT4NCg0KMmENCjxoMT5zZWNvbmQgY2h1bmsgbG9hZGVkIGFuZCBkaXNwbGF5ZWQ8L2gxPg0KDQoyOQ0KPGgxPnRo" + + "aXJkIGNodW5rIGxvYWRlZCBhbmQgZGlzcGxheWVkPC9oMT4NCg0KMA0K" + + rawRespChunksTrailer string = "SFRUUC8xLjEgMjAwIE9LDQpUcmFuc2Zlci1FbmNvZGluZzogY2h1bmtlZA0KQ29" + + "udGVudC1UeXBlOiB0ZXh0L2h0bWwNClRyYWlsZXI6IEV4cGlyZXMNCg0KYw0KPGgxPmdvITwvaDE+DQoNCjFiDQo8" + + "aDE+Zmlyc3QgY2h1bmsgbG9hZGVkPC9oMT4NCg0KMmENCjxoMT5zZWNvbmQgY2h1bmsgbG9hZGVkIGFuZCBkaXNwb" + + "GF5ZWQ8L2gxPg0KDQoyOQ0KPGgxPnRoaXJkIGNodW5rIGxvYWRlZCBhbmQgZGlzcGxheWVkPC9oMT4NCg0KMA0KRX" + + "hwaXJlczogV2VkLCAyMSBPY3QgMjAxNSAwNzoyODowMCBHTVQNCg0K" +) + +func SetUp(t *testing.T, url string, queue []string, rules string) (*ResurfacePump, map[string]interface{}) { + pmp := ResurfacePump{} + cfg := make(map[string]interface{}) + cfg["capture_url"] = url + cfg["queue"] = queue + cfg["rules"] = rules + + err := pmp.Init(cfg) + 
assert.Nil(t, err, "Problem initializing "+pmp.GetName()) + + return &pmp, cfg +} + +func TestResurfaceInit(t *testing.T) { + pmp, cfg := SetUp(t, "http://localhost:7701/message", nil, "include debug") + assert.NotNil(t, pmp.logger) + assert.True(t, pmp.logger.Enabled()) + + // Checking with invalid config + cfg["capture_url"] = "not a valid URL" + pmp2 := ResurfacePump{} + err2 := pmp2.Init(cfg) + assert.NotNil(t, err2) + assert.False(t, pmp2.logger.Enabled()) +} + +func TestResurfaceWriteData(t *testing.T) { + const MockHost = "test0" + + pmp, _ := SetUp(t, "", make([]string, 0), "include debug") + + recs := []interface{}{ + analytics.AnalyticsRecord{ + Host: MockHost, + Method: "GET", + ResponseCode: 200, + RawRequest: rawReq, + RawResponse: rawResp, + TimeStamp: time.Now(), + }, + analytics.AnalyticsRecord{ + Host: MockHost, + Method: "POST", + ResponseCode: 200, + RawRequest: rawReq, + RawResponse: rawResp, + TimeStamp: time.Now(), + }, + analytics.AnalyticsRecord{ + Host: MockHost, + Method: "GET", + ResponseCode: 500, + RawRequest: rawReq, + RawResponse: rawResp, + TimeStamp: time.Now(), + }, + analytics.AnalyticsRecord{ + Host: MockHost, + Method: "Not valid", + ResponseCode: 1200, + RawRequest: rawReq, + RawResponse: rawResp, + TimeStamp: time.Now(), + }, + analytics.AnalyticsRecord{ + Host: MockHost, + Method: "GET", + RawRequest: rawReq, + RawResponse: rawResp, + TimeStamp: time.Now(), + }, + } + + err := pmp.WriteData(context.TODO(), recs) + assert.Nil(t, err, pmp.GetName()+"couldn't write records") + + queue := pmp.logger.Queue() + assert.Equal(t, len(recs), len(queue)) + + for i, message := range queue { + assert.Contains(t, message, "[\"request_url\",\"http://"+MockHost) + assert.NotContains(t, message, "[\"request_url\",\"http://localhost:8080/get\"]") + if i%2 == 0 { + assert.Contains(t, message, "[\"request_method\",\"GET\"]") + } + assert.Contains(t, message, "[\"request_header:user-agent\",\"Mozilla/5.0 (X11; Ubuntu") + assert.Contains(t, message, "[\"request_header:accept\",\"text/html,application/xhtml+xml,application/xml;q=0.9,") + assert.Contains(t, message, "[\"request_header:accept-encoding\",\"gzip, deflate\"]") + assert.Contains(t, message, "[\"request_header:accept-language\",\"en-US,en;q=0.5\"]") + assert.Contains(t, message, "[\"request_header:sec-fetch-dest\",\"document\"]") + assert.Contains(t, message, "[\"request_header:sec-fetch-mode\",\"navigate\"]") + assert.Contains(t, message, "[\"request_header:sec-fetch-site\",\"none\"]") + + if i&2 != 2 { + assert.Contains(t, message, "response_code\",\"200") + } + assert.Contains(t, message, "[\"response_header:content-length\",\"429\"]") + assert.Contains(t, message, "[\"response_header:access-control-allow-credentials\",\"true\"]") + assert.Contains(t, message, "[\"response_header:access-control-allow-origin\",\"*\"]") + assert.Contains(t, message, "[\"response_header:content-type\",\"application/json\"]") + assert.Contains(t, message, "[\"response_header:content-type\",\"application/json\"]") + assert.Contains(t, message, "[\"response_body") + assert.Contains(t, message, "Yours Truly") + } + + err = pmp.WriteData(context.TODO(), []interface{}{ + analytics.AnalyticsRecord{ + Host: MockHost, + Method: "PUT", + ResponseCode: 404, + RawRequest: "bm90IHZhbGlkCg==", + RawResponse: "bm90IHZhbGlkCg==", + TimeStamp: time.Now(), + }, + }) + assert.Nil(t, err, pmp.GetName()+"couldn't write records") + + queue = pmp.logger.Queue() + assert.Equal(t, len(recs)+1, len(queue)) + + message := queue[len(queue)-1] + 
assert.Contains(t, message, "[\"request_url\",\"http://"+MockHost) + assert.NotContains(t, message, "[\"request_url\",\"http://localhost:8080/get\"]") + assert.Contains(t, message, "[\"request_method\",\"PUT\"]") + assert.NotContains(t, message, "[\"request_method\",\"GET\"]") + assert.NotContains(t, message, "request_header") + + assert.Contains(t, message, "response_code\",\"404") + assert.NotContains(t, message, "response_code\",\"200") + assert.NotContains(t, message, "response_header") + assert.NotContains(t, message, "response_body") + assert.NotContains(t, message, "Yours Truly") +} + +func TestResurfaceWriteCustomFields(t *testing.T) { + pmp, _ := SetUp(t, "", make([]string, 0), "include debug") + + recs := []interface{}{ + analytics.AnalyticsRecord{ + APIID: "my-api-123", + OrgID: "my-org-abc", + Host: "testone", + Method: "GET", + ResponseCode: 200, + RawRequest: rawReq, + RawResponse: rawResp, + TimeStamp: time.Now(), + }, + analytics.AnalyticsRecord{ + APIID: " hello ", + OrgID: " world", + Host: "testtwo", + Method: "POST", + ResponseCode: 200, + RawRequest: rawReq, + RawResponse: rawResp, + TimeStamp: time.Now(), + }, + analytics.AnalyticsRecord{ + APIID: "727dad853a8a45f64ab981154d1ffdad", + APIKey: "an-uhashed-key", + APIName: "Foo API", + APIVersion: "0.1.0-b", + OauthID: "my-oauth-client-id", + OrgID: "my-org-abc", + Host: "test-3", + Method: "GET", + ResponseCode: 500, + RawRequest: rawReq, + RawResponse: rawResp, + TimeStamp: time.Now(), + }, + analytics.AnalyticsRecord{ + APIID: "", + OrgID: "", + Host: "test-four", + Method: "GET", + RawRequest: rawReq, + RawResponse: rawResp, + TimeStamp: time.Now(), + }, + } + + err := pmp.WriteData(context.TODO(), recs) + assert.Nil(t, err, pmp.GetName()+"couldn't write records") + + queue := pmp.logger.Queue() + assert.Equal(t, len(recs), len(queue)) + + for i, message := range queue { + if i < 3 { + assert.Contains(t, message, strings.ToLower("custom_field:tyk-API-ID\",\""+recs[i].(analytics.AnalyticsRecord).APIID)) + assert.Contains(t, message, strings.ToLower("custom_field:tyk-Org-ID\",\""+recs[i].(analytics.AnalyticsRecord).OrgID)) + if i == 2 { + assert.Contains(t, message, strings.ToLower("custom_field:tyk-API-Key\",\""+recs[i].(analytics.AnalyticsRecord).APIKey)) + assert.Contains(t, message, strings.ToLower("custom_field:tyk-API-Name\",\""+recs[i].(analytics.AnalyticsRecord).APIName)) + assert.Contains(t, message, strings.ToLower("custom_field:tyk-API-Version\",\""+recs[i].(analytics.AnalyticsRecord).APIVersion)) + assert.Contains(t, message, strings.ToLower("custom_field:tyk-Oauth-ID\",\""+recs[i].(analytics.AnalyticsRecord).OauthID)) + } + } else { + assert.NotContains(t, message, "custom_field:tyk") + } + } +} + +func TestResurfaceWriteChunkedResponse(t *testing.T) { + pmp, _ := SetUp(t, "", make([]string, 0), "include debug") + + recs := []interface{}{ + analytics.AnalyticsRecord{ + Host: "test-three", + Method: "GET", + RawRequest: rawReq, + RawResponse: rawRespOneChunk, + TimeStamp: time.Now(), + }, + analytics.AnalyticsRecord{ + APIID: "api-id-x", + OrgID: "api-org-y", + Host: "test-4", + Method: "GET", + RawRequest: rawReq, + RawResponse: rawRespChunks, + TimeStamp: time.Now(), + }, + analytics.AnalyticsRecord{ + APIID: "", + OrgID: "", + Host: "test.five", + Method: "GET", + RawRequest: rawReq, + RawResponse: rawRespChunksTrailer, + TimeStamp: time.Now(), + }, + } + + err := pmp.WriteData(context.TODO(), recs) + if err != nil { + t.Fatal(pmp.GetName()+"couldn't write records with err:", err) + } + + queue := 
pmp.logger.Queue() + assert.Equal(t, len(recs), len(queue)) + + for i, message := range queue { + assert.Contains(t, message, "request_url\",\"http://test") + assert.NotContains(t, message, "response_header:content-length") + assert.Contains(t, message, "[\"response_header:transfer-encoding\",\"chunked\"]") + if i != 4 { + assert.Regexp(t, `\[\"response_body\",\".*\\r\\n0(?:\\r\\n)+\"\]`, message) + } else { + assert.Regexp(t, `\[\"response_body\",\".*\\r\\n0\\r\\nExpires:.*\"\]`, message) + } + } +} + +func TestResurfaceSkipWrite(t *testing.T) { + pmp, _ := SetUp(t, "", make([]string, 0), "include debug") + + recs := []interface{}{ + analytics.AnalyticsRecord{ + APIID: "an-api-id", + OrgID: "an-api-org", + Host: "test6", + Method: "POST", + RawRequest: "", + RawResponse: "", + TimeStamp: time.Now(), + }, + } + + err := pmp.WriteData(context.TODO(), recs) + assert.Nil(t, err, pmp.GetName()+"couldn't write records") + + queue := pmp.logger.Queue() + assert.Equal(t, 0, len(queue)) + assert.Empty(t, queue) +} From 59339289445557e0d70c20465108e12141aa952a Mon Sep 17 00:00:00 2001 From: Tomas Buchaillot Date: Mon, 13 Mar 2023 16:47:34 +0100 Subject: [PATCH 049/102] TT-7163 Fix prometheus import #584 --- go.mod | 2 +- go.sum | 154 +++++---------------------------------------------------- 2 files changed, 12 insertions(+), 144 deletions(-) diff --git a/go.mod b/go.mod index 6f79d1d56..531b69410 100644 --- a/go.mod +++ b/go.mod @@ -34,7 +34,7 @@ require ( github.com/olivere/elastic/v7 v7.0.28 github.com/oschwald/maxminddb-golang v1.5.0 github.com/pkg/errors v0.9.1 - github.com/prometheus/client_golang v1.13.0 + github.com/prometheus/client_golang v1.11.0 github.com/quipo/statsd v0.0.0-20160923160612-75b7afedf0d2 github.com/resurfaceio/logger-go/v3 v3.2.1 github.com/robertkowalski/graylog-golang v0.0.0-20151121031040-e5295cfa2827 diff --git a/go.sum b/go.sum index ac2153cf0..611687329 100644 --- a/go.sum +++ b/go.sum @@ -8,31 +8,14 @@ cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTj cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= cloud.google.com/go v0.51.0/go.mod h1:hWtGJ6gnXH+KgDv+V0zFGDvpi07n3z8ZNj3T1RW0Gcw= -cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= -cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= -cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= -cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= -cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= -cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= -cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= -cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= -cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= -cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= -cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= cloud.google.com/go/bigtable v1.2.0/go.mod h1:JcVAOl45lrTmQfLj7T6TxyMzIN/3FGGcFm+2xVAli2o= cloud.google.com/go/datastore 
v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= -cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= -cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= -cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= -cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= -cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= -cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= collectd.org v0.3.0/go.mod h1:A/8DzQBkF6abtvrT2j/AU/4tiBgJWYyh0y/oB/4MlWE= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/99designs/gqlgen v0.13.1-0.20210728041543-7e38dd46943c h1:tEDQ6XnvZQ98sZd7iqq5pe4YsstBu7TOS6T5GhNsp2s= @@ -248,16 +231,13 @@ github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31/go.mod h1:Ogl1T github.com/go-chi/chi/v5 v5.0.0/go.mod h1:BBug9lr0cqtdAhsu6R4AAdvufI0/XBzAQSsUqJpoZOs= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= -github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= github.com/go-ldap/ldap v3.0.2+incompatible/go.mod h1:qfd9rJvER9Q0/D/Sqn1DfHRoBp40uXYvFoEVrNEPqRc= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= -github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-ole/go-ole v1.2.4 h1:nNBDSCOigTSiarFpYE9J/KtEA1IOW4CNeqT9TQDqCxI= github.com/go-ole/go-ole v1.2.4/go.mod h1:XCwSNxSkXRo4vlyPy93sltvi/qJq0jqQhjqQNIwKuxM= github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= @@ -310,16 +290,12 @@ github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4er github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= -github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.1 h1:ocYkMQY5RrXTYgXl7ICpV0IXwlEQGwKIsery4gyXa1U= github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.3/go.mod 
h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.4 h1:l75CXGRSwbaYNpl/Z2X1XIIAMSCquvXgpVZDhwEIJsc= -github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= @@ -344,9 +320,7 @@ github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5a github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= @@ -358,14 +332,9 @@ github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= -github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y= @@ -531,9 +500,8 @@ github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX github.com/json-iterator/go v1.1.6/go.mod 
h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.11 h1:uVUAXhF2To8cbw/3xN3pxj6kk7TYKs98NIrTqPlMWAQ= github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= -github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/jsternberg/zap-logfmt v1.0.0/go.mod h1:uvPs/4X51zdkcm5jXl5SYoN+4RK21K8mysFmDaM/h+o= @@ -653,9 +621,8 @@ github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= -github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/moesif/moesifapi-go v1.0.6 h1:r3ppy6p5jxzdauziRI3lMtcjDpVH/zW2an2rYXLkNWE= github.com/moesif/moesifapi-go v1.0.6/go.mod h1:wRGgVy0QeiCgnjFEiD13HD2Aa7reI8nZXtCnddNnZGs= github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ= @@ -735,10 +702,8 @@ github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndr github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= +github.com/prometheus/client_golang v1.11.0 h1:HNkLOAEQMIDv/K+04rukrLx6ch7msSRwf3/SASFAGtQ= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= -github.com/prometheus/client_golang v1.13.0 h1:b71QUfeo5M8gq2+evJdTPfZhYMAU0uKPkyPJ7TPsloU= -github.com/prometheus/client_golang v1.13.0/go.mod h1:vTeo+zgvILHsnnj/39Ou/1fPN5nJFOEMgftOUOmlvYQ= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= @@ -747,17 +712,13 @@ github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6T github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= 
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= +github.com/prometheus/common v0.26.0 h1:iMAkS2TDoNWnKM+Kopnx/8tnEStIfpYA0ur0xQzzhMQ= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= -github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= -github.com/prometheus/common v0.37.0 h1:ccBbHCgIiT9uSoFY0vX8H3zsNR5eLt17/RQLUvn8pXE= -github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo= -github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4= github.com/qri-io/jsonpointer v0.1.1 h1:prVZBZLL6TW5vsSB9fFHFAMBLI4b0ri5vribQlTJiBA= github.com/qri-io/jsonpointer v0.1.1/go.mod h1:DnJPaYgiKu56EuDp8TU5wFLdZIcAnb/uH9v37ZaMV64= github.com/qri-io/jsonschema v0.2.1 h1:NNFoKms+kut6ABPf6xiKNM5214jzxAhDBrPHCJ97Wg0= @@ -900,16 +861,13 @@ github.com/xlab/treeprint v0.0.0-20180616005107-d6fb6747feb6/go.mod h1:ce1O1j6Ut github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= github.com/xtgo/uuid v0.0.0-20140804021211-a0b114877d4c h1:3lbZUMbMiGUW/LMkfsEABsc5zNT9+b1CvsJx47JzJ8g= github.com/xtgo/uuid v0.0.0-20140804021211-a0b114877d4c/go.mod h1:UrdRz5enIKZ63MEE3IF9l2/ebyx59GyGgPi+tICQdmM= -github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opentelemetry.io/otel v0.13.0 h1:2isEnyzjjJZq6r2EKMsFj4TxiQiexsM04AVhwbR/oBA= go.opentelemetry.io/otel v0.13.0/go.mod h1:dlSNewoRYikTkotEnxdmuBHgzT+k/idJSfDv/FxEnOY= @@ -964,9 +922,6 @@ golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm0 golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp 
v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= -golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= @@ -977,16 +932,13 @@ golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHl golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f h1:J5lckAjkw6qYlOZNj90mLYNTEKDvWeuc1yieZ8qUzUE= golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= -golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20200302205851-738671d3881b h1:Wh+f8QHJXR411sJR8/vRBTZ7YapZaRvUcLFFJhusH0k= -golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= @@ -1005,24 +957,15 @@ golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191009170851-d66e71096ffb/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net 
v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= @@ -1030,9 +973,7 @@ golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwY golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc= golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -1040,21 +981,16 @@ golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4Iltr golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync 
v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f h1:Ax0t5p6N38Ga0dThY21weqDEyz2oklo4IvDkpigvkD8= -golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1080,7 +1016,6 @@ golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1089,23 +1024,12 @@ golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200107162124-548cf772de50/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys 
v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200826173525-f9321e4c35a6/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1118,10 +1042,8 @@ golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211116061358-0a5406a5449c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220405210540-1e041c57c461 h1:kHVeDEnfKn3T238CvrUcz6KeEsFHVaKh4kMTt6Wsysg= golang.org/x/sys v0.0.0-20220405210540-1e041c57c461/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a h1:dGzPydgVsqGcTRVwiLJ1jVbufYwmzD3LfVPLKsKg+0k= -golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY= @@ -1173,29 +1095,10 @@ golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools 
v0.0.0-20200108203644-89082a384178/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= -golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a h1:CB3a9Nez8M13wwlr/E2YtwoU+qYHKfC+JrDa45RXXoQ= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1218,22 +1121,12 @@ google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEn google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= 
-google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5 h1:tycE03LOZYQNhDpS27tcQdAzLCVMaj7QT2SXxebnpCM= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.6 h1:lMO5rYAqUxkmaj76jAkRUvt5JZgFymx/+Q5Mzfivuhc= -google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= @@ -1249,23 +1142,7 @@ google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvx google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20200108215221-bd8f9a0ef82f/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= -google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= -google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto 
v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= @@ -1277,11 +1154,6 @@ google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyac google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= -google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= -google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= @@ -1291,7 +1163,6 @@ google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzi google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= @@ -1331,9 +1202,8 @@ gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= -gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= @@ -1349,8 +1219,6 @@ honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.3/go.mod 
h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= nhooyr.io/websocket v1.8.7 h1:usjR2uOr/zjjkVMy0lW+PPohFok7PCow5sDjLgX4P4g= nhooyr.io/websocket v1.8.7/go.mod h1:B70DZP8IakI65RVQ51MsWP/8jndNma26DVA/nFSCgW0= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= From 67663813f37703e8869dad45e23fc5215ebf7adb Mon Sep 17 00:00:00 2001 From: Tit Petric Date: Tue, 14 Mar 2023 13:10:25 +0100 Subject: [PATCH 050/102] Update murmur3 to latest (#580) Co-authored-by: Tit Petric --- go.mod | 2 +- go.sum | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/go.mod b/go.mod index 531b69410..2a5b8ef15 100644 --- a/go.mod +++ b/go.mod @@ -6,7 +6,7 @@ require ( github.com/DataDog/datadog-go v4.7.0+incompatible github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d // indirect github.com/TykTechnologies/graphql-go-tools v1.6.2-0.20220811124354-8d1f142966f8 - github.com/TykTechnologies/murmur3 v0.0.0-20180602122059-1915e687e465 + github.com/TykTechnologies/murmur3 v0.0.0-20230310161213-aad17efd5632 github.com/TykTechnologies/tyk v0.0.0-20200207055804-cf1d1ad81206 github.com/aws/aws-sdk-go-v2 v1.16.14 github.com/aws/aws-sdk-go-v2/config v1.9.0 diff --git a/go.sum b/go.sum index 611687329..f457b65ac 100644 --- a/go.sum +++ b/go.sum @@ -57,8 +57,9 @@ github.com/TykTechnologies/goverify v0.0.0-20160822133757-7ccc57452ade/go.mod h1 github.com/TykTechnologies/graphql-go-tools v1.6.2-0.20220811124354-8d1f142966f8 h1:CA59ssz4bwLkd7pzkDpZOnlMzzraq/TEbJ6xvQpSPCc= github.com/TykTechnologies/graphql-go-tools v1.6.2-0.20220811124354-8d1f142966f8/go.mod h1:Cxpyt1EQHf8bRqAfZStqbgHif8YWngLga7tpnHRSRwU= github.com/TykTechnologies/leakybucket v0.0.0-20170301023702-71692c943e3c/go.mod h1:GnHUbsQx+ysI10osPhUdTmsxcE7ef64cVp38Fdyd7e0= -github.com/TykTechnologies/murmur3 v0.0.0-20180602122059-1915e687e465 h1:A2gBjoX8aF0G3GHEpHyj2f0ixuPkCgcGqmPdKHSkW+0= github.com/TykTechnologies/murmur3 v0.0.0-20180602122059-1915e687e465/go.mod h1:sqH/SPFr11m9cahie7ulBuBX9TOhfBX1sp+qf9jh3Vg= +github.com/TykTechnologies/murmur3 v0.0.0-20230310161213-aad17efd5632 h1:T5NWziFusj8au5nxAqMMh/bZyX9CAyYnBkaMSsfH6BA= +github.com/TykTechnologies/murmur3 v0.0.0-20230310161213-aad17efd5632/go.mod h1:UsPYgOFBpNzDXLEti7MKOwHLpVSqdzuNGkVFPspQmnQ= github.com/TykTechnologies/openid2go v0.0.0-20200122120050-1b642583380a/go.mod h1:rGlqNE4CvxZIeiHp0mgrw+/jdGSjJzkZ0n78hhHMdfM= github.com/TykTechnologies/tyk v0.0.0-20200207055804-cf1d1ad81206 h1:dYSY3KkcFkITF+q8FWpPS87ggv1Rex1Vmmu9q4t4Pwc= github.com/TykTechnologies/tyk v0.0.0-20200207055804-cf1d1ad81206/go.mod h1:+WNQ0t1t4ZCh0Z+mDnnyNAQZc5hVJ490iqLOWKPLIMI= From 3e521bee7dec48db0faeaebca266f0a971862fa3 Mon Sep 17 00:00:00 2001 From: Tomas Buchaillot Date: Tue, 14 Mar 2023 16:04:10 +0100 Subject: [PATCH 051/102] TT-8314 replace uuid lib + bump go version #582 --- analytics/demo/demo.go | 7 +++++-- go.mod | 4 ++-- go.sum | 6 ------ 3 files changed, 7 insertions(+), 10 deletions(-) diff --git a/analytics/demo/demo.go b/analytics/demo/demo.go index 0ac9423c0..bef7cc91c 100644 --- a/analytics/demo/demo.go +++ b/analytics/demo/demo.go @@ -9,7 +9,7 @@ import ( "github.com/TykTechnologies/tyk-pump/logger" "github.com/gocraft/health" - uuid "github.com/satori/go.uuid" + "github.com/gofrs/uuid" ) var ( @@ -129,7 +129,10 @@ func GenerateAPIKeys(orgId string) { } func generateAPIKey(orgId string) 
string { - u1 := uuid.NewV4() + u1, err := uuid.NewV4() + if err != nil { + log.WithError(err).Error("failed to generate UUID") + } id := strings.Replace(u1.String(), "-", "", -1) return orgId + id } diff --git a/go.mod b/go.mod index 2a5b8ef15..17189da50 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/TykTechnologies/tyk-pump -go 1.15 +go 1.16 require ( github.com/DataDog/datadog-go v4.7.0+incompatible @@ -18,6 +18,7 @@ require ( github.com/go-ole/go-ole v1.2.4 // indirect github.com/go-redis/redis/v8 v8.3.1 github.com/gocraft/health v0.0.0-20170925182251-8675af27fef0 + github.com/gofrs/uuid v3.3.0+incompatible github.com/golang/protobuf v1.5.2 github.com/google/go-cmp v0.5.9 github.com/gorilla/mux v1.8.0 @@ -38,7 +39,6 @@ require ( github.com/quipo/statsd v0.0.0-20160923160612-75b7afedf0d2 github.com/resurfaceio/logger-go/v3 v3.2.1 github.com/robertkowalski/graylog-golang v0.0.0-20151121031040-e5295cfa2827 - github.com/satori/go.uuid v1.2.0 github.com/segmentio/analytics-go v0.0.0-20160711225931-bdb0aeca8a99 github.com/segmentio/backo-go v0.0.0-20160424052352-204274ad699c // indirect github.com/segmentio/kafka-go v0.3.6 diff --git a/go.sum b/go.sum index f457b65ac..0d3823e56 100644 --- a/go.sum +++ b/go.sum @@ -74,10 +74,8 @@ github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRF github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d h1:UQZhZ2O0vMHr2cI+DC1Mbh0TJxzA3RcLoMsFw+aXw7E= github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= -github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883 h1:bvNMNQO63//z+xNgfBlViaCIJKLlCJ6/fmUseuG0wVQ= github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= github.com/apache/arrow/go/arrow v0.0.0-20191024131854-af6fa24be0db/go.mod h1:VTxUBvSJ3s3eHAg65PNgrsn5BtqCRPdmyXh6rAfdxN0= -github.com/arbovm/levenshtein v0.0.0-20160628152529-48b4e1c0c4d0 h1:jfIu9sQUG6Ig+0+Ap1h4unLjW6YQJpKZVmUzxsD4E/Q= github.com/arbovm/levenshtein v0.0.0-20160628152529-48b4e1c0c4d0/go.mod h1:t2tdKJDJF9BV14lnkjHmOQgcvEKgtqs5a1N3LNdJhGE= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= @@ -181,7 +179,6 @@ github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZm github.com/dgryski/go-bitstream v0.0.0-20180413035011-3522498ce2c8/go.mod h1:VMaSuZ+SZcx/wljOQKvp5srsbCiKDEb6K2wC4+PiBmQ= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= -github.com/dgryski/trifles v0.0.0-20200323201526-dd97f9abfb48 h1:fRzb/w+pyskVMQ+UbP35JkH8yB7MYb4q/qhBarqZE6g= github.com/dgryski/trifles v0.0.0-20200323201526-dd97f9abfb48/go.mod h1:if7Fbed8SFyPtHLHbg49SI7NAdJiC5WIA09pe59rfAA= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= @@ -214,7 +211,6 @@ github.com/franela/goblin v0.0.0-20181003173013-ead4ad1d2727 h1:eouy4stZdUKn7n98 github.com/franela/goblin v0.0.0-20181003173013-ead4ad1d2727/go.mod 
h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8 h1:a9ENSRDFBUPkJ5lCgVZh26+ZbGyoVJG7yb5SSzF5H54= github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20= -github.com/frankban/quicktest v1.11.3 h1:8sXhOn0uLys67V8EsXLc6eszDs8VXWxL3iRvebPhedY= github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= @@ -243,7 +239,6 @@ github.com/go-ole/go-ole v1.2.4 h1:nNBDSCOigTSiarFpYE9J/KtEA1IOW4CNeqT9TQDqCxI= github.com/go-ole/go-ole v1.2.4/go.mod h1:XCwSNxSkXRo4vlyPy93sltvi/qJq0jqQhjqQNIwKuxM= github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/go-playground/assert/v2 v2.0.1 h1:MsBgLAaY856+nPRTKrp3/OZK38U/wa0CcBYNjji3q3A= github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= github.com/go-playground/locales v0.13.0 h1:HyWk6mgj5qFqCT5fjGBuRArbVDfE4hi8+e8ceBS/t7Q= github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8= @@ -941,7 +936,6 @@ golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKG golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= From e1bb7c9db5f14b730b805b2fb300cd9d41549e08 Mon Sep 17 00:00:00 2001 From: Esteban Ricardo Mirizio Date: Wed, 15 Mar 2023 14:59:13 -0300 Subject: [PATCH 052/102] automated push by gromit (#587) Co-authored-by: Gromit --- .github/CODEOWNERS | 2 ++ .github/workflows/pac.yml | 4 ++-- {ci/repo-policy => repo-policy}/main.tf | 2 +- {ci/repo-policy => repo-policy}/modules/github-repos/repo.tf | 0 .../modules/github-repos/variables.tf | 0 5 files changed, 5 insertions(+), 3 deletions(-) rename {ci/repo-policy => repo-policy}/main.tf (98%) rename {ci/repo-policy => repo-policy}/modules/github-repos/repo.tf (100%) rename {ci/repo-policy => repo-policy}/modules/github-repos/variables.tf (100%) diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 100465216..8bf7fde13 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -1,3 +1,5 @@ /ci/ @TykTechnologies/devops .github/workflows/release.yml @TykTechnologies/devops .github/workflows/sync-automation.yml @TykTechnologies/devops +.github/workflows/pac.yml @TykTechnologies/devops +/repo-policy/ @TykTechnologies/devops \ No newline at end of file diff --git a/.github/workflows/pac.yml b/.github/workflows/pac.yml index 328a23cd7..e0dfc4986 100644 --- a/.github/workflows/pac.yml +++ b/.github/workflows/pac.yml @@ -3,10 +3,10 @@ name: Policy as Code on: pull_request: paths: - - ci/repo-policy/** + - repo-policy/** env: - TERRAFORM_DIR: "./ci/repo-policy" + TERRAFORM_DIR: 
"./repo-policy" jobs: terraform: diff --git a/ci/repo-policy/main.tf b/repo-policy/main.tf similarity index 98% rename from ci/repo-policy/main.tf rename to repo-policy/main.tf index 9fe33adbc..6de0d8a40 100644 --- a/ci/repo-policy/main.tf +++ b/repo-policy/main.tf @@ -38,7 +38,7 @@ module "tyk-pump" { convos = "false", required_tests = ["1.15","Go 1.16 tests"]}, { branch = "release-1.7", - reviewers = "2", + reviewers = "0", convos = "false", required_tests = ["1.15","Go 1.16 tests"]}, ] diff --git a/ci/repo-policy/modules/github-repos/repo.tf b/repo-policy/modules/github-repos/repo.tf similarity index 100% rename from ci/repo-policy/modules/github-repos/repo.tf rename to repo-policy/modules/github-repos/repo.tf diff --git a/ci/repo-policy/modules/github-repos/variables.tf b/repo-policy/modules/github-repos/variables.tf similarity index 100% rename from ci/repo-policy/modules/github-repos/variables.tf rename to repo-policy/modules/github-repos/variables.tf From 416a6eb070d1176345bfbf8744ffd11462aae9ff Mon Sep 17 00:00:00 2001 From: Burak Sekili Date: Wed, 15 Mar 2023 22:16:32 +0300 Subject: [PATCH 053/102] Update environment variables for Prometheus (#564) * Update environment variables for Prometheus There is a typo in the environment variable of Prometheus Pump. The proposed change updates this typo. * Update README.md --------- Co-authored-by: Tomas Buchaillot --- README.md | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index fc432e57e..a2cd19f59 100644 --- a/README.md +++ b/README.md @@ -202,8 +202,10 @@ Take into account that you can also set `log_level` field into the `uptime_pump_ In `uptime_pump_config` you can configure a mongo uptime pump. By default, the uptime pump is going to be `mongo` type, so it's not necessary to specify it here. The minimum required configurations for uptime pumps are: -`collection_name` - That determines the uptime collection name in mongo. By default, `tyk_uptime_analytics`. -`mongo_url` - The uptime pump mongo connection url. It is usually something like "mongodb://username:password@{hostname:port},{hostname:port}/{db_name}". + +- `collection_name` - That determines the uptime collection name in mongo. By default, `tyk_uptime_analytics`. + +- `mongo_url` - The uptime pump mongo connection url. It is usually something like "mongodb://username:password@{hostname:port},{hostname:port}/{db_name}". ###### JSON / Conf File ``` @@ -648,7 +650,7 @@ TYK_PMP_PUMPS_PROMETHEUS_TYPE=prometheus TYK_PMP_PUMPS_PROMETHEUS_META_ADDR=localhost:9090 TYK_PMP_PUMPS_PROMETHEUS_META_PATH=/metrics TYK_PMP_PUMPS_PROMETHEUS_META_CUSTOMMETRICS='[{"name":"tyk_http_requests_total","description":"Total of API requests","metric_type":"counter","labels":["response_code","api_name"]}]' -TYK_PMP_PUMPS_PROMETHEUS_META_DISABLED_METRICS=[] +TYK_PMP_PUMPS_PROMETHEUS_META_DISABLEDMETRICS=[] ``` ## DogStatsD @@ -1353,4 +1355,4 @@ You can run Tyk Pump in demo mode, which will generate fake analytics data and s - `--demo-days=` - Sets the number of days of demo data to generate. Defaults to 30. - `--demo-records-per-hour=` - Sets the number of records to generate per hour. The default value is a random number between 300 and 500. - `--demo-track-path` - Enables tracking of the request path in the demo data. Defaults to false (disabled). Note that setting `track_all_paths` to `true` in your Pump configuration will override this option. 
-- `--demo-future-data` - By default, the demo data is generated for the past X days (configured in `demo-days` flag). This option will generate data for the next X days. Defaults to false (disabled). \ No newline at end of file +- `--demo-future-data` - By default, the demo data is generated for the past X days (configured in `demo-days` flag). This option will generate data for the next X days. Defaults to false (disabled). From ca22ae4bb17567b5a80b756b877f5ec267cb285b Mon Sep 17 00:00:00 2001 From: Tomas Buchaillot Date: Thu, 16 Mar 2023 17:17:31 +0100 Subject: [PATCH 054/102] TT-8313 Hybrid pump refactor (#588) * initial commit * fix some stuff * adding tests + linters * linting * fix test init * adding more tests * fix test with new err * dropping uber/atomic dependency * simplifying TestWriteLicenseExpire * adding test case for env vars * discarding env change * TestWriteLicenseExpire * whoops go downgrade * adding retry mechanism * adding ping to test * cleaning connect and login logic + adding more tests --- go.mod | 3 +- go.sum | 165 +-------- instrumentation_helpers.go | 4 - pumps/hybrid.go | 384 +++++++++++++------- pumps/hybrid_test.go | 720 +++++++++++++++++++++++++++++++++++++ 5 files changed, 989 insertions(+), 287 deletions(-) create mode 100644 pumps/hybrid_test.go diff --git a/go.mod b/go.mod index 17189da50..ab6976d3d 100644 --- a/go.mod +++ b/go.mod @@ -5,15 +5,16 @@ go 1.16 require ( github.com/DataDog/datadog-go v4.7.0+incompatible github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d // indirect + github.com/TykTechnologies/gorpc v0.0.0-20210624160652-fe65bda0ccb9 github.com/TykTechnologies/graphql-go-tools v1.6.2-0.20220811124354-8d1f142966f8 github.com/TykTechnologies/murmur3 v0.0.0-20230310161213-aad17efd5632 - github.com/TykTechnologies/tyk v0.0.0-20200207055804-cf1d1ad81206 github.com/aws/aws-sdk-go-v2 v1.16.14 github.com/aws/aws-sdk-go-v2/config v1.9.0 github.com/aws/aws-sdk-go-v2/service/timestreamwrite v1.9.0 github.com/beeker1121/goque v0.0.0-20170321141813-4044bc29b280 // indirect github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 // indirect github.com/buger/jsonparser v1.1.1 + github.com/cenkalti/backoff/v4 v4.0.2 github.com/fatih/structs v1.1.0 github.com/go-ole/go-ole v1.2.4 // indirect github.com/go-redis/redis/v8 v8.3.1 diff --git a/go.sum b/go.sum index 0d3823e56..7da9140e8 100644 --- a/go.sum +++ b/go.sum @@ -26,8 +26,6 @@ github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym github.com/DATA-DOG/go-sqlmock v1.3.3/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= github.com/DataDog/datadog-go v4.7.0+incompatible h1:setZNZoivEjeG87iK0abKZ9XHwHV6z63eAHhwmSzFes= github.com/DataDog/datadog-go v4.7.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= -github.com/Jeffail/gabs v1.4.0/go.mod h1:6xMvQMK4k33lb7GUUpaAPh6nKMmemQeg5d4gn7/bOXc= -github.com/Jeffail/tunny v0.0.0-20171107125207-452a8e97d6a3/go.mod h1:BX3q3G70XX0UmIkDWfDHoDRquDS1xFJA5VTbMf+14wM= github.com/Masterminds/goutils v1.1.0 h1:zukEsf/1JZwCMgHiK3GZftabmxiCw4apj3a28RPBiVg= github.com/Masterminds/goutils v1.1.0/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww= @@ -38,31 +36,18 @@ github.com/Microsoft/go-winio v0.5.2 h1:a9IhgEQBCUEk6QCdml9CiJGhAws+YwffDHEMp1VM github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY= github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod 
h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= github.com/Shopify/sarama v1.29.1/go.mod h1:mdtqvCSg8JOxk8PmpTNGyo6wzd4BMm4QXSfDnTXmgkE= github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d h1:G0m3OIz70MZUWq3EgK3CesDbo8upS2Vm9/P3FtgI+Jk= github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= -github.com/TykTechnologies/again v0.0.0-20190805133618-6ad301e7eaed/go.mod h1:OUrgdjjCoYX2GZY9Vathb4ExCO9WuPtU1piuOpNw19Q= -github.com/TykTechnologies/circuitbreaker v2.2.2+incompatible/go.mod h1:f2+J36wN08/zLudMnO+QaqaBhTdQuIqemtaeEQbhMEM= -github.com/TykTechnologies/drl v0.0.0-20190905191955-cc541aa8e3e1/go.mod h1:dLW6S3KuurRuyluxy33i57uYuTB1s/u+L8mCT0fqb98= -github.com/TykTechnologies/goautosocket v0.0.0-20190430121222-97bfa5e7e481/go.mod h1:CtF8OunV123VfKa8Z9kKcIPHgcd67hSAwFMLlS7FvS4= -github.com/TykTechnologies/gojsonschema v0.0.0-20170222154038-dcb3e4bb7990 h1:CJRTgg13M3vJG9S7k7kpnvDRMGMywm5OsN6eUE8VwJE= -github.com/TykTechnologies/gojsonschema v0.0.0-20170222154038-dcb3e4bb7990/go.mod h1:SQT0NBrY4/pMikBgwFIrWCjcHBxg015Y8is0kAnMtug= github.com/TykTechnologies/gorm v1.20.7-0.20210910090358-06148e82dc85 h1:16hcEoY9Av84ykdGGAXdVZo7kY5r00247jHlxcnLP60= github.com/TykTechnologies/gorm v1.20.7-0.20210910090358-06148e82dc85/go.mod h1:hz0d/E0QBTYarOnYtdcNnBWN/NYxVMP7nZNDT6E/fFM= -github.com/TykTechnologies/gorpc v0.0.0-20190515174534-b9c10befc5f4 h1:hTjM5Uubg3w9VjNc8WjrDrLiGX14Ih8/ItyXEn2tNUs= -github.com/TykTechnologies/gorpc v0.0.0-20190515174534-b9c10befc5f4/go.mod h1:vqhQRhIHefD4jdFo55j+m0vD5NMjx2liq/ubnshQpaY= -github.com/TykTechnologies/goverify v0.0.0-20160822133757-7ccc57452ade/go.mod h1:mkS8jKcz8otdfEXhJs1QQ/DKoIY1NFFsRPKS0RwQENI= +github.com/TykTechnologies/gorpc v0.0.0-20210624160652-fe65bda0ccb9 h1:fbxHiuw/244CQ4TEirzgL/CIMXDUx2szZn8cuuMlCy0= +github.com/TykTechnologies/gorpc v0.0.0-20210624160652-fe65bda0ccb9/go.mod h1:v6v7Mlj08+EmEcXOfpuTxGt2qYU9yhqqtv4QF9Wf50E= github.com/TykTechnologies/graphql-go-tools v1.6.2-0.20220811124354-8d1f142966f8 h1:CA59ssz4bwLkd7pzkDpZOnlMzzraq/TEbJ6xvQpSPCc= github.com/TykTechnologies/graphql-go-tools v1.6.2-0.20220811124354-8d1f142966f8/go.mod h1:Cxpyt1EQHf8bRqAfZStqbgHif8YWngLga7tpnHRSRwU= -github.com/TykTechnologies/leakybucket v0.0.0-20170301023702-71692c943e3c/go.mod h1:GnHUbsQx+ysI10osPhUdTmsxcE7ef64cVp38Fdyd7e0= -github.com/TykTechnologies/murmur3 v0.0.0-20180602122059-1915e687e465/go.mod h1:sqH/SPFr11m9cahie7ulBuBX9TOhfBX1sp+qf9jh3Vg= github.com/TykTechnologies/murmur3 v0.0.0-20230310161213-aad17efd5632 h1:T5NWziFusj8au5nxAqMMh/bZyX9CAyYnBkaMSsfH6BA= github.com/TykTechnologies/murmur3 v0.0.0-20230310161213-aad17efd5632/go.mod h1:UsPYgOFBpNzDXLEti7MKOwHLpVSqdzuNGkVFPspQmnQ= -github.com/TykTechnologies/openid2go v0.0.0-20200122120050-1b642583380a/go.mod h1:rGlqNE4CvxZIeiHp0mgrw+/jdGSjJzkZ0n78hhHMdfM= -github.com/TykTechnologies/tyk v0.0.0-20200207055804-cf1d1ad81206 h1:dYSY3KkcFkITF+q8FWpPS87ggv1Rex1Vmmu9q4t4Pwc= -github.com/TykTechnologies/tyk v0.0.0-20200207055804-cf1d1ad81206/go.mod h1:+WNQ0t1t4ZCh0Z+mDnnyNAQZc5hVJ490iqLOWKPLIMI= github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM= github.com/agnivade/levenshtein v1.1.0 
h1:n6qGwyHG61v3ABce1rPVZklEYRT8NFpCMrpZdBUbYGM= github.com/agnivade/levenshtein v1.1.0/go.mod h1:veldBMzWxcCG2ZvUTKD2kJNRdCk5hVbJomOvKkmgYbo= @@ -77,10 +62,7 @@ github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk5 github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= github.com/apache/arrow/go/arrow v0.0.0-20191024131854-af6fa24be0db/go.mod h1:VTxUBvSJ3s3eHAg65PNgrsn5BtqCRPdmyXh6rAfdxN0= github.com/arbovm/levenshtein v0.0.0-20160628152529-48b4e1c0c4d0/go.mod h1:t2tdKJDJF9BV14lnkjHmOQgcvEKgtqs5a1N3LNdJhGE= -github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= -github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= -github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d h1:Byv0BzEl3/e6D5CLfI0j/7hiIEtvGVFPCZ7Ei2oq8iQ= github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= github.com/aws/aws-sdk-go v1.29.11/go.mod h1:1KvfttTE3SPKMpo8g2c6jL3ZKfXtFvKscTgahTma5Xg= @@ -123,20 +105,18 @@ github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24 github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 h1:DDGfHa7BWjL4YnC6+E63dPcxHo2sUxDIu8g3QgEJdRY= github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= github.com/bmizerany/pat v0.0.0-20170815010413-6226ea591a40/go.mod h1:8rLXio+WjiTceGBHIoTvn60HIbs7Hm7bcHjyrSqYB9c= github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps= -github.com/bshuster-repo/logrus-logstash-hook v0.4.1/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk= -github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= github.com/buger/jsonparser v1.1.1 h1:2PnMjfWD7wBILjqQbt530v576A/cAbQvEW9gGIpYMUs= github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0= github.com/c-bata/go-prompt v0.2.2/go.mod h1:VzqtzE2ksDBcdln8G7mk2RX9QyGjH+OVqOCSiVIqS34= -github.com/cenk/backoff v2.2.1+incompatible/go.mod h1:7FtoeaSnHoZnmZzz47cM35Y9nSW7tNyaidugnHTaFDE= +github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= +github.com/cenkalti/backoff/v4 v4.0.2 h1:JIufpQLbh4DkbQoii76ItQIUFzevQSqOLZca4eamEDs= +github.com/cenkalti/backoff/v4 v4.0.2/go.mod h1:eEew/i+1Q6OrCDZh3WiXYv3+nJwBASZ8Bog/87DQnVg= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/certifi/gocertifi v0.0.0-20190905060710-a5e0173ced67/go.mod h1:GJKEexRPVJrBSOjoqN5VNOIKJ5Q3RViH6eu3puDRwx4= github.com/cespare/xxhash v1.1.0 
h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= @@ -147,13 +127,10 @@ github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWR github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/cilium/ebpf v0.7.0/go.mod h1:/oI2+1shJiTGAMgl6/RgJr36Eo1jzrRcAWbcXO2usCA= -github.com/clbanning/mxj v1.8.4 h1:HuhwZtbyvyOw+3Z1AowPkU87JkJUSv751ELWaiTpj8I= -github.com/clbanning/mxj v1.8.4/go.mod h1:BVjHeAH+rl9rs6f+QIpeRl0tfu10SXn1pUSa5PVGJng= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I= github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= -github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= github.com/containerd/console v1.0.3/go.mod h1:7LqA/THxQ86k76b8c/EMSiaJ3h1eZkMkXar0TQ1gf3U= github.com/containerd/continuity v0.3.0/go.mod h1:wJEAIwKOm/pBZuBd0JmeTvnLquTB1Ag8espWhkykbPM= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= @@ -182,42 +159,28 @@ github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cu github.com/dgryski/trifles v0.0.0-20200323201526-dd97f9abfb48/go.mod h1:if7Fbed8SFyPtHLHbg49SI7NAdJiC5WIA09pe59rfAA= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= -github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= github.com/eapache/go-resiliency v1.2.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 h1:YEetp8/yCZMuEPMUDHG0CW/brkkEp8mzqk2+ODEitlw= github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= github.com/eclipse/paho.mqtt.golang v1.2.0 h1:1F8mhG9+aO5/xpdtFkW4SxOJB67ukuDC3t2y2qayIX0= github.com/eclipse/paho.mqtt.golang v1.2.0/go.mod h1:H9keYFcgq3Qr5OUJm/JZI/i6U7joQ8SYLhZwfeOo6Ts= -github.com/emanoelxavier/openid2go v0.0.0-20190718021401-6345b638bfc9/go.mod h1:hahZBazACLtwLVO5XoLT8pPXTGfRt5bK6XddHEy/XUk= -github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/evalphobia/logrus_sentry v0.8.2/go.mod h1:pKcp+vriitUqu9KiWj/VRFbRfFNUwz95/UkgG8a6MNc= github.com/evanphx/json-patch/v5 v5.1.0 
h1:B0aXl1o/1cP8NbviYiBMkcHBtUjIJ1/Ccg6b+SwCLQg= github.com/evanphx/json-patch/v5 v5.1.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4= -github.com/facebookgo/atomicfile v0.0.0-20151019160806-2de1f203e7d5/go.mod h1:JpoxHjuQauoxiFMl1ie8Xc/7TfLuMZ5eOCONd1sUBHg= -github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a/go.mod h1:7Ga40egUymuWXxAe151lTNnCv97MddSOVsjpPPkityA= -github.com/facebookgo/pidfile v0.0.0-20150612191647-f242e2999868/go.mod h1:3Hzo46xzfVpIdv4lJw7YBp9fUJ7HpUgbjH1fFDgy4qM= -github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/structs v1.1.0 h1:Q7juDM0QtcnhCpeyLGQKyg4TOIghuNXrkL32pHAUMxo= github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= -github.com/franela/goblin v0.0.0-20181003173013-ead4ad1d2727 h1:eouy4stZdUKn7n98c1+rdUTxWMg+jvhP+oHt0K8fiug= -github.com/franela/goblin v0.0.0-20181003173013-ead4ad1d2727/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= -github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8 h1:a9ENSRDFBUPkJ5lCgVZh26+ZbGyoVJG7yb5SSzF5H54= -github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20= github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= -github.com/gemnasium/logrus-graylog-hook v2.0.7+incompatible/go.mod h1:85jwR23cg8rapnMQj96B9pX4XzmkXMNAPVfnnUNP8Dk= github.com/getkin/kin-openapi v0.61.0/go.mod h1:7Yn5whZr5kJi6t+kShccXS8ae1APpYTW6yheSwk8Yi4= -github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE= github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= @@ -231,7 +194,6 @@ github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2 github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= -github.com/go-ldap/ldap v3.0.2+incompatible/go.mod h1:qfd9rJvER9Q0/D/Sqn1DfHRoBp40uXYvFoEVrNEPqRc= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= @@ -246,15 +208,12 @@ github.com/go-playground/universal-translator v0.17.0 h1:icxd5fm+REJzpZx7ZfpaD87 github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA= github.com/go-playground/validator/v10 v10.2.0 h1:KgJ0snyC2R9VXYN2rneOtQcw5aHQB1Vv0sFl1UcHBOY= github.com/go-playground/validator/v10 v10.2.0/go.mod h1:uOYAAleCW8F/7oMFd6aG0GOhaH6EGOAJShg8Id5JGkI= 
-github.com/go-redis/redis v6.15.6+incompatible h1:H9evprGPLI8+ci7fxQx6WNZHJSb7be8FqJQRhdQZ5Sg= -github.com/go-redis/redis v6.15.6+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA= github.com/go-redis/redis/v8 v8.3.1 h1:jEPCgHQopfNaABun3NVN9pv2K7RjstY/7UJD6UEKFEY= github.com/go-redis/redis/v8 v8.3.1/go.mod h1:a2xkpBM7NJUN5V5kiF46X5Ltx4WeXJ9757X/ScKUBdE= github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-sql-driver/mysql v1.5.0 h1:ozyZYNQW3x3HtqT1jira07DN2PArx2v7/mN66gGcHOs= github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/go-test/deep v1.0.2-0.20181118220953-042da051cf31/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= github.com/go-test/deep v1.0.4 h1:u2CU3YKy9I2pmu9pX0eq50wCgjfGIt539SqR7FbHiho= github.com/go-test/deep v1.0.4/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= github.com/go-zookeeper/zk v1.0.2/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw= @@ -272,9 +231,7 @@ github.com/godbus/dbus/v5 v5.0.6/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5x github.com/gofrs/uuid v3.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gofrs/uuid v3.3.0+incompatible h1:8K4tyRfvU1CYPgJsveYFQMhpFd/wXNM7iK6rR7UHz84= github.com/gofrs/uuid v3.3.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= -github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= @@ -339,10 +296,7 @@ github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+ github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/context v0.0.0-20160226214623-1ea25387ff6f/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= -github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= github.com/gorilla/mux v1.6.1/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4= @@ -351,51 +305,19 @@ github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/ad github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gotestyourself/gotestyourself v1.4.0/go.mod h1:zZKM6oeNM8k+FRljX1mnzVYeS8wiGgQyvST1/GafPbY= -github.com/hashicorp/consul/api v1.3.0/go.mod 
h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE= -github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= -github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= -github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= -github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd/go.mod h1:9bjs9uLqI8l75knNv3lV1kA55veR+WUPSiKIWcQHudI= -github.com/hashicorp/go-hclog v0.8.0/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= -github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= -github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= -github.com/hashicorp/go-msgpack v0.5.4/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= -github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= -github.com/hashicorp/go-plugin v1.0.1/go.mod h1:++UyYGoz3o5w9ZzAdZxtQKrWWP+iqPBn3cQptSMzBuY= -github.com/hashicorp/go-retryablehttp v0.5.4/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= -github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= -github.com/hashicorp/go-rootcerts v1.0.1/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= -github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= -github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A= -github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= -github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-version v1.1.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= -github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= -github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= -github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= -github.com/hashicorp/vault/api v1.0.4/go.mod h1:gDcqh3WGcR1cpF5AJz/B1UFheUEneMoIospckxBxk6Q= -github.com/hashicorp/vault/sdk v0.1.13/go.mod h1:B+hVj7TpuQY1Y/GPbCpffmgd+tSEwvhkWnjtSYCaS2M= -github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= -github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= github.com/helloeave/json v1.15.3 h1:roUxUEGhsSvhuhi80c4qmLiW633d5uf0mkzUGzBMfX8= github.com/helloeave/json v1.15.3/go.mod 
h1:uTHhuUsgnrpm9cc7Gi3tfIUwgf1dq/7+uLfpUFLBFEQ= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/huandu/xstrings v1.2.0/go.mod h1:DvyZB1rfVYsBIigL8HwpZgxHwXozlTgGqn63UyNX5k4= github.com/huandu/xstrings v1.2.1 h1:v6IdmkCnDhJG/S0ivr58PeIfg+tyhqQYy4YsCsQ0Pdc= github.com/huandu/xstrings v1.2.1/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= github.com/iancoleman/strcase v0.0.0-20191112232945-16388991a334/go.mod h1:SK73tn/9oHe+/Y0h39VT4UCxmurVJkR5NA7kMEAOgSE= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/imdario/mergo v0.3.7/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.8 h1:CGgOkSJeqMRmt0D9XLWExdT4m4F1vd3FV3VPt+0VxkQ= github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= @@ -505,7 +427,6 @@ github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfV github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= -github.com/justinas/alice v0.0.0-20171023064455-03f45bd4b7da/go.mod h1:oLH0CmIaxCGXD67VKGR5AacGXZSMznlmeqM8RzPrcY8= github.com/jwilder/encoding v0.0.0-20170811194829-b4e1701a28ef/go.mod h1:Ct9fl0F6iIOGgxJ5npU/IUOhOhqlVrGjyIZc8/MagT0= github.com/kelseyhightower/envconfig v1.4.0 h1:Im6hONhd3pLkfDFsbRgu68RDNkGF1r3dvMUtDTo2cv8= github.com/kelseyhightower/envconfig v1.4.0/go.mod h1:cccZRl6mQpaq41TPp5QxidR+Sa3axMbJDNb//FQX6Gg= @@ -550,13 +471,8 @@ github.com/logrusorgru/aurora v0.0.0-20200102142835-e9ef32dff381 h1:bqDmpDG49ZRn github.com/logrusorgru/aurora v0.0.0-20200102142835-e9ef32dff381/go.mod h1:7rIyQOR62GCctdiQpZ/zOJlFyk6y+94wXzv6RNZgaR4= github.com/logzio/logzio-go v0.0.0-20200316143903-ac8fc0e2910e h1:j4tDETg2tUX0AZq2CClOpW8rBf9rPEBNjiXgQoso4Z8= github.com/logzio/logzio-go v0.0.0-20200316143903-ac8fc0e2910e/go.mod h1:OBprCVuGvtyYcaCmYjE32bF12d5AAHeXS5xI0QbIXMI= -github.com/lonelycode/go-uuid v0.0.0-20141202165402-ed3ca8a15a93 h1:WcaWCUFwpiRpIjcM7u27kuy2p5zPLC1KRxB3/bJ7XsI= -github.com/lonelycode/go-uuid v0.0.0-20141202165402-ed3ca8a15a93/go.mod h1:ZjpSGzPgHSthaPv5L+rBEMIwrr5Uto0pKPwHmCHRkUM= github.com/lonelycode/mgohacks v0.0.0-20150820024025-f9c291f7e57e h1:VvfhTFKhOTHD0xtCOPpzWxw03TUdtkRVWjRL3Lcnhuk= github.com/lonelycode/mgohacks v0.0.0-20150820024025-f9c291f7e57e/go.mod h1:xVJqf7VdD7Xfgmi9XY63aOYtrYClQOtwHX2FEOgtCKM= -github.com/lonelycode/osin v0.0.0-20160423095202-da239c9dacb6 h1:G2UYdR7/shMh7NMp2ETozj6zlqU5M8b0VqRbdxTXciU= -github.com/lonelycode/osin v0.0.0-20160423095202-da239c9dacb6/go.mod h1:x4kc0i0iLfRkNWchVMcLjy+Txcz3XqNbr8iRUGFduLQ= -github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= @@ -573,7 +489,6 @@ github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope github.com/mattn/go-colorable 
v0.1.7/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.8 h1:c1ghPdyEDarC70ftn0y+A/Ee++9zz8ljHG1b13eJ0s8= github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= -github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= @@ -588,24 +503,11 @@ github.com/mattn/go-sqlite3 v1.14.3/go.mod h1:WVKg1VTActs4Qso6iwGbiFih2UIHo0ENGw github.com/mattn/go-tty v0.0.0-20180907095812-13ff1204f104/go.mod h1:XPvLUNfbS4fJH25nqRHfWLMa1ONC8Amw+mIA639KxkE= github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/mavricknz/asn1-ber v0.0.0-20151103223136-b9df1c2f4213/go.mod h1:v/ZufymxjcI3pnNmQIUQQKxnHLTblrjZ4MNLs5DrZ1o= -github.com/mavricknz/ldap v0.0.0-20160227184754-f5a958005e43/go.mod h1:z76yvVwVulPd8FyifHe8UEHeud6XXaSan0ibi2sDy6w= -github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b h1:j7+1HpAFS1zy5+Q4qx1fWh90gTKwiN4QCGoY9TWyyO4= -github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= -github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/minio/highwayhash v1.0.1 h1:dZ6IIu8Z14VlC0VpfKofAhCy74wu/Qb5gcn52yWoz/0= github.com/minio/highwayhash v1.0.1/go.mod h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLTk+kldvVxY= -github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= github.com/mitchellh/copystructure v1.0.0 h1:Laisrj+bAB6b/yJwB5Bt3ITZhGJdqmxquMKeZ+mmkFQ= github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= -github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= -github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= -github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= -github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= -github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= -github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v0.0.0-20180203102830-a4e142e9c047/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.2.2 h1:dxe5oCinTXiTIcfgmZecdCzPmAJKd46KsCWc35r0TV4= @@ -638,10 +540,8 @@ github.com/nats-io/nkeys v0.3.0 h1:cgM5tL53EvYRU+2YLXIK0G2mJtK12Ft9oeooSZMA2G8= github.com/nats-io/nkeys v0.3.0/go.mod h1:gvUNGjVcM2IPr5rCsRsC6Wb3Hr2CQAm08dsxtV6A5y4= github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw= github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= -github.com/newrelic/go-agent 
v2.13.0+incompatible/go.mod h1:a8Fv1b/fYhFSReoTU6HDkTYIMZeSVNffmoS726Y0LzQ= github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= -github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= github.com/olivere/elastic v6.2.31+incompatible h1:zwJIIsgfiDBuDS3sb6MCbm/e03BPEJoGZvqevZXM254= github.com/olivere/elastic v6.2.31+incompatible/go.mod h1:J+q1zQJTgAz9woqsbVRqGeB5G1iqDKVBWLNSYW8yfJ8= github.com/olivere/elastic/v7 v7.0.12/go.mod h1:14rWX28Pnh3qCKYRVnSGXWLf9MbLonYS/4FDCY3LAPo= @@ -667,34 +567,24 @@ github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFSt github.com/opentracing/opentracing-go v1.0.3-0.20180606204148-bd9c31933947/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= -github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= github.com/ory/dockertest v3.3.5+incompatible/go.mod h1:1vX4m9wsvi00u5bseYwXaSnhNrne+V0E6LAcBILJdPs= github.com/oschwald/maxminddb-golang v1.5.0 h1:rmyoIV6z2/s9TCJedUuDiKht2RN12LWJ1L7iRGtWY64= github.com/oschwald/maxminddb-golang v1.5.0/go.mod h1:3jhIUymTJ5VREKyIhWm66LJiQt04F0UCDdodShpjWsY= -github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= -github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/paulbellamy/ratecounter v0.2.0/go.mod h1:Hfx1hDpSGoqxkVVpBi/IlYD7kChlfo5C6hzIHwPqfFE= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/pelletier/go-toml v1.6.0/go.mod h1:5N711Q9dKgbdkxHL+MEfF31hpT7l0S0s/t2kKREewys= -github.com/peterbourgon/g2s v0.0.0-20170223122336-d4e7ad98afea/go.mod h1:1VcHEd3ro4QMoHfiNl/j7Jkln9+KQuorp0PItHMJYNg= github.com/peterh/liner v1.0.1-0.20180619022028-8c1271fcf47f/go.mod h1:xIteQHvHuaLYG9IFj6mSxM0fCKrs34IrEQUhOYuGPHc= github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU= -github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pierrec/lz4 v2.6.0+incompatible h1:Ix9yFKn1nSPBLFl/yZknTp8TU5G4Ps0JDmguYK6iH1A= github.com/pierrec/lz4 v2.6.0+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= -github.com/pires/go-proxyproto v0.0.0-20190615163442-2c19fd512994/go.mod h1:6/gX3+E/IYGa0wMORlSMla999awQFdbaeQCHjSMKIzY= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA= github.com/pkg/term v0.0.0-20180730021639-bffc007b7fd5/go.mod h1:eCbImbZ95eXtAUIbLAuAVnBnwf83mjf6QIVH8SHYwqQ= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/pmylund/go-cache v2.1.0+incompatible 
h1:n+7K51jLz6a3sCvff3BppuCAkixuDHuJ/C57Vw/XjTE= -github.com/pmylund/go-cache v2.1.0+incompatible/go.mod h1:hmz95dGvINpbRZGsqPcd7B5xXY5+EKb5PpGhQY3NTHk= -github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= @@ -721,28 +611,20 @@ github.com/qri-io/jsonschema v0.2.1 h1:NNFoKms+kut6ABPf6xiKNM5214jzxAhDBrPHCJ97W github.com/qri-io/jsonschema v0.2.1/go.mod h1:g7DPkiOsK1xv6T/Ao5scXRkd+yTFygcANPBaaqW+VrI= github.com/quipo/statsd v0.0.0-20160923160612-75b7afedf0d2 h1:IvjiJDGCF8L8TjKHQKmLAjWztpKDCAaRifiRMdGzWk0= github.com/quipo/statsd v0.0.0-20160923160612-75b7afedf0d2/go.mod h1:1COUodqytMiv/GkAVUGhc0CA6e8xak5U4551TY7iEe0= -github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/resurfaceio/logger-go/v3 v3.2.1 h1:tTPvGp+FpH35aaT/nnhP4n/Rh/f1vHe64WoXTDgv0fY= github.com/resurfaceio/logger-go/v3 v3.2.1/go.mod h1:YPcxFUcloW37F1WQA9MUcGWu2JzlvBxlCfFF5+T3GO8= github.com/retailnext/hllpp v1.0.1-0.20180308014038-101a6d2f8b52/go.mod h1:RDpi1RftBQPUCDRw6SmxeaREsAaRKnOclghuzp/WRzc= github.com/robertkowalski/graylog-golang v0.0.0-20151121031040-e5295cfa2827 h1:D2Xs0bSuqpKnUOOlK4yu6lloeOs4+oD+pjbOfsxgWu0= github.com/robertkowalski/graylog-golang v0.0.0-20151121031040-e5295cfa2827/go.mod h1:jONcYFk83vUF1lv0aERAwaFtDM9wUW4BMGmlnpLJyZY= -github.com/robertkrimen/otto v0.0.0-20180617131154-15f95af6e78d/go.mod h1:xvqspoSXJTIpemEonrMDFq6XzwHYYgToXWj5eRX1OtY= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rs/cors v1.6.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= -github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU= github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= -github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= -github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= -github.com/satori/go.uuid v1.2.0 h1:0uYX9dsZ2yD7q2RtLRtPSdGDWzjeM3TbMJP9utgA0ww= github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= -github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/sebdah/goldie v0.0.0-20180424091453-8784dd1ab561 h1:IY+sDBJR/wRtsxq+626xJnt4Tw7/ROA9cDIR8MMhWyg= github.com/sebdah/goldie v0.0.0-20180424091453-8784dd1ab561/go.mod h1:lvjGftC8oe7XPtyrOidaMi0rp5B9+XY/ZRUynGnuaxQ= github.com/seccomp/libseccomp-golang v0.9.2-0.20210429002308-3879420cc921/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg= @@ -787,8 +669,6 @@ 
github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tL github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= -github.com/square/go-jose v2.4.1+incompatible/go.mod h1:7MxpAF/1WTVUu8Am+T5kNy+t0902CaLWM4Z745MkOa8= -github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= @@ -819,9 +699,6 @@ github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhso github.com/tidwall/sjson v1.0.4 h1:UcdIRXff12Lpnu3OLtZvnc03g4vH2suXDXhBwBqmzYg= github.com/tidwall/sjson v1.0.4/go.mod h1:bURseu1nuBkFpIES5cz6zBtjmYeOQmEESshn7VpF15Y= github.com/tinylib/msgp v1.0.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= -github.com/uber-go/atomic v1.4.0/go.mod h1:/Ct5t2lcmbJ4OSe/waGBoaVvVqtO0bmtfVNex1PFV8g= -github.com/uber/jaeger-client-go v2.19.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= -github.com/uber/jaeger-lib v2.2.0+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= github.com/ugorji/go v1.1.7 h1:/68gy2h+1mWMrwZFeD1kQialdSzAb432dtpeJ42ovdo= github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= @@ -838,21 +715,12 @@ github.com/vektah/gqlparser/v2 v2.2.0/go.mod h1:i3mQIGIrbK2PD1RrCeMTlVbkF2FJ6WkU github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE= github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU= github.com/willf/bitset v1.1.3/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= -github.com/x-cray/logrus-prefixed-formatter v0.5.2 h1:00txxvfBM9muc0jiLIEAkAcIMJzfthRT6usrui8uGmg= -github.com/x-cray/logrus-prefixed-formatter v0.5.2/go.mod h1:2duySbKsL6M18s5GU7VPsoEPHyzalCE06qoARUCeBBE= github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I= github.com/xdg/scram v1.0.3 h1:nTadYh2Fs4BK2xdldEa2g5bbaZp0/+1nJMMPtPxS/to= github.com/xdg/scram v1.0.3/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I= github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= github.com/xdg/stringprep v1.0.3 h1:cmL5Enob4W83ti/ZHuZLuKD/xqJfus4fVPwE+/BDm+4= github.com/xdg/stringprep v1.0.3/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= -github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo= -github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= -github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0= -github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= -github.com/xeipuuv/gojsonschema v0.0.0-20171025060643-212d8a0df7ac h1:4VBKAdTNqxLs00+bB+9Lnosfg6keGxPEXZ28e7hZV3A= -github.com/xeipuuv/gojsonschema 
v0.0.0-20171025060643-212d8a0df7ac/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs= -github.com/xenolf/lego v0.3.2-0.20170618175828-28ead50ff1ca/go.mod h1:fwiGnfsIjG7OHPfOvgK7Y/Qo6+2Ox0iozjNTkZICKbY= github.com/xlab/treeprint v0.0.0-20180616005107-d6fb6747feb6/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= github.com/xtgo/uuid v0.0.0-20140804021211-a0b114877d4c h1:3lbZUMbMiGUW/LMkfsEABsc5zNT9+b1CvsJx47JzJ8g= @@ -888,7 +756,6 @@ go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= go.uber.org/zap v1.18.1 h1:CSUJ2mjFszzEWt4CdKISEuChVIXGBn3lAPwkRGyVrc4= go.uber.org/zap v1.18.1/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190411191339-88737f569e3a/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= @@ -897,7 +764,6 @@ golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191227163750-53104e6ec876/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= @@ -940,9 +806,7 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= @@ -954,7 +818,6 @@ golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod 
h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191009170851-d66e71096ffb/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -986,14 +849,11 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190129075346-302c3dd5f1cc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190130150945-aca44879d564/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1010,7 +870,6 @@ golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1041,12 +900,10 @@ golang.org/x/sys v0.0.0-20220405210540-1e041c57c461 h1:kHVeDEnfKn3T238CvrUcz6KeE golang.org/x/sys v0.0.0-20220405210540-1e041c57c461/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 
h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= @@ -1124,7 +981,6 @@ google.golang.org/appengine v1.6.5 h1:tycE03LOZYQNhDpS27tcQdAzLCVMaj7QT2SXxebnpC google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= @@ -1138,14 +994,10 @@ google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvx google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20200108215221-bd8f9a0ef82f/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= -google.golang.org/grpc v1.22.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= @@ -1164,10 +1016,8 @@ google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQ google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -gopkg.in/Masterminds/sprig.v2 v2.21.0/go.mod 
h1:DtHmW+kdrJpYMY6Mk6OHFNi/8EBAnNYVRUffwRCNHgA= gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= -gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -1184,14 +1034,10 @@ gopkg.in/olivere/elastic.v5 v5.0.85 h1:GwBqEsvRIHVfCQVXDHYi9LHec2yEkc3GNKh9WB8G/ gopkg.in/olivere/elastic.v5 v5.0.85/go.mod h1:M3WNlsF+WhYn7api4D87NIflwTV/c0iVs8cqfWhK+68= gopkg.in/olivere/elastic.v6 v6.2.31 h1:qA/+hd/HGWpibGEy3d2zXBSdWx8DTLASm6/GusSuD7g= gopkg.in/olivere/elastic.v6 v6.2.31/go.mod h1:2cTT8Z+/LcArSWpCgvZqBgt3VOqXiy7v00w12Lz8bd4= -gopkg.in/sourcemap.v1 v1.0.5/go.mod h1:2RlvNNSMglmRrcvhfuzp4hQHwOtjxlbjX7UPY/GXb78= -gopkg.in/square/go-jose.v1 v1.1.2/go.mod h1:QpYS+a4WhS+DTlyQIi6Ka7MS3SuR9a055rgXNEe6EiA= -gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/vmihailenco/msgpack.v2 v2.9.1 h1:kb0VV7NuIojvRfzwslQeP3yArBqJHW9tOl4t38VS1jM= gopkg.in/vmihailenco/msgpack.v2 v2.9.1/go.mod h1:/3Dn1Npt9+MYyLpYYXjInO/5jvMLamn+AEGwNEOatn8= -gopkg.in/xmlpath.v2 v2.0.0-20150820204837-860cbeca3ebc/go.mod h1:N8UOSI6/c2yOpa/XDz3KVUiegocTziPiqNkeNTMiG1k= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= @@ -1217,7 +1063,6 @@ honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt nhooyr.io/websocket v1.8.7 h1:usjR2uOr/zjjkVMy0lW+PPohFok7PCow5sDjLgX4P4g= nhooyr.io/websocket v1.8.7/go.mod h1:B70DZP8IakI65RVQ51MsWP/8jndNma26DVA/nFSCgW0= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= -rsc.io/letsencrypt v0.0.2/go.mod h1:buyQKZ6IXrRnB7TdkHP0RyEybLx18HHyOSoTyoOLqNY= rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/instrumentation_helpers.go b/instrumentation_helpers.go index bd00d2529..2d3559d7f 100644 --- a/instrumentation_helpers.go +++ b/instrumentation_helpers.go @@ -5,8 +5,6 @@ import ( "runtime/debug" "time" - "github.com/TykTechnologies/tyk/rpc" - "github.com/gocraft/health" ) @@ -44,8 +42,6 @@ func SetupInstrumentation() { log.Info("StatsD instrumentation sink started") instrument.AddSink(statsdSink) - rpc.Instrument = instrument - MonitorApplicationInstrumentation() } diff --git a/pumps/hybrid.go b/pumps/hybrid.go index cd887af81..bcc211807 100644 --- a/pumps/hybrid.go +++ b/pumps/hybrid.go @@ -2,33 +2,30 @@ package pumps import ( "context" + "crypto/tls" "encoding/json" "errors" - "fmt" + "net" + "sync/atomic" + "time" + "github.com/TykTechnologies/gorpc" "github.com/TykTechnologies/tyk-pump/analytics" - "github.com/kelseyhightower/envconfig" 
- - "github.com/TykTechnologies/tyk/rpc" + "github.com/cenkalti/backoff/v4" + "github.com/gofrs/uuid" + "github.com/mitchellh/mapstructure" + "github.com/sirupsen/logrus" ) const hybridPrefix = "hybrid-pump" var hybridDefaultENV = PUMPS_ENV_PREFIX + "_HYBRID" + PUMPS_ENV_META_PREFIX -type GroupLoginRequest struct { - UserKey string - GroupID string -} - var ( dispatcherFuncs = map[string]interface{}{ "Login": func(clientAddr, userKey string) bool { return false }, - "LoginWithGroup": func(clientAddr string, groupData *GroupLoginRequest) bool { - return false - }, "PurgeAnalyticsData": func(data string) error { return nil }, @@ -39,18 +36,77 @@ var ( return nil }, } + DefaultRPCCallTimeout = 10 + ErrRPCLogin = errors.New("RPC login incorrect") + retryAndLog = func(fn func() error, retryMsg string, logger *logrus.Entry) error { + return backoff.RetryNotify(fn, backoff.WithMaxRetries(backoff.NewExponentialBackOff(), 3), func(err error, t time.Duration) { + if err != nil { + logger.Error("Failed to connect to Tyk MDCB, retrying") + } + }) + } ) // HybridPump allows to send analytics to MDCB over RPC type HybridPump struct { - aggregated bool - trackAllPaths bool - storeAnalyticsPerMinute bool - aggregationTime int - enableAggregateSelfHealing bool - ignoreTagPrefixList []string CommonPumpConfig - rpcConfig rpc.Config + + clientSingleton *gorpc.Client + dispatcher *gorpc.Dispatcher + clientIsConnected atomic.Value + + funcClientSingleton *gorpc.DispatcherClient + + hybridConfig *HybridPumpConf +} + +// @PumpConf Hybrid +type HybridPumpConf struct { + EnvPrefix string `mapstructure:"meta_env_prefix"` + + // MDCB URL connection string + ConnectionString string `mapstructure:"connection_string"` + // Your organisation ID to connect to the MDCB installation. + RPCKey string `mapstructure:"rpc_key"` + // This the API key of a user used to authenticate and authorise the Hybrid Pump access through MDCB. + // The user should be a standard Dashboard user with minimal privileges so as to reduce any risk if the user is compromised. + APIKey string `mapstructure:"api_key"` + + // Specifies prefixes of tags that should be ignored if `aggregated` is set to `true`. + IgnoreTagPrefixList []string `json:"ignore_tag_prefix_list" mapstructure:"ignore_tag_prefix_list"` + + // Hybrid pump RPC calls timeout in seconds. Defaults to `10` seconds. + CallTimeout int `mapstructure:"call_timeout"` + // Hybrid pump connection pool size + RPCPoolSize int `mapstructure:"rpc_pool_size"` + // aggregationTime is to specify the frequency of the aggregation in minutes if `aggregated` is set to `true`. + aggregationTime int + + // Send aggregated analytics data to Tyk MDCB + Aggregated bool `mapstructure:"aggregated"` + // Specifies if it should store aggregated data for all the endpoints if `aggregated` is set to `true`. By default, `false` + // which means that only store aggregated data for `tracked endpoints`. + TrackAllPaths bool `mapstructure:"track_all_paths"` + // Determines if the aggregations should be made per minute (true) or per hour (false) if `aggregated` is set to `true`. 
+ StoreAnalyticsPerMinute bool `json:"store_analytics_per_minute" mapstructure:"store_analytics_per_minute"` + + // Use SSL to connect to Tyk MDCB + UseSSL bool `mapstructure:"use_ssl"` + // Skip SSL verification + SSLInsecureSkipVerify bool `mapstructure:"ssl_insecure_skip_verify"` +} + +func (conf *HybridPumpConf) CheckDefaults() { + if conf.CallTimeout == 0 { + conf.CallTimeout = DefaultRPCCallTimeout + } + + if conf.Aggregated { + conf.aggregationTime = 60 + if conf.StoreAnalyticsPerMinute { + conf.aggregationTime = 1 + } + } } func (p *HybridPump) GetName() string { @@ -62,121 +118,136 @@ func (p *HybridPump) New() Pump { } func (p *HybridPump) Init(config interface{}) error { - p.log = log.WithField("prefix", hybridPrefix) - meta := config.(map[string]interface{}) - // read configuration - rpcConfig := rpc.Config{} - if useSSL, ok := meta["use_ssl"]; ok { - rpcConfig.UseSSL = useSSL.(bool) - } - if sslInsecure, ok := meta["ssl_insecure_skip_verify"]; ok { - rpcConfig.SSLInsecureSkipVerify = sslInsecure.(bool) - } - if connStr, ok := meta["connection_string"]; ok { - rpcConfig.ConnectionString = connStr.(string) - } - if rpcKey, ok := meta["rpc_key"]; ok { - rpcConfig.RPCKey = rpcKey.(string) + // Read configuration file + p.hybridConfig = &HybridPumpConf{} + err := mapstructure.Decode(config, &p.hybridConfig) + if err != nil { + p.log.Error("Failed to decode configuration: ", err) + return err } - if apiKey, ok := meta["api_key"]; ok { - rpcConfig.APIKey = apiKey.(string) + + processPumpEnvVars(p, p.log, p.hybridConfig, hybridDefaultENV) + + if p.hybridConfig.ConnectionString == "" { + p.log.Error("Failed to decode configuration - no connection_string") + return errors.New("empty connection_string") } - if groupID, ok := meta["group_id"]; ok { - rpcConfig.GroupID = groupID.(string) + + p.hybridConfig.CheckDefaults() + + if err := p.connectAndLogin(true); err != nil { + p.log.Error(err) + return err } - if callTimeout, ok := meta["call_timeout"]; ok { - rpcConfig.CallTimeout = int(callTimeout.(float64)) + + return nil +} + +func (p *HybridPump) startDispatcher() { + p.dispatcher = gorpc.NewDispatcher() + + for funcName, funcBody := range dispatcherFuncs { + p.dispatcher.AddFunc(funcName, funcBody) } - if pingTimeout, ok := meta["ping_timeout"]; ok { - rpcConfig.PingTimeout = int(pingTimeout.(float64)) + + p.funcClientSingleton = p.dispatcher.NewFuncClient(p.clientSingleton) +} + +func (p *HybridPump) connectRPC() error { + p.log.Debug("Setting new MDCB connection!") + + connUUID, err := uuid.NewV4() + if err != nil { + return err } - if rpcPoolSize, ok := meta["rpc_pool_size"]; ok { - rpcConfig.RPCPoolSize = int(rpcPoolSize.(float64)) + connID := connUUID.String() + + // Length should fit into 1 byte. Protection if we decide change uuid in future. + if len(connID) > 255 { + return errors.New("connID is too long") } - //we do the env check here in the hybrid pump since the config here behaves different to other pumps. 
- if envPrefix, ok := meta["meta_env_prefix"]; ok { - prefix := envPrefix.(string) - p.log.Debug(fmt.Sprintf("Checking %v env variables with prefix %v", p.GetName(), prefix)) - overrideErr := envconfig.Process(prefix, &rpcConfig) - if overrideErr != nil { - p.log.Error(fmt.Sprintf("Failed to process environment variables for %v pump %v with err:%v ", prefix, p.GetName(), overrideErr)) + if p.hybridConfig.UseSSL { + // #nosec G402 + clientCfg := &tls.Config{ + InsecureSkipVerify: p.hybridConfig.SSLInsecureSkipVerify, } + + p.clientSingleton = gorpc.NewTLSClient(p.hybridConfig.ConnectionString, clientCfg) } else { - p.log.Debug(fmt.Sprintf("Checking default %v env variables with prefix %v", p.GetName(), hybridDefaultENV)) - overrideErr := envconfig.Process(hybridDefaultENV, &rpcConfig) - if overrideErr != nil { - p.log.Error(fmt.Sprintf("Failed to process environment variables for %v pump %v with err:%v ", hybridDefaultENV, p.GetName(), overrideErr)) - } + p.clientSingleton = gorpc.NewTCPClient(p.hybridConfig.ConnectionString) } - if rpcConfig.ConnectionString == "" { - p.log.Fatal("Failed to decode configuration - no connection_string") + if p.log.Level != logrus.DebugLevel { + p.clientSingleton.LogError = gorpc.NilErrorLogger } - p.rpcConfig = rpcConfig - errConnect := p.connectRpc() - if errConnect != nil { - p.log.Fatal("Failed to connect to RPC server") - } - // check if we need to send aggregated analytics - if aggregated, ok := meta["aggregated"]; ok { - p.aggregated = aggregated.(bool) + p.clientSingleton.OnConnect = p.onConnectFunc + + p.clientSingleton.Conns = p.hybridConfig.RPCPoolSize + if p.clientSingleton.Conns == 0 { + p.clientSingleton.Conns = 20 } - if p.aggregated { - if trackAllPaths, ok := meta["track_all_paths"]; ok { - p.trackAllPaths = trackAllPaths.(bool) - } - if storeAnalyticsPerMinute, ok := meta["store_analytics_per_minute"].(bool); ok { - p.storeAnalyticsPerMinute = storeAnalyticsPerMinute - p.aggregationTime = 1 - } else { - aggregationTime, ok := meta["aggregation_time"].(int) - if !ok || aggregationTime > 60 || aggregationTime < 1 { - p.log.Warnf("aggregation_time should be between 1 and 60, Found: %v. 
The default value will be used (60 minutes)", aggregationTime) - p.aggregationTime = 60 - } - } + p.clientSingleton.Dial = getDialFn(connID, p.hybridConfig) + + p.clientSingleton.Start() + + p.startDispatcher() + + _, err = p.callRPCFn("Ping", nil) + + return err +} + +func (p *HybridPump) onConnectFunc(conn net.Conn) (net.Conn, string, error) { + p.clientIsConnected.Store(true) + remoteAddr := conn.RemoteAddr().String() + p.log.WithField("remoteAddr", remoteAddr).Debug("connected to RPC server") - if enableAggregateSelfHealing, ok := meta["enable_aggregate_self_healing"].(bool); ok { - p.enableAggregateSelfHealing = enableAggregateSelfHealing + return conn, remoteAddr, nil +} + +func (p *HybridPump) callRPCFn(funcName string, request interface{}) (interface{}, error) { + return p.funcClientSingleton.CallTimeout(funcName, request, time.Duration(p.hybridConfig.CallTimeout)*time.Second) +} + +func getDialFn(connID string, config *HybridPumpConf) func(addr string) (conn net.Conn, err error) { + return func(addr string) (conn net.Conn, err error) { + dialer := &net.Dialer{ + Timeout: time.Duration(config.CallTimeout) * time.Second, + KeepAlive: 30 * time.Second, } - if list, ok := meta["ignore_tag_prefix_list"]; ok { - ignoreTagPrefixList := list.([]interface{}) - p.ignoreTagPrefixList = make([]string, len(ignoreTagPrefixList)) - for k, v := range ignoreTagPrefixList { - p.ignoreTagPrefixList[k] = fmt.Sprint(v) + useSSL := config.UseSSL + + if useSSL { + // #nosec G402 + cfg := &tls.Config{ + InsecureSkipVerify: config.SSLInsecureSkipVerify, } + + conn, err = tls.DialWithDialer(dialer, "tcp", addr, cfg) + } else { + conn, err = dialer.Dial("tcp", addr) } - } + if err != nil { + return nil, err + } - return nil -} + initWrite := [][]byte{[]byte("proto2"), {byte(len(connID))}, []byte(connID)} -func (p *HybridPump) connectRpc() error { - connected := rpc.Connect( - p.rpcConfig, - false, - dispatcherFuncs, - func(userKey string, groupID string) interface{} { - return GroupLoginRequest{ - UserKey: userKey, - GroupID: groupID, + for _, data := range initWrite { + if _, err := conn.Write(data); err != nil { + return nil, err } - }, - nil, - nil, - ) + } - if !connected { - return errors.New("failed to connect to RPC server") + return conn, nil } - return nil } func (p *HybridPump) WriteData(ctx context.Context, data []interface{}) error { @@ -185,34 +256,40 @@ func (p *HybridPump) WriteData(ctx context.Context, data []interface{}) error { } p.log.Debug("Attempting to write ", len(data), " records...") - if !rpc.Login() { - p.log.Error("Failed to login to RPC server, trying to reconnect...") - if errConnect := p.connectRpc(); errConnect != nil { - p.log.Error("Failed to connect to RPC server") - return errConnect - } - } - _, err := rpc.FuncClientSingleton("Ping", nil) + err := p.RPCLogin() if err != nil { - p.log.WithError(err).Error("Failed to ping RPC server") - return err + if errors.Is(err, ErrRPCLogin) { + p.log.Error("Failed to login to Tyk MDCB: ", err) + return err + } + p.log.Error("Failed to connect to Tyk MDCB, retrying") + + // try to login again + if err = p.connectAndLogin(false); err != nil { + p.log.Error(err) + return err + } } // do RPC call to server - if !p.aggregated { // send analytics records as is + if !p.hybridConfig.Aggregated { + // send analytics records as is // turn array with analytics records into JSON payload jsonData, err := json.Marshal(data) if err != nil { p.log.WithError(err).Error("Failed to marshal analytics data") return err } - if _, err := 
rpc.FuncClientSingleton("PurgeAnalyticsData", string(jsonData)); err != nil { + + p.log.Debug("Sending analytics data to Tyk MDCB") + + if _, err := p.callRPCFn("PurgeAnalyticsData", string(jsonData)); err != nil { p.log.WithError(err).Error("Failed to call PurgeAnalyticsData") return err } - } else { // send aggregated data - // calculate aggregates - aggregates := analytics.AggregateData(data, p.trackAllPaths, p.ignoreTagPrefixList, p.rpcConfig.ConnectionString, p.aggregationTime) + } else { + // aggregate analytics records + aggregates := analytics.AggregateData(data, p.hybridConfig.TrackAllPaths, p.hybridConfig.IgnoreTagPrefixList, p.hybridConfig.ConnectionString, p.hybridConfig.aggregationTime) // turn map with analytics aggregates into JSON payload jsonData, err := json.Marshal(aggregates) @@ -221,7 +298,10 @@ func (p *HybridPump) WriteData(ctx context.Context, data []interface{}) error { return err } - if _, err := rpc.FuncClientSingleton("PurgeAnalyticsDataAggregated", string(jsonData)); err != nil { + p.log.Debug("Sending aggregated analytics data to Tyk MDCB") + + // send aggregated data + if _, err := p.callRPCFn("PurgeAnalyticsDataAggregated", string(jsonData)); err != nil { p.log.WithError(err).Error("Failed to call PurgeAnalyticsDataAggregated") return err } @@ -230,3 +310,63 @@ func (p *HybridPump) WriteData(ctx context.Context, data []interface{}) error { return nil } + +func (p *HybridPump) Shutdown() error { + p.log.Info("Shutting down...") + p.clientSingleton.Stop() + p.clientSingleton = nil + p.funcClientSingleton = nil + + p.clientIsConnected.Store(false) + + p.log.Info("Pump shut down.") + return nil +} + +func (p *HybridPump) RPCLogin() error { + if val, ok := p.clientIsConnected.Load().(bool); !ok || !val { + p.log.Debug("Client is not connected to RPC server") + return errors.New("client is not connected to RPC server") + } + + // do RPC call to server + logged, err := p.callRPCFn("Login", p.hybridConfig.APIKey) + if err != nil { + p.log.WithError(err).Error("Failed to call Login") + return err + } + + if !logged.(bool) { + return ErrRPCLogin + } + + return nil +} + +// connectAndLogin connects to RPC server and logs in if retry is true, it will retry with retryAndLog func +func (p *HybridPump) connectAndLogin(retry bool) error { + connectFn := p.connectRPC + loginFn := p.RPCLogin + + if retry { + connectFn = func() error { + return retryAndLog(p.connectRPC, "Failed to connect to Tyk MDCB, retrying", p.log) + } + + loginFn = func() error { + return retryAndLog(p.RPCLogin, "Failed to login to Tyk MDCB, retrying", p.log) + } + } + + p.log.Info("Connecting to Tyk MDCB...") + if err := connectFn(); err != nil { + return err + } + + p.log.Info("Logging in to Tyk MDCB...") + if err := loginFn(); err != nil { + return err + } + + return nil +} diff --git a/pumps/hybrid_test.go b/pumps/hybrid_test.go new file mode 100644 index 000000000..340bccc36 --- /dev/null +++ b/pumps/hybrid_test.go @@ -0,0 +1,720 @@ +package pumps + +import ( + "bytes" + "context" + "errors" + "io" + "net" + "os" + "testing" + "time" + + "github.com/TykTechnologies/gorpc" + "github.com/TykTechnologies/tyk-pump/analytics" + "github.com/sirupsen/logrus" + "github.com/stretchr/testify/assert" +) + +func setupKeepalive(conn net.Conn) error { + tcpConn, ok := conn.(*net.TCPConn) + if !ok { + return errors.New("not a tcp connection") + } + + if err := tcpConn.SetKeepAlive(true); err != nil { + return err + } + if err := tcpConn.SetKeepAlivePeriod(30 * time.Second); err != nil { + return err + } + return 
nil +} + +type testListener struct { + L net.Listener +} + +func (ln *testListener) Init(addr string) (err error) { + ln.L, err = net.Listen("tcp", addr) + return +} + +func (ln *testListener) Accept() (conn net.Conn, err error) { + c, err := ln.L.Accept() + if err != nil { + return + } + + if err = setupKeepalive(c); err != nil { + c.Close() + return + } + + handshake := make([]byte, 6) + if _, err = io.ReadFull(c, handshake); err != nil { + return + } + + idLenBuf := make([]byte, 1) + if _, err = io.ReadFull(c, idLenBuf); err != nil { + return + } + + idLen := uint8(idLenBuf[0]) + id := make([]byte, idLen) + if _, err = io.ReadFull(c, id); err != nil { + return + } + + return c, nil +} + +func (ln *testListener) Close() error { + return ln.L.Close() +} + +func startRPCMock(t *testing.T, config *HybridPumpConf, dispatcher *gorpc.Dispatcher) (*gorpc.Server, error) { + server := gorpc.NewTCPServer(config.ConnectionString, dispatcher.NewHandlerFunc()) + list := &testListener{} + server.Listener = list + server.LogError = gorpc.NilErrorLogger + + if err := server.Start(); err != nil { + t.Fail() + return nil, err + } + + return server, nil +} + +func stopRPCMock(t *testing.T, server *gorpc.Server) { + t.Helper() + if server != nil { + server.Listener.Close() + server.Stop() + } +} + +func TestHybridPumpInit(t *testing.T) { + //nolint:govet + tcs := []struct { + testName string + givenDispatcherFuncs map[string]interface{} + givenConfig *HybridPumpConf + expectedError error + }{ + { + testName: "Should return error if connection string is empty", + givenConfig: &HybridPumpConf{}, // empty connection string + givenDispatcherFuncs: map[string]interface{}{ + "Ping": func() bool { return true }, + "Login": func(clientAddr, userKey string) bool { return false }, + }, + expectedError: errors.New("empty connection_string"), + }, + { + testName: "Should return error if invalid credentials", + givenConfig: &HybridPumpConf{ + ConnectionString: "localhost:12345", + APIKey: "invalid_credentials", + }, // empty connection string + givenDispatcherFuncs: map[string]interface{}{ + "Ping": func() bool { return true }, + "Login": func(clientAddr, userKey string) bool { + return userKey == "valid_credentials" + }, + }, + expectedError: ErrRPCLogin, + }, + { + testName: "Should init if valid credentials", + givenConfig: &HybridPumpConf{ + ConnectionString: "localhost:12345", + APIKey: "valid_credentials", + }, + givenDispatcherFuncs: map[string]interface{}{ + "Ping": func() bool { return true }, + "Login": func(clientAddr, userKey string) bool { + return userKey == "valid_credentials" + }, + }, + expectedError: nil, + }, + } + + for _, tc := range tcs { + t.Run(tc.testName, func(t *testing.T) { + p := &HybridPump{} + + dispatcher := gorpc.NewDispatcher() + for funcName, funcBody := range tc.givenDispatcherFuncs { + dispatcher.AddFunc(funcName, funcBody) + } + + mockServer, err := startRPCMock(t, tc.givenConfig, dispatcher) + if err != nil { + t.Fatalf("Failed to start RPC mock: %v", err) + } + defer stopRPCMock(t, mockServer) + + err = p.Init(tc.givenConfig) + assert.Equal(t, tc.expectedError, err) + + if err == nil { + assert.Nil(t, p.Shutdown()) + } + }) + } +} + +func TestHybridPumpWriteData(t *testing.T) { + //nolint:govet + tcs := []struct { + testName string + givenConfig *HybridPumpConf + givenDispatcherFuncs map[string]interface{} + givenData []interface{} + expectedError error + }{ + { + testName: "write non aggregated data", + givenConfig: &HybridPumpConf{ + ConnectionString: "localhost:12345", + 
APIKey: "valid_credentials", + }, + givenDispatcherFuncs: map[string]interface{}{ + "Ping": func() bool { return true }, + "Login": func(clientAddr, userKey string) bool { + return userKey == "valid_credentials" + }, + "PurgeAnalyticsData": func(clientID, data string) error { + if data == "" { + return errors.New("empty data") + } + return nil + }, + }, + givenData: []interface{}{ + analytics.AnalyticsRecord{ + APIID: "testAPIID", + OrgID: "testOrg", + APIName: "testAPIName", + }, + analytics.AnalyticsRecord{ + APIID: "testAPIID2", + OrgID: "testOrg2", + APIName: "testAPIName2", + }, + }, + expectedError: nil, + }, + { + testName: "write aggregated data", + givenConfig: &HybridPumpConf{ + ConnectionString: "localhost:12345", + APIKey: "valid_credentials", + Aggregated: true, + }, + givenDispatcherFuncs: map[string]interface{}{ + "Ping": func() bool { return true }, + "Login": func(clientAddr, userKey string) bool { + return userKey == "valid_credentials" + }, + "PurgeAnalyticsDataAggregated": func(clientID, data string) error { + if data == "" { + return errors.New("empty data") + } + return nil + }, + }, + givenData: []interface{}{ + analytics.AnalyticsRecord{ + APIID: "testAPIID", + OrgID: "testOrg", + APIName: "testAPIName", + }, + analytics.AnalyticsRecord{ + APIID: "testAPIID2", + OrgID: "testOrg2", + APIName: "testAPIName2", + }, + }, + expectedError: nil, + }, + { + testName: "write aggregated data - no records", + givenConfig: &HybridPumpConf{ + ConnectionString: "localhost:12345", + APIKey: "valid_credentials", + Aggregated: true, + }, + givenDispatcherFuncs: map[string]interface{}{ + "Ping": func() bool { return true }, + "Login": func(clientAddr, userKey string) bool { + return userKey == "valid_credentials" + }, + "PurgeAnalyticsDataAggregated": func(clientID, data string) error { + if data == "" { + return errors.New("empty data") + } + return nil + }, + }, + givenData: []interface{}{}, + expectedError: nil, + }, + } + + for _, tc := range tcs { + t.Run(tc.testName, func(t *testing.T) { + p := &HybridPump{} + p.New() + + dispatcher := gorpc.NewDispatcher() + for funcName, funcBody := range tc.givenDispatcherFuncs { + dispatcher.AddFunc(funcName, funcBody) + } + + mockServer, err := startRPCMock(t, tc.givenConfig, dispatcher) + if err != nil { + t.Fatalf("Failed to start RPC mock: %v", err) + } + defer stopRPCMock(t, mockServer) + + err = p.Init(tc.givenConfig) + if err != nil { + t.Fail() + return + } + defer func() { + err := p.Shutdown() + if err != nil { + t.Fatalf("Failed to shutdown hybrid pump: %v", err) + } + }() + + err = p.WriteData(context.TODO(), tc.givenData) + assert.Equal(t, tc.expectedError, err) + }) + } +} + +func TestHybridPumpShutdown(t *testing.T) { + mockConf := &HybridPumpConf{ + ConnectionString: "localhost:9092", + RPCKey: "testkey", + APIKey: "testapikey", + } + + dispatcher := gorpc.NewDispatcher() + dispatcher.AddFunc("Ping", func() bool { return true }) + dispatcher.AddFunc("Login", func(clientAddr, userKey string) bool { + return userKey == mockConf.APIKey + }) + + server, err := startRPCMock(t, mockConf, dispatcher) + assert.NoError(t, err) + defer stopRPCMock(t, server) + + hybridPump := &HybridPump{} + err = hybridPump.Init(mockConf) + assert.NoError(t, err) + + err = hybridPump.Shutdown() + assert.NoError(t, err) + + // check if the isconnected + assert.False(t, hybridPump.clientIsConnected.Load().(bool)) + + assert.Nil(t, hybridPump.clientSingleton) +} + +func TestWriteLicenseExpire(t *testing.T) { + mockConf := &HybridPumpConf{ + 
ConnectionString: "localhost:9092", + RPCKey: "testkey", + APIKey: "testapikey", + } + + loginCall := 0 + + dispatcher := gorpc.NewDispatcher() + dispatcher.AddFunc("Ping", func() bool { return true }) + dispatcher.AddFunc("Login", func(clientAddr, userKey string) bool { + loginCall++ + return loginCall <= 3 + }) + dispatcher.AddFunc("PurgeAnalyticsData", func(clientID, data string) error { return nil }) + + server, err := startRPCMock(t, mockConf, dispatcher) + assert.NoError(t, err) + defer stopRPCMock(t, server) + + hybridPump := &HybridPump{} + // first login - success + err = hybridPump.Init(mockConf) + assert.NoError(t, err) + defer func() { + if err := hybridPump.Shutdown(); err != nil { + t.Fail() + } + }() + + // second login - success + err = hybridPump.WriteData(context.Background(), []interface{}{analytics.AnalyticsRecord{APIKey: "testapikey"}}) + assert.Nil(t, err) + + // third login - success + err = hybridPump.WriteData(context.Background(), []interface{}{analytics.AnalyticsRecord{APIKey: "testapikey"}}) + assert.Nil(t, err) + + // license expired, login fail - WriteData should fail + err = hybridPump.WriteData(context.Background(), []interface{}{analytics.AnalyticsRecord{APIKey: "testapikey"}}) + assert.NotNil(t, err) + assert.Equal(t, ErrRPCLogin, err) +} + +func TestHybridConfigCheckDefaults(t *testing.T) { + //nolint:govet + tcs := []struct { + testName string + givenConfig *HybridPumpConf + expectedConfig *HybridPumpConf + }{ + { + testName: "default values - no aggregated", + givenConfig: &HybridPumpConf{}, + expectedConfig: &HybridPumpConf{ + CallTimeout: DefaultRPCCallTimeout, + Aggregated: false, + }, + }, + { + testName: "aggregated true with StoreAnalyticsPerMinute", + givenConfig: &HybridPumpConf{ + Aggregated: true, + StoreAnalyticsPerMinute: true, + }, + expectedConfig: &HybridPumpConf{ + CallTimeout: DefaultRPCCallTimeout, + Aggregated: true, + StoreAnalyticsPerMinute: true, + aggregationTime: 1, + }, + }, + + { + testName: "aggregated true without StoreAnalyticsPerMinute", + givenConfig: &HybridPumpConf{ + Aggregated: true, + StoreAnalyticsPerMinute: false, + }, + expectedConfig: &HybridPumpConf{ + CallTimeout: DefaultRPCCallTimeout, + Aggregated: true, + StoreAnalyticsPerMinute: false, + aggregationTime: 60, + }, + }, + { + testName: "custom timeout", + givenConfig: &HybridPumpConf{ + CallTimeout: 20, + }, + expectedConfig: &HybridPumpConf{ + CallTimeout: 20, + }, + }, + } + + for _, tc := range tcs { + t.Run(tc.testName, func(t *testing.T) { + tc.givenConfig.CheckDefaults() + + assert.Equal(t, tc.expectedConfig, tc.givenConfig) + }) + } +} + +func TestHybridConfigParsing(t *testing.T) { + svAddress := "localhost:9099" + + //nolint:govet + tcs := []struct { + testName string + givenEnvs map[string]string + givenBaseConf map[string]interface{} + expectedConfig *HybridPumpConf + }{ + { + testName: "all envs", + givenEnvs: map[string]string{ + hybridDefaultENV + "_CONNECTIONSTRING": svAddress, + hybridDefaultENV + "_CALLTIMEOUT": "20", + hybridDefaultENV + "_RPCKEY": "testkey", + hybridDefaultENV + "_APIKEY": "testapikey", + hybridDefaultENV + "_AGGREGATED": "true", + }, + givenBaseConf: map[string]interface{}{}, + expectedConfig: &HybridPumpConf{ + ConnectionString: svAddress, + CallTimeout: 20, + RPCKey: "testkey", + APIKey: "testapikey", + Aggregated: true, + aggregationTime: 60, + }, + }, + { + testName: "all config", + givenEnvs: map[string]string{}, + givenBaseConf: map[string]interface{}{ + "connection_string": svAddress, + "call_timeout": 20, + 
"rpc_key": "testkey", + "api_key": "testapikey", + "aggregated": true, + }, + expectedConfig: &HybridPumpConf{ + ConnectionString: svAddress, + CallTimeout: 20, + RPCKey: "testkey", + APIKey: "testapikey", + Aggregated: true, + aggregationTime: 60, + }, + }, + + { + testName: "mixed config", + givenEnvs: map[string]string{ + hybridDefaultENV + "_CONNECTIONSTRING": svAddress, + hybridDefaultENV + "_RPCKEY": "testkey", + hybridDefaultENV + "_APIKEY": "testapikey", + }, + givenBaseConf: map[string]interface{}{ + "call_timeout": 20, + "aggregated": true, + "store_analytics_per_minute": true, + "track_all_paths": true, + }, + expectedConfig: &HybridPumpConf{ + ConnectionString: svAddress, + CallTimeout: 20, + RPCKey: "testkey", + APIKey: "testapikey", + Aggregated: true, + StoreAnalyticsPerMinute: true, + aggregationTime: 1, + TrackAllPaths: true, + }, + }, + } + + for _, tc := range tcs { + t.Run(tc.testName, func(t *testing.T) { + for key, env := range tc.givenEnvs { + os.Setenv(key, env) + } + defer func(envs map[string]string) { + for _, env := range envs { + os.Unsetenv(env) + } + }(tc.givenEnvs) + + dispatcher := gorpc.NewDispatcher() + dispatcher.AddFunc("Ping", func() bool { return true }) + dispatcher.AddFunc("Login", func(clientAddr, userKey string) bool { + return true + }) + + server, err := startRPCMock(t, &HybridPumpConf{ConnectionString: svAddress}, dispatcher) + assert.NoError(t, err) + defer stopRPCMock(t, server) + + hybridPump := &HybridPump{} + err = hybridPump.Init(tc.givenBaseConf) + assert.NoError(t, err) + defer func() { + if err := hybridPump.Shutdown(); err != nil { + t.Fail() + } + }() + + assert.Equal(t, tc.expectedConfig, hybridPump.hybridConfig) + }) + } +} + +func TestDispatcherFuncs(t *testing.T) { + //nolint:govet + tcs := []struct { + testName string + function string + input []interface{} + expectedOutput interface{} + expectedError error + }{ + { + testName: "Login", + function: "Login", + input: []interface{}{"127.0.0.1", "userKey123"}, + expectedOutput: false, + }, + { + testName: "PurgeAnalyticsData", + function: "PurgeAnalyticsData", + input: []interface{}{"test data"}, + expectedOutput: nil, + expectedError: nil, + }, + { + testName: "Ping", + function: "Ping", + input: []interface{}{}, + expectedOutput: false, + }, + { + testName: "PurgeAnalyticsDataAggregated", + function: "PurgeAnalyticsDataAggregated", + input: []interface{}{"test data"}, + expectedOutput: nil, + expectedError: nil, + }, + } + + for _, tc := range tcs { + t.Run(tc.testName, func(t *testing.T) { + switch fn := dispatcherFuncs[tc.function].(type) { + case func(string, string) bool: + result := fn(tc.input[0].(string), tc.input[1].(string)) + if result != tc.expectedOutput { + t.Errorf("Expected %v, got %v", tc.expectedOutput, result) + } + case func(string) error: + err := fn(tc.input[0].(string)) + if !errors.Is(err, tc.expectedError) { + t.Errorf("Expected error %v, got %v", tc.expectedError, err) + } + case func() bool: + result := fn() + if result != tc.expectedOutput { + t.Errorf("Expected %v, got %v", tc.expectedOutput, result) + } + default: + t.Errorf("Unexpected function type") + } + }) + } +} + +func TestRetryAndLog(t *testing.T) { + buf := bytes.Buffer{} + testLogger := logrus.New() + testLogger.SetOutput(&buf) + + retries := 0 + fn := func() error { + retries++ + if retries == 3 { + return nil + } + return errors.New("test error") + } + + err := retryAndLog(fn, "retrying", testLogger.WithField("test", "test")) + assert.Nil(t, err) + assert.Equal(t, 3, retries) + 
assert.Contains(t, buf.String(), "retrying") +} + +func TestConnectAndLogin(t *testing.T) { + //nolint:govet + tcs := []struct { + testName string + givenRetry bool + shouldStartSv bool + givenAttemptSuccess int + expectedErr error + }{ + { + testName: "without retry - success", + givenRetry: false, + shouldStartSv: true, + }, + { + testName: "without retry - server down", + givenRetry: false, + shouldStartSv: false, + expectedErr: errors.New("gorpc.Client: [localhost:9092]. Cannot obtain response during timeout=1s"), + }, + { + testName: "with retry - success", + givenRetry: true, + shouldStartSv: true, + }, + { + testName: "with retry - server down", + givenRetry: true, + shouldStartSv: false, + expectedErr: errors.New("gorpc.Client: [localhost:9092]. Cannot obtain response during timeout=1s"), + }, + { + testName: "without retry - fail first attempt - error", + givenRetry: false, + shouldStartSv: true, + givenAttemptSuccess: 2, + expectedErr: ErrRPCLogin, + }, + { + testName: " retry - fail first attempt - success after", + givenRetry: true, + shouldStartSv: true, + givenAttemptSuccess: 2, + expectedErr: nil, + }, + } + + for _, tc := range tcs { + t.Run(tc.testName, func(t *testing.T) { + mockConf := &HybridPumpConf{ + ConnectionString: "localhost:9092", + RPCKey: "testkey", + APIKey: "testapikey", + CallTimeout: 1, + } + + pump := &HybridPump{} + pump.hybridConfig = mockConf + pump.log = log.WithField("prefix", "hybrid-test") + + if tc.shouldStartSv { + attempts := 0 + dispatcherFns := map[string]interface{}{ + "Ping": func() bool { return true }, + "Login": func(clientAddr, userKey string) bool { + attempts++ + return attempts >= tc.givenAttemptSuccess + }, + } + dispatcher := gorpc.NewDispatcher() + for fnName, fn := range dispatcherFns { + dispatcher.AddFunc(fnName, fn) + } + + server, err := startRPCMock(t, mockConf, dispatcher) + assert.NoError(t, err) + defer stopRPCMock(t, server) + } + + err := pump.connectAndLogin(tc.givenRetry) + if tc.expectedErr == nil { + assert.Nil(t, err) + } else { + assert.NotNil(t, err) + assert.Equal(t, err.Error(), tc.expectedErr.Error()) + } + }) + } +} From 9cd44dd86462c8d4c16183efbe46648749c369f0 Mon Sep 17 00:00:00 2001 From: Tomas Buchaillot Date: Mon, 20 Mar 2023 16:09:30 +0100 Subject: [PATCH 055/102] TT-8314 Fix sec issues (#592) * updating graphql-go-tools and go1.16 in release.yml * testing graphql fix * updating graphql-go-tools to latest master --- .github/workflows/release.yml | 4 +- go.mod | 6 +- go.sum | 135 +++++++++++++++++++++------------- repo-policy/main.tf | 2 +- 4 files changed, 90 insertions(+), 57 deletions(-) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 9415a6713..d4e5809a3 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -38,9 +38,9 @@ jobs: strategy: fail-fast: false matrix: - golang_cross: [ 1.15 ] + golang_cross: [ 1.16 ] include: - - golang_cross: 1.15 + - golang_cross: 1.16 goreleaser: 'ci/goreleaser/goreleaser.yml' rpmvers: 'el/7 el/8' debvers: 'ubuntu/xenial ubuntu/bionic debian/jessie ubuntu/focal debian/buster debian/bullseye' diff --git a/go.mod b/go.mod index ab6976d3d..7b03f4c13 100644 --- a/go.mod +++ b/go.mod @@ -6,7 +6,7 @@ require ( github.com/DataDog/datadog-go v4.7.0+incompatible github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d // indirect github.com/TykTechnologies/gorpc v0.0.0-20210624160652-fe65bda0ccb9 - github.com/TykTechnologies/graphql-go-tools v1.6.2-0.20220811124354-8d1f142966f8 + 
github.com/TykTechnologies/graphql-go-tools v1.6.2-0.20230320143102-7a16078ce517 github.com/TykTechnologies/murmur3 v0.0.0-20230310161213-aad17efd5632 github.com/aws/aws-sdk-go-v2 v1.16.14 github.com/aws/aws-sdk-go-v2/config v1.9.0 @@ -30,7 +30,7 @@ require ( github.com/lintianzhi/graylogd v0.0.0-20180503131252-dc68342f04dc // indirect github.com/logzio/logzio-go v0.0.0-20200316143903-ac8fc0e2910e github.com/lonelycode/mgohacks v0.0.0-20150820024025-f9c291f7e57e - github.com/mitchellh/mapstructure v1.2.2 + github.com/mitchellh/mapstructure v1.3.1 github.com/moesif/moesifapi-go v1.0.6 github.com/olivere/elastic v6.2.31+incompatible // indirect github.com/olivere/elastic/v7 v7.0.28 @@ -48,7 +48,7 @@ require ( github.com/stretchr/testify v1.8.1 github.com/syndtr/goleveldb v0.0.0-20190318030020-c3a204f8e965 // indirect github.com/xtgo/uuid v0.0.0-20140804021211-a0b114877d4c // indirect - golang.org/x/net v0.0.0-20220225172249-27dd8689420f + golang.org/x/net v0.0.0-20220722155237-a158d28d115b google.golang.org/protobuf v1.28.1 gopkg.in/alecthomas/kingpin.v2 v2.2.6 gopkg.in/mgo.v2 v2.0.0-20190816093944-a6b53ec6cb22 diff --git a/go.sum b/go.sum index 7da9140e8..a20d3083d 100644 --- a/go.sum +++ b/go.sum @@ -18,16 +18,18 @@ cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiy cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= collectd.org v0.3.0/go.mod h1:A/8DzQBkF6abtvrT2j/AU/4tiBgJWYyh0y/oB/4MlWE= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -github.com/99designs/gqlgen v0.13.1-0.20210728041543-7e38dd46943c h1:tEDQ6XnvZQ98sZd7iqq5pe4YsstBu7TOS6T5GhNsp2s= -github.com/99designs/gqlgen v0.13.1-0.20210728041543-7e38dd46943c/go.mod h1:S7z4boV+Nx4VvzMUpVrY/YuHjFX4n7rDyuTqvAkuoRE= +github.com/99designs/gqlgen v0.17.20 h1:O7WzccIhKB1dm+7g6dhQcULINftfiLSBg2l/mwbpJMw= +github.com/99designs/gqlgen v0.17.20/go.mod h1:Mja2HI23kWT1VRH09hvWshFgOzKswpO20o4ScpJIES4= github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/toml v1.1.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/DATA-DOG/go-sqlmock v1.3.3/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= github.com/DataDog/datadog-go v4.7.0+incompatible h1:setZNZoivEjeG87iK0abKZ9XHwHV6z63eAHhwmSzFes= github.com/DataDog/datadog-go v4.7.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= -github.com/Masterminds/goutils v1.1.0 h1:zukEsf/1JZwCMgHiK3GZftabmxiCw4apj3a28RPBiVg= github.com/Masterminds/goutils v1.1.0/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= +github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI= +github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww= github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= github.com/Masterminds/sprig v2.22.0+incompatible h1:z4yfnGrZ7netVz+0EDJ0Wi+5VZCSYp4Z0m2dk6cEM60= @@ -44,13 +46,13 @@ github.com/TykTechnologies/gorm v1.20.7-0.20210910090358-06148e82dc85 h1:16hcEoY github.com/TykTechnologies/gorm v1.20.7-0.20210910090358-06148e82dc85/go.mod 
h1:hz0d/E0QBTYarOnYtdcNnBWN/NYxVMP7nZNDT6E/fFM= github.com/TykTechnologies/gorpc v0.0.0-20210624160652-fe65bda0ccb9 h1:fbxHiuw/244CQ4TEirzgL/CIMXDUx2szZn8cuuMlCy0= github.com/TykTechnologies/gorpc v0.0.0-20210624160652-fe65bda0ccb9/go.mod h1:v6v7Mlj08+EmEcXOfpuTxGt2qYU9yhqqtv4QF9Wf50E= -github.com/TykTechnologies/graphql-go-tools v1.6.2-0.20220811124354-8d1f142966f8 h1:CA59ssz4bwLkd7pzkDpZOnlMzzraq/TEbJ6xvQpSPCc= -github.com/TykTechnologies/graphql-go-tools v1.6.2-0.20220811124354-8d1f142966f8/go.mod h1:Cxpyt1EQHf8bRqAfZStqbgHif8YWngLga7tpnHRSRwU= +github.com/TykTechnologies/graphql-go-tools v1.6.2-0.20230320143102-7a16078ce517 h1:EtNbr8wZPmSBtUKjE2S74bAYeJAJzW5CqJNewSz12sQ= +github.com/TykTechnologies/graphql-go-tools v1.6.2-0.20230320143102-7a16078ce517/go.mod h1:ZiFZcrue3+n2mHH+KLHRipbYVULkgy3Myko5S7IIs74= github.com/TykTechnologies/murmur3 v0.0.0-20230310161213-aad17efd5632 h1:T5NWziFusj8au5nxAqMMh/bZyX9CAyYnBkaMSsfH6BA= github.com/TykTechnologies/murmur3 v0.0.0-20230310161213-aad17efd5632/go.mod h1:UsPYgOFBpNzDXLEti7MKOwHLpVSqdzuNGkVFPspQmnQ= github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM= -github.com/agnivade/levenshtein v1.1.0 h1:n6qGwyHG61v3ABce1rPVZklEYRT8NFpCMrpZdBUbYGM= -github.com/agnivade/levenshtein v1.1.0/go.mod h1:veldBMzWxcCG2ZvUTKD2kJNRdCk5hVbJomOvKkmgYbo= +github.com/agnivade/levenshtein v1.1.1 h1:QY8M92nrzkmr798gCo3kmMyqXFzdQVpxLlGPRBij0P8= +github.com/agnivade/levenshtein v1.1.1/go.mod h1:veldBMzWxcCG2ZvUTKD2kJNRdCk5hVbJomOvKkmgYbo= github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 h1:JYp7IbQjafoB+tBA3gMyHYHrpOtNuDiK/uB5uXxq5wM= @@ -65,6 +67,9 @@ github.com/arbovm/levenshtein v0.0.0-20160628152529-48b4e1c0c4d0/go.mod h1:t2tdK github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d h1:Byv0BzEl3/e6D5CLfI0j/7hiIEtvGVFPCZ7Ei2oq8iQ= github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= +github.com/asyncapi/converter-go v0.0.0-20190802111537-d8459b2bd403/go.mod h1:mpJYWYy+USNiLENQxiyGgRc3qtFPxYSWdSd/eS+R6bo= +github.com/asyncapi/parser-go v0.4.2/go.mod h1:5iAT+irO9xKeBDnIhqT0ev8QJH1dHq4i2oU/UBhhwB8= +github.com/asyncapi/spec-json-schemas/v2 v2.14.0/go.mod h1:5lFCFtRGfI3WVOla4slifjgPs9x79FY0fqZjgNL495c= github.com/aws/aws-sdk-go v1.29.11/go.mod h1:1KvfttTE3SPKMpo8g2c6jL3ZKfXtFvKscTgahTma5Xg= github.com/aws/aws-sdk-go v1.40.32/go.mod h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm+LY1U59Q= github.com/aws/aws-sdk-go-v2 v1.10.0/go.mod h1:U/EyyVvKtzmFeQQcca7eBotKdlpcP2zzU6bXBYcf7CE= @@ -141,6 +146,7 @@ github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7 github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9/go.mod 
h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/cyberdelia/templates v0.0.0-20141128023046-ca7fffd4298c/go.mod h1:GyV+0YP4qX0UQ7r2MoYZ+AvYDp12OF5yg4q8rGnyNh4= @@ -159,6 +165,7 @@ github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cu github.com/dgryski/trifles v0.0.0-20200323201526-dd97f9abfb48/go.mod h1:if7Fbed8SFyPtHLHbg49SI7NAdJiC5WIA09pe59rfAA= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= github.com/eapache/go-resiliency v1.2.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 h1:YEetp8/yCZMuEPMUDHG0CW/brkkEp8mzqk2+ODEitlw= github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= @@ -214,6 +221,7 @@ github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG github.com/go-sql-driver/mysql v1.5.0 h1:ozyZYNQW3x3HtqT1jira07DN2PArx2v7/mN66gGcHOs= github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= github.com/go-test/deep v1.0.4 h1:u2CU3YKy9I2pmu9pX0eq50wCgjfGIt539SqR7FbHiho= github.com/go-test/deep v1.0.4/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= github.com/go-zookeeper/zk v1.0.2/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw= @@ -233,7 +241,6 @@ github.com/gofrs/uuid v3.3.0+incompatible h1:8K4tyRfvU1CYPgJsveYFQMhpFd/wXNM7iK6 github.com/gofrs/uuid v3.3.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= -github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= github.com/golang/geo v0.0.0-20190916061304-5b978397cfec/go.mod h1:QZ0nwyI2jOfgRAoBvP+ab5aRr7c9x7lhGEJrKvBwjWI= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= @@ -279,7 +286,6 @@ github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= @@ -288,6 +294,7 @@ github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXi github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof 
v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y= @@ -295,15 +302,13 @@ github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+ github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= -github.com/gorilla/context v0.0.0-20160226214623-1ea25387ff6f/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= -github.com/gorilla/mux v1.6.1/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4= github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM= github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc= -github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= +github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gotestyourself/gotestyourself v1.4.0/go.mod h1:zZKM6oeNM8k+FRljX1mnzVYeS8wiGgQyvST1/GafPbY= github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= @@ -316,8 +321,9 @@ github.com/helloeave/json v1.15.3/go.mod h1:uTHhuUsgnrpm9cc7Gi3tfIUwgf1dq/7+uLfp github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/huandu/xstrings v1.2.1 h1:v6IdmkCnDhJG/S0ivr58PeIfg+tyhqQYy4YsCsQ0Pdc= github.com/huandu/xstrings v1.2.1/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= -github.com/iancoleman/strcase v0.0.0-20191112232945-16388991a334/go.mod h1:SK73tn/9oHe+/Y0h39VT4UCxmurVJkR5NA7kMEAOgSE= +github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/imdario/mergo v0.3.8 h1:CGgOkSJeqMRmt0D9XLWExdT4m4F1vd3FV3VPt+0VxkQ= github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= @@ -430,8 +436,8 @@ github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E github.com/jwilder/encoding v0.0.0-20170811194829-b4e1701a28ef/go.mod 
h1:Ct9fl0F6iIOGgxJ5npU/IUOhOhqlVrGjyIZc8/MagT0= github.com/kelseyhightower/envconfig v1.4.0 h1:Im6hONhd3pLkfDFsbRgu68RDNkGF1r3dvMUtDTo2cv8= github.com/kelseyhightower/envconfig v1.4.0/go.mod h1:cccZRl6mQpaq41TPp5QxidR+Sa3axMbJDNb//FQX6Gg= +github.com/kevinmbeaulieu/eq-go v1.0.0/go.mod h1:G3S8ajA56gKBZm4UB9AOyoOS37JO3roToPzKNM8dtdM= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= -github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.4.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= github.com/klauspost/compress v1.9.8/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= @@ -467,8 +473,8 @@ github.com/lib/pq v1.10.6 h1:jbk+ZieJ0D7EVGJYpL9QTz7/YW6UHbmdnZWYyK5cdBs= github.com/lib/pq v1.10.6/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/lintianzhi/graylogd v0.0.0-20180503131252-dc68342f04dc h1:7f0qjuEBw/5vUrP2lyIUgAihl0A6H0E79kswNy6edeE= github.com/lintianzhi/graylogd v0.0.0-20180503131252-dc68342f04dc/go.mod h1:WTHfLzkGmTEe+nyJqdZhFbAWUkyI30IVS9ytgHDJj0I= -github.com/logrusorgru/aurora v0.0.0-20200102142835-e9ef32dff381 h1:bqDmpDG49ZRnB5PcgP0RXtQvnMSgIF14M7CBd2shtXs= -github.com/logrusorgru/aurora v0.0.0-20200102142835-e9ef32dff381/go.mod h1:7rIyQOR62GCctdiQpZ/zOJlFyk6y+94wXzv6RNZgaR4= +github.com/logrusorgru/aurora/v3 v3.0.0 h1:R6zcoZZbvVcGMvDCKo45A9U/lzYyzl5NfYIvznmDfE4= +github.com/logrusorgru/aurora/v3 v3.0.0/go.mod h1:vsR12bk5grlLvLXAYrBsb5Oc/N+LxAlxggSjiwMnCUc= github.com/logzio/logzio-go v0.0.0-20200316143903-ac8fc0e2910e h1:j4tDETg2tUX0AZq2CClOpW8rBf9rPEBNjiXgQoso4Z8= github.com/logzio/logzio-go v0.0.0-20200316143903-ac8fc0e2910e/go.mod h1:OBprCVuGvtyYcaCmYjE32bF12d5AAHeXS5xI0QbIXMI= github.com/lonelycode/mgohacks v0.0.0-20150820024025-f9c291f7e57e h1:VvfhTFKhOTHD0xtCOPpzWxw03TUdtkRVWjRL3Lcnhuk= @@ -480,22 +486,23 @@ github.com/mailru/easyjson v0.7.1/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7 github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/matryer/moq v0.0.0-20190312154309-6cfb0558e1bd/go.mod h1:9ELz6aaclSIGnZBoaSLZ3NAl1VTufbOrXBPvtcy6WiQ= -github.com/matryer/moq v0.0.0-20200106131100-75d0ddfc0007/go.mod h1:9ELz6aaclSIGnZBoaSLZ3NAl1VTufbOrXBPvtcy6WiQ= +github.com/matryer/moq v0.2.7/go.mod h1:kITsx543GOENm48TUAQyJ9+SAvFSr7iGQXPoth/VUBk= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= -github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.7/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= -github.com/mattn/go-colorable v0.1.8 h1:c1ghPdyEDarC70ftn0y+A/Ee++9zz8ljHG1b13eJ0s8= github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= +github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= 
github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ= -github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-isatty v0.0.16 h1:bq3VjFmv/sOjHtdEhmkEV4x1AJtvUvOJ2PFAZ5+peKQ= +github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-sqlite3 v1.11.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= github.com/mattn/go-sqlite3 v1.14.3 h1:j7a/xn1U6TKA/PHHxqZuzh64CdtRc7rU9M+AvkOl5bA= @@ -508,10 +515,9 @@ github.com/minio/highwayhash v1.0.1/go.mod h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLT github.com/mitchellh/copystructure v1.0.0 h1:Laisrj+bAB6b/yJwB5Bt3ITZhGJdqmxquMKeZ+mmkFQ= github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/mapstructure v0.0.0-20180203102830-a4e142e9c047/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.2.2 h1:dxe5oCinTXiTIcfgmZecdCzPmAJKd46KsCWc35r0TV4= -github.com/mitchellh/mapstructure v1.2.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/mapstructure v1.3.1 h1:cCBH2gTD2K0OtLlv/Y5H01VQCqmlDxz30kS5Y5bqfLA= +github.com/mitchellh/mapstructure v1.3.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/reflectwalk v1.0.0 h1:9D+8oIskB4VJBN5SFlmc27fSlIBZaov1Wpk/IfikLNY= github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/moby/sys/mountinfo v0.5.0/go.mod h1:3bMD3Rg+zkqx8MRYPi7Pyb0Ie97QEBmdxbhnCLlSvSU= @@ -540,8 +546,9 @@ github.com/nats-io/nkeys v0.3.0 h1:cgM5tL53EvYRU+2YLXIK0G2mJtK12Ft9oeooSZMA2G8= github.com/nats-io/nkeys v0.3.0/go.mod h1:gvUNGjVcM2IPr5rCsRsC6Wb3Hr2CQAm08dsxtV6A5y4= github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw= github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= -github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= +github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= github.com/olivere/elastic v6.2.31+incompatible h1:zwJIIsgfiDBuDS3sb6MCbm/e03BPEJoGZvqevZXM254= github.com/olivere/elastic v6.2.31+incompatible/go.mod h1:J+q1zQJTgAz9woqsbVRqGeB5G1iqDKVBWLNSYW8yfJ8= github.com/olivere/elastic/v7 v7.0.12/go.mod h1:14rWX28Pnh3qCKYRVnSGXWLf9MbLonYS/4FDCY3LAPo= @@ -550,19 +557,25 @@ github.com/olivere/elastic/v7 v7.0.28/go.mod h1:DzHQoqd6YqSuvF1lk/fR4cW4FNUNzSD5 github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= 
-github.com/onsi/ginkgo v1.14.1 h1:jMU0WaQrP0a/YAEq8eJmJKjBoMs+pClEr1vDMlM/Do4= github.com/onsi/ginkgo v1.14.1/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= +github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc= +github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= +github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= +github.com/onsi/ginkgo/v2 v2.1.4/go.mod h1:um6tUpWM/cxCK3/FK8BXqEiUMUwRgSM4JXG47RKZmLU= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/onsi/gomega v1.10.2 h1:aY/nuoWlKJud2J6U0E3NWsjlg+0GtwXxgEqthRdzlcs= github.com/onsi/gomega v1.10.2/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= +github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= +github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro= +github.com/onsi/gomega v1.20.0 h1:8W0cWlwFkflGPLltQvLRB7ZVD5HuP6ng320w2IS245Q= +github.com/onsi/gomega v1.20.0/go.mod h1:DtrZpjmvpn2mPm4YWQa0/ALMDj9v4YxLgojwPeREyVo= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= github.com/opencontainers/runc v1.1.2/go.mod h1:Tj1hFw6eFWp/o33uxGf5yF2BX5yz2Z6iptFpuvbbKqc= github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/selinux v1.10.0/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI= -github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74= github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/opentracing/opentracing-go v1.0.3-0.20180606204148-bd9c31933947/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= @@ -611,6 +624,8 @@ github.com/qri-io/jsonschema v0.2.1 h1:NNFoKms+kut6ABPf6xiKNM5214jzxAhDBrPHCJ97W github.com/qri-io/jsonschema v0.2.1/go.mod h1:g7DPkiOsK1xv6T/Ao5scXRkd+yTFygcANPBaaqW+VrI= github.com/quipo/statsd v0.0.0-20160923160612-75b7afedf0d2 h1:IvjiJDGCF8L8TjKHQKmLAjWztpKDCAaRifiRMdGzWk0= github.com/quipo/statsd v0.0.0-20160923160612-75b7afedf0d2/go.mod h1:1COUodqytMiv/GkAVUGhc0CA6e8xak5U4551TY7iEe0= +github.com/r3labs/sse/v2 v2.8.1 h1:lZH+W4XOLIq88U5MIHOsLec7+R62uhz3bIi2yn0Sg8o= +github.com/r3labs/sse/v2 v2.8.1/go.mod h1:Igau6Whc+F17QUgML1fYe1VPZzTV6EMCnYktEmkNJ7I= github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/resurfaceio/logger-go/v3 v3.2.1 h1:tTPvGp+FpH35aaT/nnhP4n/Rh/f1vHe64WoXTDgv0fY= github.com/resurfaceio/logger-go/v3 v3.2.1/go.mod h1:YPcxFUcloW37F1WQA9MUcGWu2JzlvBxlCfFF5+T3GO8= @@ -618,12 +633,12 @@ github.com/retailnext/hllpp v1.0.1-0.20180308014038-101a6d2f8b52/go.mod h1:RDpi1 github.com/robertkowalski/graylog-golang v0.0.0-20151121031040-e5295cfa2827 h1:D2Xs0bSuqpKnUOOlK4yu6lloeOs4+oD+pjbOfsxgWu0= github.com/robertkowalski/graylog-golang v0.0.0-20151121031040-e5295cfa2827/go.mod 
h1:jONcYFk83vUF1lv0aERAwaFtDM9wUW4BMGmlnpLJyZY= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rs/cors v1.6.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU= github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= github.com/sebdah/goldie v0.0.0-20180424091453-8784dd1ab561 h1:IY+sDBJR/wRtsxq+626xJnt4Tw7/ROA9cDIR8MMhWyg= github.com/sebdah/goldie v0.0.0-20180424091453-8784dd1ab561/go.mod h1:lvjGftC8oe7XPtyrOidaMi0rp5B9+XY/ZRUynGnuaxQ= @@ -644,9 +659,7 @@ github.com/shirou/gopsutil v3.20.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMT github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4= github.com/shopspring/decimal v0.0.0-20200227202807-02e2044944cc h1:jUIKcSPO9MoMJBbEoyE/RJoE8vz7Mb8AjvifMMwSyvY= github.com/shopspring/decimal v0.0.0-20200227202807-02e2044944cc/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= -github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= -github.com/shurcooL/vfsgen v0.0.0-20180121065927-ffb13db8def0/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= @@ -676,7 +689,6 @@ github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSS github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.2.0/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.2.1/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= @@ -705,13 +717,12 @@ github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljT github.com/ugorji/go/codec v1.1.7 h1:2SvQaVZ1ouYrrKKwoSk2pzd4A9evlKJb9oTL+OaLUSs= github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= -github.com/urfave/cli/v2 v2.1.1/go.mod h1:SE9GqnLQmjVa0iPEY0f1w3ygNIYcIJ0OKPMoW2caLfQ= +github.com/urfave/cli/v2 v2.8.1/go.mod h1:Z41J9TPoffeoqP0Iza0YbAhGvymRdZAd2uPmZ5JxRdY= github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= github.com/valyala/fasttemplate v1.0.1/go.mod 
h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8= github.com/valyala/fasttemplate v1.2.1/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ= -github.com/vektah/dataloaden v0.2.1-0.20190515034641-a19b9a6e7c9e/go.mod h1:/HUdMve7rvxZma+2ZELQeNh88+003LL7Pf/CZ089j8U= -github.com/vektah/gqlparser/v2 v2.2.0 h1:bAc3slekAAJW6sZTi07aGq0OrfaCjj4jxARAaC7g2EM= -github.com/vektah/gqlparser/v2 v2.2.0/go.mod h1:i3mQIGIrbK2PD1RrCeMTlVbkF2FJ6WkU1KJlJlC+3F4= +github.com/vektah/gqlparser/v2 v2.5.1 h1:ZGu+bquAY23jsxDRcYpWjttRZrUz07LbiY77gUOHcr4= +github.com/vektah/gqlparser/v2 v2.5.1/go.mod h1:mPgqFBu/woKTVYWyNk8cO3kh4S/f4aRFZrvOnp3hmCs= github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE= github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU= github.com/willf/bitset v1.1.3/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= @@ -721,12 +732,18 @@ github.com/xdg/scram v1.0.3/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49 github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= github.com/xdg/stringprep v1.0.3 h1:cmL5Enob4W83ti/ZHuZLuKD/xqJfus4fVPwE+/BDm+4= github.com/xdg/stringprep v1.0.3/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= +github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonpointer v0.0.0-20190809123943-df4f5c81cb3b/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= +github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= github.com/xlab/treeprint v0.0.0-20180616005107-d6fb6747feb6/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673/go.mod h1:N3UwUGtsrSj3ccvlPHLoLsHnpR27oXr4ZE984MbSER8= github.com/xtgo/uuid v0.0.0-20140804021211-a0b114877d4c h1:3lbZUMbMiGUW/LMkfsEABsc5zNT9+b1CvsJx47JzJ8g= github.com/xtgo/uuid v0.0.0-20140804021211-a0b114877d4c/go.mod h1:UrdRz5enIKZ63MEE3IF9l2/ebyx59GyGgPi+tICQdmM= -github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= @@ -772,8 +789,9 @@ golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20201112155050-0c6587e931a9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210314154223-e6e6c4f2bb5b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= -golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e h1:gsTQYXdTw2Gq7RBsWvlQ91b+aEQ6bXFUngBGuR8sPpI= golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod 
h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519 h1:7I4JAnoQBe7ZtJcBaYHi5UtiO8tQHbUSXxL+pnGRANg= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -801,8 +819,9 @@ golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCc golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -818,10 +837,10 @@ golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191116160921-f9c825593386/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= @@ -831,9 +850,13 @@ golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwY golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= golang.org/x/net 
v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc= +golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b h1:PxfKdU9lEEDYjdIzOtC4qFWgkU2rGHdKlKowJSMN9h0= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -847,8 +870,9 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 h1:uVc8UZUe6tr40fFVnUP5Oj+veunVezqYl9z7DYw9xzw= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -887,17 +911,24 @@ golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200826173525-f9321e4c35a6/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys 
v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211116061358-0a5406a5449c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220405210540-1e041c57c461 h1:kHVeDEnfKn3T238CvrUcz6KeEsFHVaKh4kMTt6Wsysg= +golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220405210540-1e041c57c461/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220422013727-9388b58f7150/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab h1:2QkjZIsXupsJbJIdSjjUOgWK3aEtzyuh2mPt3l/CkeU= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -921,7 +952,6 @@ golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGm golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190125232054-d66bd3c5d5a6/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= @@ -931,7 +961,6 @@ golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3 golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190425163242-31fd60d6bfdc/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190515012406-7d7faa4812bd/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= @@ -950,9 +979,10 @@ golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools 
v0.0.0-20200108203644-89082a384178/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20210106214847-113979e3529a h1:CB3a9Nez8M13wwlr/E2YtwoU+qYHKfC+JrDa45RXXoQ= -golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E= +golang.org/x/tools v0.1.12 h1:VveCTK38A2rkS8ZqFY25HIDFscX5X9OoEhJd3quQmXU= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1014,10 +1044,13 @@ google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlba google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/cenkalti/backoff.v1 v1.1.0 h1:Arh75ttbsvlpVA7WtVpH4u9h6Zl46xuptxqLxPiSo4Y= +gopkg.in/cenkalti/backoff.v1 v1.1.0/go.mod h1:J6Vskwqd+OMVJl8C33mmtxTBs2gyzfv7UDAkHu8BrjI= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -1043,8 +1076,10 @@ gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20190709130402-674ba3eaed22/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= @@ -1066,5 +1101,3 @@ rsc.io/binaryregexp v0.2.0/go.mod 
h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8 rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU= -sourcegraph.com/sourcegraph/appdash-data v0.0.0-20151005221446-73f23eafcf67/go.mod h1:L5q+DGLGOQFpo1snNEkLOJT2d1YTW66rWNzatr3He1k= diff --git a/repo-policy/main.tf b/repo-policy/main.tf index 6de0d8a40..5d22e9127 100644 --- a/repo-policy/main.tf +++ b/repo-policy/main.tf @@ -36,7 +36,7 @@ module "tyk-pump" { { branch = "master", reviewers = "2", convos = "false", - required_tests = ["1.15","Go 1.16 tests"]}, + required_tests = ["1.16","Go 1.16 tests"]}, { branch = "release-1.7", reviewers = "0", convos = "false", From f2770d82c4d05fd3c6470d3e4e1f710c647b15bb Mon Sep 17 00:00:00 2001 From: Esteban Ricardo Mirizio Date: Tue, 28 Mar 2023 09:23:59 -0300 Subject: [PATCH 056/102] automated push by gromit (#594) Co-authored-by: Gromit --- .github/workflows/release.yml | 14 ++++++++------ ci/Dockerfile.std | 4 ++-- 2 files changed, 10 insertions(+), 8 deletions(-) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index d4e5809a3..2f37c9cab 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -1,6 +1,6 @@ # Generated by: gromit policy -# Generated on: Wed Dec 14 23:43:05 UTC 2022 +# Generated on: Thu Mar 23 17:02:26 UTC 2023 # Distribution channels covered by this workflow @@ -20,9 +20,6 @@ on: branches: - master - release-** - - integration/** - - feature/** - - perf/** tags: - 'v*' @@ -58,6 +55,10 @@ jobs: uses: actions/checkout@v3 with: fetch-depth: 1 + + - name: "Add Git safe.directory" + run: git config --global --add safe.directory $GITHUB_WORKSPACE + - uses: docker/setup-qemu-action@v2 - uses: docker/setup-buildx-action@v2 @@ -68,7 +69,6 @@ jobs: with: username: ${{ secrets.DOCKER_USERNAME }} password: ${{ secrets.DOCKER_PASSWORD }} - - name: Login to Cloudsmith if: startsWith(github.ref, 'refs/tags') uses: docker/login-action@v2 @@ -224,6 +224,7 @@ jobs: --title-link 'https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}' upgrade-deb: + if: startsWith(github.ref, 'refs/tags') && !github.event.pull_request.draft runs-on: ubuntu-latest needs: goreleaser strategy: @@ -270,6 +271,7 @@ jobs: push: false upgrade-rpm: + if: startsWith(github.ref, 'refs/tags') && !github.event.pull_request.draft needs: goreleaser runs-on: ubuntu-latest strategy: @@ -309,7 +311,7 @@ jobs: push: false smoke-tests: - if: startsWith(github.ref, 'refs/tags') + if: startsWith(github.ref, 'refs/tags') && !github.event.pull_request.draft needs: - goreleaser runs-on: ubuntu-latest diff --git a/ci/Dockerfile.std b/ci/Dockerfile.std index 29246abc6..08c448f1a 100644 --- a/ci/Dockerfile.std +++ b/ci/Dockerfile.std @@ -1,6 +1,6 @@ # Generated by: gromit policy -# Generated on: Wed Dec 14 23:43:05 UTC 2022 +# Generated on: Thu Mar 23 17:02:26 UTC 2023 FROM debian:bullseye-slim ARG TARGETARCH @@ -12,7 +12,7 @@ RUN apt-get update \ # Remove some things to decrease CVE surface -RUN apt-get remove -y --allow-remove-essential libtiff5 ncurses-base \ +RUN apt-get remove -y --allow-remove-essential --auto-remove curl libtiff5 ncurses-base \ && rm /usr/bin/passwd && rm /usr/sbin/adduser # Clean up caches, unwanted .a and .o files From 
8458e1104f173334f46b9200daee42ef80c59e91 Mon Sep 17 00:00:00 2001 From: Matias <83959431+mativm02@users.noreply.github.com> Date: Wed, 12 Apr 2023 08:48:05 -0300 Subject: [PATCH 057/102] [TT-7952] Implementing storage library (#597) * implementing storage library * changes on mgo helper file * modifying doaggregatedwriting * refactoring tests * removing test prints * solving conflicts v2 * using a generic dbObject * linting * adding ID field to structs * refactoring upserts to mixed collection * fixing GenerateDemo func * TestAggregatedRecordTableName * fix GraphMongoPump_WriteData record ids * making id.ObjectID unexported in order to avoid embedded in graph records * fix TestWriteUptimeData * fix TestSerializer_Decode * switching default aggregate driver to mgo * removing json tag from unexported id field * refactor TestWriteUptimeData test * asserting msgpack err in TestWriteUptimeData * linting mongo_tests.go * adding TestAnalyticsRecord_GetFieldNames * adding TestAnalyticsRecord_GetLineValues * adding TestAggregatedRecord * adding uptime_data_test.go * adding TestAnalyticsRecordAggregate_generateBSONFromProperty * adding TestAnalyticsRecordAggregate_generateSetterForTime * uptime tests * adding TestAnalyticsRecordAggregate_latencySetter * adding TestAnalyticsRecordAggregate_AsChange + linting mongo_selective_test.go + pointer in DoAggregatedWriting * adding TestAnalyticsRecordAggregate_AsChange + sorting AnalyticsRecordAggregate.ErrorList so we can test * fumpting * fixing indexes * fixing code smells * reducing cognitive complexity of AccumulateSet functions * fixing failing test * formatting * removing autogenerated random file * fixing code smells * using different collection names * uploading analytics file * using default driver from library instead of hardcoding it * fixing tests * improving CI test * removing commented test --------- Co-authored-by: tbuchaillot --- .github/workflows/ci-test.yml | 9 +- analytics/aggregate.go | 163 ++++--- analytics/aggregate_test.go | 800 ++++++++++++++++++++++++++++++++- analytics/analytics.go | 19 +- analytics/analytics_test.go | 88 ++++ analytics/graph_record.go | 15 + analytics/graph_record_test.go | 2 +- analytics/uptime_data.go | 45 +- analytics/uptime_data_test.go | 377 ++++++++++++++++ bin/ci-test.sh | 25 +- go.mod | 3 +- go.sum | 30 +- pumps/graph_mongo.go | 27 +- pumps/graph_mongo_test.go | 23 +- pumps/mgo_helper_test.go | 117 +++-- pumps/mongo.go | 463 +++++++------------ pumps/mongo_aggregate.go | 266 ++++------- pumps/mongo_aggregate_test.go | 180 ++------ pumps/mongo_selective.go | 354 +++++++-------- pumps/mongo_selective_test.go | 266 ++++++++++- pumps/mongo_test.go | 149 ++++-- serializer/golanglint.xml | 4 + serializer/serializer_test.go | 4 +- 23 files changed, 2402 insertions(+), 1027 deletions(-) create mode 100644 analytics/uptime_data_test.go create mode 100644 serializer/golanglint.xml diff --git a/.github/workflows/ci-test.yml b/.github/workflows/ci-test.yml index d9b6d96d6..3094c3635 100644 --- a/.github/workflows/ci-test.yml +++ b/.github/workflows/ci-test.yml @@ -1,9 +1,9 @@ name: CI tests -on: +on: pull_request: push: - branches: + branches: - master env: @@ -38,7 +38,7 @@ jobs: id: hygiene run: | go install golang.org/x/tools/cmd/goimports@latest - + - name: Fetch base branch if: ${{ github.event_name == 'pull_request' }} run: git fetch origin ${{ github.base_ref }} @@ -51,7 +51,7 @@ jobs: - name: Start MongoDB uses: supercharge/mongodb-github-action@1.2.0 with: - mongodb-version: '${{ matrix.mongodb-version }}' + 
mongodb-version: "${{ matrix.mongodb-version }}" - name: Cache uses: actions/cache@v3 @@ -81,4 +81,3 @@ jobs: if: ${{ github.event_name == 'push' }} run: | $(go env GOPATH)/bin/golangci-lint run --out-format checkstyle --timeout=300s --max-issues-per-linter=0 --max-same-issues=0 --issues-exit-code=0 ./... > golanglint.xml - diff --git a/analytics/aggregate.go b/analytics/aggregate.go index 7080712c6..8b17ba9e6 100644 --- a/analytics/aggregate.go +++ b/analytics/aggregate.go @@ -4,14 +4,16 @@ import ( b64 "encoding/base64" "encoding/hex" "fmt" + "sort" "strconv" "strings" "sync" "time" + "github.com/TykTechnologies/storage/persistent/dbm" + "github.com/TykTechnologies/storage/persistent/id" "github.com/fatih/structs" "github.com/sirupsen/logrus" - "gopkg.in/mgo.v2/bson" "gorm.io/gorm" ) @@ -70,7 +72,21 @@ type GraphRecordAggregate struct { RootFields map[string]*Counter } +type AggregateFieldList struct { + APIKeys []Counter + APIID []Counter + OauthIDs []Counter + Geo []Counter + Tags []Counter + Errors []Counter + Endpoints []Counter + KeyEndpoint map[string][]Counter `bson:"keyendpoints"` + OauthEndpoint map[string][]Counter `bson:"oauthendpoints"` + APIEndpoint []Counter `bson:"apiendpoints"` +} + type AnalyticsRecordAggregate struct { + id id.ObjectId `bson:"_id" gorm:"-:all"` TimeStamp time.Time OrgID string TimeID struct { @@ -91,18 +107,7 @@ type AnalyticsRecordAggregate struct { Endpoints map[string]*Counter - Lists struct { - APIKeys []Counter - APIID []Counter - OauthIDs []Counter - Geo []Counter - Tags []Counter - Errors []Counter - Endpoints []Counter - KeyEndpoint map[string][]Counter `bson:"keyendpoints"` - OauthEndpoint map[string][]Counter `bson:"oauthendpoints"` - APIEndpoint []Counter `bson:"apiendpoints"` - } + Lists AggregateFieldList KeyEndpoint map[string]map[string]*Counter `bson:"keyendpoints"` OauthEndpoint map[string]map[string]*Counter `bson:"oauthendpoints"` @@ -112,6 +117,22 @@ type AnalyticsRecordAggregate struct { ExpireAt time.Time `bson:"expireAt" json:"expireAt"` LastTime time.Time + Mixed bool `bson:"-" json:"-"` +} + +func (f *AnalyticsRecordAggregate) TableName() string { + if f.Mixed { + return AgggregateMixedCollectionName + } + return "z_tyk_analyticz_aggregate_" + f.OrgID +} + +func (f *AnalyticsRecordAggregate) GetObjectID() id.ObjectId { + return f.id +} + +func (f *AnalyticsRecordAggregate) SetObjectID(id id.ObjectId) { + f.id = id } type SQLAnalyticsRecordAggregate struct { @@ -168,7 +189,7 @@ func (f *SQLAnalyticsRecordAggregate) TableName() string { return AggregateSQLTable } -func OnConflictAssignments(tableName string, tempTable string) map[string]interface{} { +func OnConflictAssignments(tableName, tempTable string) map[string]interface{} { assignments := make(map[string]interface{}) f := SQLAnalyticsRecordAggregate{} baseFields := structs.Fields(f.Code) @@ -254,53 +275,53 @@ func (f AnalyticsRecordAggregate) New() AnalyticsRecordAggregate { return thisF } -func (f *AnalyticsRecordAggregate) generateBSONFromProperty(parent, thisUnit string, incVal *Counter, newUpdate bson.M) bson.M { +func (f *AnalyticsRecordAggregate) generateBSONFromProperty(parent, thisUnit string, incVal *Counter, newUpdate dbm.DBM) dbm.DBM { constructor := parent + "." + thisUnit + "." if parent == "" { constructor = thisUnit + "." 
} - newUpdate["$inc"].(bson.M)[constructor+"hits"] = incVal.Hits - newUpdate["$inc"].(bson.M)[constructor+"success"] = incVal.Success - newUpdate["$inc"].(bson.M)[constructor+"errortotal"] = incVal.ErrorTotal + newUpdate["$inc"].(dbm.DBM)[constructor+"hits"] = incVal.Hits + newUpdate["$inc"].(dbm.DBM)[constructor+"success"] = incVal.Success + newUpdate["$inc"].(dbm.DBM)[constructor+"errortotal"] = incVal.ErrorTotal for k, v := range incVal.ErrorMap { - newUpdate["$inc"].(bson.M)[constructor+"errormap."+k] = v - } - newUpdate["$inc"].(bson.M)[constructor+"totalrequesttime"] = incVal.TotalRequestTime - newUpdate["$set"].(bson.M)[constructor+"identifier"] = incVal.Identifier - newUpdate["$set"].(bson.M)[constructor+"humanidentifier"] = incVal.HumanIdentifier - newUpdate["$set"].(bson.M)[constructor+"lasttime"] = incVal.LastTime - newUpdate["$set"].(bson.M)[constructor+"openconnections"] = incVal.OpenConnections - newUpdate["$set"].(bson.M)[constructor+"closedconnections"] = incVal.ClosedConnections - newUpdate["$set"].(bson.M)[constructor+"bytesin"] = incVal.BytesIn - newUpdate["$set"].(bson.M)[constructor+"bytesout"] = incVal.BytesOut - newUpdate["$max"].(bson.M)[constructor+"maxlatency"] = incVal.MaxLatency + newUpdate["$inc"].(dbm.DBM)[constructor+"errormap."+k] = v + } + newUpdate["$inc"].(dbm.DBM)[constructor+"totalrequesttime"] = incVal.TotalRequestTime + newUpdate["$set"].(dbm.DBM)[constructor+"identifier"] = incVal.Identifier + newUpdate["$set"].(dbm.DBM)[constructor+"humanidentifier"] = incVal.HumanIdentifier + newUpdate["$set"].(dbm.DBM)[constructor+"lasttime"] = incVal.LastTime + newUpdate["$set"].(dbm.DBM)[constructor+"openconnections"] = incVal.OpenConnections + newUpdate["$set"].(dbm.DBM)[constructor+"closedconnections"] = incVal.ClosedConnections + newUpdate["$set"].(dbm.DBM)[constructor+"bytesin"] = incVal.BytesIn + newUpdate["$set"].(dbm.DBM)[constructor+"bytesout"] = incVal.BytesOut + newUpdate["$max"].(dbm.DBM)[constructor+"maxlatency"] = incVal.MaxLatency // Don't update min latency in case of errors if incVal.Hits != incVal.ErrorTotal { if newUpdate["$min"] == nil { - newUpdate["$min"] = bson.M{} + newUpdate["$min"] = dbm.DBM{} } - newUpdate["$min"].(bson.M)[constructor+"minlatency"] = incVal.MinLatency - newUpdate["$min"].(bson.M)[constructor+"minupstreamlatency"] = incVal.MinUpstreamLatency + newUpdate["$min"].(dbm.DBM)[constructor+"minlatency"] = incVal.MinLatency + newUpdate["$min"].(dbm.DBM)[constructor+"minupstreamlatency"] = incVal.MinUpstreamLatency } - newUpdate["$max"].(bson.M)[constructor+"maxupstreamlatency"] = incVal.MaxUpstreamLatency - newUpdate["$inc"].(bson.M)[constructor+"totalupstreamlatency"] = incVal.TotalUpstreamLatency - newUpdate["$inc"].(bson.M)[constructor+"totallatency"] = incVal.TotalLatency + newUpdate["$max"].(dbm.DBM)[constructor+"maxupstreamlatency"] = incVal.MaxUpstreamLatency + newUpdate["$inc"].(dbm.DBM)[constructor+"totalupstreamlatency"] = incVal.TotalUpstreamLatency + newUpdate["$inc"].(dbm.DBM)[constructor+"totallatency"] = incVal.TotalLatency return newUpdate } -func (f *AnalyticsRecordAggregate) generateSetterForTime(parent, thisUnit string, realTime float64, newUpdate bson.M) bson.M { +func (f *AnalyticsRecordAggregate) generateSetterForTime(parent, thisUnit string, realTime float64, newUpdate dbm.DBM) dbm.DBM { constructor := parent + "." + thisUnit + "." if parent == "" { constructor = thisUnit + "." 
} - newUpdate["$set"].(bson.M)[constructor+"requesttime"] = realTime + newUpdate["$set"].(dbm.DBM)[constructor+"requesttime"] = realTime return newUpdate } -func (f *AnalyticsRecordAggregate) latencySetter(parent, thisUnit string, newUpdate bson.M, counter *Counter) bson.M { +func (f *AnalyticsRecordAggregate) latencySetter(parent, thisUnit string, newUpdate dbm.DBM, counter *Counter) dbm.DBM { if counter.Hits > 0 { counter.Latency = float64(counter.TotalLatency) / float64(counter.Hits) counter.UpstreamLatency = float64(counter.TotalUpstreamLatency) / float64(counter.Hits) @@ -313,8 +334,8 @@ func (f *AnalyticsRecordAggregate) latencySetter(parent, thisUnit string, newUpd if parent == "" { constructor = thisUnit + "." } - newUpdate["$set"].(bson.M)[constructor+"latency"] = counter.Latency - newUpdate["$set"].(bson.M)[constructor+"upstreamlatency"] = counter.UpstreamLatency + newUpdate["$set"].(dbm.DBM)[constructor+"latency"] = counter.Latency + newUpdate["$set"].(dbm.DBM)[constructor+"upstreamlatency"] = counter.UpstreamLatency return newUpdate } @@ -408,11 +429,11 @@ func (f *AnalyticsRecordAggregate) Dimensions() (dimensions []Dimension) { return } -func (f *AnalyticsRecordAggregate) AsChange() (newUpdate bson.M) { - newUpdate = bson.M{ - "$inc": bson.M{}, - "$set": bson.M{}, - "$max": bson.M{}, +func (f *AnalyticsRecordAggregate) AsChange() (newUpdate dbm.DBM) { + newUpdate = dbm.DBM{ + "$inc": dbm.DBM{}, + "$set": dbm.DBM{}, + "$max": dbm.DBM{}, } for _, d := range f.Dimensions() { @@ -423,18 +444,18 @@ func (f *AnalyticsRecordAggregate) AsChange() (newUpdate bson.M) { asTime := f.TimeStamp newTime := time.Date(asTime.Year(), asTime.Month(), asTime.Day(), asTime.Hour(), asTime.Minute(), 0, 0, asTime.Location()) - newUpdate["$set"].(bson.M)["timestamp"] = newTime - newUpdate["$set"].(bson.M)["expireAt"] = f.ExpireAt - newUpdate["$set"].(bson.M)["timeid.year"] = newTime.Year() - newUpdate["$set"].(bson.M)["timeid.month"] = newTime.Month() - newUpdate["$set"].(bson.M)["timeid.day"] = newTime.Day() - newUpdate["$set"].(bson.M)["timeid.hour"] = newTime.Hour() - newUpdate["$set"].(bson.M)["lasttime"] = f.LastTime + newUpdate["$set"].(dbm.DBM)["timestamp"] = newTime + newUpdate["$set"].(dbm.DBM)["expireAt"] = f.ExpireAt + newUpdate["$set"].(dbm.DBM)["timeid.year"] = newTime.Year() + newUpdate["$set"].(dbm.DBM)["timeid.month"] = newTime.Month() + newUpdate["$set"].(dbm.DBM)["timeid.day"] = newTime.Day() + newUpdate["$set"].(dbm.DBM)["timeid.hour"] = newTime.Hour() + newUpdate["$set"].(dbm.DBM)["lasttime"] = f.LastTime return newUpdate } -func (f *AnalyticsRecordAggregate) SetErrorList(parent, thisUnit string, counter *Counter, newUpdate bson.M) { +func (f *AnalyticsRecordAggregate) SetErrorList(parent, thisUnit string, counter *Counter, newUpdate dbm.DBM) { constructor := parent + "." + thisUnit + "." if parent == "" { constructor = thisUnit + "." 
@@ -449,12 +470,16 @@ func (f *AnalyticsRecordAggregate) SetErrorList(parent, thisUnit string, counter } errorlist = append(errorlist, element) } + sort.SliceStable(errorlist, func(i, j int) bool { + return errorlist[i].Code < errorlist[j].Code + }) + counter.ErrorList = errorlist - newUpdate["$set"].(bson.M)[constructor+"errorlist"] = counter.ErrorList + newUpdate["$set"].(dbm.DBM)[constructor+"errorlist"] = counter.ErrorList } -func (f *AnalyticsRecordAggregate) getRecords(fieldName string, data map[string]*Counter, newUpdate bson.M) []Counter { +func (f *AnalyticsRecordAggregate) getRecords(fieldName string, data map[string]*Counter, newUpdate dbm.DBM) []Counter { result := make([]Counter, 0) for thisUnit, incVal := range data { @@ -472,41 +497,41 @@ func (f *AnalyticsRecordAggregate) getRecords(fieldName string, data map[string] return result } -func (f *AnalyticsRecordAggregate) AsTimeUpdate() bson.M { - newUpdate := bson.M{ - "$set": bson.M{}, +func (f *AnalyticsRecordAggregate) AsTimeUpdate() dbm.DBM { + newUpdate := dbm.DBM{ + "$set": dbm.DBM{}, } // We need to create lists of API data so that we can aggregate across the list // in order to present top-20 style lists of APIs, Tokens etc. // apis := make([]Counter, 0) - newUpdate["$set"].(bson.M)["lists.apiid"] = f.getRecords("apiid", f.APIID, newUpdate) + newUpdate["$set"].(dbm.DBM)["lists.apiid"] = f.getRecords("apiid", f.APIID, newUpdate) - newUpdate["$set"].(bson.M)["lists.errors"] = f.getRecords("errors", f.Errors, newUpdate) + newUpdate["$set"].(dbm.DBM)["lists.errors"] = f.getRecords("errors", f.Errors, newUpdate) - newUpdate["$set"].(bson.M)["lists.versions"] = f.getRecords("versions", f.Versions, newUpdate) + newUpdate["$set"].(dbm.DBM)["lists.versions"] = f.getRecords("versions", f.Versions, newUpdate) - newUpdate["$set"].(bson.M)["lists.apikeys"] = f.getRecords("apikeys", f.APIKeys, newUpdate) + newUpdate["$set"].(dbm.DBM)["lists.apikeys"] = f.getRecords("apikeys", f.APIKeys, newUpdate) - newUpdate["$set"].(bson.M)["lists.oauthids"] = f.getRecords("oauthids", f.OauthIDs, newUpdate) + newUpdate["$set"].(dbm.DBM)["lists.oauthids"] = f.getRecords("oauthids", f.OauthIDs, newUpdate) - newUpdate["$set"].(bson.M)["lists.geo"] = f.getRecords("geo", f.Geo, newUpdate) + newUpdate["$set"].(dbm.DBM)["lists.geo"] = f.getRecords("geo", f.Geo, newUpdate) - newUpdate["$set"].(bson.M)["lists.tags"] = f.getRecords("tags", f.Tags, newUpdate) + newUpdate["$set"].(dbm.DBM)["lists.tags"] = f.getRecords("tags", f.Tags, newUpdate) - newUpdate["$set"].(bson.M)["lists.endpoints"] = f.getRecords("endpoints", f.Endpoints, newUpdate) + newUpdate["$set"].(dbm.DBM)["lists.endpoints"] = f.getRecords("endpoints", f.Endpoints, newUpdate) for thisUnit, incVal := range f.KeyEndpoint { parent := "lists.keyendpoints." + thisUnit - newUpdate["$set"].(bson.M)[parent] = f.getRecords("keyendpoints."+thisUnit, incVal, newUpdate) + newUpdate["$set"].(dbm.DBM)[parent] = f.getRecords("keyendpoints."+thisUnit, incVal, newUpdate) } for thisUnit, incVal := range f.OauthEndpoint { parent := "lists.oauthendpoints." 
+ thisUnit - newUpdate["$set"].(bson.M)[parent] = f.getRecords("oauthendpoints."+thisUnit, incVal, newUpdate) + newUpdate["$set"].(dbm.DBM)[parent] = f.getRecords("oauthendpoints."+thisUnit, incVal, newUpdate) } - newUpdate["$set"].(bson.M)["lists.apiendpoints"] = f.getRecords("apiendpoints", f.ApiEndpoint, newUpdate) + newUpdate["$set"].(dbm.DBM)["lists.apiendpoints"] = f.getRecords("apiendpoints", f.ApiEndpoint, newUpdate) var newTime float64 diff --git a/analytics/aggregate_test.go b/analytics/aggregate_test.go index 1086db310..d868a2ad4 100644 --- a/analytics/aggregate_test.go +++ b/analytics/aggregate_test.go @@ -6,6 +6,9 @@ import ( "testing" "time" + "github.com/TykTechnologies/storage/persistent/dbm" + "github.com/TykTechnologies/storage/persistent/id" + "github.com/google/go-cmp/cmp" "github.com/stretchr/testify/require" "github.com/stretchr/testify/assert" @@ -342,7 +345,7 @@ func TestAggregateGraphData_Dimension(t *testing.T) { } func TestAggregateData_SkipGraphRecords(t *testing.T) { - run := func(records []AnalyticsRecord, expectedAggregatedRecordCount int, expectedExistingOrgKeys []string, expectedNonExistingOrgKeys []string) func(t *testing.T) { + run := func(records []AnalyticsRecord, expectedAggregatedRecordCount int, expectedExistingOrgKeys, expectedNonExistingOrgKeys []string) func(t *testing.T) { return func(t *testing.T) { data := make([]interface{}, len(records)) for i := range records { @@ -438,3 +441,798 @@ func TestSetAggregateTimestamp(t *testing.T) { ts := setAggregateTimestamp("testing-setLastTimestamp", asTime, 7) assert.Equal(t, time.Date(asTime.Year(), asTime.Month(), asTime.Day(), asTime.Hour(), asTime.Minute(), 0, 0, asTime.Location()), ts) } + +func TestAggregatedRecord_TableName(t *testing.T) { + tcs := []struct { + testName string + givenRecord AnalyticsRecordAggregate + expectedTableName string + }{ + { + testName: "should return table name with org id", + givenRecord: AnalyticsRecordAggregate{ + OrgID: "123", + Mixed: true, + }, + expectedTableName: AgggregateMixedCollectionName, + }, + { + testName: "should return table name with org id", + givenRecord: AnalyticsRecordAggregate{ + OrgID: "123", + Mixed: false, + }, + expectedTableName: "z_tyk_analyticz_aggregate_123", + }, + } + + for _, tc := range tcs { + t.Run(tc.testName, func(t *testing.T) { + assert.Equal(t, tc.expectedTableName, tc.givenRecord.TableName()) + }) + } +} + +func TestAggregatedRecord_GetObjectID(t *testing.T) { + t.Run("should return the ID field", func(t *testing.T) { + id := id.NewObjectID() + record := AnalyticsRecordAggregate{ + id: id, + } + assert.Equal(t, id, record.GetObjectID()) + }) +} + +func TestAggregatedRecord_SetObjectID(t *testing.T) { + t.Run("should set the ID field", func(t *testing.T) { + id := id.NewObjectID() + record := AnalyticsRecordAggregate{} + record.SetObjectID(id) + assert.Equal(t, id, record.id) + }) +} + +func TestSQLAnalyticsRecordAggregate_TableName(t *testing.T) { + t.Run("should return the SQL table name", func(t *testing.T) { + record := SQLAnalyticsRecordAggregate{} + assert.Equal(t, AggregateSQLTable, record.TableName()) + }) +} + +func TestAnalyticsRecordAggregate_generateBSONFromProperty(t *testing.T) { + currentTime := time.Date(2023, 0o4, 0o4, 10, 0, 0, 0, time.UTC) + + tcs := []struct { + givenCounter *Counter + expected dbm.DBM + + testName string + givenName string + givenValue string + }{ + { + testName: "success counter", + givenCounter: &Counter{ + Hits: 2, + TotalRequestTime: 100, + Success: 1, + ErrorTotal: 0, + RequestTime: 100, + 
TotalUpstreamLatency: 20, + MaxLatency: 100, + MaxUpstreamLatency: 110, + MinUpstreamLatency: 10, + MinLatency: 20, + TotalLatency: 150, + Identifier: "", + HumanIdentifier: "", + ErrorMap: map[string]int{"200": 1}, + LastTime: currentTime, + }, + givenName: "test", + givenValue: "total", + expected: dbm.DBM{ + "$set": dbm.DBM{ + "test.total.bytesin": int64(0), + "test.total.bytesout": int64(0), + "test.total.humanidentifier": "", + "test.total.identifier": "", + "test.total.lasttime": currentTime, + "test.total.openconnections": int64(0), + "test.total.closedconnections": int64(0), + }, + "$inc": dbm.DBM{ + "test.total.errormap.200": int(1), + "test.total.errortotal": int(0), + "test.total.hits": int(2), + "test.total.success": int(1), + "test.total.totallatency": int64(150), + "test.total.totalrequesttime": float64(100), + "test.total.totalupstreamlatency": int64(20), + }, + "$max": dbm.DBM{ + "test.total.maxlatency": int64(100), + "test.total.maxupstreamlatency": int64(110), + }, + "$min": dbm.DBM{ + "test.total.minlatency": int64(20), + "test.total.minupstreamlatency": int64(10), + }, + }, + }, + { + testName: "error counter", + givenCounter: &Counter{ + Hits: 2, + TotalRequestTime: 100, + Success: 0, + ErrorTotal: 2, + RequestTime: 100, + TotalUpstreamLatency: 20, + MaxLatency: 100, + MaxUpstreamLatency: 110, + MinUpstreamLatency: 10, + MinLatency: 20, + TotalLatency: 150, + Identifier: "test", + HumanIdentifier: "", + ErrorMap: map[string]int{"500": 2}, + LastTime: currentTime, + }, + givenName: "test", + givenValue: "total", + expected: dbm.DBM{ + "$set": dbm.DBM{ + "test.total.bytesin": int64(0), + "test.total.bytesout": int64(0), + "test.total.humanidentifier": "", + "test.total.identifier": "test", + "test.total.lasttime": currentTime, + "test.total.openconnections": int64(0), + "test.total.closedconnections": int64(0), + }, + "$inc": dbm.DBM{ + "test.total.errormap.500": int(2), + "test.total.errortotal": int(2), + "test.total.hits": int(2), + "test.total.success": int(0), + "test.total.totallatency": int64(150), + "test.total.totalrequesttime": float64(100), + "test.total.totalupstreamlatency": int64(20), + }, + "$max": dbm.DBM{ + "test.total.maxlatency": int64(100), + "test.total.maxupstreamlatency": int64(110), + }, + "$min": dbm.DBM{}, // we don't update mins on case of full error counter + }, + }, + + { + testName: "without name", + givenCounter: &Counter{ + Hits: 2, + TotalRequestTime: 100, + Success: 0, + ErrorTotal: 2, + RequestTime: 100, + TotalUpstreamLatency: 20, + MaxLatency: 100, + MaxUpstreamLatency: 110, + MinUpstreamLatency: 10, + MinLatency: 20, + TotalLatency: 150, + Identifier: "test", + HumanIdentifier: "", + ErrorMap: map[string]int{"500": 2}, + LastTime: currentTime, + }, + givenName: "", + givenValue: "noname", + expected: dbm.DBM{ + "$set": dbm.DBM{ + "noname.bytesin": int64(0), + "noname.bytesout": int64(0), + "noname.humanidentifier": "", + "noname.identifier": "test", + "noname.lasttime": currentTime, + "noname.openconnections": int64(0), + "noname.closedconnections": int64(0), + }, + "$inc": dbm.DBM{ + "noname.errormap.500": int(2), + "noname.errortotal": int(2), + "noname.hits": int(2), + "noname.success": int(0), + "noname.totallatency": int64(150), + "noname.totalrequesttime": float64(100), + "noname.totalupstreamlatency": int64(20), + }, + "$max": dbm.DBM{ + "noname.maxlatency": int64(100), + "noname.maxupstreamlatency": int64(110), + }, + "$min": dbm.DBM{}, // we don't update mins on case of full error counter + }, + }, + } + + for _, tc := range 
tcs { + t.Run(tc.testName, func(t *testing.T) { + aggregate := &AnalyticsRecordAggregate{} + + baseDBM := dbm.DBM{ + "$set": dbm.DBM{}, + "$inc": dbm.DBM{}, + "$max": dbm.DBM{}, + "$min": dbm.DBM{}, + } + + actual := aggregate.generateBSONFromProperty(tc.givenName, tc.givenValue, tc.givenCounter, baseDBM) + if !cmp.Equal(tc.expected, actual) { + t.Errorf("AggregateUptimeData() mismatch (-want +got):\n%s", cmp.Diff(tc.expected, actual)) + } + }) + } +} + +func TestAnalyticsRecordAggregate_generateSetterForTime(t *testing.T) { + tcs := []struct { + expected dbm.DBM + + testName string + givenName string + givenValue string + givenRequestTime float64 + }{ + { + testName: "with name", + givenName: "test", + givenValue: "total", + givenRequestTime: 100, + expected: dbm.DBM{ + "$set": dbm.DBM{ + "test.total.requesttime": float64(100), + }, + }, + }, + { + testName: "without name", + givenName: "", + givenValue: "noname", + givenRequestTime: 130, + expected: dbm.DBM{ + "$set": dbm.DBM{ + "noname.requesttime": float64(130), + }, + }, + }, + } + + for _, tc := range tcs { + t.Run(tc.testName, func(t *testing.T) { + aggregate := &AnalyticsRecordAggregate{} + + baseDBM := dbm.DBM{ + "$set": dbm.DBM{}, + } + + actual := aggregate.generateSetterForTime(tc.givenName, tc.givenValue, tc.givenRequestTime, baseDBM) + if !cmp.Equal(tc.expected, actual) { + t.Errorf("AggregateUptimeData() mismatch (-want +got):\n%s", cmp.Diff(tc.expected, actual)) + } + }) + } +} + +func TestAnalyticsRecordAggregate_latencySetter(t *testing.T) { + tcs := []struct { + givenCounter *Counter + expected dbm.DBM + + testName string + givenName string + givenValue string + }{ + { + testName: "with name and hits", + givenCounter: &Counter{ + Hits: 2, + TotalLatency: 100, + TotalUpstreamLatency: 200, + }, + givenName: "test", + givenValue: "total", + expected: dbm.DBM{ + "$set": dbm.DBM{ + "test.total.latency": float64(50), + "test.total.upstreamlatency": float64(100), + }, + }, + }, + { + testName: "without name and with hits", + givenCounter: &Counter{ + Hits: 2, + TotalLatency: 200, + TotalUpstreamLatency: 400, + }, + givenName: "", + givenValue: "noname", + expected: dbm.DBM{ + "$set": dbm.DBM{ + "noname.latency": float64(100), + "noname.upstreamlatency": float64(200), + }, + }, + }, + + { + testName: "without name and without hits", + givenCounter: &Counter{ + Hits: 0, + TotalLatency: 200, + TotalUpstreamLatency: 400, + }, + givenName: "", + givenValue: "noname", + expected: dbm.DBM{ + "$set": dbm.DBM{ + "noname.latency": float64(0), + "noname.upstreamlatency": float64(0), + }, + }, + }, + } + + for _, tc := range tcs { + t.Run(tc.testName, func(t *testing.T) { + aggregate := &AnalyticsRecordAggregate{} + + baseDBM := dbm.DBM{ + "$set": dbm.DBM{}, + } + + actual := aggregate.latencySetter(tc.givenName, tc.givenValue, baseDBM, tc.givenCounter) + if !cmp.Equal(tc.expected, actual) { + t.Errorf("AggregateUptimeData() mismatch (-want +got):\n%s", cmp.Diff(tc.expected, actual)) + } + }) + } +} + +func TestAnalyticsRecordAggregate_AsChange(t *testing.T) { + currentTime := time.Date(2023, 0o4, 0o4, 10, 0, 0, 0, time.UTC) + + tcs := []struct { + given *AnalyticsRecordAggregate + expected dbm.DBM + testName string + }{ + { + testName: "aggregate with versions - no errors", + given: &AnalyticsRecordAggregate{ + OrgID: "testorg", + TimeID: struct { + Year int + Month int + Day int + Hour int + }{ + Year: currentTime.Year(), + Month: int(currentTime.Month()), + Day: currentTime.Day(), + Hour: currentTime.Hour(), + }, + Versions: 
map[string]*Counter{ + "v1": { + Hits: 1, + Success: 1, + TotalLatency: 100, + TotalUpstreamLatency: 200, + TotalRequestTime: 200, + MinUpstreamLatency: 20, + MinLatency: 10, + MaxUpstreamLatency: 100, + MaxLatency: 100, + LastTime: currentTime, + }, + "v2": { + Hits: 1, + Success: 1, + TotalLatency: 100, + TotalUpstreamLatency: 200, + TotalRequestTime: 200, + MinUpstreamLatency: 20, + MinLatency: 10, + MaxUpstreamLatency: 100, + MaxLatency: 100, + LastTime: currentTime, + }, + }, + Total: Counter{ + Hits: 2, + Success: 2, + TotalLatency: 200, + TotalRequestTime: 200, + MaxUpstreamLatency: 100, + MaxLatency: 100, + MinUpstreamLatency: 20, + MinLatency: 10, + TotalUpstreamLatency: 400, + LastTime: currentTime, + }, + Errors: map[string]*Counter{}, + LastTime: currentTime, + TimeStamp: currentTime, + ExpireAt: currentTime, + }, + expected: dbm.DBM{ + "$inc": dbm.DBM{ + "total.hits": int(2), + "total.success": int(2), + "total.errortotal": int(0), + "total.totallatency": int64(200), + "total.totalupstreamlatency": int64(400), + "total.totalrequesttime": float64(200), + "versions.v1.errortotal": int(0), + "versions.v1.hits": int(1), + "versions.v1.success": int(1), + "versions.v1.totallatency": int64(100), + "versions.v1.totalrequesttime": float64(200), + "versions.v1.totalupstreamlatency": int64(200), + "versions.v2.errortotal": int(0), + "versions.v2.hits": int(1), + "versions.v2.success": int(1), + "versions.v2.totallatency": int64(100), + "versions.v2.totalrequesttime": float64(200), + "versions.v2.totalupstreamlatency": int64(200), + }, + "$min": dbm.DBM{ + "total.minlatency": int64(10), + "total.minupstreamlatency": int64(20), + "versions.v1.minlatency": int64(10), + "versions.v1.minupstreamlatency": int64(20), + "versions.v2.minlatency": int64(10), + "versions.v2.minupstreamlatency": int64(20), + }, + "$max": dbm.DBM{ + "total.maxlatency": int64(100), + "total.maxupstreamlatency": int64(100), + "versions.v1.maxlatency": int64(100), + "versions.v1.maxupstreamlatency": int64(100), + "versions.v2.maxlatency": int64(100), + "versions.v2.maxupstreamlatency": int64(100), + }, + "$set": dbm.DBM{ + "expireAt": currentTime, + "lasttime": currentTime, + "timestamp": currentTime, + "total.lasttime": currentTime, + "timeid.day": currentTime.Day(), + "timeid.hour": currentTime.Hour(), + "timeid.month": currentTime.Month(), + "timeid.year": currentTime.Year(), + "total.bytesin": int64(0), + "total.bytesout": int64(0), + "total.closedconnections": int64(0), + "total.openconnections": int64(0), + "total.humanidentifier": "", + "total.identifier": "", + "versions.v1.bytesin": int64(0), + "versions.v1.bytesout": int64(0), + "versions.v1.lasttime": currentTime, + "versions.v1.humanidentifier": "", + "versions.v1.identifier": "", + "versions.v1.closedconnections": int64(0), + "versions.v1.openconnections": int64(0), + "versions.v2.bytesin": int64(0), + "versions.v2.bytesout": int64(0), + "versions.v2.lasttime": currentTime, + "versions.v2.humanidentifier": "", + "versions.v2.identifier": "", + "versions.v2.closedconnections": int64(0), + "versions.v2.openconnections": int64(0), + }, + }, + }, + { + testName: "aggregate with apiid - with errors", + given: &AnalyticsRecordAggregate{ + OrgID: "testorg", + TimeID: struct { + Year int + Month int + Day int + Hour int + }{ + Year: currentTime.Year(), + Month: int(currentTime.Month()), + Day: currentTime.Day(), + Hour: currentTime.Hour(), + }, + APIID: map[string]*Counter{ + "api1": { + Hits: 3, + Success: 0, + ErrorTotal: 3, + TotalLatency: 100, + 
TotalUpstreamLatency: 200, + TotalRequestTime: 200, + MinUpstreamLatency: 20, + MinLatency: 10, + MaxUpstreamLatency: 100, + MaxLatency: 100, + ErrorMap: map[string]int{"404": 1, "500": 2}, + ErrorList: []ErrorData{{Code: "404", Count: 1}, {Code: "500", Count: 2}}, + LastTime: currentTime, + }, + "api2": { + Hits: 1, + Success: 1, + TotalLatency: 100, + TotalUpstreamLatency: 200, + TotalRequestTime: 200, + MinUpstreamLatency: 20, + MinLatency: 10, + MaxUpstreamLatency: 100, + MaxLatency: 100, + LastTime: currentTime, + }, + }, + Total: Counter{ + Hits: 4, + Success: 1, + ErrorTotal: 3, + TotalLatency: 200, + TotalRequestTime: 200, + MaxUpstreamLatency: 100, + MaxLatency: 100, + MinUpstreamLatency: 20, + MinLatency: 10, + TotalUpstreamLatency: 400, + ErrorMap: map[string]int{"404": 1, "500": 2}, + ErrorList: []ErrorData{{Code: "404", Count: 1}, {Code: "500", Count: 2}}, + LastTime: currentTime, + }, + Errors: map[string]*Counter{}, + LastTime: currentTime, + TimeStamp: currentTime, + ExpireAt: currentTime, + }, + expected: dbm.DBM{ + "$inc": dbm.DBM{ + "total.hits": int(4), + "total.success": int(1), + "total.errortotal": int(3), + "total.totallatency": int64(200), + "total.totalupstreamlatency": int64(400), + "total.totalrequesttime": float64(200), + "total.errormap.404": int(1), + "total.errormap.500": int(2), + "apiid.api1.hits": int(3), + "apiid.api1.success": int(0), + "apiid.api1.errortotal": int(3), + "apiid.api1.totallatency": int64(100), + "apiid.api1.totalupstreamlatency": int64(200), + "apiid.api1.totalrequesttime": float64(200), + "apiid.api1.errormap.404": int(1), + "apiid.api1.errormap.500": int(2), + "apiid.api2.hits": int(1), + "apiid.api2.success": int(1), + "apiid.api2.totallatency": int64(100), + "apiid.api2.totalupstreamlatency": int64(200), + "apiid.api2.totalrequesttime": float64(200), + "apiid.api2.errortotal": int(0), + }, + "$min": dbm.DBM{ + "total.minlatency": int64(10), + "total.minupstreamlatency": int64(20), + "apiid.api2.minlatency": int64(10), + "apiid.api2.minupstreamlatency": int64(20), + }, + "$max": dbm.DBM{ + "total.maxlatency": int64(100), + "total.maxupstreamlatency": int64(100), + "apiid.api1.maxlatency": int64(100), + "apiid.api1.maxupstreamlatency": int64(100), + "apiid.api2.maxlatency": int64(100), + "apiid.api2.maxupstreamlatency": int64(100), + }, + "$set": dbm.DBM{ + "expireAt": currentTime, + "lasttime": currentTime, + "timestamp": currentTime, + "total.lasttime": currentTime, + "timeid.day": currentTime.Day(), + "timeid.hour": currentTime.Hour(), + "timeid.month": currentTime.Month(), + "timeid.year": currentTime.Year(), + "total.bytesin": int64(0), + "total.bytesout": int64(0), + "total.closedconnections": int64(0), + "total.openconnections": int64(0), + "total.humanidentifier": "", + "total.identifier": "", + "apiid.api1.bytesin": int64(0), + "apiid.api1.bytesout": int64(0), + "apiid.api1.closedconnections": int64(0), + "apiid.api1.openconnections": int64(0), + "apiid.api1.humanidentifier": "", + "apiid.api1.identifier": "", + "apiid.api1.lasttime": currentTime, + "apiid.api2.bytesin": int64(0), + "apiid.api2.bytesout": int64(0), + "apiid.api2.closedconnections": int64(0), + "apiid.api2.openconnections": int64(0), + "apiid.api2.humanidentifier": "", + "apiid.api2.identifier": "", + "apiid.api2.lasttime": currentTime, + }, + }, + }, + } + + for _, tc := range tcs { + t.Run(tc.testName, func(t *testing.T) { + actual := tc.given.AsChange() + if !cmp.Equal(tc.expected, actual) { + t.Errorf("AggregateUptimeData() mismatch (-want +got):\n%s", 
cmp.Diff(tc.expected, actual)) + } + }) + } +} + +func TestAnalyticsRecordAggregate_AsTimeUpdate(t *testing.T) { + currentTime := time.Date(2023, 0o4, 0o4, 10, 0, 0, 0, time.UTC) + + tcs := []struct { + given *AnalyticsRecordAggregate + expected dbm.DBM + testName string + }{ + { + testName: "oauthendpoint+keyendpoint+apiendpoint+tota", + given: &AnalyticsRecordAggregate{ + OrgID: "testorg", + KeyEndpoint: map[string]map[string]*Counter{ + "apikey1": { + "/get": { + Hits: 3, + Success: 0, + ErrorTotal: 3, + TotalLatency: 300, + TotalUpstreamLatency: 600, + LastTime: currentTime, + ErrorMap: map[string]int{"404": 1, "500": 2}, + }, + }, + }, + OauthEndpoint: map[string]map[string]*Counter{ + "oauthid1": { + "/get": { + Hits: 3, + Success: 0, + ErrorTotal: 3, + TotalLatency: 300, + TotalUpstreamLatency: 600, + LastTime: currentTime, + ErrorMap: map[string]int{"404": 1, "500": 2}, + }, + }, + }, + ApiEndpoint: map[string]*Counter{ + "/get": { + Hits: 3, + Success: 0, + ErrorTotal: 3, + TotalLatency: 300, + TotalUpstreamLatency: 600, + LastTime: currentTime, + ErrorMap: map[string]int{"404": 1, "500": 2}, + }, + }, + + Total: Counter{ + Hits: 3, + Success: 0, + ErrorTotal: 3, + TotalLatency: 300, + TotalUpstreamLatency: 600, + TotalRequestTime: 300, + ErrorMap: map[string]int{"404": 1, "500": 2}, + BytesIn: 0, + BytesOut: 0, + OpenConnections: 0, + ClosedConnections: 0, + HumanIdentifier: "", + Identifier: "", + LastTime: currentTime, + MinLatency: 10, + MaxLatency: 100, + MinUpstreamLatency: 20, + MaxUpstreamLatency: 100, + }, + }, + expected: dbm.DBM{ + "$set": dbm.DBM{ + "apiendpoints./get.errorlist": []ErrorData{{Code: "404", Count: 1}, {Code: "500", Count: 2}}, + "apiendpoints./get.latency": float64(100), + "apiendpoints./get.requesttime": float64(0), + "apiendpoints./get.upstreamlatency": float64(200), + "keyendpoints.apikey1./get.errorlist": []ErrorData{{Code: "404", Count: 1}, {Code: "500", Count: 2}}, + "keyendpoints.apikey1./get.latency": float64(100), + "keyendpoints.apikey1./get.requesttime": float64(0), + "keyendpoints.apikey1./get.upstreamlatency": float64(200), + "lists.apiendpoints": []Counter{ + { + Hits: 3, + Success: 0, + ErrorTotal: 3, + TotalLatency: 300, + TotalUpstreamLatency: 600, + UpstreamLatency: 200, + Latency: 100, + LastTime: currentTime, + ErrorMap: map[string]int{"404": 1, "500": 2}, + ErrorList: []ErrorData{{Code: "404", Count: 1}, {Code: "500", Count: 2}}, + }, + }, + "lists.apiid": []Counter{}, + "lists.apikeys": []Counter{}, + "lists.endpoints": []Counter{}, + "lists.errors": []Counter{}, + "lists.geo": []Counter{}, + "lists.oauthids": []Counter{}, + "lists.tags": []Counter{}, + "lists.versions": []Counter{}, + "lists.keyendpoints.apikey1": []Counter{ + { + Hits: 3, + Success: 0, + ErrorTotal: 3, + TotalLatency: 300, + TotalUpstreamLatency: 600, + UpstreamLatency: 200, + Latency: 100, + LastTime: currentTime, + ErrorMap: map[string]int{"404": 1, "500": 2}, + ErrorList: []ErrorData{{Code: "404", Count: 1}, {Code: "500", Count: 2}}, + }, + }, + "lists.oauthendpoints.oauthid1": []Counter{ + { + Hits: 3, + Success: 0, + ErrorTotal: 3, + TotalLatency: 300, + TotalUpstreamLatency: 600, + UpstreamLatency: 200, + Latency: 100, + LastTime: currentTime, + ErrorMap: map[string]int{"404": 1, "500": 2}, + ErrorList: []ErrorData{{Code: "404", Count: 1}, {Code: "500", Count: 2}}, + }, + }, + "oauthendpoints.oauthid1./get.errorlist": []ErrorData{{Code: "404", Count: 1}, {Code: "500", Count: 2}}, + "oauthendpoints.oauthid1./get.latency": float64(100), + 
"oauthendpoints.oauthid1./get.requesttime": float64(0), + "oauthendpoints.oauthid1./get.upstreamlatency": float64(200), + "total.errorlist": []ErrorData{{Code: "404", Count: 1}, {Code: "500", Count: 2}}, + "total.latency": float64(100), + "total.requesttime": float64(100), + "total.upstreamlatency": float64(200), + }, + }, + }, + } + + for _, tc := range tcs { + t.Run(tc.testName, func(t *testing.T) { + actual := tc.given.AsTimeUpdate() + if !cmp.Equal(tc.expected, actual) { + t.Errorf("AggregateUptimeData() mismatch (-want +got):\n%s", cmp.Diff(tc.expected, actual)) + } + }) + } +} diff --git a/analytics/analytics.go b/analytics/analytics.go index 60acc6b28..023fade4d 100644 --- a/analytics/analytics.go +++ b/analytics/analytics.go @@ -14,6 +14,7 @@ import ( "github.com/oschwald/maxminddb-golang" "google.golang.org/protobuf/types/known/timestamppb" + "github.com/TykTechnologies/storage/persistent/id" analyticsproto "github.com/TykTechnologies/tyk-pump/analytics/proto" "github.com/TykTechnologies/tyk-pump/logger" @@ -41,6 +42,7 @@ const SQLTable = "tyk_analytics" // AnalyticsRecord encodes the details of a request type AnalyticsRecord struct { + id id.ObjectId `bson:"_id" gorm:"-:all"` Method string `json:"method" gorm:"column:method"` Host string `json:"host" gorm:"column:host"` Path string `json:"path" gorm:"column:path"` @@ -71,12 +73,25 @@ type AnalyticsRecord struct { TrackPath bool `json:"track_path" gorm:"column:trackpath"` ExpireAt time.Time `bson:"expireAt" json:"expireAt"` ApiSchema string `json:"api_schema" bson:"-" gorm:"-:all"` + + CollectionName string `json:"-" bson:"-" gorm:"-:all"` } func (a *AnalyticsRecord) TableName() string { + if a.CollectionName != "" { + return a.CollectionName + } return SQLTable } +func (a *AnalyticsRecord) GetObjectID() id.ObjectId { + return a.id +} + +func (a *AnalyticsRecord) SetObjectID(id id.ObjectId) { + a.id = id +} + type GraphError struct { Message string `json:"message"` Path []interface{} `json:"path"` @@ -157,7 +172,7 @@ func (a *AnalyticsRecord) GetFieldNames() []string { fields = append(fields, a.Geo.GetFieldNames()...) fields = append(fields, a.Network.GetFieldNames()...) fields = append(fields, a.Latency.GetFieldNames()...) 
- return append(fields, "Tags", "Alias", "TrackPath", "ExpireAt") + return append(fields, "Tags", "Alias", "TrackPath", "ExpireAt", "ApiSchema") } func (n *NetworkStats) GetLineValues() []string { @@ -222,6 +237,8 @@ func (a *AnalyticsRecord) GetLineValues() []string { fields = append(fields, a.Alias) fields = append(fields, strconv.FormatBool(a.TrackPath)) fields = append(fields, a.ExpireAt.String()) + fields = append(fields, a.ApiSchema) + return fields } diff --git a/analytics/analytics_test.go b/analytics/analytics_test.go index 86b49a79a..27096c70e 100644 --- a/analytics/analytics_test.go +++ b/analytics/analytics_test.go @@ -1,8 +1,12 @@ package analytics import ( + "fmt" "testing" + "time" + "github.com/TykTechnologies/storage/persistent/id" + "github.com/fatih/structs" "github.com/stretchr/testify/assert" ) @@ -86,3 +90,87 @@ func TestAnalyticsRecord_RemoveIgnoredFields(t *testing.T) { }) } } + +func TestAnalyticsRecord_Base(t *testing.T) { + rec := &AnalyticsRecord{} + + assert.Equal(t, SQLTable, rec.TableName()) + + newID := id.NewObjectID() + rec.SetObjectID(newID) + assert.Equal(t, newID, rec.GetObjectID()) +} + +func TestAnalyticsRecord_GetFieldNames(t *testing.T) { + rec := &AnalyticsRecord{} + + fields := rec.GetFieldNames() + + assert.Equal(t, 39, len(fields)) + + expectedFields := []string{ + "Method", + "Host", + "Path", + "RawPath", + "ContentLength", + "UserAgent", + "Day", + "Month", + "Year", + "Hour", + "ResponseCode", + "APIKey", + "TimeStamp", + "APIVersion", + "APIName", + "APIID", + "OrgID", + "OauthID", + "RequestTime", + "RawRequest", + "RawResponse", + "IPAddress", + "Tags", "Alias", "TrackPath", "ExpireAt", "ApiSchema", + "GeoData.Country.ISOCode", + "GeoData.City.GeoNameID", + "GeoData.City.Names", + "GeoData.Location.Latitude", + "GeoData.Location.Longitude", + "GeoData.Location.TimeZone", + "Latency.Total", + "Latency.Upstream", + "NetworkStats.OpenConnections", + "NetworkStats.ClosedConnection", + "NetworkStats.BytesIn", + "NetworkStats.BytesOut", + } + + for _, expected := range expectedFields { + assert.Contains(t, fields, expected) + } +} + +func TestAnalyticsRecord_GetLineValues(t *testing.T) { + rec := &AnalyticsRecord{ + APIID: "api123", + OrgID: "org123", + APIKey: "key123", + Path: "/path", + RawPath: "/rawpath", + APIVersion: "v1", + APIName: "api_name", + TimeStamp: time.Now(), + ApiSchema: "http", + } + + fields := rec.GetLineValues() + + assert.Equal(t, 39, len(fields)) + + for _, field := range structs.Fields(rec) { + if field.IsExported() && !field.IsZero() { + assert.Contains(t, fields, fmt.Sprint(field.Value())) + } + } +} diff --git a/analytics/graph_record.go b/analytics/graph_record.go index 0a87e9259..911301e52 100644 --- a/analytics/graph_record.go +++ b/analytics/graph_record.go @@ -17,6 +17,7 @@ import ( "github.com/TykTechnologies/graphql-go-tools/pkg/astparser" gql "github.com/TykTechnologies/graphql-go-tools/pkg/graphql" "github.com/TykTechnologies/graphql-go-tools/pkg/operationreport" + "github.com/TykTechnologies/storage/persistent/id" ) type GraphRecord struct { @@ -31,6 +32,20 @@ type GraphRecord struct { HasErrors bool `gorm:"has_errors"` } +func (g *GraphRecord) TableName() string { + return g.AnalyticsRecord.TableName() +} + +// GetObjectID is a dummy function to satisfy the interface +func (*GraphRecord) GetObjectID() id.ObjectId { + return "" +} + +// SetObjectID is a dummy function to satisfy the interface +func (*GraphRecord) SetObjectID(id.ObjectId) { + // empty +} + // parseRequest reads the raw encoded request and 
schema, extracting the type information // operation information and root field operations // if an error is encountered it simply breaks the operation regardless of how far along it is. diff --git a/analytics/graph_record_test.go b/analytics/graph_record_test.go index 268d7ab08..8b851226e 100644 --- a/analytics/graph_record_test.go +++ b/analytics/graph_record_test.go @@ -425,7 +425,7 @@ func TestAnalyticsRecord_ToGraphRecord(t *testing.T) { expected := testCase.expected() expected.AnalyticsRecord = a gotten := a.ToGraphRecord() - if diff := cmp.Diff(expected, gotten, cmpopts.IgnoreFields(AnalyticsRecord{}, "RawRequest", "RawResponse")); diff != "" { + if diff := cmp.Diff(expected, gotten, cmpopts.IgnoreFields(AnalyticsRecord{}, "RawRequest", "RawResponse"), cmpopts.IgnoreUnexported(AnalyticsRecord{})); diff != "" { t.Fatal(diff) } }) diff --git a/analytics/uptime_data.go b/analytics/uptime_data.go index 9db8f0f15..69734847b 100644 --- a/analytics/uptime_data.go +++ b/analytics/uptime_data.go @@ -6,26 +6,28 @@ import ( "gorm.io/gorm" + "github.com/TykTechnologies/storage/persistent/id" "github.com/fatih/structs" ) const UptimeSQLTable = "tyk_uptime_analytics" type UptimeReportData struct { - URL string `json:"url"` - RequestTime int64 `json:"request_time"` - ResponseCode int `json:"response_code"` - TCPError bool `json:"tcp_error"` - ServerError bool `json:"server_error"` - Day int `json:"day"` - Month time.Month `json:"month"` - Year int `json:"year"` - Hour int `json:"hour"` - Minute int `json:"minute"` - TimeStamp time.Time `json:"timestamp"` - ExpireAt time.Time `bson:"expireAt"` - APIID string `json:"api_id"` - OrgID string `json:"org_id"` + ID id.ObjectId `json:"_id" bson:"_id" gorm:"-:all"` + URL string `json:"url"` + RequestTime int64 `json:"request_time"` + ResponseCode int `json:"response_code"` + TCPError bool `json:"tcp_error"` + ServerError bool `json:"server_error"` + Day int `json:"day"` + Month time.Month `json:"month"` + Year int `json:"year"` + Hour int `json:"hour"` + Minute int `json:"minute"` + TimeStamp time.Time `json:"timestamp"` + ExpireAt time.Time `bson:"expireAt"` + APIID string `json:"api_id"` + OrgID string `json:"org_id"` } type UptimeReportAggregateSQL struct { @@ -45,7 +47,19 @@ func (a *UptimeReportAggregateSQL) TableName() string { return UptimeSQLTable } -func OnConflictUptimeAssignments(tableName string, tempTable string) map[string]interface{} { +func (a *UptimeReportData) GetObjectID() id.ObjectId { + return a.ID +} + +func (a *UptimeReportData) SetObjectID(id id.ObjectId) { + a.ID = id +} + +func (a *UptimeReportData) TableName() string { + return UptimeSQLTable +} + +func OnConflictUptimeAssignments(tableName, tempTable string) map[string]interface{} { assignments := make(map[string]interface{}) f := UptimeReportAggregateSQL{} baseFields := structs.Fields(f.Code) @@ -172,6 +186,7 @@ func AggregateUptimeData(data []UptimeReportData) map[string]UptimeReportAggrega TotalRequestTime: float64(thisV.RequestTime), LastTime: thisV.TimeStamp, ErrorMap: make(map[string]int), + ErrorList: []ErrorData{}, } thisAggregate.Total.Hits++ thisAggregate.Total.TotalRequestTime += float64(thisV.RequestTime) diff --git a/analytics/uptime_data_test.go b/analytics/uptime_data_test.go new file mode 100644 index 000000000..39e7f4f42 --- /dev/null +++ b/analytics/uptime_data_test.go @@ -0,0 +1,377 @@ +package analytics + +import ( + "testing" + "time" + + "github.com/TykTechnologies/storage/persistent/id" + "github.com/google/go-cmp/cmp" + "gorm.io/gorm/clause" + + 
"github.com/stretchr/testify/assert" +) + +func TestUptimeReportData_GetObjectID(t *testing.T) { + t.Run("should return the ID field", func(t *testing.T) { + id := id.NewObjectID() + record := UptimeReportData{ + ID: id, + } + assert.Equal(t, id, record.GetObjectID()) + }) +} + +func TestUptimeReportData_SetObjectID(t *testing.T) { + t.Run("should set the ID field", func(t *testing.T) { + id := id.NewObjectID() + record := UptimeReportData{} + record.SetObjectID(id) + assert.Equal(t, id, record.ID) + }) +} + +func TestUptimeReportData_TableName(t *testing.T) { + t.Run("should return the uptime SQL table name", func(t *testing.T) { + record := UptimeReportData{} + assert.Equal(t, UptimeSQLTable, record.TableName()) + }) +} + +func TestUptimeReportAggregateSQL_TableName(t *testing.T) { + t.Run("should return the uptime aggregate SQL table name", func(t *testing.T) { + record := UptimeReportAggregateSQL{} + assert.Equal(t, UptimeSQLTable, record.TableName()) + }) +} + +func TestUptimeReportAggregate_New(t *testing.T) { + t.Run("should return a new UptimeReportAggregate", func(t *testing.T) { + expected := UptimeReportAggregate{} + expected.URL = make(map[string]*Counter) + expected.Errors = make(map[string]*Counter) + + actual := UptimeReportAggregate{}.New() + + assert.Equal(t, expected, actual) + }) +} + +func TestUptimeReportAggregate_Dimensions(t *testing.T) { + tcs := []struct { + testName string + input UptimeReportAggregate + expected []Dimension + }{ + { + testName: "should return the dimensions", + input: UptimeReportAggregate{ + URL: map[string]*Counter{ + "foo": {}, + }, + Errors: map[string]*Counter{ + "bar": {}, + }, + Total: Counter{}, + }, + expected: []Dimension{ + { + Name: "url", + Value: "foo", + Counter: &Counter{}, + }, + { + Name: "errors", + Value: "bar", + Counter: &Counter{}, + }, + { + Name: "", + Value: "total", + Counter: &Counter{}, + }, + }, + }, + { + testName: "no extra dimensions", + input: UptimeReportAggregate{}, + expected: []Dimension{ + { + Name: "", + Value: "total", + Counter: &Counter{}, + }, + }, + }, + } + + for _, tc := range tcs { + t.Run(tc.testName, func(t *testing.T) { + actual := tc.input.Dimensions() + + assert.Equal(t, tc.expected, actual) + }) + } +} + +func TestAggregateUptimeData(t *testing.T) { + currentTime := time.Date(2023, 0o4, 0o4, 10, 0, 0, 0, time.UTC) + + tcs := []struct { + testName string + expected map[string]UptimeReportAggregate + input []UptimeReportData + }{ + { + testName: "empty input", + input: []UptimeReportData{}, + expected: map[string]UptimeReportAggregate{}, + }, + { + testName: "single record", + input: []UptimeReportData{ + { + OrgID: "org123", + APIID: "api123", + URL: "/get", + ResponseCode: 200, + RequestTime: 100, + TimeStamp: currentTime, + ExpireAt: currentTime, + }, + }, + expected: map[string]UptimeReportAggregate{ + "org123": { + OrgID: "org123", + ExpireAt: currentTime, + LastTime: currentTime, + TimeStamp: currentTime, + TimeID: struct { + Year int + Month int + Day int + Hour int + }{ + Year: currentTime.Year(), + Month: int(currentTime.Month()), + Day: currentTime.Day(), + Hour: currentTime.Hour(), + }, + URL: map[string]*Counter{ + "/get": { + Hits: 1, + TotalRequestTime: 100, + Success: 1, + ErrorTotal: 0, + RequestTime: 100, + Identifier: "/get", + HumanIdentifier: "", + LastTime: currentTime, + ErrorMap: map[string]int{"200": 1}, + ErrorList: []ErrorData{}, + }, + }, + Errors: map[string]*Counter{}, + Total: Counter{ + Hits: 1, + TotalRequestTime: 100, + Success: 1, + ErrorTotal: 0, + RequestTime: 
100, + Identifier: "", + HumanIdentifier: "", + ErrorMap: map[string]int{"200": 1}, + }, + }, + }, + }, + { + testName: "single record - response code -1", + input: []UptimeReportData{ + { + OrgID: "org123", + APIID: "api123", + URL: "/get", + ResponseCode: -1, + RequestTime: 100, + TimeStamp: currentTime, + ExpireAt: currentTime, + }, + }, + expected: map[string]UptimeReportAggregate{ + "org123": { + OrgID: "org123", + ExpireAt: currentTime, + LastTime: currentTime, + TimeStamp: currentTime, + TimeID: struct { + Year int + Month int + Day int + Hour int + }{ + Year: currentTime.Year(), + Month: int(currentTime.Month()), + Day: currentTime.Day(), + Hour: currentTime.Hour(), + }, + URL: map[string]*Counter{ + "/get": { + Identifier: "/get", + }, + }, + Errors: map[string]*Counter{}, + Total: Counter{ + ErrorMap: map[string]int{}, + }, + }, + }, + }, + { + testName: "multi record", + input: []UptimeReportData{ + { + OrgID: "org123", + APIID: "api123", + URL: "/get", + ResponseCode: 200, + RequestTime: 100, + TimeStamp: currentTime, + ExpireAt: currentTime, + }, + { + OrgID: "org123", + APIID: "api123", + URL: "/get", + ResponseCode: 200, + RequestTime: 100, + TimeStamp: currentTime, + ExpireAt: currentTime, + }, + { + OrgID: "org123", + APIID: "api123", + URL: "/get", + ResponseCode: 500, + RequestTime: 100, + TimeStamp: currentTime, + ExpireAt: currentTime, + }, + }, + expected: map[string]UptimeReportAggregate{ + "org123": { + OrgID: "org123", + ExpireAt: currentTime, + LastTime: currentTime, + TimeStamp: currentTime, + TimeID: struct { + Year int + Month int + Day int + Hour int + }{ + Year: currentTime.Year(), + Month: int(currentTime.Month()), + Day: currentTime.Day(), + Hour: currentTime.Hour(), + }, + URL: map[string]*Counter{ + "/get": { + Hits: 3, + TotalRequestTime: 300, + Success: 2, + ErrorTotal: 1, + RequestTime: 100, + Identifier: "/get", + HumanIdentifier: "", + LastTime: currentTime, + ErrorMap: map[string]int{"200": 2, "500": 1}, + ErrorList: []ErrorData{}, + }, + }, + Errors: map[string]*Counter{ + "500": { + Hits: 1, + TotalRequestTime: 100, + Success: 0, + ErrorTotal: 1, + RequestTime: 100, + Identifier: "500", + HumanIdentifier: "", + LastTime: currentTime, + ErrorMap: map[string]int{"500": 1}, + ErrorList: []ErrorData{}, + }, + }, + Total: Counter{ + Hits: 3, + TotalRequestTime: 300, + Success: 2, + ErrorTotal: 1, + RequestTime: 100, + Identifier: "", + HumanIdentifier: "", + ErrorMap: map[string]int{"200": 2, "500": 1}, + }, + }, + }, + }, + } + + for _, tc := range tcs { + t.Run(tc.testName, func(t *testing.T) { + actual := AggregateUptimeData(tc.input) + + if !cmp.Equal(tc.expected, actual) { + t.Errorf("AggregateUptimeData() mismatch (-want +got):\n%s", cmp.Diff(tc.expected, actual)) + } + }) + } +} + +func TestOnConflictUptimeAssignments(t *testing.T) { + assignments := OnConflictAssignments("uptime_reports", "excluded") + + expectedAssignmets := map[string]interface{}{ + "code_1x": clause.Expr{SQL: "uptime_reports.code_1x + excluded.code_1x"}, + "code_200": clause.Expr{SQL: "uptime_reports.code_200 + excluded.code_200"}, + "code_201": clause.Expr{SQL: "uptime_reports.code_201 + excluded.code_201"}, + "code_2x": clause.Expr{SQL: "uptime_reports.code_2x + excluded.code_2x"}, + "code_301": clause.Expr{SQL: "uptime_reports.code_301 + excluded.code_301"}, + "code_302": clause.Expr{SQL: "uptime_reports.code_302 + excluded.code_302"}, + "code_303": clause.Expr{SQL: "uptime_reports.code_303 + excluded.code_303"}, + "code_304": clause.Expr{SQL: "uptime_reports.code_304 + 
excluded.code_304"}, + "code_3x": clause.Expr{SQL: "uptime_reports.code_3x + excluded.code_3x"}, + "code_400": clause.Expr{SQL: "uptime_reports.code_400 + excluded.code_400"}, + "code_401": clause.Expr{SQL: "uptime_reports.code_401 + excluded.code_401"}, + "code_403": clause.Expr{SQL: "uptime_reports.code_403 + excluded.code_403"}, + "code_404": clause.Expr{SQL: "uptime_reports.code_404 + excluded.code_404"}, + "code_429": clause.Expr{SQL: "uptime_reports.code_429 + excluded.code_429"}, + "code_4x": clause.Expr{SQL: "uptime_reports.code_4x + excluded.code_4x"}, + "code_500": clause.Expr{SQL: "uptime_reports.code_500 + excluded.code_500"}, + "code_501": clause.Expr{SQL: "uptime_reports.code_501 + excluded.code_501"}, + "code_502": clause.Expr{SQL: "uptime_reports.code_502 + excluded.code_502"}, + "code_503": clause.Expr{SQL: "uptime_reports.code_503 + excluded.code_503"}, + "code_504": clause.Expr{SQL: "uptime_reports.code_504 + excluded.code_504"}, + "code_5x": clause.Expr{SQL: "uptime_reports.code_5x + excluded.code_5x"}, + "counter_bytes_in": clause.Expr{SQL: "uptime_reports.counter_bytes_in + excluded.counter_bytes_in"}, + "counter_bytes_out": clause.Expr{SQL: "uptime_reports.counter_bytes_out + excluded.counter_bytes_out"}, + "counter_closed_connections": clause.Expr{SQL: "uptime_reports.counter_closed_connections + excluded.counter_closed_connections"}, + "counter_error": clause.Expr{SQL: "uptime_reports.counter_error + excluded.counter_error"}, + "counter_hits": clause.Expr{SQL: "uptime_reports.counter_hits + excluded.counter_hits"}, + "counter_last_time": clause.Expr{SQL: "excluded.counter_last_time"}, + "counter_latency": clause.Expr{SQL: "(uptime_reports.counter_total_latency +excluded.counter_total_latency)/CAST( uptime_reports.counter_hits + excluded.counter_hits AS REAL)"}, + "counter_max_latency": clause.Expr{SQL: "0.5 * ((uptime_reports.counter_max_latency + excluded.counter_max_latency) + ABS(uptime_reports.counter_max_latency - excluded.counter_max_latency))"}, + "counter_max_upstream_latency": clause.Expr{SQL: "0.5 * ((uptime_reports.counter_max_upstream_latency + excluded.counter_max_upstream_latency) + ABS(uptime_reports.counter_max_upstream_latency - excluded.counter_max_upstream_latency))"}, + "counter_min_latency": clause.Expr{SQL: "0.5 * ((uptime_reports.counter_min_latency + excluded.counter_min_latency) - ABS(uptime_reports.counter_min_latency - excluded.counter_min_latency)) "}, + "counter_min_upstream_latency": clause.Expr{SQL: "0.5 * ((uptime_reports.counter_min_upstream_latency + excluded.counter_min_upstream_latency) - ABS(uptime_reports.counter_min_upstream_latency - excluded.counter_min_upstream_latency)) "}, + "counter_open_connections": clause.Expr{SQL: "uptime_reports.counter_open_connections + excluded.counter_open_connections"}, + "counter_request_time": clause.Expr{SQL: "(uptime_reports.counter_total_request_time +excluded.counter_total_request_time)/CAST( uptime_reports.counter_hits + excluded.counter_hits AS REAL)"}, + "counter_success": clause.Expr{SQL: "uptime_reports.counter_success + excluded.counter_success"}, + "counter_total_latency": clause.Expr{SQL: "uptime_reports.counter_total_latency + excluded.counter_total_latency"}, + "counter_total_request_time": clause.Expr{SQL: "uptime_reports.counter_total_request_time + excluded.counter_total_request_time"}, + "counter_total_upstream_latency": clause.Expr{SQL: "uptime_reports.counter_total_upstream_latency + excluded.counter_total_upstream_latency"}, + "counter_upstream_latency": clause.Expr{SQL: 
"(uptime_reports.counter_total_upstream_latency +excluded.counter_total_upstream_latency)/CAST( uptime_reports.counter_hits + excluded.counter_hits AS REAL)"}, + } + + assert.Equal(t, expectedAssignmets, assignments) +} diff --git a/bin/ci-test.sh b/bin/ci-test.sh index 5d5207366..b73b4f9c6 100755 --- a/bin/ci-test.sh +++ b/bin/ci-test.sh @@ -25,14 +25,14 @@ echo "Formatting checks..." FMT_FILES="$(gofmt -s -l ${GO_FILES})" if [[ -n ${FMT_FILES} ]]; then - fatal "Run 'gofmt -s -w' on these files:\n$FMT_FILES" + fatal "Run 'gofmt -s -w' on these files:\n$FMT_FILES" fi echo "gofmt check is ok!" IMP_FILES="$(goimports -l ${GO_FILES})" if [[ -n ${IMP_FILES} ]]; then - fatal "Run 'goimports -w' on these files:\n$IMP_FILES" + fatal "Run 'goimports -w' on these files:\n$IMP_FILES" fi echo "goimports check is ok!" @@ -42,11 +42,18 @@ do race="-race" echo "Testing... $pkg" if [[ ${pkg} == *"pumps" ]]; then - # run pumps tests without race detector until we add correct testing - race="" + # run pumps tests without race detector until we add correct testing + race="" + # run tests twice for tyk-pump/pumps with different MONGO_DRIVER values + MONGO_DRIVERS=("mgo" "mongo-go") + for mongo_driver in "${MONGO_DRIVERS[@]}"; do + echo "Running tests with MONGO_DRIVER=$mongo_driver" + export MONGO_DRIVER=$mongo_driver + coveragefile=`echo "$pkg" | awk -F/ '{print $NF}'` + show go test -timeout ${TEST_TIMEOUT} ${race} --coverprofile=${coveragefile}.cov -v ${pkg} + done + else + coveragefile=`echo "$pkg" | awk -F/ '{print $NF}'` + show go test -timeout ${TEST_TIMEOUT} ${race} --coverprofile=${coveragefile}.cov -v ${pkg} fi - coveragefile=`echo "$pkg" | awk -F/ '{print $NF}'` - show go test -timeout ${TEST_TIMEOUT} ${race} --coverprofile=${coveragefile}.cov -v ${pkg} -done - - +done \ No newline at end of file diff --git a/go.mod b/go.mod index 7b03f4c13..9eab65398 100644 --- a/go.mod +++ b/go.mod @@ -8,6 +8,7 @@ require ( github.com/TykTechnologies/gorpc v0.0.0-20210624160652-fe65bda0ccb9 github.com/TykTechnologies/graphql-go-tools v1.6.2-0.20230320143102-7a16078ce517 github.com/TykTechnologies/murmur3 v0.0.0-20230310161213-aad17efd5632 + github.com/TykTechnologies/storage v0.0.0-20230410152719-1e659ae95643 github.com/aws/aws-sdk-go-v2 v1.16.14 github.com/aws/aws-sdk-go-v2/config v1.9.0 github.com/aws/aws-sdk-go-v2/service/timestreamwrite v1.9.0 @@ -29,7 +30,6 @@ require ( github.com/kelseyhightower/envconfig v1.4.0 github.com/lintianzhi/graylogd v0.0.0-20180503131252-dc68342f04dc // indirect github.com/logzio/logzio-go v0.0.0-20200316143903-ac8fc0e2910e - github.com/lonelycode/mgohacks v0.0.0-20150820024025-f9c291f7e57e github.com/mitchellh/mapstructure v1.3.1 github.com/moesif/moesifapi-go v1.0.6 github.com/olivere/elastic v6.2.31+incompatible // indirect @@ -51,7 +51,6 @@ require ( golang.org/x/net v0.0.0-20220722155237-a158d28d115b google.golang.org/protobuf v1.28.1 gopkg.in/alecthomas/kingpin.v2 v2.2.6 - gopkg.in/mgo.v2 v2.0.0-20190816093944-a6b53ec6cb22 gopkg.in/olivere/elastic.v3 v3.0.56 gopkg.in/olivere/elastic.v5 v5.0.85 gopkg.in/olivere/elastic.v6 v6.2.31 diff --git a/go.sum b/go.sum index a20d3083d..d94c5d3f1 100644 --- a/go.sum +++ b/go.sum @@ -50,6 +50,14 @@ github.com/TykTechnologies/graphql-go-tools v1.6.2-0.20230320143102-7a16078ce517 github.com/TykTechnologies/graphql-go-tools v1.6.2-0.20230320143102-7a16078ce517/go.mod h1:ZiFZcrue3+n2mHH+KLHRipbYVULkgy3Myko5S7IIs74= github.com/TykTechnologies/murmur3 v0.0.0-20230310161213-aad17efd5632 h1:T5NWziFusj8au5nxAqMMh/bZyX9CAyYnBkaMSsfH6BA= 
github.com/TykTechnologies/murmur3 v0.0.0-20230310161213-aad17efd5632/go.mod h1:UsPYgOFBpNzDXLEti7MKOwHLpVSqdzuNGkVFPspQmnQ= +github.com/TykTechnologies/storage v0.0.0-20230330163006-a30b9b9f5c67 h1:KCdNrHczqffUg0Yg4ueN77zEyZJ9h86gF4zCNqZJy2Q= +github.com/TykTechnologies/storage v0.0.0-20230330163006-a30b9b9f5c67/go.mod h1:+0S3KuNlLGBTMTSFREuZFm315zzXjuuCO4QSAPy+d3M= +github.com/TykTechnologies/storage v0.0.0-20230405205718-1a2c0ac56c2a h1:b09GAESqHDrUcLyNPWT8K5DZuclc2x8wv3/MK5LGoXQ= +github.com/TykTechnologies/storage v0.0.0-20230405205718-1a2c0ac56c2a/go.mod h1:+0S3KuNlLGBTMTSFREuZFm315zzXjuuCO4QSAPy+d3M= +github.com/TykTechnologies/storage v0.0.0-20230410132731-c13ef37ecbd9 h1:MN+4v/nbMzqc9IyxpjG/e4yBwhJfBNRkEOB+L8u481w= +github.com/TykTechnologies/storage v0.0.0-20230410132731-c13ef37ecbd9/go.mod h1:+0S3KuNlLGBTMTSFREuZFm315zzXjuuCO4QSAPy+d3M= +github.com/TykTechnologies/storage v0.0.0-20230410152719-1e659ae95643 h1:vFml52JVqB1yOMUyq10o5JytEfC93KattU/xTfzxAlM= +github.com/TykTechnologies/storage v0.0.0-20230410152719-1e659ae95643/go.mod h1:+0S3KuNlLGBTMTSFREuZFm315zzXjuuCO4QSAPy+d3M= github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM= github.com/agnivade/levenshtein v1.1.1 h1:QY8M92nrzkmr798gCo3kmMyqXFzdQVpxLlGPRBij0P8= github.com/agnivade/levenshtein v1.1.1/go.mod h1:veldBMzWxcCG2ZvUTKD2kJNRdCk5hVbJomOvKkmgYbo= @@ -443,8 +451,9 @@ github.com/klauspost/compress v1.4.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0 github.com/klauspost/compress v1.9.8/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= github.com/klauspost/compress v1.10.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.11.12/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= -github.com/klauspost/compress v1.12.2 h1:2KCfW3I9M7nSc5wOqXAlW2v2U6v+w6cbjvbfp+OykW8= github.com/klauspost/compress v1.12.2/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= +github.com/klauspost/compress v1.13.6 h1:P76CopJELS0TiO2mebmnzgWaajssP/EszplttgQxcgc= +github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6/go.mod h1:+ZoRqAPRLkC4NPOvfYeR5KNOrY6TD+/sAC3HXPZgDYg= github.com/klauspost/pgzip v1.0.2-0.20170402124221-0bf5dcad4ada/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= @@ -477,8 +486,6 @@ github.com/logrusorgru/aurora/v3 v3.0.0 h1:R6zcoZZbvVcGMvDCKo45A9U/lzYyzl5NfYIvz github.com/logrusorgru/aurora/v3 v3.0.0/go.mod h1:vsR12bk5grlLvLXAYrBsb5Oc/N+LxAlxggSjiwMnCUc= github.com/logzio/logzio-go v0.0.0-20200316143903-ac8fc0e2910e h1:j4tDETg2tUX0AZq2CClOpW8rBf9rPEBNjiXgQoso4Z8= github.com/logzio/logzio-go v0.0.0-20200316143903-ac8fc0e2910e/go.mod h1:OBprCVuGvtyYcaCmYjE32bF12d5AAHeXS5xI0QbIXMI= -github.com/lonelycode/mgohacks v0.0.0-20150820024025-f9c291f7e57e h1:VvfhTFKhOTHD0xtCOPpzWxw03TUdtkRVWjRL3Lcnhuk= -github.com/lonelycode/mgohacks v0.0.0-20150820024025-f9c291f7e57e/go.mod h1:xVJqf7VdD7Xfgmi9XY63aOYtrYClQOtwHX2FEOgtCKM= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= @@ -529,6 +536,8 @@ github.com/modern-go/reflect2 v1.0.1 
h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9 github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/moesif/moesifapi-go v1.0.6 h1:r3ppy6p5jxzdauziRI3lMtcjDpVH/zW2an2rYXLkNWE= github.com/moesif/moesifapi-go v1.0.6/go.mod h1:wRGgVy0QeiCgnjFEiD13HD2Aa7reI8nZXtCnddNnZGs= +github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe h1:iruDEfMl2E6fbMZ9s0scYfZQ84/6SPL6zC8ACM2oIL0= +github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ= github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae/go.mod h1:qAyveg+e4CE+eKJXWVjKXM4ck2QobLqTDytGJbLLhJg= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= @@ -706,6 +715,7 @@ github.com/tidwall/gjson v1.11.0 h1:C16pk7tQNiH6VlCrtIXL1w8GaOsi1X3W8KDkE1BuYd4= github.com/tidwall/gjson v1.11.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA= github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= +github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= github.com/tidwall/pretty v1.2.0 h1:RWIZEg2iJ8/g6fDDYzMpobmaoGh5OLl4AXtGUGPcqCs= github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= github.com/tidwall/sjson v1.0.4 h1:UcdIRXff12Lpnu3OLtZvnc03g4vH2suXDXhBwBqmzYg= @@ -726,6 +736,12 @@ github.com/vektah/gqlparser/v2 v2.5.1/go.mod h1:mPgqFBu/woKTVYWyNk8cO3kh4S/f4aRF github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE= github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU= github.com/willf/bitset v1.1.3/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= +github.com/xdg-go/pbkdf2 v1.0.0 h1:Su7DPu48wXMwC3bs7MCNG+z4FhcyEuz5dlvchbq0B0c= +github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= +github.com/xdg-go/scram v1.1.1 h1:VOMT+81stJgXW3CpHyqHN3AXDYIMsx56mEFrB37Mb/E= +github.com/xdg-go/scram v1.1.1/go.mod h1:RaEWvsqvNKKvBPvcKeFjrG2cJqOkHTiyTpzz23ni57g= +github.com/xdg-go/stringprep v1.0.3 h1:kdwGpVNwPFtjs98xCGkHjQtGKh86rDcRZN17QEMCOIs= +github.com/xdg-go/stringprep v1.0.3/go.mod h1:W3f5j4i+9rC0kuIEJL0ky1VpHXQU3ocBgklLGvcBnW8= github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I= github.com/xdg/scram v1.0.3 h1:nTadYh2Fs4BK2xdldEa2g5bbaZp0/+1nJMMPtPxS/to= github.com/xdg/scram v1.0.3/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I= @@ -741,10 +757,14 @@ github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1: github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673/go.mod h1:N3UwUGtsrSj3ccvlPHLoLsHnpR27oXr4ZE984MbSER8= github.com/xtgo/uuid v0.0.0-20140804021211-a0b114877d4c h1:3lbZUMbMiGUW/LMkfsEABsc5zNT9+b1CvsJx47JzJ8g= github.com/xtgo/uuid v0.0.0-20140804021211-a0b114877d4c/go.mod h1:UrdRz5enIKZ63MEE3IF9l2/ebyx59GyGgPi+tICQdmM= +github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d h1:splanxYIlg+5LfHAM6xpdFEAYOk8iySO56hMFq6uLyA= +github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.4.1/go.mod 
h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q= +go.mongodb.org/mongo-driver v1.11.2 h1:+1v2rDQUWNcGW7/7E0Jvdz51V38XXxJfhzbV17aNHCw= +go.mongodb.org/mongo-driver v1.11.2/go.mod h1:s7p5vEtfbeR1gYi6pnj3c3/urpbLv2T5Sfd6Rp2HBB8= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= @@ -790,8 +810,9 @@ golang.org/x/crypto v0.0.0-20201112155050-0c6587e931a9/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210314154223-e6e6c4f2bb5b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20210921155107-089bfa567519 h1:7I4JAnoQBe7ZtJcBaYHi5UtiO8tQHbUSXxL+pnGRANg= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d h1:sK3txAijHtOK88l68nt020reeT1ZdKLIYetKl95FzVY= +golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -853,6 +874,7 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220722155237-a158d28d115b h1:PxfKdU9lEEDYjdIzOtC4qFWgkU2rGHdKlKowJSMN9h0= diff --git a/pumps/graph_mongo.go b/pumps/graph_mongo.go index 754d43ffd..778b3cd1c 100644 --- a/pumps/graph_mongo.go +++ b/pumps/graph_mongo.go @@ -5,6 +5,7 @@ import ( "fmt" "strings" + "github.com/TykTechnologies/storage/persistent/id" "github.com/TykTechnologies/tyk-pump/analytics" "github.com/mitchellh/mapstructure" "github.com/sirupsen/logrus" @@ -62,7 +63,7 @@ func (g *GraphMongoPump) Init(config interface{}) error { g.capCollection() - indexCreateErr := g.ensureIndexes() + indexCreateErr := g.ensureIndexes(g.dbConf.CollectionName) if indexCreateErr != nil { g.log.Error(indexCreateErr) } @@ -84,33 +85,28 @@ func (g *GraphMongoPump) WriteData(ctx context.Context, data []interface{}) erro g.log.Debug("Attempting to write ", len(data), " records...") - for g.dbSession == nil { - g.log.Debug("Connecting to analytics store") - g.connect() - } accumulateSet := 
g.AccumulateSet(data, true) errCh := make(chan error, len(accumulateSet)) for _, dataSet := range accumulateSet { - go func(dataSet []interface{}, errCh chan error) { - sess := g.dbSession.Copy() - defer sess.Close() - + go func(dataSet []id.DBObject, errCh chan error) { // make a graph record array with variable length in case there are errors with some conversion - finalSet := make([]interface{}, 0) + finalSet := make([]id.DBObject, 0) for _, d := range dataSet { - r, ok := d.(analytics.AnalyticsRecord) + r, ok := d.(*analytics.AnalyticsRecord) if !ok { continue } + r.SetObjectID(id.NewObjectID()) + var ( gr analytics.GraphRecord err error ) if r.RawRequest == "" || r.RawResponse == "" || r.ApiSchema == "" { g.log.Warn("skipping record parsing") - gr = analytics.GraphRecord{AnalyticsRecord: r} + gr = analytics.GraphRecord{AnalyticsRecord: *r} } else { gr = r.ToGraphRecord() if err != nil { @@ -120,17 +116,14 @@ func (g *GraphMongoPump) WriteData(ctx context.Context, data []interface{}) erro } } - finalSet = append(finalSet, gr) + finalSet = append(finalSet, &gr) } - analyticsCollection := sess.DB("").C(collectionName) - g.log.WithFields(logrus.Fields{ "collection": collectionName, "number of records": len(finalSet), }).Debug("Attempt to purge records") - - err := analyticsCollection.Insert(finalSet...) + err := g.store.Insert(context.Background(), finalSet...) if err != nil { g.log.WithFields(logrus.Fields{"collection": collectionName, "number of records": len(finalSet)}).Error("Problem inserting to mongo collection: ", err) diff --git a/pumps/graph_mongo_test.go b/pumps/graph_mongo_test.go index 3493012c6..05f25821b 100644 --- a/pumps/graph_mongo_test.go +++ b/pumps/graph_mongo_test.go @@ -145,10 +145,6 @@ X-Ratelimit-Reset: 0 ` func TestGraphMongoPump_WriteData(t *testing.T) { - c := Conn{} - c.ConnectDb() - defer c.CleanDb() - conf := defaultConf() pump := GraphMongoPump{ MongoPump: MongoPump{ @@ -160,6 +156,8 @@ func TestGraphMongoPump_WriteData(t *testing.T) { pump.dbConf.CollectionCapEnable = true pump.dbConf.CollectionCapMaxSizeBytes = 0 + pump.connect() + type customRecord struct { rawRequest string rawResponse string @@ -325,17 +323,22 @@ func TestGraphMongoPump_WriteData(t *testing.T) { assert.NoError(t, err) } - // now check for the written data - sess := pump.dbSession.Copy() defer func() { - if err := sess.DB("").C(conf.CollectionName).DropCollection(); err != nil { + if err := pump.store.DropDatabase(context.Background()); err != nil { pump.log.WithError(err).Warn("error dropping collection") } }() - analyticsColl := sess.DB("").C(conf.CollectionName) + + // now check for the written data var results []analytics.GraphRecord - query := analyticsColl.Find(nil) - assert.NoError(t, query.All(&results)) + + // Using the same collection name as the default pump config + d := dbObject{ + tableName: pump.dbConf.CollectionName, + } + err = pump.store.Query(context.Background(), d, &results, nil) + + assert.Nil(t, err) if diff := cmp.Diff(tc.expectedGraphRecords, results, cmpopts.IgnoreFields(analytics.GraphRecord{}, "AnalyticsRecord")); diff != "" { t.Error(diff) } diff --git a/pumps/mgo_helper_test.go b/pumps/mgo_helper_test.go index d338a3b01..0f421c871 100644 --- a/pumps/mgo_helper_test.go +++ b/pumps/mgo_helper_test.go @@ -3,86 +3,111 @@ package pumps import ( - "gopkg.in/mgo.v2" - "gopkg.in/mgo.v2/bson" + "context" + "os" + + "github.com/TykTechnologies/storage/persistent" + "github.com/TykTechnologies/storage/persistent/dbm" + "github.com/TykTechnologies/storage/persistent/id" 
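+	// index is imported because the test helpers' GetIndexes now returns []index.Index from the storage library.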
+ "github.com/TykTechnologies/storage/persistent/index" ) -const dbAddr = "127.0.0.1:27017" -const colName = "test_collection" +const ( + dbAddr = "mongodb://localhost:27017/test" + colName = "test_collection" +) type Conn struct { - Session *mgo.Session + Store persistent.PersistentStorage +} + +func (c *Conn) TableName() string { + return colName +} + +// SetObjectID is a dummy function to satisfy the interface +func (*Conn) GetObjectID() id.ObjectId { + return "" +} + +// SetObjectID is a dummy function to satisfy the interface +func (*Conn) SetObjectID(id.ObjectId) { + // empty } func (c *Conn) ConnectDb() { - if c.Session == nil { + if c.Store == nil { var err error - c.Session, err = mgo.Dial(dbAddr) + c.Store, err = persistent.NewPersistentStorage(&persistent.ClientOpts{ + Type: "mgo", + ConnectionString: dbAddr, + }) if err != nil { - panic("Unable to connect to mongo") + panic("Unable to connect to mongo: " + err.Error()) } } } func (c *Conn) CleanDb() { - sess := c.Session.Copy() - defer sess.Close() - - if err := sess.DB("").DropDatabase(); err != nil { + err := c.Store.DropDatabase(context.Background()) + if err != nil { panic(err) } } func (c *Conn) CleanCollection() { - sess := c.Session.Copy() - defer sess.Close() - - if err := sess.DB("").C(colName).DropCollection(); err != nil { + err := c.Store.Drop(context.Background(), c) + if err != nil { panic(err) } } func (c *Conn) CleanIndexes() { - sess := c.Session.Copy() - defer sess.Close() - - indexes, err := sess.DB("").C(colName).Indexes() + err := c.Store.CleanIndexes(context.Background(), c) if err != nil { panic(err) } - for _, index := range indexes { - sess.DB("").C(colName).DropIndexName(index.Name) - } - } -func (c *Conn) InsertDoc() { - sess := c.Session.Copy() - defer sess.Close() +type Doc struct { + ID id.ObjectId `bson:"_id"` + Foo string `bson:"foo"` +} - if err := sess.DB("").C(colName).Insert(bson.M{"foo": "bar"}); err != nil { - panic(err) - } +func (d Doc) GetObjectID() id.ObjectId { + return d.ID } -func (c *Conn) GetCollectionStats() (colStats bson.M) { - sess := c.Session.Copy() - defer sess.Close() +func (d *Doc) SetObjectID(id id.ObjectId) { + d.ID = id +} - data := bson.D{{Name: "collStats", Value: colName}} +func (d Doc) TableName() string { + return colName +} - if err := sess.DB("").Run(data, &colStats); err != nil { +func (c *Conn) InsertDoc() { + doc := Doc{ + Foo: "bar", + } + doc.SetObjectID(id.NewObjectID()) + err := c.Store.Insert(context.Background(), &doc) + if err != nil { panic(err) } +} +func (c *Conn) GetCollectionStats() (colStats dbm.DBM) { + var err error + colStats, err = c.Store.DBTableStats(context.Background(), c) + if err != nil { + panic(err) + } return colStats } -func (c *Conn) GetIndexes() ([]mgo.Index, error) { - sess := c.Session.Copy() - defer sess.Close() - - return sess.DB("").C(colName).Indexes() +func (c *Conn) GetIndexes() ([]index.Index, error) { + return c.Store.GetIndexes(context.Background(), c) } func defaultConf() MongoConf { @@ -95,6 +120,12 @@ func defaultConf() MongoConf { conf.MongoURL = dbAddr conf.MongoSSLInsecureSkipVerify = true + if os.Getenv("MONGO_DRIVER") == "mongo-go" { + conf.MongoDriverType = persistent.OfficialMongo + } else { + conf.MongoDriverType = persistent.Mgo + } + return conf } @@ -107,5 +138,11 @@ func defaultSelectiveConf() MongoSelectiveConf { conf.MongoURL = dbAddr conf.MongoSSLInsecureSkipVerify = true + if os.Getenv("MONGO_DRIVER") == "mongo-go" { + conf.MongoDriverType = persistent.OfficialMongo + } else { + conf.MongoDriverType = 
persistent.Mgo + } + return conf } diff --git a/pumps/mongo.go b/pumps/mongo.go index 58e5daa2b..6a965455b 100644 --- a/pumps/mongo.go +++ b/pumps/mongo.go @@ -5,23 +5,21 @@ import ( "crypto" "crypto/ecdsa" "crypto/rsa" - "crypto/tls" "crypto/x509" "encoding/base64" - "encoding/pem" "fmt" - "io/ioutil" - "net" "regexp" "strconv" - "strings" - "time" + "github.com/TykTechnologies/storage/persistent" + "github.com/TykTechnologies/storage/persistent/dbm" + "github.com/TykTechnologies/storage/persistent/id" + "github.com/TykTechnologies/storage/persistent/index" "github.com/TykTechnologies/tyk-pump/analytics" "github.com/kelseyhightower/envconfig" "github.com/mitchellh/mapstructure" "github.com/sirupsen/logrus" - "gopkg.in/mgo.v2" + "gopkg.in/vmihailenco/msgpack.v2" ) @@ -34,15 +32,17 @@ const ( ) type MongoPump struct { - IsUptime bool - dbSession *mgo.Session - dbConf *MongoConf + IsUptime bool + store persistent.PersistentStorage + dbConf *MongoConf CommonPumpConfig } -var mongoPrefix = "mongo-pump" -var mongoPumpPrefix = "PMP_MONGO" -var mongoDefaultEnv = PUMPS_ENV_PREFIX + "_MONGO" + PUMPS_ENV_META_PREFIX +var ( + mongoPrefix = "mongo-pump" + mongoPumpPrefix = "PMP_MONGO" + mongoDefaultEnv = PUMPS_ENV_PREFIX + "_MONGO" + PUMPS_ENV_META_PREFIX +) type MongoType int @@ -81,29 +81,41 @@ type BaseMongoConf struct { OmitIndexCreation bool `json:"omit_index_creation" mapstructure:"omit_index_creation"` // Set the consistency mode for the session, it defaults to `Strong`. The valid values are: strong, monotonic, eventual. MongoSessionConsistency string `json:"mongo_session_consistency" mapstructure:"mongo_session_consistency"` + // MongoDriverType is the type of the driver (library) to use. The valid values are: "mongo-go" and "mgo". + MongoDriverType string `json:"driver_type" mapstructure:"driver_type"` +} +type dbObject struct { + tableName string +} + +func (d dbObject) TableName() string { + return d.tableName +} + +// GetObjectID is a dummy function to satisfy the interface +func (dbObject) GetObjectID() id.ObjectId { + return "" +} + +// SetObjectID is a dummy function to satisfy the interface +func (dbObject) SetObjectID(id.ObjectId) { + // empty +} + +func createDBObject(tableName string) dbObject { + return dbObject{tableName: tableName} } func (b *BaseMongoConf) GetBlurredURL() string { // mongo uri match with regex ^(mongodb:(?:\/{2})?)((\w+?):(\w+?)@|:?@?)(\S+?):(\d+)(\/(\S+?))?(\?replicaSet=(\S+?))?$ // but we need only a segment, so regex explanation: https://regex101.com/r/8Uzwtw/1 regex := `^(mongodb:(?:\/{2})?)((...+?):(...+?)@)` - var re = regexp.MustCompile(regex) + re := regexp.MustCompile(regex) blurredUrl := re.ReplaceAllString(b.MongoURL, "***:***@") return blurredUrl } -func (b *BaseMongoConf) SetMongoConsistency(session *mgo.Session) { - switch b.MongoSessionConsistency { - case "eventual": - session.SetMode(mgo.Eventual, true) - case "monotonic": - session.SetMode(mgo.Monotonic, true) - default: - session.SetMode(mgo.Strong, true) - } -} - // @PumpConf Mongo type MongoConf struct { // TYKCONFIGEXPAND @@ -124,38 +136,6 @@ type MongoConf struct { CollectionCapEnable bool `json:"collection_cap_enable" mapstructure:"collection_cap_enable"` } -func loadCertficateAndKeyFromFile(path string) (*tls.Certificate, error) { - raw, err := ioutil.ReadFile(path) - if err != nil { - return nil, err - } - - var cert tls.Certificate - for { - block, rest := pem.Decode(raw) - if block == nil { - break - } - if block.Type == "CERTIFICATE" { - cert.Certificate = append(cert.Certificate, 
block.Bytes) - } else { - cert.PrivateKey, err = parsePrivateKey(block.Bytes) - if err != nil { - return nil, fmt.Errorf("Failure reading private key from \"%s\": %s", path, err) - } - } - raw = rest - } - - if len(cert.Certificate) == 0 { - return nil, fmt.Errorf("No certificate found in \"%s\"", path) - } else if cert.PrivateKey == nil { - return nil, fmt.Errorf("No private key found in \"%s\"", path) - } - - return &cert, nil -} - func parsePrivateKey(der []byte) (crypto.PrivateKey, error) { if key, err := x509.ParsePKCS1PrivateKey(der); err == nil { return key, nil @@ -174,98 +154,6 @@ func parsePrivateKey(der []byte) (crypto.PrivateKey, error) { return nil, fmt.Errorf("Failed to parse private key") } -func mongoType(session *mgo.Session) MongoType { - // Querying for the features which 100% not supported by AWS DocumentDB - var result struct { - Code int `bson:"code"` - } - session.Run("features", &result) - - switch result.Code { - case AWSDBError: - return AWSDocumentDB - case CosmosDBError: - return CosmosDB - default: - return StandardMongo - } -} - -func mongoDialInfo(conf BaseMongoConf) (dialInfo *mgo.DialInfo, err error) { - if dialInfo, err = mgo.ParseURL(conf.MongoURL); err != nil { - return dialInfo, err - } - - if conf.MongoUseSSL { - dialInfo.DialServer = func(addr *mgo.ServerAddr) (net.Conn, error) { - tlsConfig := &tls.Config{} - if conf.MongoSSLInsecureSkipVerify { - tlsConfig.InsecureSkipVerify = true - } - - if conf.MongoSSLCAFile != "" { - caCert, err := ioutil.ReadFile(conf.MongoSSLCAFile) - if err != nil { - log.Fatal("Can't load mongo CA certificates: ", err) - } - caCertPool := x509.NewCertPool() - caCertPool.AppendCertsFromPEM(caCert) - tlsConfig.RootCAs = caCertPool - } - - if conf.MongoSSLAllowInvalidHostnames { - tlsConfig.InsecureSkipVerify = true - tlsConfig.VerifyPeerCertificate = func(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error { - // Code copy/pasted and adapted from - // https://github.com/golang/go/blob/81555cb4f3521b53f9de4ce15f64b77cc9df61b9/src/crypto/tls/handshake_client.go#L327-L344, but adapted to skip the hostname verification. - // See https://github.com/golang/go/issues/21971#issuecomment-412836078. - - // If this is the first handshake on a connection, process and - // (optionally) verify the server's certificates. 
- certs := make([]*x509.Certificate, len(rawCerts)) - for i, asn1Data := range rawCerts { - cert, err := x509.ParseCertificate(asn1Data) - if err != nil { - return err - } - certs[i] = cert - } - - opts := x509.VerifyOptions{ - Roots: tlsConfig.RootCAs, - CurrentTime: time.Now(), - DNSName: "", // <- skip hostname verification - Intermediates: x509.NewCertPool(), - } - - for i, cert := range certs { - if i == 0 { - continue - } - opts.Intermediates.AddCert(cert) - } - _, err := certs[0].Verify(opts) - - return err - } - } - - if conf.MongoSSLPEMKeyfile != "" { - cert, err := loadCertficateAndKeyFromFile(conf.MongoSSLPEMKeyfile) - if err != nil { - log.Fatal("Can't load mongo client certificate: ", err) - } - - tlsConfig.Certificates = []tls.Certificate{*cert} - } - - return tls.Dial("tcp", addr.String(), tlsConfig) - } - } - - return dialInfo, err -} - func (m *MongoPump) New() Pump { newPump := MongoPump{} return &newPump @@ -298,22 +186,24 @@ func (m *MongoPump) Init(config interface{}) error { m.log.Fatal("Failed to decode configuration: ", err) } - //we check for the environment configuration if this pumps is not the uptime pump + // we check for the environment configuration if this pumps is not the uptime pump if !m.IsUptime { processPumpEnvVars(m, m.log, m.dbConf, mongoDefaultEnv) - //we keep this env check for backward compatibility + // we keep this env check for backward compatibility overrideErr := envconfig.Process(mongoPumpPrefix, m.dbConf) if overrideErr != nil { m.log.Error("Failed to process environment variables for mongo pump: ", overrideErr) } - } else if m.IsUptime && m.dbConf.MongoURL == "" { + } else if m.dbConf.MongoURL == "" { m.log.Debug("Trying to set uptime pump with PMP_MONGO env vars") - //we keep this env check for backward compatibility + // we keep this env check for backward compatibility overrideErr := envconfig.Process(mongoPumpPrefix, m.dbConf) if overrideErr != nil { m.log.Error("Failed to process environment variables for mongo pump: ", overrideErr) } + + m.dbConf.CollectionName = "tyk_uptime_analytics" } if m.dbConf.MaxInsertBatchSizeBytes == 0 { @@ -330,7 +220,7 @@ func (m *MongoPump) Init(config interface{}) error { m.capCollection() - indexCreateErr := m.ensureIndexes() + indexCreateErr := m.ensureIndexes(m.dbConf.CollectionName) if indexCreateErr != nil { m.log.Error(indexCreateErr) } @@ -344,10 +234,9 @@ func (m *MongoPump) Init(config interface{}) error { } func (m *MongoPump) capCollection() (ok bool) { - - var colName = m.dbConf.CollectionName - var colCapMaxSizeBytes = m.dbConf.CollectionCapMaxSizeBytes - var colCapEnable = m.dbConf.CollectionCapEnable + colName := m.dbConf.CollectionName + colCapMaxSizeBytes := m.dbConf.CollectionCapMaxSizeBytes + colCapEnable := m.dbConf.CollectionCapEnable if !colCapEnable { return false @@ -379,10 +268,11 @@ func (m *MongoPump) capCollection() (ok bool) { m.log.Infof("-- No max collection size set for %s, defaulting to %d", colName, colCapMaxSizeBytes) } - sess := m.dbSession.Copy() - defer sess.Close() + d := dbObject{ + tableName: colName, + } - err = m.dbSession.DB("").C(colName).Create(&mgo.CollectionInfo{Capped: true, MaxBytes: colCapMaxSizeBytes}) + err = m.store.Migrate(context.Background(), []id.DBObject{d}, dbm.DBM{"capped": true, "maxBytes": colCapMaxSizeBytes}) if err != nil { m.log.Errorf("Unable to create capped collection for (%s). 
%s", colName, err.Error()) @@ -396,109 +286,79 @@ func (m *MongoPump) capCollection() (ok bool) { // collectionExists checks to see if a collection name exists in the db. func (m *MongoPump) collectionExists(name string) (bool, error) { - sess := m.dbSession.Copy() - defer sess.Close() - - colNames, err := sess.DB("").CollectionNames() - if err != nil { - m.log.Error("Unable to get collection names: ", err) - - return false, err - } - - for _, coll := range colNames { - if coll == name { - return true, nil - } - } - - return false, nil + return m.store.HasTable(context.Background(), name) } -func (m *MongoPump) ensureIndexes() error { +func (m *MongoPump) ensureIndexes(collectionName string) error { if m.dbConf.OmitIndexCreation { m.log.Debug("omit_index_creation set to true, omitting index creation..") return nil } if m.dbConf.MongoDBType == StandardMongo { - exists, errExists := m.collectionExists(m.dbConf.CollectionName) + exists, errExists := m.collectionExists(collectionName) if errExists == nil && exists { - m.log.Info("Collection ", m.dbConf.CollectionName, " exists, omitting index creation..") + m.log.Info("Collection ", collectionName, " exists, omitting index creation..") return nil } } var err error - sess := m.dbSession.Copy() - defer sess.Close() - - c := sess.DB("").C(m.dbConf.CollectionName) - - orgIndex := mgo.Index{ - Key: []string{"orgid"}, + orgIndex := index.Index{ + Keys: []dbm.DBM{{"orgid": 1}}, Background: m.dbConf.MongoDBType == StandardMongo, } - err = c.EnsureIndex(orgIndex) + d := createDBObject(collectionName) + + err = m.store.CreateIndex(context.Background(), d, orgIndex) if err != nil { return err } - apiIndex := mgo.Index{ - Key: []string{"apiid"}, + apiIndex := index.Index{ + Keys: []dbm.DBM{{"apiid": 1}}, Background: m.dbConf.MongoDBType == StandardMongo, } - err = c.EnsureIndex(apiIndex) + err = m.store.CreateIndex(context.Background(), d, apiIndex) if err != nil { return err } - logBrowserIndex := mgo.Index{ + logBrowserIndex := index.Index{ Name: "logBrowserIndex", - Key: []string{"-timestamp", "orgid", "apiid", "apikey", "responsecode"}, + Keys: []dbm.DBM{{"timestamp": -1}, {"orgid": 1}, {"apiid": 1}, {"apikey": 1}, {"responsecode": 1}}, Background: m.dbConf.MongoDBType == StandardMongo, } - - err = c.EnsureIndex(logBrowserIndex) - if err != nil && !strings.Contains(err.Error(), "already exists with a different name") { - return err - } - - return nil + return m.store.CreateIndex(context.Background(), d, logBrowserIndex) } func (m *MongoPump) connect() { - var err error - var dialInfo *mgo.DialInfo - - dialInfo, err = mongoDialInfo(m.dbConf.BaseMongoConf) + if m.dbConf.MongoDriverType == "" { + m.dbConf.MongoDriverType = persistent.Mgo + } + + store, err := persistent.NewPersistentStorage(&persistent.ClientOpts{ + ConnectionString: m.dbConf.MongoURL, + UseSSL: m.dbConf.MongoUseSSL, + SSLInsecureSkipVerify: m.dbConf.MongoSSLInsecureSkipVerify, + SSLAllowInvalidHostnames: m.dbConf.MongoSSLAllowInvalidHostnames, + SSLCAFile: m.dbConf.MongoSSLCAFile, + SSLPEMKeyfile: m.dbConf.MongoSSLPEMKeyfile, + SessionConsistency: m.dbConf.MongoSessionConsistency, + ConnectionTimeout: m.timeout, + Type: m.dbConf.MongoDriverType, + }) if err != nil { - m.log.Panic("Mongo URL is invalid: ", err) - } - - if m.timeout > 0 { - dialInfo.Timeout = time.Second * time.Duration(m.timeout) - } - m.dbSession, err = mgo.DialWithInfo(dialInfo) - - for err != nil { - m.log.WithError(err).WithField("dialinfo", m.dbConf.BaseMongoConf.GetBlurredURL()).Error("Mongo connection failed. 
Retrying.") - time.Sleep(5 * time.Second) - m.dbSession, err = mgo.DialWithInfo(dialInfo) + m.log.Fatal("Failed to connect: ", err) } - if err == nil && m.dbConf.MongoDBType == 0 { - m.dbConf.MongoDBType = mongoType(m.dbSession) - } - - m.dbConf.SetMongoConsistency(m.dbSession) + m.store = store } func (m *MongoPump) WriteData(ctx context.Context, data []interface{}) error { - collectionName := m.dbConf.CollectionName if collectionName == "" { m.log.Fatal("No collection name!") @@ -506,32 +366,19 @@ func (m *MongoPump) WriteData(ctx context.Context, data []interface{}) error { m.log.Debug("Attempting to write ", len(data), " records...") - for m.dbSession == nil { - m.log.Debug("Connecting to analytics store") - m.connect() - } accumulateSet := m.AccumulateSet(data, false) errCh := make(chan error, len(accumulateSet)) for _, dataSet := range accumulateSet { - go func(dataSet []interface{}, errCh chan error) { - sess := m.dbSession.Copy() - defer sess.Close() - - analyticsCollection := sess.DB("").C(collectionName) - + go func(errCh chan error, dataSet ...id.DBObject) { m.log.WithFields(logrus.Fields{ "collection": collectionName, "number of records": len(dataSet), }).Debug("Attempt to purge records") - err := analyticsCollection.Insert(dataSet...) + err := m.store.Insert(context.Background(), dataSet...) if err != nil { m.log.WithFields(logrus.Fields{"collection": collectionName, "number of records": len(dataSet)}).Error("Problem inserting to mongo collection: ", err) - - if strings.Contains(strings.ToLower(err.Error()), "closed explicitly") { - m.log.Warning("--> Detected connection failure!") - } errCh <- err } errCh <- nil @@ -539,7 +386,7 @@ func (m *MongoPump) WriteData(ctx context.Context, data []interface{}) error { "collection": collectionName, "number of records": len(dataSet), }).Info("Completed purging the records") - }(dataSet, errCh) + }(errCh, dataSet...) } for range accumulateSet { @@ -555,85 +402,112 @@ func (m *MongoPump) WriteData(ctx context.Context, data []interface{}) error { return nil } -func (m *MongoPump) AccumulateSet(data []interface{}, isForGraphRecords bool) [][]interface{} { +// AccumulateSet groups data items into chunks based on the max batch size limit while handling graph analytics records separately. +// It returns a 2D array of DBObjects. 
+func (m *MongoPump) AccumulateSet(data []interface{}, isForGraphRecords bool) [][]id.DBObject { accumulatorTotal := 0 - returnArray := make([][]interface{}, 0) - thisResultSet := make([]interface{}, 0) + returnArray := make([][]id.DBObject, 0) + thisResultSet := make([]id.DBObject, 0) for i, item := range data { - thisItem := item.(analytics.AnalyticsRecord) - if thisItem.ResponseCode == -1 { + // Process the current item and determine if it should be skipped + thisItem, skip := m.processItem(item, isForGraphRecords) + if skip { continue } - // Skip this record if it is a graph analytics record, they will be handled in a different pump - isGraphRecord := thisItem.IsGraphRecord() - if isGraphRecord != isForGraphRecords { - continue - } + // If collection name is not set, we'll use the default one + thisItem.CollectionName = m.dbConf.CollectionName - // Add 1 KB for metadata as average - sizeBytes := len(thisItem.RawRequest) + len(thisItem.RawResponse) + 1024 + // Calculate the size of the current item + sizeBytes := m.getItemSizeBytes(thisItem) - m.log.Debug("Size is: ", sizeBytes) + // Handle large documents that exceed the max document size limit + m.handleLargeDocuments(thisItem, sizeBytes, isForGraphRecords) - if sizeBytes > m.dbConf.MaxDocumentSizeBytes && !isGraphRecord { - m.log.Warning("Document too large, not writing raw request and raw response!") + // Accumulate the item and update the accumulator total, result set, and return array + accumulatorTotal, thisResultSet, returnArray = m.accumulate(thisResultSet, returnArray, thisItem, sizeBytes, accumulatorTotal, i == (len(data)-1)) + } - thisItem.RawRequest = "" - thisItem.RawResponse = base64.StdEncoding.EncodeToString([]byte("Document too large, not writing raw request and raw response!")) - } + // Append the remaining result set to the return array if it's not empty + if len(thisResultSet) > 0 && len(returnArray) == 0 { + returnArray = append(returnArray, thisResultSet) + } + return returnArray +} - if (accumulatorTotal + sizeBytes) <= m.dbConf.MaxInsertBatchSizeBytes { - accumulatorTotal += sizeBytes - } else { - m.log.Debug("Created new chunk entry") - if len(thisResultSet) > 0 { - returnArray = append(returnArray, thisResultSet) - } +// processItem checks if the item should be processed based on its ResponseCode and if it's a graph record. +// It returns the processed item and a boolean indicating if the item should be skipped. +func (m *MongoPump) processItem(item interface{}, isForGraphRecords bool) (*analytics.AnalyticsRecord, bool) { + thisItem, ok := item.(analytics.AnalyticsRecord) + if !ok { + m.log.Error("Couldn't convert item to analytics.AnalyticsRecord") + return nil, true + } + if thisItem.ResponseCode == -1 { + return &thisItem, true + } - thisResultSet = make([]interface{}, 0) - accumulatorTotal = sizeBytes - } + isGraphRecord := thisItem.IsGraphRecord() + if isGraphRecord != isForGraphRecords { + return &thisItem, true + } - m.log.Debug("Accumulator is: ", accumulatorTotal) - thisResultSet = append(thisResultSet, thisItem) + return &thisItem, false +} + +// getItemSizeBytes calculates the size of the item in bytes, including an additional 1 KB for metadata. +func (m *MongoPump) getItemSizeBytes(thisItem *analytics.AnalyticsRecord) int { + // Add 1 KB for metadata as average + return len(thisItem.RawRequest) + len(thisItem.RawResponse) + 1024 +} + +// handleLargeDocuments checks if the item size exceeds the max document size limit and modifies the item if necessary. 
+func (m *MongoPump) handleLargeDocuments(thisItem *analytics.AnalyticsRecord, sizeBytes int, isGraphRecord bool) { + if sizeBytes > m.dbConf.MaxDocumentSizeBytes && !isGraphRecord { + m.log.Warning("Document too large, not writing raw request and raw response!") + + thisItem.RawRequest = "" + thisItem.RawResponse = base64.StdEncoding.EncodeToString([]byte("Document too large, not writing raw request and raw response!")) + } +} - m.log.Debug(accumulatorTotal, " of ", m.dbConf.MaxInsertBatchSizeBytes) - // Append the last element if the loop is about to end - if i == (len(data) - 1) { - m.log.Debug("Appending last entry") +// accumulate processes the given item and updates the accumulator total, result set, and return array. +// It manages chunking the data into separate sets based on the max batch size limit, and appends the last item when necessary. +func (m *MongoPump) accumulate(thisResultSet []id.DBObject, returnArray [][]id.DBObject, thisItem *analytics.AnalyticsRecord, sizeBytes, accumulatorTotal int, isLastItem bool) (int, []id.DBObject, [][]id.DBObject) { + if (accumulatorTotal + sizeBytes) <= m.dbConf.MaxInsertBatchSizeBytes { + accumulatorTotal += sizeBytes + } else { + m.log.Debug("Created new chunk entry") + if len(thisResultSet) > 0 { returnArray = append(returnArray, thisResultSet) } + + thisResultSet = make([]id.DBObject, 0) + accumulatorTotal = sizeBytes } - if len(thisResultSet) > 0 && len(returnArray) == 0 { + m.log.Debug("Accumulator is: ", accumulatorTotal) + thisResultSet = append(thisResultSet, thisItem) + + m.log.Debug(accumulatorTotal, " of ", m.dbConf.MaxInsertBatchSizeBytes) + if isLastItem { + m.log.Debug("Appending last entry") returnArray = append(returnArray, thisResultSet) } - return returnArray + + return accumulatorTotal, thisResultSet, returnArray } // WriteUptimeData will pull the data from the in-memory store and drop it into the specified MongoDB collection func (m *MongoPump) WriteUptimeData(data []interface{}) { - - for m.dbSession == nil { - m.log.Debug("Connecting to mongoDB store") - m.connect() - } - - collectionName := "tyk_uptime_analytics" - sess := m.dbSession.Copy() - defer sess.Close() - - analyticsCollection := sess.DB("").C(collectionName) - m.log.Debug("Uptime Data: ", len(data)) if len(data) == 0 { return } - keys := make([]interface{}, len(data)) + keys := make([]id.DBObject, len(data)) for i, v := range data { decoded := analytics.UptimeReportData{} @@ -644,21 +518,14 @@ func (m *MongoPump) WriteUptimeData(data []interface{}) { continue } - keys[i] = interface{}(decoded) + keys[i] = &decoded m.log.Debug("Decoded Record: ", decoded) } - m.log.Debug("Writing data to ", collectionName) - - if err := analyticsCollection.Insert(keys...); err != nil { + m.log.Debug("Writing data to ", m.dbConf.CollectionName) + if err := m.store.Insert(context.Background(), keys...); err != nil { m.log.Error("Problem inserting to mongo collection: ", err) - - if strings.Contains(err.Error(), "Closed explicitly") || strings.Contains(err.Error(), "EOF") { - m.log.Warning("--> Detected connection failure, reconnecting") - - m.connect() - } } } diff --git a/pumps/mongo_aggregate.go b/pumps/mongo_aggregate.go index a63eece4b..f900f5313 100644 --- a/pumps/mongo_aggregate.go +++ b/pumps/mongo_aggregate.go @@ -9,24 +9,28 @@ import ( "time" "github.com/kelseyhightower/envconfig" - "github.com/lonelycode/mgohacks" "github.com/mitchellh/mapstructure" - "gopkg.in/mgo.v2" - "gopkg.in/mgo.v2/bson" + "github.com/sirupsen/logrus" + 
"github.com/TykTechnologies/storage/persistent" + "github.com/TykTechnologies/storage/persistent/dbm" + "github.com/TykTechnologies/storage/persistent/index" "github.com/TykTechnologies/tyk-pump/analytics" - "github.com/sirupsen/logrus" ) -var mongoAggregatePumpPrefix = "PMP_MONGOAGG" -var mongoAggregateDefaultEnv = PUMPS_ENV_PREFIX + "_MONGOAGGREGATE" + PUMPS_ENV_META_PREFIX +var ( + mongoAggregatePumpPrefix = "PMP_MONGOAGG" + mongoAggregateDefaultEnv = PUMPS_ENV_PREFIX + "_MONGOAGGREGATE" + PUMPS_ENV_META_PREFIX +) -var THRESHOLD_LEN_TAG_LIST = 1000 -var COMMON_TAGS_COUNT = 5 +var ( + ThresholdLenTagList = 1000 + CommonTagsCount = 5 +) type MongoAggregatePump struct { - dbSession *mgo.Session - dbConf *MongoAggregateConf + store persistent.PersistentStorage + dbConf *MongoAggregateConf CommonPumpConfig } @@ -124,8 +128,8 @@ func (m *MongoAggregatePump) printAlert(doc analytics.AnalyticsRecordAggregate, // list 5 common tag prefix l := len(listOfCommonPrefix) - if l > COMMON_TAGS_COUNT { - l = COMMON_TAGS_COUNT + if l > CommonTagsCount { + l = CommonTagsCount } m.log.Warnf("WARNING: Found more than %v tag entries per document, which may cause performance issues with aggregate logs. List of most common tag-prefix: [%v]. You can ignore these tags using ignore_tag_prefix_list option", thresholdLenTagList, strings.Join(listOfCommonPrefix[:l], ", ")) @@ -168,14 +172,14 @@ func (m *MongoAggregatePump) Init(config interface{}) error { processPumpEnvVars(m, m.log, m.dbConf, mongoAggregateDefaultEnv) - //we keep this env check for backward compatibility + // we keep this env check for backward compatibility overrideErr := envconfig.Process(mongoAggregatePumpPrefix, m.dbConf) if overrideErr != nil { m.log.Error("Failed to process environment variables for mongo aggregate pump: ", overrideErr) } if m.dbConf.ThresholdLenTagList == 0 { - m.dbConf.ThresholdLenTagList = THRESHOLD_LEN_TAG_LIST + m.dbConf.ThresholdLenTagList = ThresholdLenTagList } m.SetAggregationTime() @@ -185,7 +189,7 @@ func (m *MongoAggregatePump) Init(config interface{}) error { m.log.Info(m.GetName() + " Initialized") // look for the last record timestamp stored in the collection - lastTimestampAgggregateRecord, err := getLastDocumentTimestamp(m.dbSession, analytics.AgggregateMixedCollectionName) + lastTimestampAgggregateRecord, err := m.getLastDocumentTimestamp() // we will set it to the lastDocumentTimestamp map to track the timestamp of different documents of different Mongo Aggregators if err != nil { @@ -199,92 +203,89 @@ func (m *MongoAggregatePump) Init(config interface{}) error { func (m *MongoAggregatePump) connect() { var err error - var dialInfo *mgo.DialInfo - dialInfo, err = mongoDialInfo(m.dbConf.BaseMongoConf) + if m.dbConf.MongoDriverType == "" { + m.dbConf.MongoDriverType = "mgo" + } + + m.store, err = persistent.NewPersistentStorage(&persistent.ClientOpts{ + ConnectionString: m.dbConf.MongoURL, + UseSSL: m.dbConf.MongoUseSSL, + SSLInsecureSkipVerify: m.dbConf.MongoSSLInsecureSkipVerify, + SSLAllowInvalidHostnames: m.dbConf.MongoSSLAllowInvalidHostnames, + SSLCAFile: m.dbConf.MongoSSLCAFile, + SSLPEMKeyfile: m.dbConf.MongoSSLPEMKeyfile, + SessionConsistency: m.dbConf.MongoSessionConsistency, + ConnectionTimeout: m.timeout, + Type: m.dbConf.MongoDriverType, + }) if err != nil { - m.log.Panic("Mongo URL is invalid: ", err) - } - - if m.timeout > 0 { - dialInfo.Timeout = time.Second * time.Duration(m.timeout) - } - - m.dbSession, err = mgo.DialWithInfo(dialInfo) - - for err != nil { - 
m.log.WithError(err).WithField("dialinfo", m.dbConf.BaseMongoConf.GetBlurredURL()).Error("Mongo connection failed. Retrying.") - time.Sleep(5 * time.Second) - m.dbSession, err = mgo.DialWithInfo(dialInfo) + m.log.Fatal("Failed to connect to mongo: ", err) } - - if err == nil && m.dbConf.MongoDBType == 0 { - m.dbConf.MongoDBType = mongoType(m.dbSession) - } - - m.dbConf.SetMongoConsistency(m.dbSession) } -func (m *MongoAggregatePump) ensureIndexes(c *mgo.Collection) error { +func (m *MongoAggregatePump) ensureIndexes(collectionName string) error { if m.dbConf.OmitIndexCreation { m.log.Debug("omit_index_creation set to true, omitting index creation..") return nil } - //We are going to check if the collection exists only when the DB Type is MongoDB. The mgo CollectionNames func leaks cursors on DocDB. + // We are going to check if the collection exists only when the DB Type is MongoDB. The mgo CollectionNames func leaks cursors on DocDB. if m.dbConf.MongoDBType == StandardMongo { - exists, errExists := m.collectionExists(c.Name) + exists, errExists := m.collectionExists(collectionName) if errExists == nil && exists { - m.log.Debug("Collection ", c.Name, " exists, omitting index creation") + m.log.Debug("Collection ", collectionName, " exists, omitting index creation") return nil } } - + d := dbObject{ + tableName: collectionName, + } var err error // CosmosDB does not support "expireAt" option if m.dbConf.MongoDBType != CosmosDB { - ttlIndex := mgo.Index{ - Key: []string{"expireAt"}, - ExpireAfter: 0, - Background: m.dbConf.MongoDBType == StandardMongo, + ttlIndex := index.Index{ + Keys: []dbm.DBM{{"expireAt": 1}}, + TTL: 0, + IsTTLIndex: true, + Background: m.dbConf.MongoDBType == StandardMongo, } - - err = mgohacks.EnsureTTLIndex(c, ttlIndex) + err = m.store.CreateIndex(context.Background(), d, ttlIndex) if err != nil { return err } } - apiIndex := mgo.Index{ - Key: []string{"timestamp"}, + apiIndex := index.Index{ + Keys: []dbm.DBM{{"timestamp": 1}}, Background: m.dbConf.MongoDBType == StandardMongo, } - err = c.EnsureIndex(apiIndex) + err = m.store.CreateIndex(context.Background(), d, apiIndex) if err != nil { return err } - orgIndex := mgo.Index{ - Key: []string{"orgid"}, + orgIndex := index.Index{ + Keys: []dbm.DBM{{"orgid": 1}}, Background: m.dbConf.MongoDBType == StandardMongo, } - - return c.EnsureIndex(orgIndex) + return m.store.CreateIndex(context.Background(), d, orgIndex) } func (m *MongoAggregatePump) WriteData(ctx context.Context, data []interface{}) error { m.log.Debug("Attempting to write ", len(data), " records") - if m.dbSession == nil { - m.log.Debug("Connecting to analytics store") - m.connect() - m.WriteData(ctx, data) - } else { - // calculate aggregates - analyticsPerOrg := analytics.AggregateData(data, m.dbConf.TrackAllPaths, m.dbConf.IgnoreTagPrefixList, m.dbConf.MongoURL, m.dbConf.AggregationTime) - // put aggregated data into MongoDB - for orgID, filteredData := range analyticsPerOrg { - err := m.DoAggregatedWriting(ctx, orgID, filteredData) + // calculate aggregates + analyticsPerOrg := analytics.AggregateData(data, m.dbConf.TrackAllPaths, m.dbConf.IgnoreTagPrefixList, m.dbConf.MongoURL, m.dbConf.AggregationTime) + // put aggregated data into MongoDB + writingAttempts := []bool{false} + if m.dbConf.UseMixedCollection { + writingAttempts = append(writingAttempts, true) + } + for orgID := range analyticsPerOrg { + filteredData := analyticsPerOrg[orgID] + for _, isMixedCollection := range writingAttempts { + err := m.DoAggregatedWriting(ctx, &filteredData, 
isMixedCollection) if err != nil { // checking if the error is related to the document size and AggregateSelfHealing is enabled if shouldSelfHeal := m.ShouldSelfHeal(err); shouldSelfHeal { @@ -297,73 +298,24 @@ func (m *MongoAggregatePump) WriteData(ctx context.Context, data []interface{}) } return err } - - m.log.Debug("Processed aggregated data for ", orgID) } + m.log.Debug("Processed aggregated data for ", orgID) } + m.log.Info("Purged ", len(data), " records...") return nil } -func (m *MongoAggregatePump) doMixedWrite(changeDoc analytics.AnalyticsRecordAggregate, query bson.M) { - thisSession := m.dbSession.Copy() - defer thisSession.Close() - analyticsCollection := thisSession.DB("").C(analytics.AgggregateMixedCollectionName) - m.ensureIndexes(analyticsCollection) - - avgChange := mgo.Change{ - Update: changeDoc, - ReturnNew: true, - Upsert: true, - } - - m.log.WithFields(logrus.Fields{ - "collection": analytics.AgggregateMixedCollectionName, - }).Debug("Attempt to upsert aggregated doc") - - final := analytics.AnalyticsRecordAggregate{} - _, avgErr := analyticsCollection.Find(query).Apply(avgChange, &final) - - if avgErr != nil { - m.log.WithFields(logrus.Fields{ - "collection": analytics.AgggregateMixedCollectionName, - }).Error("Mixed coll upsert failure: ", avgErr) - m.HandleWriteErr(avgErr) - } - m.log.WithFields(logrus.Fields{ - "collection": analytics.AgggregateMixedCollectionName, - }).Info("Completed upserting") -} - -func (m *MongoAggregatePump) HandleWriteErr(err error) error { - if err != nil { - m.log.Error("Problem inserting or updating to mongo collection: ", err) - if strings.Contains(err.Error(), "Closed explicitly") || strings.Contains(err.Error(), "EOF") { - m.log.Warning("--> Detected connection failure, reconnecting") - m.connect() - } - } - return err -} - -func (m *MongoAggregatePump) DoAggregatedWriting(ctx context.Context, orgID string, filteredData analytics.AnalyticsRecordAggregate) error { - collectionName, collErr := m.GetCollectionName(orgID) - if collErr != nil { - m.log.Info("No OrgID for AnalyticsRecord, skipping") - return nil - } - thisSession := m.dbSession.Copy() - defer thisSession.Close() - - analyticsCollection := thisSession.DB("").C(collectionName) - indexCreateErr := m.ensureIndexes(analyticsCollection) +func (m *MongoAggregatePump) DoAggregatedWriting(ctx context.Context, filteredData *analytics.AnalyticsRecordAggregate, mixed bool) error { + filteredData.Mixed = mixed + indexCreateErr := m.ensureIndexes(filteredData.TableName()) if indexCreateErr != nil { m.log.Error(indexCreateErr) } - query := bson.M{ + query := dbm.DBM{ "orgid": filteredData.OrgID, "timestamp": filteredData.TimeStamp, } @@ -373,69 +325,43 @@ func (m *MongoAggregatePump) DoAggregatedWriting(ctx context.Context, orgID stri } updateDoc := filteredData.AsChange() - - change := mgo.Change{ - Update: updateDoc, - ReturnNew: true, - Upsert: true, + doc := &analytics.AnalyticsRecordAggregate{ + OrgID: filteredData.OrgID, + Mixed: mixed, } - doc := analytics.AnalyticsRecordAggregate{} - _, err := analyticsCollection.Find(query).Apply(change, &doc) + m.log.WithFields(logrus.Fields{ + "collection": doc.TableName(), + }).Debug("Attempt to upsert aggregated doc") + + err := m.store.Upsert(ctx, doc, query, updateDoc) if err != nil { m.log.WithField("query", query).Error("UPSERT Failure: ", err) - return m.HandleWriteErr(err) + return err } // We have the new doc back, lets fix the averages avgUpdateDoc := doc.AsTimeUpdate() - avgChange := mgo.Change{ - Update: avgUpdateDoc, - 
ReturnNew: true, + + withTimeUpdate := analytics.AnalyticsRecordAggregate{ + OrgID: filteredData.OrgID, } - withTimeUpdate := analytics.AnalyticsRecordAggregate{} - _, avgErr := analyticsCollection.Find(query).Apply(avgChange, &withTimeUpdate) + err = m.store.Upsert(ctx, &withTimeUpdate, query, avgUpdateDoc) + if err != nil { + m.log.WithField("query", query).Error("AvgUpdate Failure: ", err) + return err + } if m.dbConf.ThresholdLenTagList != -1 && (len(withTimeUpdate.Tags) > m.dbConf.ThresholdLenTagList) { m.printAlert(withTimeUpdate, m.dbConf.ThresholdLenTagList) } - if avgErr != nil { - m.log.WithField("query", query).Error("AvgUpdate Failure: ", avgErr) - return m.HandleWriteErr(avgErr) - } - - if m.dbConf.UseMixedCollection { - thisData := analytics.AnalyticsRecordAggregate{} - err := analyticsCollection.Find(query).One(&thisData) - if err != nil { - m.log.WithField("query", query).Error("Couldn't find query doc:", err) - } else { - m.doMixedWrite(thisData, query) - } - } return nil } // collectionExists checks to see if a collection name exists in the db. func (m *MongoAggregatePump) collectionExists(name string) (bool, error) { - sess := m.dbSession.Copy() - defer sess.Close() - - colNames, err := sess.DB("").CollectionNames() - if err != nil { - m.log.Error("Unable to get collection names: ", err) - - return false, err - } - - for _, coll := range colNames { - if coll == name { - return true, nil - } - } - - return false, nil + return m.store.HasTable(context.Background(), name) } // WriteUptimeData will pull the data from the in-memory store and drop it into the specified MongoDB collection @@ -444,16 +370,20 @@ func (m *MongoAggregatePump) WriteUptimeData(data []interface{}) { } // getLastDocumentTimestamp will return the timestamp of the last document in the collection -func getLastDocumentTimestamp(session *mgo.Session, collectionName string) (time.Time, error) { - var doc bson.M - err := session.DB("").C(collectionName).Find(nil).Sort("-$natural").One(&doc) +func (m *MongoAggregatePump) getLastDocumentTimestamp() (time.Time, error) { + d := dbObject{ + tableName: analytics.AgggregateMixedCollectionName, + } + + var result dbm.DBM + err := m.store.Query(context.Background(), d, &result, dbm.DBM{"_sort": "-$natural", "_limit": 1}) if err != nil { return time.Time{}, err } - if ts, ok := doc["timestamp"].(time.Time); ok { + if ts, ok := result["timestamp"].(time.Time); ok { return ts, nil } - return time.Time{}, errors.New("timestamp of type: time.Time not found in bson map") + return time.Time{}, errors.New("timestamp of type: time.Time not found in query result") } // divideAggregationTime divides by two the analytics stored per minute setting diff --git a/pumps/mongo_aggregate_test.go b/pumps/mongo_aggregate_test.go index 57175be30..b79523929 100644 --- a/pumps/mongo_aggregate_test.go +++ b/pumps/mongo_aggregate_test.go @@ -7,14 +7,28 @@ import ( "testing" "time" + "github.com/TykTechnologies/storage/persistent/dbm" + "github.com/TykTechnologies/storage/persistent/id" "github.com/TykTechnologies/tyk-pump/analytics" "github.com/TykTechnologies/tyk-pump/analytics/demo" "github.com/sirupsen/logrus" "github.com/stretchr/testify/assert" - "gopkg.in/mgo.v2" - "gopkg.in/mgo.v2/bson" ) +type dummyObject struct { + tableName string +} + +func (dummyObject) GetObjectID() id.ObjectId { + return "" +} + +func (dummyObject) SetObjectID(id.ObjectId) {} + +func (d dummyObject) TableName() string { + return d.tableName +} + func TestDoAggregatedWritingWithIgnoredAggregations(t *testing.T) { 
cfgPump1 := make(map[string]interface{}) cfgPump1["mongo_url"] = "mongodb://localhost:27017/tyk_analytics" @@ -65,13 +79,9 @@ func TestDoAggregatedWritingWithIgnoredAggregations(t *testing.T) { } defer func() { - //we clean the db after we finish the test - //we use pmp1 session since it should be the same - sess := pmp1.dbSession.Copy() - defer sess.Close() - - if err := sess.DB("").DropDatabase(); err != nil { - panic(err) + err := pmp1.store.DropDatabase(context.Background()) + if err != nil { + t.Errorf("error dropping database: %v", err) } }() @@ -91,28 +101,24 @@ func TestDoAggregatedWritingWithIgnoredAggregations(t *testing.T) { for _, tc := range tcs { t.Run(tc.testName, func(t *testing.T) { - collectionName := "" + newDummyObject := dummyObject{} if tc.IsMixed { - collectionName = analytics.AgggregateMixedCollectionName + newDummyObject.tableName = analytics.AgggregateMixedCollectionName } else { var collErr error - collectionName, collErr = pmp1.GetCollectionName("123") + newDummyObject.tableName, collErr = pmp1.GetCollectionName("123") assert.Nil(t, collErr) } - thisSession := pmp1.dbSession.Copy() - defer thisSession.Close() - - analyticsCollection := thisSession.DB("").C(collectionName) - //we build the query using the timestamp as we do in aggregated analytics - query := bson.M{ + // we build the query using the timestamp as we do in aggregated analytics + query := dbm.DBM{ "orgid": "123", "timestamp": time.Date(timeNow.Year(), timeNow.Month(), timeNow.Day(), timeNow.Hour(), 0, 0, 0, timeNow.Location()), } res := analytics.AnalyticsRecordAggregate{} // fetch the results - errFind := analyticsCollection.Find(query).One(&res) + errFind := pmp1.store.Query(context.Background(), newDummyObject, &res, query) assert.Nil(t, errFind) // double check that the res is not nil @@ -191,12 +197,12 @@ func TestAggregationTime(t *testing.T) { defer func() { // we clean the db after we finish every test case - sess := pmp1.dbSession.Copy() - defer sess.Close() - - if err := sess.DB("").DropDatabase(); err != nil { - panic(err) - } + defer func() { + err := pmp1.store.DropDatabase(context.Background()) + if err != nil { + t.Fatal(err) + } + }() }() ctx := context.TODO() @@ -211,20 +217,15 @@ func TestAggregationTime(t *testing.T) { keys[0] = analytics.AnalyticsRecord{APIID: "api1", OrgID: "123", TimeStamp: timeNow, APIKey: "apikey1"} } - collectionName := analytics.AgggregateMixedCollectionName - - thisSession := pmp1.dbSession.Copy() - defer thisSession.Close() - - analyticsCollection := thisSession.DB("").C(collectionName) - - query := bson.M{ + query := dbm.DBM{ "orgid": "123", } results := []analytics.AnalyticsRecordAggregate{} // fetch the results - errFind := analyticsCollection.Find(query).All(&results) + errFind := pmp1.store.Query(context.Background(), &analytics.AnalyticsRecordAggregate{ + Mixed: true, + }, &results, query) assert.Nil(t, errFind) // double check that the res is not nil @@ -301,14 +302,13 @@ func TestMongoAggregatePump_SelfHealing(t *testing.T) { } defer func() { - // we clean the db after we finish the test - // we use pmp1 session since it should be the same - sess := pmp1.dbSession.Copy() - defer sess.Close() - - if err := sess.DB("").DropDatabase(); err != nil { - panic(err) - } + // we clean the db after we finish every test case + defer func() { + err := pmp1.store.DropDatabase(context.Background()) + if err != nil { + t.Fatal(err) + } + }() }() var count int @@ -327,7 +327,7 @@ func TestMongoAggregatePump_SelfHealing(t *testing.T) { assert.Equal(t, 1, 
pmp1.dbConf.AggregationTime) // checking lastDocumentTimestamp - ts, err := getLastDocumentTimestamp(pmp1.dbSession, "tyk_analytics_aggregates") + ts, err := pmp1.getLastDocumentTimestamp() assert.Nil(t, err) assert.NotNil(t, ts) break @@ -467,50 +467,6 @@ func TestMongoAggregatePump_ShouldSelfHeal(t *testing.T) { } } -func TestMongoAggregatePump_HandleWriteErr(t *testing.T) { - cfgPump1 := make(map[string]interface{}) - cfgPump1["mongo_url"] = "mongodb://localhost:27017/tyk_analytics" - cfgPump1["ignore_aggregations"] = []string{"apikeys"} - cfgPump1["use_mixed_collection"] = true - cfgPump1["store_analytics_per_minute"] = false - pmp1 := MongoAggregatePump{} - - errInit1 := pmp1.Init(cfgPump1) - if errInit1 != nil { - t.Error(errInit1) - return - } - - tests := []struct { - inputErr error - name string - wantErr bool - }{ - { - name: "nil error", - inputErr: nil, - wantErr: false, - }, - { - name: "random error", - inputErr: errors.New("random error"), - wantErr: true, - }, - { - name: "EOF error", - inputErr: errors.New("EOF"), - wantErr: true, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if err := pmp1.HandleWriteErr(tt.inputErr); (err != nil) != tt.wantErr { - t.Errorf("MongoAggregatePump.HandleWriteErr() error = %v, wantErr %v", err, tt.wantErr) - } - }) - } -} - func TestMongoAggregatePump_StoreAnalyticsPerMinute(t *testing.T) { cfgPump1 := make(map[string]interface{}) cfgPump1["mongo_url"] = "mongodb://localhost:27017/tyk_analytics" @@ -528,53 +484,3 @@ func TestMongoAggregatePump_StoreAnalyticsPerMinute(t *testing.T) { // Checking if the aggregation time is set to 1. Doesn't matter if aggregation_time is equal to 45 or 1, the result should be always 1. assert.True(t, pmp1.dbConf.AggregationTime == 1) } - -func TestMongoAggregatePump_SessionConsistency(t *testing.T) { - cfgPump1 := make(map[string]interface{}) - cfgPump1["mongo_url"] = "mongodb://localhost:27017/tyk_analytics" - cfgPump1["ignore_aggregations"] = []string{"apikeys"} - cfgPump1["use_mixed_collection"] = true - cfgPump1["store_analytics_per_minute"] = false - - pmp1 := MongoAggregatePump{} - - tests := []struct { - testName string - sessionConsistency string - expectedSessionMode mgo.Mode - }{ - { - testName: "should set session mode to strong", - sessionConsistency: "strong", - expectedSessionMode: mgo.Strong, - }, - { - testName: "should set session mode to monotonic", - sessionConsistency: "monotonic", - expectedSessionMode: mgo.Monotonic, - }, - { - testName: "should set session mode to eventual", - sessionConsistency: "eventual", - expectedSessionMode: mgo.Eventual, - }, - { - testName: "should set session mode to strong by default", - sessionConsistency: "", - expectedSessionMode: mgo.Strong, - }, - } - - for _, test := range tests { - t.Run(test.testName, func(t *testing.T) { - cfgPump1["mongo_session_consistency"] = test.sessionConsistency - errInit1 := pmp1.Init(cfgPump1) - if errInit1 != nil { - t.Error(errInit1) - return - } - - assert.Equal(t, test.expectedSessionMode, pmp1.dbSession.Mode()) - }) - } -} diff --git a/pumps/mongo_selective.go b/pumps/mongo_selective.go index 1d2f3a5df..c933bf9b2 100644 --- a/pumps/mongo_selective.go +++ b/pumps/mongo_selective.go @@ -4,26 +4,30 @@ import ( "context" "errors" "strings" - "time" "github.com/kelseyhightower/envconfig" - "github.com/lonelycode/mgohacks" "github.com/mitchellh/mapstructure" - "gopkg.in/mgo.v2" + "gopkg.in/vmihailenco/msgpack.v2" + "github.com/TykTechnologies/storage/persistent" + 
"github.com/TykTechnologies/storage/persistent/dbm" + "github.com/TykTechnologies/storage/persistent/id" + "github.com/TykTechnologies/storage/persistent/index" "github.com/TykTechnologies/tyk-pump/analytics" ) type MongoSelectivePump struct { - dbSession *mgo.Session - dbConf *MongoSelectiveConf + store persistent.PersistentStorage + dbConf *MongoSelectiveConf CommonPumpConfig } -var mongoSelectivePrefix = "mongo-pump-selective" -var mongoSelectivePumpPrefix = "PMP_MONGOSEL" -var mongoSelectiveDefaultEnv = PUMPS_ENV_PREFIX + "_MONGOSELECTIVE" + PUMPS_ENV_META_PREFIX +var ( + mongoSelectivePrefix = "mongo-pump-selective" + mongoSelectivePumpPrefix = "PMP_MONGOSEL" + mongoSelectiveDefaultEnv = PUMPS_ENV_PREFIX + "_MONGOSELECTIVE" + PUMPS_ENV_META_PREFIX +) // @PumpConf MongoSelective type MongoSelectiveConf struct { @@ -72,7 +76,7 @@ func (m *MongoSelectivePump) Init(config interface{}) error { processPumpEnvVars(m, m.log, m.dbConf, mongoSelectiveDefaultEnv) - //we keep this env check for backward compatibility + // we keep this env check for backward compatibility overrideErr := envconfig.Process(mongoSelectivePumpPrefix, m.dbConf) if overrideErr != nil { m.log.Error("Failed to process environment variables for mongo selective pump: ", overrideErr) @@ -98,78 +102,79 @@ func (m *MongoSelectivePump) Init(config interface{}) error { func (m *MongoSelectivePump) connect() { var err error - var dialInfo *mgo.DialInfo - - dialInfo, err = mongoDialInfo(m.dbConf.BaseMongoConf) - if err != nil { - m.log.Panic("Mongo URL is invalid: ", err) - } - if m.timeout > 0 { - dialInfo.Timeout = time.Second * time.Duration(m.timeout) + if m.dbConf.MongoDriverType == "" { + // Default to mgo + m.dbConf.MongoDriverType = persistent.Mgo } - m.dbSession, err = mgo.DialWithInfo(dialInfo) - - for err != nil { - m.log.WithError(err).WithField("dialinfo", m.dbConf.BaseMongoConf.GetBlurredURL()).Error("Mongo connection failed. 
Retrying.") - time.Sleep(5 * time.Second) - m.dbSession, err = mgo.DialWithInfo(dialInfo) - } - - if err == nil && m.dbConf.MongoDBType == 0 { - m.dbConf.MongoDBType = mongoType(m.dbSession) + m.store, err = persistent.NewPersistentStorage(&persistent.ClientOpts{ + ConnectionString: m.dbConf.MongoURL, + UseSSL: m.dbConf.MongoUseSSL, + SSLInsecureSkipVerify: m.dbConf.MongoSSLInsecureSkipVerify, + SSLAllowInvalidHostnames: m.dbConf.MongoSSLAllowInvalidHostnames, + SSLCAFile: m.dbConf.MongoSSLCAFile, + SSLPEMKeyfile: m.dbConf.MongoSSLPEMKeyfile, + SessionConsistency: m.dbConf.MongoSessionConsistency, + ConnectionTimeout: m.timeout, + Type: m.dbConf.MongoDriverType, + }) + if err != nil { + m.log.Fatal("Failed to connect to mongo: ", err) } - - m.dbConf.SetMongoConsistency(m.dbSession) } -func (m *MongoSelectivePump) ensureIndexes(c *mgo.Collection) error { +func (m *MongoSelectivePump) ensureIndexes(collectionName string) error { if m.dbConf.OmitIndexCreation { m.log.Debug("omit_index_creation set to true, omitting index creation..") return nil } if m.dbConf.MongoDBType == StandardMongo { - exists, errExists := m.collectionExists(c.Name) + exists, errExists := m.collectionExists(collectionName) if errExists == nil && exists { - m.log.Debug("Collection ", c.Name, " exists, omitting index creation") + m.log.Debug("Collection ", collectionName, " exists, omitting index creation") return nil } } var err error - // CosmosDB does not support "expireAt" option - if m.dbConf.MongoDBType != CosmosDB { - ttlIndex := mgo.Index{ - Key: []string{"expireAt"}, - ExpireAfter: 0, - Background: m.dbConf.MongoDBType == StandardMongo, - } - - err = mgohacks.EnsureTTLIndex(c, ttlIndex) - if err != nil { - return err - } + d := dbObject{ + tableName: collectionName, } - apiIndex := mgo.Index{ - Key: []string{"apiid"}, + apiIndex := index.Index{ + Keys: []dbm.DBM{{"apiid": 1}}, Background: m.dbConf.MongoDBType == StandardMongo, } - err = c.EnsureIndex(apiIndex) + err = m.store.CreateIndex(context.Background(), d, apiIndex) if err != nil { return err } - logBrowserIndex := mgo.Index{ + // CosmosDB does not support "expireAt" option + if m.dbConf.MongoDBType != CosmosDB { + ttlIndex := index.Index{ + Keys: []dbm.DBM{{"expireAt": 1}}, + IsTTLIndex: true, + TTL: 0, + Background: m.dbConf.MongoDBType == StandardMongo, + } + + err = m.store.CreateIndex(context.Background(), d, ttlIndex) + if err != nil { + return err + } + } + + logBrowserIndex := index.Index{ Name: "logBrowserIndex", - Key: []string{"-timestamp", "apiid", "apikey", "responsecode"}, + Keys: []dbm.DBM{{"timestamp": -1}, {"apiid": 1}, {"apikey": 1}, {"responsecode": 1}}, Background: m.dbConf.MongoDBType == StandardMongo, } - err = c.EnsureIndex(logBrowserIndex) + err = m.store.CreateIndex(context.Background(), d, logBrowserIndex) if err != nil && !strings.Contains(err.Error(), "already exists with a different name") { return err } @@ -180,171 +185,166 @@ func (m *MongoSelectivePump) ensureIndexes(c *mgo.Collection) error { func (m *MongoSelectivePump) WriteData(ctx context.Context, data []interface{}) error { m.log.Debug("Attempting to write ", len(data), " records...") - if m.dbSession == nil { - m.log.Debug("Connecting to analytics store") - m.connect() - m.WriteData(ctx, data) - } else { - analyticsPerOrg := make(map[string][]interface{}) - - for _, v := range data { - orgID := v.(analytics.AnalyticsRecord).OrgID - collectionName, collErr := m.GetCollectionName(orgID) - skip := false - if collErr != nil { - m.log.Warning("No OrgID for AnalyticsRecord, 
skipping") - skip = true - } + analyticsPerOrg := make(map[string][]interface{}) - if !skip { - _, found := analyticsPerOrg[collectionName] - if !found { - analyticsPerOrg[collectionName] = []interface{}{v} - } else { - analyticsPerOrg[collectionName] = append(analyticsPerOrg[collectionName], v) - } + for _, v := range data { + orgID := v.(analytics.AnalyticsRecord).OrgID + collectionName, collErr := m.GetCollectionName(orgID) + skip := false + if collErr != nil { + m.log.Warning("No OrgID for AnalyticsRecord, skipping") + skip = true + } + + if !skip { + _, found := analyticsPerOrg[collectionName] + if !found { + analyticsPerOrg[collectionName] = []interface{}{v} + } else { + analyticsPerOrg[collectionName] = append(analyticsPerOrg[collectionName], v) } } + } - for col_name, filtered_data := range analyticsPerOrg { - - for _, dataSet := range m.AccumulateSet(filtered_data) { - thisSession := m.dbSession.Copy() - defer thisSession.Close() - analyticsCollection := thisSession.DB("").C(col_name) - - indexCreateErr := m.ensureIndexes(analyticsCollection) - if indexCreateErr != nil { - m.log.WithField("collection", col_name).Error(indexCreateErr) - } - - err := analyticsCollection.Insert(dataSet...) - if err != nil { - m.log.WithField("collection", col_name).Error("Problem inserting to mongo collection: ", err) - if strings.Contains(strings.ToLower(err.Error()), "closed explicitly") { - m.log.Warning("--> Detected connection failure, reconnecting") - m.connect() - } - } + for colName, filteredData := range analyticsPerOrg { + for _, dataSet := range m.AccumulateSet(filteredData) { + indexCreateErr := m.ensureIndexes(colName) + if indexCreateErr != nil { + m.log.WithField("collection", colName).Error(indexCreateErr) } + err := m.store.Insert(context.Background(), dataSet...) + if err != nil { + m.log.WithField("collection", colName).Error("Problem inserting to mongo collection: ", err) + } } - } + m.log.Info("Purged ", len(data), " records...") return nil } -func (m *MongoSelectivePump) AccumulateSet(data []interface{}) [][]interface{} { +// AccumulateSet organizes analytics data into a set of chunks based on their size. +func (m *MongoSelectivePump) AccumulateSet(data []interface{}) [][]id.DBObject { accumulatorTotal := 0 - returnArray := make([][]interface{}, 0) + returnArray := make([][]id.DBObject, 0) + thisResultSet := make([]id.DBObject, 0) - thisResultSet := make([]interface{}, 0) + // Process each item in the data array. for i, item := range data { - thisItem := item.(analytics.AnalyticsRecord) - if thisItem.ResponseCode == -1 { + thisItem, skip := m.processItem(item) + if skip { continue } - // Add 1 KB for metadata as average - sizeBytes := len([]byte(thisItem.RawRequest)) + len([]byte(thisItem.RawResponse)) + 1024 - skip := false - if sizeBytes > m.dbConf.MaxDocumentSizeBytes { - m.log.Warning("Document too large, skipping!") - skip = true - } + sizeBytes := m.getItemSizeBytes(thisItem) + accumulatorTotal, thisResultSet, returnArray = m.accumulate(thisResultSet, returnArray, thisItem, sizeBytes, accumulatorTotal, i == (len(data)-1)) + } - m.log.Debug("Size is: ", sizeBytes) + return returnArray +} - if !skip { - if (accumulatorTotal + sizeBytes) < m.dbConf.MaxInsertBatchSizeBytes { - accumulatorTotal += sizeBytes - } else { - m.log.Debug("Created new chunk entry") - if len(thisResultSet) > 0 { - returnArray = append(returnArray, thisResultSet) - } +// processItem checks if the item should be skipped or processed. 
+func (m *MongoSelectivePump) processItem(item interface{}) (*analytics.AnalyticsRecord, bool) { + thisItem, ok := item.(analytics.AnalyticsRecord) + if !ok { + m.log.Warning("Couldn't convert item to analytics.AnalyticsRecord, skipping") + return &thisItem, true + } - thisResultSet = make([]interface{}, 0) - accumulatorTotal = sizeBytes - } - thisResultSet = append(thisResultSet, thisItem) + // Skip item if the response code is -1. + if thisItem.ResponseCode == -1 { + return &thisItem, true + } - m.log.Debug(accumulatorTotal, " of ", m.dbConf.MaxInsertBatchSizeBytes) - // Append the last element if the loop is about to end - if i == (len(data) - 1) { - m.log.Debug("Appending last entry") - returnArray = append(returnArray, thisResultSet) - } - } + return &thisItem, false +} + +// getItemSizeBytes calculates the size of the analytics item in bytes and checks if it's within the allowed limit. +func (m *MongoSelectivePump) getItemSizeBytes(thisItem *analytics.AnalyticsRecord) int { + // Add 1 KB for metadata as average. + sizeBytes := len([]byte(thisItem.RawRequest)) + len([]byte(thisItem.RawResponse)) + 1024 + // Skip item if its size exceeds the maximum allowed document size. + if sizeBytes > m.dbConf.MaxDocumentSizeBytes { + m.log.Warning("Document too large, skipping!") + return -1 } - return returnArray + m.log.Debug("Size is:", sizeBytes) + return sizeBytes } -// WriteUptimeData will pull the data from the in-memory store and drop it into the specified MongoDB collection -func (m *MongoSelectivePump) WriteUptimeData(data []interface{}) { - if m.dbSession == nil { - m.log.Debug("Connecting to mongoDB store") - m.connect() - m.WriteUptimeData(data) +// accumulate processes the given item and updates the accumulator total, result set, and return array. +// It manages chunking the data into separate sets based on the max batch size limit, and appends the last item when necessary. +func (m *MongoSelectivePump) accumulate(thisResultSet []id.DBObject, returnArray [][]id.DBObject, thisItem *analytics.AnalyticsRecord, sizeBytes, accumulatorTotal int, isLastItem bool) (int, []id.DBObject, [][]id.DBObject) { + // If the item size is invalid (negative), return the current state + if sizeBytes < 0 { + return accumulatorTotal, thisResultSet, returnArray + } + + // If the current accumulator total plus the item size is within the max batch size limit, + // add the item size to the accumulator total + if (accumulatorTotal + sizeBytes) < m.dbConf.MaxInsertBatchSizeBytes { + accumulatorTotal += sizeBytes } else { - m.log.Info("MONGO Selective Should not be writing uptime data!") - collectionName := "tyk_uptime_analytics" - thisSession := m.dbSession.Copy() - defer thisSession.Close() - analyticsCollection := thisSession.DB("").C(collectionName) - - m.log.Debug("Uptime Data: ", len(data)) - - if len(data) > 0 { - keys := make([]interface{}, len(data)) - - for i, v := range data { - decoded := analytics.UptimeReportData{} - // ToDo: should this work with serializer? - err := msgpack.Unmarshal(v.([]byte), &decoded) - m.log.Debug("Decoded Record: ", decoded) - if err != nil { - m.log.Error("Couldn't unmarshal analytics data:", err) - } else { - keys[i] = interface{}(decoded) - } - } + // If the item size exceeds the max batch size limit, + // create a new chunk entry and reset the accumulator total and result set + m.log.Debug("Created new chunk entry") + if len(thisResultSet) > 0 { + returnArray = append(returnArray, thisResultSet) + } - err := analyticsCollection.Insert(keys...) 
- m.log.Debug("Wrote data to ", collectionName) + thisResultSet = make([]id.DBObject, 0) + accumulatorTotal = sizeBytes + } - if err != nil { - m.log.WithField("collection", collectionName).Error("Problem inserting to mongo collection: ", err) - if strings.Contains(err.Error(), "Closed explicitly") || strings.Contains(err.Error(), "EOF") { - m.log.Warning("--> Detected connection failure, reconnecting") - m.connect() - } - } - } + thisResultSet = append(thisResultSet, thisItem) + + m.log.Debug(accumulatorTotal, " of ", m.dbConf.MaxInsertBatchSizeBytes) + + if isLastItem { + m.log.Debug("Appending last entry") + returnArray = append(returnArray, thisResultSet) } -} -// collectionExists checks to see if a collection name exists in the db. -func (m *MongoSelectivePump) collectionExists(name string) (bool, error) { - sess := m.dbSession.Copy() - defer sess.Close() + return accumulatorTotal, thisResultSet, returnArray +} - colNames, err := sess.DB("").CollectionNames() - if err != nil { - m.log.Error("Unable to get collection names: ", err) +// WriteUptimeData will pull the data from the in-memory store and drop it into the specified MongoDB collection +func (m *MongoSelectivePump) WriteUptimeData(data []interface{}) { + m.log.Info("MONGO Selective Should not be writing uptime data!") + m.log.Debug("Uptime Data: ", len(data)) - return false, err + if len(data) == 0 { + return } - for _, coll := range colNames { - if coll == name { - return true, nil + keys := make([]id.DBObject, len(data)) + + for i, v := range data { + decoded := analytics.UptimeReportData{} + + if err := msgpack.Unmarshal([]byte(v.(string)), &decoded); err != nil { + // ToDo: should this work with serializer? + m.log.Error("Couldn't unmarshal analytics data:", err) + continue } + + keys[i] = &decoded + + m.log.Debug("Decoded Record: ", decoded) + } + + m.log.Debug("Writing data to ", analytics.UptimeSQLTable) + + if err := m.store.Insert(context.Background(), keys...); err != nil { + m.log.Error("Problem inserting to mongo collection: ", err) } +} - return false, nil +// collectionExists checks to see if a collection name exists in the db. 
+func (m *MongoSelectivePump) collectionExists(name string) (bool, error) { + return m.store.HasTable(context.Background(), name) } diff --git a/pumps/mongo_selective_test.go b/pumps/mongo_selective_test.go index a86bc2d18..1093a6429 100644 --- a/pumps/mongo_selective_test.go +++ b/pumps/mongo_selective_test.go @@ -1,11 +1,16 @@ package pumps import ( + "context" + "fmt" "testing" + "time" + "github.com/TykTechnologies/storage/persistent/dbm" + "github.com/TykTechnologies/storage/persistent/id" "github.com/TykTechnologies/tyk-pump/analytics" "github.com/stretchr/testify/assert" - "gopkg.in/mgo.v2" + "gopkg.in/vmihailenco/msgpack.v2" ) func TestMongoSelectivePump_AccumulateSet(t *testing.T) { @@ -98,43 +103,258 @@ func TestMongoSelectivePump_AccumulateSet(t *testing.T) { )) } -func TestMongoSelectivePump_SessionConsistency(t *testing.T) { +func TestConnection(t *testing.T) { mPump := MongoSelectivePump{} conf := defaultSelectiveConf() mPump.dbConf = &conf + // Checking if the connection is nil before connecting + assert.Nil(t, mPump.store) + mPump.log = log.WithField("prefix", mongoPrefix) + + t.Run("should connect to mgo", func(t *testing.T) { + // If connect fails, it will stop the execution with a fatal error + mPump.connect() + // Checking if the connection is not nil after connecting + assert.NotNil(t, mPump.store) + // Checking if the connection is alive + assert.Nil(t, mPump.store.Ping(context.Background())) + }) +} + +func TestEnsureIndexes(t *testing.T) { + mPump := MongoSelectivePump{} + conf := defaultSelectiveConf() + mPump.dbConf = &conf + mPump.log = log.WithField("prefix", mongoPrefix) + mPump.connect() + + // _id, apiid_1, expireAt_1, logBrowserIndex are the current indexes + numberOfCreatedIndexes := 4 + + t.Run("should ensure indexes", func(t *testing.T) { + defer func() { + assert.NoError(t, mPump.store.DropDatabase(context.Background())) + }() + collectionName := "index_test" + obj := dbObject{ + tableName: collectionName, + } + + err := mPump.ensureIndexes(collectionName) + assert.NoError(t, err) + + // Checking if the indexes are created + indexes, err := mPump.store.GetIndexes(context.Background(), obj) + assert.NoError(t, err) + assert.NotNil(t, indexes) + + // Checking if the indexes are created with the correct name + fmt.Printf("indexes: %#v\n", indexes) + assert.Len(t, indexes, numberOfCreatedIndexes) + assert.Equal(t, "_id_", indexes[0].Name) + assert.Equal(t, "apiid_1", indexes[1].Name) + assert.Equal(t, "expireAt_1", indexes[2].Name) + assert.Equal(t, "logBrowserIndex", indexes[3].Name) + + // Checking if the indexes are created with the correct keys + assert.Len(t, indexes[0].Keys, 1) + assert.Len(t, indexes[1].Keys, 1) + assert.Len(t, indexes[2].Keys, 1) + assert.Len(t, indexes[3].Keys, 4) // 4 keys because of the compound index: timestamp, apiid, apikey, responsecode + }) + t.Run("should ensure one less index using CosmosDB", func(t *testing.T) { + defer func() { + mPump.dbConf.MongoDBType = StandardMongo + assert.NoError(t, mPump.store.DropDatabase(context.Background())) + }() + mPump.dbConf.MongoDBType = CosmosDB + collectionName := "index_test_cosmosdb" + obj := dbObject{ + tableName: collectionName, + } + + err := mPump.ensureIndexes(obj.TableName()) + assert.NoError(t, err) + + // Checking if the indexes are created + indexes, err := mPump.store.GetIndexes(context.Background(), obj) + assert.NoError(t, err) + assert.NotNil(t, indexes) + + // Checking if the indexes are created with the correct name + assert.Len(t, indexes, numberOfCreatedIndexes-1) + 
assert.Equal(t, "_id_", indexes[0].Name) + assert.Equal(t, "apiid_1", indexes[1].Name) + assert.Equal(t, "logBrowserIndex", indexes[2].Name) + + // Checking if the indexes are created with the correct keys + assert.Len(t, indexes[0].Keys, 1) + assert.Len(t, indexes[1].Keys, 1) + assert.Len(t, indexes[2].Keys, 4) // 4 keys because of the compound index: timestamp, apiid, apikey, responsecode + }) + + t.Run("should not ensure indexes because of omit index creation setting", func(t *testing.T) { + defer func() { + conf.OmitIndexCreation = false + assert.NoError(t, mPump.store.DropDatabase(context.Background())) + }() + collectionName := "index_test" + obj := dbObject{ + tableName: collectionName, + } + conf.OmitIndexCreation = true + + err := mPump.ensureIndexes(collectionName) + assert.NoError(t, err) + + // Since the indexes were not created, the collection does not exist, and an error is expected + indexes, err := mPump.store.GetIndexes(context.Background(), obj) + assert.Error(t, err) + assert.Nil(t, indexes) + }) + + t.Run("should not ensure indexes because the collection already exists", func(t *testing.T) { + defer func() { + assert.NoError(t, mPump.store.DropDatabase(context.Background())) + }() + collectionName := "index_test" + obj := dbObject{ + tableName: collectionName, + } + // Creating the collection + err := mPump.store.Migrate(context.Background(), []id.DBObject{obj}) + assert.NoError(t, err) + + // Creating the indexes + err = mPump.ensureIndexes(collectionName) + assert.NoError(t, err) + + // Checking if the indexes are created + indexes, err := mPump.store.GetIndexes(context.Background(), obj) + assert.NoError(t, err) + assert.NotNil(t, indexes) + + // Checking if the default _id index is created + assert.Len(t, indexes, 1) + assert.Equal(t, "_id_", indexes[0].Name) + }) +} + +func TestWriteData(t *testing.T) { + mPump := MongoSelectivePump{} + conf := defaultSelectiveConf() + mPump.dbConf = &conf + mPump.log = log.WithField("prefix", mongoPrefix) + mPump.connect() + defer func() { + assert.NoError(t, mPump.store.DropDatabase(context.Background())) + }() + + t.Run("should write 3 records", func(t *testing.T) { + defer func() { + assert.NoError(t, mPump.store.DropDatabase(context.Background())) + }() + data := []interface{}{ + analytics.AnalyticsRecord{ + APIID: "123", + OrgID: "abc", + }, + analytics.AnalyticsRecord{ + APIID: "456", + OrgID: "abc", + }, + analytics.AnalyticsRecord{ + APIID: "789", + OrgID: "abc", + }, + } + err := mPump.WriteData(context.Background(), data) + assert.NoError(t, err) + + var results []analytics.AnalyticsRecord + err = mPump.store.Query(context.Background(), &analytics.AnalyticsRecord{}, &results, nil) + assert.NoError(t, err) + assert.Len(t, results, 3) + assert.Equal(t, "123", results[0].APIID) + assert.Equal(t, "456", results[1].APIID) + assert.Equal(t, "789", results[2].APIID) + }) + + t.Run("should not write data because the collection does not exist", func(t *testing.T) { + defer func() { + assert.NoError(t, mPump.store.DropDatabase(context.Background())) + }() + // data with empty orgID + data := []interface{}{ + analytics.AnalyticsRecord{ + APIID: "123", + }, + } + err := mPump.WriteData(context.Background(), data) + assert.NoError(t, err) + + var results []analytics.AnalyticsRecord + err = mPump.store.Query(context.Background(), &analytics.AnalyticsRecord{}, &results, nil) + assert.NoError(t, err) + + // No data should be written + assert.Len(t, results, 0) + }) +} + +func TestWriteUptimeDataMongoSelective(t *testing.T) { + now := 
time.Now() tests := []struct { - testName string - sessionConsistency string - expectedSessionMode mgo.Mode + name string + Record interface{} + RecordsAmountToWrite int }{ { - testName: "should set session mode to strong", - sessionConsistency: "strong", - expectedSessionMode: mgo.Strong, - }, - { - testName: "should set session mode to monotonic", - sessionConsistency: "monotonic", - expectedSessionMode: mgo.Monotonic, + name: "write 3 uptime records", + Record: &analytics.UptimeReportData{OrgID: "1", URL: "url1", TimeStamp: now}, + RecordsAmountToWrite: 3, }, { - testName: "should set session mode to eventual", - sessionConsistency: "eventual", - expectedSessionMode: mgo.Eventual, + name: "write 6 uptime records", + Record: &analytics.UptimeReportData{OrgID: "1", URL: "url1", TimeStamp: now}, + RecordsAmountToWrite: 6, }, { - testName: "should set session mode to strong by default", - sessionConsistency: "", - expectedSessionMode: mgo.Strong, + name: "length of records is 0", + Record: &analytics.UptimeReportData{}, + RecordsAmountToWrite: 0, }, } for _, test := range tests { - t.Run(test.testName, func(t *testing.T) { - mPump.dbConf.MongoSessionConsistency = test.sessionConsistency - mPump.connect() - assert.Equal(t, test.expectedSessionMode, mPump.dbSession.Mode()) + t.Run(test.name, func(t *testing.T) { + newPump := &MongoSelectivePump{} + conf := defaultConf() + err := newPump.Init(conf) + assert.Nil(t, err) + + keys := []interface{}{} + for i := 0; i < test.RecordsAmountToWrite; i++ { + encoded, err := msgpack.Marshal(test.Record) + assert.Nil(t, err) + keys = append(keys, string(encoded)) + } + + newPump.WriteUptimeData(keys) + + defer func() { + // clean up the table + err := newPump.store.DropDatabase(context.Background()) + assert.Nil(t, err) + }() + + dbRecords := []analytics.UptimeReportData{} + err = newPump.store.Query(context.Background(), &analytics.UptimeReportData{}, &dbRecords, dbm.DBM{}) + assert.NoError(t, err) + + // check amount of rows in the table + assert.Equal(t, test.RecordsAmountToWrite, len(dbRecords)) }) } } diff --git a/pumps/mongo_test.go b/pumps/mongo_test.go index be60ab197..42a8499be 100644 --- a/pumps/mongo_test.go +++ b/pumps/mongo_test.go @@ -5,10 +5,13 @@ import ( "encoding/base64" "strconv" "testing" + "time" "github.com/stretchr/testify/assert" - "gopkg.in/mgo.v2" + "gopkg.in/vmihailenco/msgpack.v2" + "github.com/TykTechnologies/storage/persistent/dbm" + "github.com/TykTechnologies/storage/persistent/id" "github.com/TykTechnologies/tyk-pump/analytics" ) @@ -17,11 +20,6 @@ func newPump() Pump { } func TestMongoPump_capCollection_Enabled(t *testing.T) { - - c := Conn{} - c.ConnectDb() - defer c.CleanDb() - pump := newPump() conf := defaultConf() @@ -38,21 +36,19 @@ func TestMongoPump_capCollection_Enabled(t *testing.T) { } func TestMongoPumpOmitIndexCreation(t *testing.T) { - - c := Conn{} - c.ConnectDb() - defer c.CleanDb() - pump := newPump() conf := defaultConf() mPump := pump.(*MongoPump) + mPump.dbConf = &conf record := analytics.AnalyticsRecord{ OrgID: "test-org", APIID: "test-api", } records := []interface{}{record, record} + dbObject := createDBObject(conf.CollectionName) + mPump.connect() tcs := []struct { testName string @@ -132,18 +128,38 @@ func TestMongoPumpOmitIndexCreation(t *testing.T) { mPump.dbConf.MongoDBType = tc.dbType mPump.log = log.WithField("prefix", mongoPrefix) mPump.connect() - defer c.CleanIndexes() + defer func() { + err := mPump.store.CleanIndexes(context.Background(), dbObject) + if err != nil { + t.Fatal(err) + } + 
}() + // Drop collection if it exists if tc.shouldDropCollection { - c.CleanDb() + if HasTable(t, mPump, dbObject) { + err := mPump.store.Drop(context.Background(), dbObject) + if err != nil { + t.Error("there shouldn't be an error dropping database", err) + } + } + } else { + // Create collection if it doesn't exist + CreateCollectionIfNeeded(t, mPump, dbObject) } - if err := mPump.ensureIndexes(); err != nil { + if err := mPump.ensureIndexes(dbObject.TableName()); err != nil { t.Error("there shouldn't be an error ensuring indexes", err) } - mPump.WriteData(context.Background(), records) - indexes, errIndexes := c.GetIndexes() + err := mPump.WriteData(context.Background(), records) + if err != nil { + t.Error("there shouldn't be an error writing data", err) + } + // Before getting indexes, we must ensure that the collection exists to avoid an unexpected error + CreateCollectionIfNeeded(t, mPump, dbObject) + + indexes, errIndexes := mPump.store.GetIndexes(context.Background(), dbObject) if errIndexes != nil { t.Error("error getting indexes:", errIndexes) } @@ -155,8 +171,27 @@ func TestMongoPumpOmitIndexCreation(t *testing.T) { } } -func TestMongoPump_capCollection_Exists(t *testing.T) { +func CreateCollectionIfNeeded(t *testing.T, mPump *MongoPump, dbObject id.DBObject) { + t.Helper() + if !HasTable(t, mPump, dbObject) { + err := mPump.store.Migrate(context.Background(), []id.DBObject{dbObject}) + if err != nil { + t.Error("there shouldn't be an error migrating database", err) + } + } +} +func HasTable(t *testing.T, mPump *MongoPump, dbObject id.DBObject) bool { + t.Helper() + hasTable, err := mPump.store.HasTable(context.Background(), dbObject.TableName()) + if err != nil { + t.Error("there shouldn't be an error checking if table exists", err) + } + + return hasTable +} + +func TestMongoPump_capCollection_Exists(t *testing.T) { c := Conn{} c.ConnectDb() defer c.CleanDb() @@ -180,7 +215,6 @@ func TestMongoPump_capCollection_Exists(t *testing.T) { } func TestMongoPump_capCollection_Not64arch(t *testing.T) { - c := Conn{} c.ConnectDb() defer c.CleanDb() @@ -206,7 +240,6 @@ func TestMongoPump_capCollection_Not64arch(t *testing.T) { } func TestMongoPump_capCollection_SensibleDefaultSize(t *testing.T) { - if strconv.IntSize < 64 { t.Skip("skipping as < 64bit arch") } @@ -240,7 +273,6 @@ func TestMongoPump_capCollection_SensibleDefaultSize(t *testing.T) { } func TestMongoPump_capCollection_OverrideSize(t *testing.T) { - if strconv.IntSize < 64 { t.Skip("skipping as < 64bit arch") } @@ -366,7 +398,7 @@ func TestMongoPump_AccumulateSetIgnoreDocSize(t *testing.T) { accumulated := mPump.AccumulateSet(dataSet, true) for _, x := range accumulated { for _, y := range x { - rec, ok := y.(analytics.AnalyticsRecord) + rec, ok := y.(*analytics.AnalyticsRecord) assert.True(t, ok) if rec.IsGraphRecord() { assert.NotEmpty(t, rec.RawRequest) @@ -443,46 +475,65 @@ func TestGetBlurredURL(t *testing.T) { } } -func TestMongoPump_SessionConsistency(t *testing.T) { - pump := newPump() - conf := defaultConf() - - mPump, ok := pump.(*MongoPump) - assert.True(t, ok) - mPump.dbConf = &conf +func TestWriteUptimeData(t *testing.T) { + now := time.Now() tests := []struct { - testName string - sessionConsistency string - expectedSessionMode mgo.Mode + name string + Record *analytics.UptimeReportData + RecordsAmountToWrite int }{ { - testName: "should set session mode to strong", - sessionConsistency: "strong", - expectedSessionMode: mgo.Strong, + name: "write 3 uptime records", + Record: &analytics.UptimeReportData{OrgID: 
"1", URL: "url1", TimeStamp: now}, + RecordsAmountToWrite: 3, }, { - testName: "should set session mode to monotonic", - sessionConsistency: "monotonic", - expectedSessionMode: mgo.Monotonic, + name: "write 6 uptime records", + Record: &analytics.UptimeReportData{OrgID: "1", URL: "url1", TimeStamp: now}, + RecordsAmountToWrite: 6, }, { - testName: "should set session mode to eventual", - sessionConsistency: "eventual", - expectedSessionMode: mgo.Eventual, - }, - { - testName: "should set session mode to strong by default", - sessionConsistency: "", - expectedSessionMode: mgo.Strong, + name: "length of records is 0", + Record: &analytics.UptimeReportData{}, + RecordsAmountToWrite: 0, }, } for _, test := range tests { - t.Run(test.testName, func(t *testing.T) { - mPump.dbConf.MongoSessionConsistency = test.sessionConsistency - mPump.connect() - assert.Equal(t, test.expectedSessionMode, mPump.dbSession.Mode()) + t.Run(test.name, func(t *testing.T) { + newPump := &MongoPump{IsUptime: true} + conf := defaultConf() + err := newPump.Init(conf) + assert.Nil(t, err) + + keys := []interface{}{} + for i := 0; i < test.RecordsAmountToWrite; i++ { + encoded, err := msgpack.Marshal(test.Record) + assert.Nil(t, err) + keys = append(keys, string(encoded)) + } + + newPump.WriteUptimeData(keys) + + defer func() { + // clean up the table + err := newPump.store.DropDatabase(context.Background()) + assert.Nil(t, err) + }() + + // check if the table exists + hasTable, err := newPump.store.HasTable(context.Background(), newPump.dbConf.CollectionName) + assert.Nil(t, err) + assert.Equal(t, true, hasTable) + + dbRecords := []analytics.UptimeReportData{} + if err := newPump.store.Query(context.Background(), &analytics.UptimeReportData{}, &dbRecords, dbm.DBM{}); err != nil { + t.Fatal("Error getting analytics records from Mongo") + } + + // check amount of rows in the table + assert.Equal(t, test.RecordsAmountToWrite, len(dbRecords)) }) } } diff --git a/serializer/golanglint.xml b/serializer/golanglint.xml new file mode 100644 index 000000000..acd65569b --- /dev/null +++ b/serializer/golanglint.xml @@ -0,0 +1,4 @@ + + + + diff --git a/serializer/serializer_test.go b/serializer/serializer_test.go index 7eb6a600c..5af8b72c8 100644 --- a/serializer/serializer_test.go +++ b/serializer/serializer_test.go @@ -8,6 +8,7 @@ import ( "github.com/TykTechnologies/tyk-pump/analytics" "github.com/TykTechnologies/tyk-pump/analytics/demo" "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" "github.com/stretchr/testify/assert" ) @@ -74,7 +75,7 @@ func TestSerializer_Decode(t *testing.T) { t.Fatal(err) } - recordsAreEqual := cmp.Equal(record, *newRecord) + recordsAreEqual := cmp.Equal(record, *newRecord, cmpopts.IgnoreUnexported(analytics.AnalyticsRecord{})) assert.Equal(t, true, recordsAreEqual, "records should be equal after decoding") }) } @@ -131,6 +132,7 @@ func BenchmarkProtobufEncoding(b *testing.B) { } b.ReportMetric(float64(serialSize)/float64(b.N), "B/serial") } + func BenchmarkMsgpEncoding(b *testing.B) { serializer := NewAnalyticsSerializer(MSGP_SERIALIZER) records := []analytics.AnalyticsRecord{ From 68b22060c5f89d4d0a3dd0869ed2f8fce46fef94 Mon Sep 17 00:00:00 2001 From: Matias <83959431+mativm02@users.noreply.github.com> Date: Wed, 19 Apr 2023 11:02:10 -0300 Subject: [PATCH 058/102] changing initial log and pump's version (#605) --- main.go | 2 +- pumps/version.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/main.go b/main.go index 72f6b4643..f71e87945 100644 --- a/main.go +++ 
b/main.go @@ -69,7 +69,7 @@ func Init() { log.WithFields(logrus.Fields{ "prefix": mainPrefix, - }).Info("## Tyk Analytics Pump, ", pumps.VERSION, " ##") + }).Info("## Tyk Pump, ", pumps.VERSION, " ##") // If no environment variable is set, check the configuration file: if os.Getenv("TYK_LOGLEVEL") == "" { diff --git a/pumps/version.go b/pumps/version.go index 40601e0cd..444d3697b 100644 --- a/pumps/version.go +++ b/pumps/version.go @@ -1,6 +1,6 @@ package pumps var ( - VERSION = "v1.7.0" + VERSION = "v1.8.0" builtBy, Commit, buildDate string ) From 51e48e100a95325929d60860635d69319b489490 Mon Sep 17 00:00:00 2001 From: Matias <83959431+mativm02@users.noreply.github.com> Date: Thu, 20 Apr 2023 14:15:42 -0300 Subject: [PATCH 059/102] updating storage to v1.0.1 (#607) --- analytics/aggregate.go | 117 +++++++++++++++--------------- analytics/aggregate_test.go | 113 ++++++++++++++--------------- analytics/analytics.go | 70 +++++++++--------- analytics/analytics_test.go | 6 +- analytics/graph_record.go | 8 +- analytics/graph_record_test.go | 6 +- analytics/uptime_data.go | 36 ++++----- analytics/uptime_data_test.go | 6 +- go.mod | 2 +- go.sum | 2 + pumps/graph_mongo.go | 10 +-- pumps/graph_mongo_test.go | 2 +- pumps/graph_sql_aggregate_test.go | 4 +- pumps/graph_sql_test.go | 4 +- pumps/mgo_helper_test.go | 22 +++--- pumps/mongo.go | 36 +++++---- pumps/mongo_aggregate.go | 21 +++--- pumps/mongo_aggregate_test.go | 11 ++- pumps/mongo_selective.go | 28 ++++--- pumps/mongo_selective_test.go | 7 +- pumps/mongo_test.go | 13 ++-- pumps/random | 0 serializer/protobuf.go | 4 +- 23 files changed, 259 insertions(+), 269 deletions(-) create mode 100644 pumps/random diff --git a/analytics/aggregate.go b/analytics/aggregate.go index 8b17ba9e6..c9ce8ffe3 100644 --- a/analytics/aggregate.go +++ b/analytics/aggregate.go @@ -10,8 +10,7 @@ import ( "sync" "time" - "github.com/TykTechnologies/storage/persistent/dbm" - "github.com/TykTechnologies/storage/persistent/id" + "github.com/TykTechnologies/storage/persistent/model" "github.com/fatih/structs" "github.com/sirupsen/logrus" "gorm.io/gorm" @@ -86,7 +85,7 @@ type AggregateFieldList struct { } type AnalyticsRecordAggregate struct { - id id.ObjectId `bson:"_id" gorm:"-:all"` + id model.ObjectID `bson:"_id" gorm:"-:all"` TimeStamp time.Time OrgID string TimeID struct { @@ -127,11 +126,11 @@ func (f *AnalyticsRecordAggregate) TableName() string { return "z_tyk_analyticz_aggregate_" + f.OrgID } -func (f *AnalyticsRecordAggregate) GetObjectID() id.ObjectId { +func (f *AnalyticsRecordAggregate) GetObjectID() model.ObjectID { return f.id } -func (f *AnalyticsRecordAggregate) SetObjectID(id id.ObjectId) { +func (f *AnalyticsRecordAggregate) SetObjectID(id model.ObjectID) { f.id = id } @@ -275,53 +274,53 @@ func (f AnalyticsRecordAggregate) New() AnalyticsRecordAggregate { return thisF } -func (f *AnalyticsRecordAggregate) generateBSONFromProperty(parent, thisUnit string, incVal *Counter, newUpdate dbm.DBM) dbm.DBM { +func (f *AnalyticsRecordAggregate) generateBSONFromProperty(parent, thisUnit string, incVal *Counter, newUpdate model.DBM) model.DBM { constructor := parent + "." + thisUnit + "." if parent == "" { constructor = thisUnit + "." 
} - newUpdate["$inc"].(dbm.DBM)[constructor+"hits"] = incVal.Hits - newUpdate["$inc"].(dbm.DBM)[constructor+"success"] = incVal.Success - newUpdate["$inc"].(dbm.DBM)[constructor+"errortotal"] = incVal.ErrorTotal + newUpdate["$inc"].(model.DBM)[constructor+"hits"] = incVal.Hits + newUpdate["$inc"].(model.DBM)[constructor+"success"] = incVal.Success + newUpdate["$inc"].(model.DBM)[constructor+"errortotal"] = incVal.ErrorTotal for k, v := range incVal.ErrorMap { - newUpdate["$inc"].(dbm.DBM)[constructor+"errormap."+k] = v + newUpdate["$inc"].(model.DBM)[constructor+"errormap."+k] = v } - newUpdate["$inc"].(dbm.DBM)[constructor+"totalrequesttime"] = incVal.TotalRequestTime - newUpdate["$set"].(dbm.DBM)[constructor+"identifier"] = incVal.Identifier - newUpdate["$set"].(dbm.DBM)[constructor+"humanidentifier"] = incVal.HumanIdentifier - newUpdate["$set"].(dbm.DBM)[constructor+"lasttime"] = incVal.LastTime - newUpdate["$set"].(dbm.DBM)[constructor+"openconnections"] = incVal.OpenConnections - newUpdate["$set"].(dbm.DBM)[constructor+"closedconnections"] = incVal.ClosedConnections - newUpdate["$set"].(dbm.DBM)[constructor+"bytesin"] = incVal.BytesIn - newUpdate["$set"].(dbm.DBM)[constructor+"bytesout"] = incVal.BytesOut - newUpdate["$max"].(dbm.DBM)[constructor+"maxlatency"] = incVal.MaxLatency + newUpdate["$inc"].(model.DBM)[constructor+"totalrequesttime"] = incVal.TotalRequestTime + newUpdate["$set"].(model.DBM)[constructor+"identifier"] = incVal.Identifier + newUpdate["$set"].(model.DBM)[constructor+"humanidentifier"] = incVal.HumanIdentifier + newUpdate["$set"].(model.DBM)[constructor+"lasttime"] = incVal.LastTime + newUpdate["$set"].(model.DBM)[constructor+"openconnections"] = incVal.OpenConnections + newUpdate["$set"].(model.DBM)[constructor+"closedconnections"] = incVal.ClosedConnections + newUpdate["$set"].(model.DBM)[constructor+"bytesin"] = incVal.BytesIn + newUpdate["$set"].(model.DBM)[constructor+"bytesout"] = incVal.BytesOut + newUpdate["$max"].(model.DBM)[constructor+"maxlatency"] = incVal.MaxLatency // Don't update min latency in case of errors if incVal.Hits != incVal.ErrorTotal { if newUpdate["$min"] == nil { - newUpdate["$min"] = dbm.DBM{} + newUpdate["$min"] = model.DBM{} } - newUpdate["$min"].(dbm.DBM)[constructor+"minlatency"] = incVal.MinLatency - newUpdate["$min"].(dbm.DBM)[constructor+"minupstreamlatency"] = incVal.MinUpstreamLatency + newUpdate["$min"].(model.DBM)[constructor+"minlatency"] = incVal.MinLatency + newUpdate["$min"].(model.DBM)[constructor+"minupstreamlatency"] = incVal.MinUpstreamLatency } - newUpdate["$max"].(dbm.DBM)[constructor+"maxupstreamlatency"] = incVal.MaxUpstreamLatency - newUpdate["$inc"].(dbm.DBM)[constructor+"totalupstreamlatency"] = incVal.TotalUpstreamLatency - newUpdate["$inc"].(dbm.DBM)[constructor+"totallatency"] = incVal.TotalLatency + newUpdate["$max"].(model.DBM)[constructor+"maxupstreamlatency"] = incVal.MaxUpstreamLatency + newUpdate["$inc"].(model.DBM)[constructor+"totalupstreamlatency"] = incVal.TotalUpstreamLatency + newUpdate["$inc"].(model.DBM)[constructor+"totallatency"] = incVal.TotalLatency return newUpdate } -func (f *AnalyticsRecordAggregate) generateSetterForTime(parent, thisUnit string, realTime float64, newUpdate dbm.DBM) dbm.DBM { +func (f *AnalyticsRecordAggregate) generateSetterForTime(parent, thisUnit string, realTime float64, newUpdate model.DBM) model.DBM { constructor := parent + "." + thisUnit + "." if parent == "" { constructor = thisUnit + "." 
} - newUpdate["$set"].(dbm.DBM)[constructor+"requesttime"] = realTime + newUpdate["$set"].(model.DBM)[constructor+"requesttime"] = realTime return newUpdate } -func (f *AnalyticsRecordAggregate) latencySetter(parent, thisUnit string, newUpdate dbm.DBM, counter *Counter) dbm.DBM { +func (f *AnalyticsRecordAggregate) latencySetter(parent, thisUnit string, newUpdate model.DBM, counter *Counter) model.DBM { if counter.Hits > 0 { counter.Latency = float64(counter.TotalLatency) / float64(counter.Hits) counter.UpstreamLatency = float64(counter.TotalUpstreamLatency) / float64(counter.Hits) @@ -334,8 +333,8 @@ func (f *AnalyticsRecordAggregate) latencySetter(parent, thisUnit string, newUpd if parent == "" { constructor = thisUnit + "." } - newUpdate["$set"].(dbm.DBM)[constructor+"latency"] = counter.Latency - newUpdate["$set"].(dbm.DBM)[constructor+"upstreamlatency"] = counter.UpstreamLatency + newUpdate["$set"].(model.DBM)[constructor+"latency"] = counter.Latency + newUpdate["$set"].(model.DBM)[constructor+"upstreamlatency"] = counter.UpstreamLatency return newUpdate } @@ -429,11 +428,11 @@ func (f *AnalyticsRecordAggregate) Dimensions() (dimensions []Dimension) { return } -func (f *AnalyticsRecordAggregate) AsChange() (newUpdate dbm.DBM) { - newUpdate = dbm.DBM{ - "$inc": dbm.DBM{}, - "$set": dbm.DBM{}, - "$max": dbm.DBM{}, +func (f *AnalyticsRecordAggregate) AsChange() (newUpdate model.DBM) { + newUpdate = model.DBM{ + "$inc": model.DBM{}, + "$set": model.DBM{}, + "$max": model.DBM{}, } for _, d := range f.Dimensions() { @@ -444,18 +443,18 @@ func (f *AnalyticsRecordAggregate) AsChange() (newUpdate dbm.DBM) { asTime := f.TimeStamp newTime := time.Date(asTime.Year(), asTime.Month(), asTime.Day(), asTime.Hour(), asTime.Minute(), 0, 0, asTime.Location()) - newUpdate["$set"].(dbm.DBM)["timestamp"] = newTime - newUpdate["$set"].(dbm.DBM)["expireAt"] = f.ExpireAt - newUpdate["$set"].(dbm.DBM)["timeid.year"] = newTime.Year() - newUpdate["$set"].(dbm.DBM)["timeid.month"] = newTime.Month() - newUpdate["$set"].(dbm.DBM)["timeid.day"] = newTime.Day() - newUpdate["$set"].(dbm.DBM)["timeid.hour"] = newTime.Hour() - newUpdate["$set"].(dbm.DBM)["lasttime"] = f.LastTime + newUpdate["$set"].(model.DBM)["timestamp"] = newTime + newUpdate["$set"].(model.DBM)["expireAt"] = f.ExpireAt + newUpdate["$set"].(model.DBM)["timeid.year"] = newTime.Year() + newUpdate["$set"].(model.DBM)["timeid.month"] = newTime.Month() + newUpdate["$set"].(model.DBM)["timeid.day"] = newTime.Day() + newUpdate["$set"].(model.DBM)["timeid.hour"] = newTime.Hour() + newUpdate["$set"].(model.DBM)["lasttime"] = f.LastTime return newUpdate } -func (f *AnalyticsRecordAggregate) SetErrorList(parent, thisUnit string, counter *Counter, newUpdate dbm.DBM) { +func (f *AnalyticsRecordAggregate) SetErrorList(parent, thisUnit string, counter *Counter, newUpdate model.DBM) { constructor := parent + "." + thisUnit + "." if parent == "" { constructor = thisUnit + "." 
@@ -476,10 +475,10 @@ func (f *AnalyticsRecordAggregate) SetErrorList(parent, thisUnit string, counter counter.ErrorList = errorlist - newUpdate["$set"].(dbm.DBM)[constructor+"errorlist"] = counter.ErrorList + newUpdate["$set"].(model.DBM)[constructor+"errorlist"] = counter.ErrorList } -func (f *AnalyticsRecordAggregate) getRecords(fieldName string, data map[string]*Counter, newUpdate dbm.DBM) []Counter { +func (f *AnalyticsRecordAggregate) getRecords(fieldName string, data map[string]*Counter, newUpdate model.DBM) []Counter { result := make([]Counter, 0) for thisUnit, incVal := range data { @@ -497,41 +496,41 @@ func (f *AnalyticsRecordAggregate) getRecords(fieldName string, data map[string] return result } -func (f *AnalyticsRecordAggregate) AsTimeUpdate() dbm.DBM { - newUpdate := dbm.DBM{ - "$set": dbm.DBM{}, +func (f *AnalyticsRecordAggregate) AsTimeUpdate() model.DBM { + newUpdate := model.DBM{ + "$set": model.DBM{}, } // We need to create lists of API data so that we can aggregate across the list // in order to present top-20 style lists of APIs, Tokens etc. // apis := make([]Counter, 0) - newUpdate["$set"].(dbm.DBM)["lists.apiid"] = f.getRecords("apiid", f.APIID, newUpdate) + newUpdate["$set"].(model.DBM)["lists.apiid"] = f.getRecords("apiid", f.APIID, newUpdate) - newUpdate["$set"].(dbm.DBM)["lists.errors"] = f.getRecords("errors", f.Errors, newUpdate) + newUpdate["$set"].(model.DBM)["lists.errors"] = f.getRecords("errors", f.Errors, newUpdate) - newUpdate["$set"].(dbm.DBM)["lists.versions"] = f.getRecords("versions", f.Versions, newUpdate) + newUpdate["$set"].(model.DBM)["lists.versions"] = f.getRecords("versions", f.Versions, newUpdate) - newUpdate["$set"].(dbm.DBM)["lists.apikeys"] = f.getRecords("apikeys", f.APIKeys, newUpdate) + newUpdate["$set"].(model.DBM)["lists.apikeys"] = f.getRecords("apikeys", f.APIKeys, newUpdate) - newUpdate["$set"].(dbm.DBM)["lists.oauthids"] = f.getRecords("oauthids", f.OauthIDs, newUpdate) + newUpdate["$set"].(model.DBM)["lists.oauthids"] = f.getRecords("oauthids", f.OauthIDs, newUpdate) - newUpdate["$set"].(dbm.DBM)["lists.geo"] = f.getRecords("geo", f.Geo, newUpdate) + newUpdate["$set"].(model.DBM)["lists.geo"] = f.getRecords("geo", f.Geo, newUpdate) - newUpdate["$set"].(dbm.DBM)["lists.tags"] = f.getRecords("tags", f.Tags, newUpdate) + newUpdate["$set"].(model.DBM)["lists.tags"] = f.getRecords("tags", f.Tags, newUpdate) - newUpdate["$set"].(dbm.DBM)["lists.endpoints"] = f.getRecords("endpoints", f.Endpoints, newUpdate) + newUpdate["$set"].(model.DBM)["lists.endpoints"] = f.getRecords("endpoints", f.Endpoints, newUpdate) for thisUnit, incVal := range f.KeyEndpoint { parent := "lists.keyendpoints." + thisUnit - newUpdate["$set"].(dbm.DBM)[parent] = f.getRecords("keyendpoints."+thisUnit, incVal, newUpdate) + newUpdate["$set"].(model.DBM)[parent] = f.getRecords("keyendpoints."+thisUnit, incVal, newUpdate) } for thisUnit, incVal := range f.OauthEndpoint { parent := "lists.oauthendpoints." 
+ thisUnit - newUpdate["$set"].(dbm.DBM)[parent] = f.getRecords("oauthendpoints."+thisUnit, incVal, newUpdate) + newUpdate["$set"].(model.DBM)[parent] = f.getRecords("oauthendpoints."+thisUnit, incVal, newUpdate) } - newUpdate["$set"].(dbm.DBM)["lists.apiendpoints"] = f.getRecords("apiendpoints", f.ApiEndpoint, newUpdate) + newUpdate["$set"].(model.DBM)["lists.apiendpoints"] = f.getRecords("apiendpoints", f.ApiEndpoint, newUpdate) var newTime float64 diff --git a/analytics/aggregate_test.go b/analytics/aggregate_test.go index d868a2ad4..b97ee099d 100644 --- a/analytics/aggregate_test.go +++ b/analytics/aggregate_test.go @@ -6,8 +6,7 @@ import ( "testing" "time" - "github.com/TykTechnologies/storage/persistent/dbm" - "github.com/TykTechnologies/storage/persistent/id" + "github.com/TykTechnologies/storage/persistent/model" "github.com/google/go-cmp/cmp" "github.com/stretchr/testify/require" @@ -99,7 +98,7 @@ func TestAggregateGraphData(t *testing.T) { RawPath: "/", APIName: "test-api", APIID: "test-api", - ApiSchema: base64.StdEncoding.EncodeToString([]byte(sampleSchema)), + APISchema: base64.StdEncoding.EncodeToString([]byte(sampleSchema)), Tags: []string{PredefinedTagGraphAnalytics}, ResponseCode: 200, Day: 1, @@ -286,7 +285,7 @@ func TestAggregateGraphData_Dimension(t *testing.T) { RawPath: "/", APIName: "test-api", APIID: "test-api", - ApiSchema: base64.StdEncoding.EncodeToString([]byte(sampleSchema)), + APISchema: base64.StdEncoding.EncodeToString([]byte(sampleSchema)), Tags: []string{PredefinedTagGraphAnalytics}, ResponseCode: 200, Day: 1, @@ -475,7 +474,7 @@ func TestAggregatedRecord_TableName(t *testing.T) { func TestAggregatedRecord_GetObjectID(t *testing.T) { t.Run("should return the ID field", func(t *testing.T) { - id := id.NewObjectID() + id := model.NewObjectID() record := AnalyticsRecordAggregate{ id: id, } @@ -485,7 +484,7 @@ func TestAggregatedRecord_GetObjectID(t *testing.T) { func TestAggregatedRecord_SetObjectID(t *testing.T) { t.Run("should set the ID field", func(t *testing.T) { - id := id.NewObjectID() + id := model.NewObjectID() record := AnalyticsRecordAggregate{} record.SetObjectID(id) assert.Equal(t, id, record.id) @@ -504,7 +503,7 @@ func TestAnalyticsRecordAggregate_generateBSONFromProperty(t *testing.T) { tcs := []struct { givenCounter *Counter - expected dbm.DBM + expected model.DBM testName string givenName string @@ -531,8 +530,8 @@ func TestAnalyticsRecordAggregate_generateBSONFromProperty(t *testing.T) { }, givenName: "test", givenValue: "total", - expected: dbm.DBM{ - "$set": dbm.DBM{ + expected: model.DBM{ + "$set": model.DBM{ "test.total.bytesin": int64(0), "test.total.bytesout": int64(0), "test.total.humanidentifier": "", @@ -541,7 +540,7 @@ func TestAnalyticsRecordAggregate_generateBSONFromProperty(t *testing.T) { "test.total.openconnections": int64(0), "test.total.closedconnections": int64(0), }, - "$inc": dbm.DBM{ + "$inc": model.DBM{ "test.total.errormap.200": int(1), "test.total.errortotal": int(0), "test.total.hits": int(2), @@ -550,11 +549,11 @@ func TestAnalyticsRecordAggregate_generateBSONFromProperty(t *testing.T) { "test.total.totalrequesttime": float64(100), "test.total.totalupstreamlatency": int64(20), }, - "$max": dbm.DBM{ + "$max": model.DBM{ "test.total.maxlatency": int64(100), "test.total.maxupstreamlatency": int64(110), }, - "$min": dbm.DBM{ + "$min": model.DBM{ "test.total.minlatency": int64(20), "test.total.minupstreamlatency": int64(10), }, @@ -581,8 +580,8 @@ func TestAnalyticsRecordAggregate_generateBSONFromProperty(t *testing.T) 
{ }, givenName: "test", givenValue: "total", - expected: dbm.DBM{ - "$set": dbm.DBM{ + expected: model.DBM{ + "$set": model.DBM{ "test.total.bytesin": int64(0), "test.total.bytesout": int64(0), "test.total.humanidentifier": "", @@ -591,7 +590,7 @@ func TestAnalyticsRecordAggregate_generateBSONFromProperty(t *testing.T) { "test.total.openconnections": int64(0), "test.total.closedconnections": int64(0), }, - "$inc": dbm.DBM{ + "$inc": model.DBM{ "test.total.errormap.500": int(2), "test.total.errortotal": int(2), "test.total.hits": int(2), @@ -600,11 +599,11 @@ func TestAnalyticsRecordAggregate_generateBSONFromProperty(t *testing.T) { "test.total.totalrequesttime": float64(100), "test.total.totalupstreamlatency": int64(20), }, - "$max": dbm.DBM{ + "$max": model.DBM{ "test.total.maxlatency": int64(100), "test.total.maxupstreamlatency": int64(110), }, - "$min": dbm.DBM{}, // we don't update mins on case of full error counter + "$min": model.DBM{}, // we don't update mins on case of full error counter }, }, @@ -629,8 +628,8 @@ func TestAnalyticsRecordAggregate_generateBSONFromProperty(t *testing.T) { }, givenName: "", givenValue: "noname", - expected: dbm.DBM{ - "$set": dbm.DBM{ + expected: model.DBM{ + "$set": model.DBM{ "noname.bytesin": int64(0), "noname.bytesout": int64(0), "noname.humanidentifier": "", @@ -639,7 +638,7 @@ func TestAnalyticsRecordAggregate_generateBSONFromProperty(t *testing.T) { "noname.openconnections": int64(0), "noname.closedconnections": int64(0), }, - "$inc": dbm.DBM{ + "$inc": model.DBM{ "noname.errormap.500": int(2), "noname.errortotal": int(2), "noname.hits": int(2), @@ -648,11 +647,11 @@ func TestAnalyticsRecordAggregate_generateBSONFromProperty(t *testing.T) { "noname.totalrequesttime": float64(100), "noname.totalupstreamlatency": int64(20), }, - "$max": dbm.DBM{ + "$max": model.DBM{ "noname.maxlatency": int64(100), "noname.maxupstreamlatency": int64(110), }, - "$min": dbm.DBM{}, // we don't update mins on case of full error counter + "$min": model.DBM{}, // we don't update mins on case of full error counter }, }, } @@ -661,11 +660,11 @@ func TestAnalyticsRecordAggregate_generateBSONFromProperty(t *testing.T) { t.Run(tc.testName, func(t *testing.T) { aggregate := &AnalyticsRecordAggregate{} - baseDBM := dbm.DBM{ - "$set": dbm.DBM{}, - "$inc": dbm.DBM{}, - "$max": dbm.DBM{}, - "$min": dbm.DBM{}, + baseDBM := model.DBM{ + "$set": model.DBM{}, + "$inc": model.DBM{}, + "$max": model.DBM{}, + "$min": model.DBM{}, } actual := aggregate.generateBSONFromProperty(tc.givenName, tc.givenValue, tc.givenCounter, baseDBM) @@ -678,7 +677,7 @@ func TestAnalyticsRecordAggregate_generateBSONFromProperty(t *testing.T) { func TestAnalyticsRecordAggregate_generateSetterForTime(t *testing.T) { tcs := []struct { - expected dbm.DBM + expected model.DBM testName string givenName string @@ -690,8 +689,8 @@ func TestAnalyticsRecordAggregate_generateSetterForTime(t *testing.T) { givenName: "test", givenValue: "total", givenRequestTime: 100, - expected: dbm.DBM{ - "$set": dbm.DBM{ + expected: model.DBM{ + "$set": model.DBM{ "test.total.requesttime": float64(100), }, }, @@ -701,8 +700,8 @@ func TestAnalyticsRecordAggregate_generateSetterForTime(t *testing.T) { givenName: "", givenValue: "noname", givenRequestTime: 130, - expected: dbm.DBM{ - "$set": dbm.DBM{ + expected: model.DBM{ + "$set": model.DBM{ "noname.requesttime": float64(130), }, }, @@ -713,8 +712,8 @@ func TestAnalyticsRecordAggregate_generateSetterForTime(t *testing.T) { t.Run(tc.testName, func(t *testing.T) { aggregate := 
&AnalyticsRecordAggregate{} - baseDBM := dbm.DBM{ - "$set": dbm.DBM{}, + baseDBM := model.DBM{ + "$set": model.DBM{}, } actual := aggregate.generateSetterForTime(tc.givenName, tc.givenValue, tc.givenRequestTime, baseDBM) @@ -728,7 +727,7 @@ func TestAnalyticsRecordAggregate_generateSetterForTime(t *testing.T) { func TestAnalyticsRecordAggregate_latencySetter(t *testing.T) { tcs := []struct { givenCounter *Counter - expected dbm.DBM + expected model.DBM testName string givenName string @@ -743,8 +742,8 @@ func TestAnalyticsRecordAggregate_latencySetter(t *testing.T) { }, givenName: "test", givenValue: "total", - expected: dbm.DBM{ - "$set": dbm.DBM{ + expected: model.DBM{ + "$set": model.DBM{ "test.total.latency": float64(50), "test.total.upstreamlatency": float64(100), }, @@ -759,8 +758,8 @@ func TestAnalyticsRecordAggregate_latencySetter(t *testing.T) { }, givenName: "", givenValue: "noname", - expected: dbm.DBM{ - "$set": dbm.DBM{ + expected: model.DBM{ + "$set": model.DBM{ "noname.latency": float64(100), "noname.upstreamlatency": float64(200), }, @@ -776,8 +775,8 @@ func TestAnalyticsRecordAggregate_latencySetter(t *testing.T) { }, givenName: "", givenValue: "noname", - expected: dbm.DBM{ - "$set": dbm.DBM{ + expected: model.DBM{ + "$set": model.DBM{ "noname.latency": float64(0), "noname.upstreamlatency": float64(0), }, @@ -789,8 +788,8 @@ func TestAnalyticsRecordAggregate_latencySetter(t *testing.T) { t.Run(tc.testName, func(t *testing.T) { aggregate := &AnalyticsRecordAggregate{} - baseDBM := dbm.DBM{ - "$set": dbm.DBM{}, + baseDBM := model.DBM{ + "$set": model.DBM{}, } actual := aggregate.latencySetter(tc.givenName, tc.givenValue, baseDBM, tc.givenCounter) @@ -806,7 +805,7 @@ func TestAnalyticsRecordAggregate_AsChange(t *testing.T) { tcs := []struct { given *AnalyticsRecordAggregate - expected dbm.DBM + expected model.DBM testName string }{ { @@ -867,8 +866,8 @@ func TestAnalyticsRecordAggregate_AsChange(t *testing.T) { TimeStamp: currentTime, ExpireAt: currentTime, }, - expected: dbm.DBM{ - "$inc": dbm.DBM{ + expected: model.DBM{ + "$inc": model.DBM{ "total.hits": int(2), "total.success": int(2), "total.errortotal": int(0), @@ -888,7 +887,7 @@ func TestAnalyticsRecordAggregate_AsChange(t *testing.T) { "versions.v2.totalrequesttime": float64(200), "versions.v2.totalupstreamlatency": int64(200), }, - "$min": dbm.DBM{ + "$min": model.DBM{ "total.minlatency": int64(10), "total.minupstreamlatency": int64(20), "versions.v1.minlatency": int64(10), @@ -896,7 +895,7 @@ func TestAnalyticsRecordAggregate_AsChange(t *testing.T) { "versions.v2.minlatency": int64(10), "versions.v2.minupstreamlatency": int64(20), }, - "$max": dbm.DBM{ + "$max": model.DBM{ "total.maxlatency": int64(100), "total.maxupstreamlatency": int64(100), "versions.v1.maxlatency": int64(100), @@ -904,7 +903,7 @@ func TestAnalyticsRecordAggregate_AsChange(t *testing.T) { "versions.v2.maxlatency": int64(100), "versions.v2.maxupstreamlatency": int64(100), }, - "$set": dbm.DBM{ + "$set": model.DBM{ "expireAt": currentTime, "lasttime": currentTime, "timestamp": currentTime, @@ -1000,8 +999,8 @@ func TestAnalyticsRecordAggregate_AsChange(t *testing.T) { TimeStamp: currentTime, ExpireAt: currentTime, }, - expected: dbm.DBM{ - "$inc": dbm.DBM{ + expected: model.DBM{ + "$inc": model.DBM{ "total.hits": int(4), "total.success": int(1), "total.errortotal": int(3), @@ -1025,13 +1024,13 @@ func TestAnalyticsRecordAggregate_AsChange(t *testing.T) { "apiid.api2.totalrequesttime": float64(200), "apiid.api2.errortotal": int(0), }, - "$min": 
dbm.DBM{ + "$min": model.DBM{ "total.minlatency": int64(10), "total.minupstreamlatency": int64(20), "apiid.api2.minlatency": int64(10), "apiid.api2.minupstreamlatency": int64(20), }, - "$max": dbm.DBM{ + "$max": model.DBM{ "total.maxlatency": int64(100), "total.maxupstreamlatency": int64(100), "apiid.api1.maxlatency": int64(100), @@ -1039,7 +1038,7 @@ func TestAnalyticsRecordAggregate_AsChange(t *testing.T) { "apiid.api2.maxlatency": int64(100), "apiid.api2.maxupstreamlatency": int64(100), }, - "$set": dbm.DBM{ + "$set": model.DBM{ "expireAt": currentTime, "lasttime": currentTime, "timestamp": currentTime, @@ -1088,7 +1087,7 @@ func TestAnalyticsRecordAggregate_AsTimeUpdate(t *testing.T) { tcs := []struct { given *AnalyticsRecordAggregate - expected dbm.DBM + expected model.DBM testName string }{ { @@ -1154,8 +1153,8 @@ func TestAnalyticsRecordAggregate_AsTimeUpdate(t *testing.T) { MaxUpstreamLatency: 100, }, }, - expected: dbm.DBM{ - "$set": dbm.DBM{ + expected: model.DBM{ + "$set": model.DBM{ "apiendpoints./get.errorlist": []ErrorData{{Code: "404", Count: 1}, {Code: "500", Count: 2}}, "apiendpoints./get.latency": float64(100), "apiendpoints./get.requesttime": float64(0), diff --git a/analytics/analytics.go b/analytics/analytics.go index 023fade4d..403006be7 100644 --- a/analytics/analytics.go +++ b/analytics/analytics.go @@ -14,7 +14,7 @@ import ( "github.com/oschwald/maxminddb-golang" "google.golang.org/protobuf/types/known/timestamppb" - "github.com/TykTechnologies/storage/persistent/id" + "github.com/TykTechnologies/storage/persistent/model" analyticsproto "github.com/TykTechnologies/tyk-pump/analytics/proto" "github.com/TykTechnologies/tyk-pump/logger" @@ -42,37 +42,37 @@ const SQLTable = "tyk_analytics" // AnalyticsRecord encodes the details of a request type AnalyticsRecord struct { - id id.ObjectId `bson:"_id" gorm:"-:all"` - Method string `json:"method" gorm:"column:method"` - Host string `json:"host" gorm:"column:host"` - Path string `json:"path" gorm:"column:path"` - RawPath string `json:"raw_path" gorm:"column:rawpath"` - ContentLength int64 `json:"content_length" gorm:"column:contentlength"` - UserAgent string `json:"user_agent" gorm:"column:useragent"` - Day int `json:"day" sql:"-"` - Month time.Month `json:"month" sql:"-"` - Year int `json:"year" sql:"-"` - Hour int `json:"hour" sql:"-"` - ResponseCode int `json:"response_code" gorm:"column:responsecode;index"` - APIKey string `json:"api_key" gorm:"column:apikey;index"` - TimeStamp time.Time `json:"timestamp" gorm:"column:timestamp;index"` - APIVersion string `json:"api_version" gorm:"column:apiversion"` - APIName string `json:"api_name" sql:"-"` - APIID string `json:"api_id" gorm:"column:apiid;index"` - OrgID string `json:"org_id" gorm:"column:orgid;index"` - OauthID string `json:"oauth_id" gorm:"column:oauthid;index"` - RequestTime int64 `json:"request_time" gorm:"column:requesttime"` - RawRequest string `json:"raw_request" gorm:"column:rawrequest"` - RawResponse string `json:"raw_response" gorm:"column:rawresponse"` - IPAddress string `json:"ip_address" gorm:"column:ipaddress"` - Geo GeoData `json:"geo" gorm:"embedded"` - Network NetworkStats `json:"network"` - Latency Latency `json:"latency"` - Tags []string `json:"tags"` - Alias string `json:"alias"` - TrackPath bool `json:"track_path" gorm:"column:trackpath"` - ExpireAt time.Time `bson:"expireAt" json:"expireAt"` - ApiSchema string `json:"api_schema" bson:"-" gorm:"-:all"` + id model.ObjectID `bson:"_id" gorm:"-:all"` + Method string `json:"method" 
gorm:"column:method"` + Host string `json:"host" gorm:"column:host"` + Path string `json:"path" gorm:"column:path"` + RawPath string `json:"raw_path" gorm:"column:rawpath"` + ContentLength int64 `json:"content_length" gorm:"column:contentlength"` + UserAgent string `json:"user_agent" gorm:"column:useragent"` + Day int `json:"day" sql:"-"` + Month time.Month `json:"month" sql:"-"` + Year int `json:"year" sql:"-"` + Hour int `json:"hour" sql:"-"` + ResponseCode int `json:"response_code" gorm:"column:responsecode;index"` + APIKey string `json:"api_key" gorm:"column:apikey;index"` + TimeStamp time.Time `json:"timestamp" gorm:"column:timestamp;index"` + APIVersion string `json:"api_version" gorm:"column:apiversion"` + APIName string `json:"api_name" sql:"-"` + APIID string `json:"api_id" gorm:"column:apiid;index"` + OrgID string `json:"org_id" gorm:"column:orgid;index"` + OauthID string `json:"oauth_id" gorm:"column:oauthid;index"` + RequestTime int64 `json:"request_time" gorm:"column:requesttime"` + RawRequest string `json:"raw_request" gorm:"column:rawrequest"` + RawResponse string `json:"raw_response" gorm:"column:rawresponse"` + IPAddress string `json:"ip_address" gorm:"column:ipaddress"` + Geo GeoData `json:"geo" gorm:"embedded"` + Network NetworkStats `json:"network"` + Latency Latency `json:"latency"` + Tags []string `json:"tags"` + Alias string `json:"alias"` + TrackPath bool `json:"track_path" gorm:"column:trackpath"` + ExpireAt time.Time `bson:"expireAt" json:"expireAt"` + APISchema string `json:"api_schema" bson:"-" gorm:"-:all"` CollectionName string `json:"-" bson:"-" gorm:"-:all"` } @@ -84,11 +84,11 @@ func (a *AnalyticsRecord) TableName() string { return SQLTable } -func (a *AnalyticsRecord) GetObjectID() id.ObjectId { +func (a *AnalyticsRecord) GetObjectID() model.ObjectID { return a.id } -func (a *AnalyticsRecord) SetObjectID(id id.ObjectId) { +func (a *AnalyticsRecord) SetObjectID(id model.ObjectID) { a.id = id } @@ -237,7 +237,7 @@ func (a *AnalyticsRecord) GetLineValues() []string { fields = append(fields, a.Alias) fields = append(fields, strconv.FormatBool(a.TrackPath)) fields = append(fields, a.ExpireAt.String()) - fields = append(fields, a.ApiSchema) + fields = append(fields, a.APISchema) return fields } diff --git a/analytics/analytics_test.go b/analytics/analytics_test.go index 27096c70e..380a59e61 100644 --- a/analytics/analytics_test.go +++ b/analytics/analytics_test.go @@ -5,7 +5,7 @@ import ( "testing" "time" - "github.com/TykTechnologies/storage/persistent/id" + "github.com/TykTechnologies/storage/persistent/model" "github.com/fatih/structs" "github.com/stretchr/testify/assert" ) @@ -96,7 +96,7 @@ func TestAnalyticsRecord_Base(t *testing.T) { assert.Equal(t, SQLTable, rec.TableName()) - newID := id.NewObjectID() + newID := model.NewObjectID() rec.SetObjectID(newID) assert.Equal(t, newID, rec.GetObjectID()) } @@ -161,7 +161,7 @@ func TestAnalyticsRecord_GetLineValues(t *testing.T) { APIVersion: "v1", APIName: "api_name", TimeStamp: time.Now(), - ApiSchema: "http", + APISchema: "http", } fields := rec.GetLineValues() diff --git a/analytics/graph_record.go b/analytics/graph_record.go index 911301e52..1b6706853 100644 --- a/analytics/graph_record.go +++ b/analytics/graph_record.go @@ -17,7 +17,7 @@ import ( "github.com/TykTechnologies/graphql-go-tools/pkg/astparser" gql "github.com/TykTechnologies/graphql-go-tools/pkg/graphql" "github.com/TykTechnologies/graphql-go-tools/pkg/operationreport" - "github.com/TykTechnologies/storage/persistent/id" + 
"github.com/TykTechnologies/storage/persistent/model" ) type GraphRecord struct { @@ -37,12 +37,12 @@ func (g *GraphRecord) TableName() string { } // GetObjectID is a dummy function to satisfy the interface -func (*GraphRecord) GetObjectID() id.ObjectId { +func (*GraphRecord) GetObjectID() model.ObjectID { return "" } // SetObjectID is a dummy function to satisfy the interface -func (*GraphRecord) SetObjectID(id.ObjectId) { +func (*GraphRecord) SetObjectID(model.ObjectID) { // empty } @@ -171,7 +171,7 @@ func (a *AnalyticsRecord) ToGraphRecord() GraphRecord { record.HasErrors = true } - record.parseRequest(a.RawRequest, a.ApiSchema) + record.parseRequest(a.RawRequest, a.APISchema) record.parseResponse(a.RawResponse) diff --git a/analytics/graph_record_test.go b/analytics/graph_record_test.go index 8b851226e..57cbb2b26 100644 --- a/analytics/graph_record_test.go +++ b/analytics/graph_record_test.go @@ -129,7 +129,7 @@ func TestAnalyticsRecord_ToGraphRecord(t *testing.T) { RawPath: "/", APIName: "test-api", APIID: "test-api", - ApiSchema: base64.StdEncoding.EncodeToString([]byte(sampleSchema)), + APISchema: base64.StdEncoding.EncodeToString([]byte(sampleSchema)), ResponseCode: 200, Day: 1, Month: 1, @@ -195,7 +195,7 @@ func TestAnalyticsRecord_ToGraphRecord(t *testing.T) { return g }, modifyRecord: func(a AnalyticsRecord) AnalyticsRecord { - a.ApiSchema = base64.StdEncoding.EncodeToString([]byte(subgraphSchema)) + a.APISchema = base64.StdEncoding.EncodeToString([]byte(subgraphSchema)) return a }, }, @@ -381,7 +381,7 @@ func TestAnalyticsRecord_ToGraphRecord(t *testing.T) { request: `{"query":"query main {\ncharacters {\ninfo\n}\n}\n\nquery second {\nlistCharacters{\ninfo\n}\n}","variables":null,"operationName":""}`, response: `{"errors":[{"message":"no operation specified"}]}`, modifyRecord: func(a AnalyticsRecord) AnalyticsRecord { - a.ApiSchema = "this isn't a base64 is it?" + a.APISchema = "this isn't a base64 is it?" 
return a }, expected: func() GraphRecord { diff --git a/analytics/uptime_data.go b/analytics/uptime_data.go index 69734847b..9fa7b25d8 100644 --- a/analytics/uptime_data.go +++ b/analytics/uptime_data.go @@ -6,28 +6,28 @@ import ( "gorm.io/gorm" - "github.com/TykTechnologies/storage/persistent/id" + "github.com/TykTechnologies/storage/persistent/model" "github.com/fatih/structs" ) const UptimeSQLTable = "tyk_uptime_analytics" type UptimeReportData struct { - ID id.ObjectId `json:"_id" bson:"_id" gorm:"-:all"` - URL string `json:"url"` - RequestTime int64 `json:"request_time"` - ResponseCode int `json:"response_code"` - TCPError bool `json:"tcp_error"` - ServerError bool `json:"server_error"` - Day int `json:"day"` - Month time.Month `json:"month"` - Year int `json:"year"` - Hour int `json:"hour"` - Minute int `json:"minute"` - TimeStamp time.Time `json:"timestamp"` - ExpireAt time.Time `bson:"expireAt"` - APIID string `json:"api_id"` - OrgID string `json:"org_id"` + ID model.ObjectID `json:"_id" bson:"_id" gorm:"-:all"` + URL string `json:"url"` + RequestTime int64 `json:"request_time"` + ResponseCode int `json:"response_code"` + TCPError bool `json:"tcp_error"` + ServerError bool `json:"server_error"` + Day int `json:"day"` + Month time.Month `json:"month"` + Year int `json:"year"` + Hour int `json:"hour"` + Minute int `json:"minute"` + TimeStamp time.Time `json:"timestamp"` + ExpireAt time.Time `bson:"expireAt"` + APIID string `json:"api_id"` + OrgID string `json:"org_id"` } type UptimeReportAggregateSQL struct { @@ -47,11 +47,11 @@ func (a *UptimeReportAggregateSQL) TableName() string { return UptimeSQLTable } -func (a *UptimeReportData) GetObjectID() id.ObjectId { +func (a *UptimeReportData) GetObjectID() model.ObjectID { return a.ID } -func (a *UptimeReportData) SetObjectID(id id.ObjectId) { +func (a *UptimeReportData) SetObjectID(id model.ObjectID) { a.ID = id } diff --git a/analytics/uptime_data_test.go b/analytics/uptime_data_test.go index 39e7f4f42..7db04d312 100644 --- a/analytics/uptime_data_test.go +++ b/analytics/uptime_data_test.go @@ -4,7 +4,7 @@ import ( "testing" "time" - "github.com/TykTechnologies/storage/persistent/id" + "github.com/TykTechnologies/storage/persistent/model" "github.com/google/go-cmp/cmp" "gorm.io/gorm/clause" @@ -13,7 +13,7 @@ import ( func TestUptimeReportData_GetObjectID(t *testing.T) { t.Run("should return the ID field", func(t *testing.T) { - id := id.NewObjectID() + id := model.NewObjectID() record := UptimeReportData{ ID: id, } @@ -23,7 +23,7 @@ func TestUptimeReportData_GetObjectID(t *testing.T) { func TestUptimeReportData_SetObjectID(t *testing.T) { t.Run("should set the ID field", func(t *testing.T) { - id := id.NewObjectID() + id := model.NewObjectID() record := UptimeReportData{} record.SetObjectID(id) assert.Equal(t, id, record.ID) diff --git a/go.mod b/go.mod index 9eab65398..9ea852f5b 100644 --- a/go.mod +++ b/go.mod @@ -8,7 +8,7 @@ require ( github.com/TykTechnologies/gorpc v0.0.0-20210624160652-fe65bda0ccb9 github.com/TykTechnologies/graphql-go-tools v1.6.2-0.20230320143102-7a16078ce517 github.com/TykTechnologies/murmur3 v0.0.0-20230310161213-aad17efd5632 - github.com/TykTechnologies/storage v0.0.0-20230410152719-1e659ae95643 + github.com/TykTechnologies/storage v1.0.1 github.com/aws/aws-sdk-go-v2 v1.16.14 github.com/aws/aws-sdk-go-v2/config v1.9.0 github.com/aws/aws-sdk-go-v2/service/timestreamwrite v1.9.0 diff --git a/go.sum b/go.sum index d94c5d3f1..76afc6b58 100644 --- a/go.sum +++ b/go.sum @@ -58,6 +58,8 @@ 
github.com/TykTechnologies/storage v0.0.0-20230410132731-c13ef37ecbd9 h1:MN+4v/n github.com/TykTechnologies/storage v0.0.0-20230410132731-c13ef37ecbd9/go.mod h1:+0S3KuNlLGBTMTSFREuZFm315zzXjuuCO4QSAPy+d3M= github.com/TykTechnologies/storage v0.0.0-20230410152719-1e659ae95643 h1:vFml52JVqB1yOMUyq10o5JytEfC93KattU/xTfzxAlM= github.com/TykTechnologies/storage v0.0.0-20230410152719-1e659ae95643/go.mod h1:+0S3KuNlLGBTMTSFREuZFm315zzXjuuCO4QSAPy+d3M= +github.com/TykTechnologies/storage v1.0.1 h1:YI85mHMofwIrF0QgrRYqKKd2xuPO/lxGe+SR4w2kKkg= +github.com/TykTechnologies/storage v1.0.1/go.mod h1:+0S3KuNlLGBTMTSFREuZFm315zzXjuuCO4QSAPy+d3M= github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM= github.com/agnivade/levenshtein v1.1.1 h1:QY8M92nrzkmr798gCo3kmMyqXFzdQVpxLlGPRBij0P8= github.com/agnivade/levenshtein v1.1.1/go.mod h1:veldBMzWxcCG2ZvUTKD2kJNRdCk5hVbJomOvKkmgYbo= diff --git a/pumps/graph_mongo.go b/pumps/graph_mongo.go index 778b3cd1c..fd805c711 100644 --- a/pumps/graph_mongo.go +++ b/pumps/graph_mongo.go @@ -5,7 +5,7 @@ import ( "fmt" "strings" - "github.com/TykTechnologies/storage/persistent/id" + "github.com/TykTechnologies/storage/persistent/model" "github.com/TykTechnologies/tyk-pump/analytics" "github.com/mitchellh/mapstructure" "github.com/sirupsen/logrus" @@ -89,22 +89,22 @@ func (g *GraphMongoPump) WriteData(ctx context.Context, data []interface{}) erro errCh := make(chan error, len(accumulateSet)) for _, dataSet := range accumulateSet { - go func(dataSet []id.DBObject, errCh chan error) { + go func(dataSet []model.DBObject, errCh chan error) { // make a graph record array with variable length in case there are errors with some conversion - finalSet := make([]id.DBObject, 0) + finalSet := make([]model.DBObject, 0) for _, d := range dataSet { r, ok := d.(*analytics.AnalyticsRecord) if !ok { continue } - r.SetObjectID(id.NewObjectID()) + r.SetObjectID(model.NewObjectID()) var ( gr analytics.GraphRecord err error ) - if r.RawRequest == "" || r.RawResponse == "" || r.ApiSchema == "" { + if r.RawRequest == "" || r.RawResponse == "" || r.APISchema == "" { g.log.Warn("skipping record parsing") gr = analytics.GraphRecord{AnalyticsRecord: *r} } else { diff --git a/pumps/graph_mongo_test.go b/pumps/graph_mongo_test.go index 05f25821b..2b2ebe62c 100644 --- a/pumps/graph_mongo_test.go +++ b/pumps/graph_mongo_test.go @@ -307,7 +307,7 @@ func TestGraphMongoPump_WriteData(t *testing.T) { Path: "POST", RawRequest: base64.StdEncoding.EncodeToString([]byte(cr.rawRequest)), RawResponse: base64.StdEncoding.EncodeToString([]byte(cr.rawResponse)), - ApiSchema: base64.StdEncoding.EncodeToString([]byte(cr.schema)), + APISchema: base64.StdEncoding.EncodeToString([]byte(cr.schema)), Tags: cr.tags, } if cr.responseCode != 0 { diff --git a/pumps/graph_sql_aggregate_test.go b/pumps/graph_sql_aggregate_test.go index 7ebfd4d2d..55e0a3cfd 100644 --- a/pumps/graph_sql_aggregate_test.go +++ b/pumps/graph_sql_aggregate_test.go @@ -187,7 +187,7 @@ func TestSqlGraphAggregatePump_WriteData(t *testing.T) { RawPath: "/", APIName: "test-api", APIID: "test-api", - ApiSchema: base64.StdEncoding.EncodeToString([]byte(sampleSchema)), + APISchema: base64.StdEncoding.EncodeToString([]byte(sampleSchema)), Tags: []string{analytics.PredefinedTagGraphAnalytics}, ResponseCode: 200, Day: 1, @@ -456,7 +456,7 @@ func TestGraphSQLAggregatePump_WriteData_Sharded(t *testing.T) { RawPath: "/", APIName: "test-api", APIID: "test-api", - ApiSchema: base64.StdEncoding.EncodeToString([]byte(sampleSchema)), 
+ APISchema: base64.StdEncoding.EncodeToString([]byte(sampleSchema)), Tags: []string{analytics.PredefinedTagGraphAnalytics}, ResponseCode: 200, Day: 1, diff --git a/pumps/graph_sql_test.go b/pumps/graph_sql_test.go index 6e4eb4ff8..04b9d5d6f 100644 --- a/pumps/graph_sql_test.go +++ b/pumps/graph_sql_test.go @@ -253,7 +253,7 @@ func TestGraphSQLPump_WriteData(t *testing.T) { } if !item.isHTTP { r.RawRequest = convToBase64(rawGQLRequest) - r.ApiSchema = convToBase64(schema) + r.APISchema = convToBase64(schema) } else { r.RawRequest = convToBase64(rawHTTPReq) r.RawResponse = convToBase64(rawHTTPResponse) @@ -323,7 +323,7 @@ func TestGraphSQLPump_Sharded(t *testing.T) { Path: "/test-api", RawRequest: convToBase64(rawGQLRequest), RawResponse: convToBase64(rawGQLResponse), - ApiSchema: convToBase64(schema), + APISchema: convToBase64(schema), Tags: []string{analytics.PredefinedTagGraphAnalytics}, APIName: "test-api", ResponseCode: 200, diff --git a/pumps/mgo_helper_test.go b/pumps/mgo_helper_test.go index 0f421c871..c439efd42 100644 --- a/pumps/mgo_helper_test.go +++ b/pumps/mgo_helper_test.go @@ -7,9 +7,7 @@ import ( "os" "github.com/TykTechnologies/storage/persistent" - "github.com/TykTechnologies/storage/persistent/dbm" - "github.com/TykTechnologies/storage/persistent/id" - "github.com/TykTechnologies/storage/persistent/index" + "github.com/TykTechnologies/storage/persistent/model" ) const ( @@ -26,12 +24,12 @@ func (c *Conn) TableName() string { } // SetObjectID is a dummy function to satisfy the interface -func (*Conn) GetObjectID() id.ObjectId { +func (*Conn) GetObjectID() model.ObjectID { return "" } // SetObjectID is a dummy function to satisfy the interface -func (*Conn) SetObjectID(id.ObjectId) { +func (*Conn) SetObjectID(model.ObjectID) { // empty } @@ -70,15 +68,15 @@ func (c *Conn) CleanIndexes() { } type Doc struct { - ID id.ObjectId `bson:"_id"` - Foo string `bson:"foo"` + ID model.ObjectID `bson:"_id"` + Foo string `bson:"foo"` } -func (d Doc) GetObjectID() id.ObjectId { +func (d Doc) GetObjectID() model.ObjectID { return d.ID } -func (d *Doc) SetObjectID(id id.ObjectId) { +func (d *Doc) SetObjectID(id model.ObjectID) { d.ID = id } @@ -90,14 +88,14 @@ func (c *Conn) InsertDoc() { doc := Doc{ Foo: "bar", } - doc.SetObjectID(id.NewObjectID()) + doc.SetObjectID(model.NewObjectID()) err := c.Store.Insert(context.Background(), &doc) if err != nil { panic(err) } } -func (c *Conn) GetCollectionStats() (colStats dbm.DBM) { +func (c *Conn) GetCollectionStats() (colStats model.DBM) { var err error colStats, err = c.Store.DBTableStats(context.Background(), c) if err != nil { @@ -106,7 +104,7 @@ func (c *Conn) GetCollectionStats() (colStats dbm.DBM) { return colStats } -func (c *Conn) GetIndexes() ([]index.Index, error) { +func (c *Conn) GetIndexes() ([]model.Index, error) { return c.Store.GetIndexes(context.Background(), c) } diff --git a/pumps/mongo.go b/pumps/mongo.go index 6a965455b..deb8105c4 100644 --- a/pumps/mongo.go +++ b/pumps/mongo.go @@ -12,9 +12,7 @@ import ( "strconv" "github.com/TykTechnologies/storage/persistent" - "github.com/TykTechnologies/storage/persistent/dbm" - "github.com/TykTechnologies/storage/persistent/id" - "github.com/TykTechnologies/storage/persistent/index" + "github.com/TykTechnologies/storage/persistent/model" "github.com/TykTechnologies/tyk-pump/analytics" "github.com/kelseyhightower/envconfig" "github.com/mitchellh/mapstructure" @@ -93,12 +91,12 @@ func (d dbObject) TableName() string { } // GetObjectID is a dummy function to satisfy the interface -func 
(dbObject) GetObjectID() id.ObjectId { +func (dbObject) GetObjectID() model.ObjectID { return "" } // SetObjectID is a dummy function to satisfy the interface -func (dbObject) SetObjectID(id.ObjectId) { +func (dbObject) SetObjectID(model.ObjectID) { // empty } @@ -272,7 +270,7 @@ func (m *MongoPump) capCollection() (ok bool) { tableName: colName, } - err = m.store.Migrate(context.Background(), []id.DBObject{d}, dbm.DBM{"capped": true, "maxBytes": colCapMaxSizeBytes}) + err = m.store.Migrate(context.Background(), []model.DBObject{d}, model.DBM{"capped": true, "maxBytes": colCapMaxSizeBytes}) if err != nil { m.log.Errorf("Unable to create capped collection for (%s). %s", colName, err.Error()) @@ -305,8 +303,8 @@ func (m *MongoPump) ensureIndexes(collectionName string) error { var err error - orgIndex := index.Index{ - Keys: []dbm.DBM{{"orgid": 1}}, + orgIndex := model.Index{ + Keys: []model.DBM{{"orgid": 1}}, Background: m.dbConf.MongoDBType == StandardMongo, } @@ -317,8 +315,8 @@ func (m *MongoPump) ensureIndexes(collectionName string) error { return err } - apiIndex := index.Index{ - Keys: []dbm.DBM{{"apiid": 1}}, + apiIndex := model.Index{ + Keys: []model.DBM{{"apiid": 1}}, Background: m.dbConf.MongoDBType == StandardMongo, } @@ -327,9 +325,9 @@ func (m *MongoPump) ensureIndexes(collectionName string) error { return err } - logBrowserIndex := index.Index{ + logBrowserIndex := model.Index{ Name: "logBrowserIndex", - Keys: []dbm.DBM{{"timestamp": -1}, {"orgid": 1}, {"apiid": 1}, {"apikey": 1}, {"responsecode": 1}}, + Keys: []model.DBM{{"timestamp": -1}, {"orgid": 1}, {"apiid": 1}, {"apikey": 1}, {"responsecode": 1}}, Background: m.dbConf.MongoDBType == StandardMongo, } return m.store.CreateIndex(context.Background(), d, logBrowserIndex) @@ -370,7 +368,7 @@ func (m *MongoPump) WriteData(ctx context.Context, data []interface{}) error { errCh := make(chan error, len(accumulateSet)) for _, dataSet := range accumulateSet { - go func(errCh chan error, dataSet ...id.DBObject) { + go func(errCh chan error, dataSet ...model.DBObject) { m.log.WithFields(logrus.Fields{ "collection": collectionName, "number of records": len(dataSet), @@ -404,10 +402,10 @@ func (m *MongoPump) WriteData(ctx context.Context, data []interface{}) error { // AccumulateSet groups data items into chunks based on the max batch size limit while handling graph analytics records separately. // It returns a 2D array of DBObjects. -func (m *MongoPump) AccumulateSet(data []interface{}, isForGraphRecords bool) [][]id.DBObject { +func (m *MongoPump) AccumulateSet(data []interface{}, isForGraphRecords bool) [][]model.DBObject { accumulatorTotal := 0 - returnArray := make([][]id.DBObject, 0) - thisResultSet := make([]id.DBObject, 0) + returnArray := make([][]model.DBObject, 0) + thisResultSet := make([]model.DBObject, 0) for i, item := range data { // Process the current item and determine if it should be skipped @@ -474,7 +472,7 @@ func (m *MongoPump) handleLargeDocuments(thisItem *analytics.AnalyticsRecord, si // accumulate processes the given item and updates the accumulator total, result set, and return array. // It manages chunking the data into separate sets based on the max batch size limit, and appends the last item when necessary. 
-func (m *MongoPump) accumulate(thisResultSet []id.DBObject, returnArray [][]id.DBObject, thisItem *analytics.AnalyticsRecord, sizeBytes, accumulatorTotal int, isLastItem bool) (int, []id.DBObject, [][]id.DBObject) { +func (m *MongoPump) accumulate(thisResultSet []model.DBObject, returnArray [][]model.DBObject, thisItem *analytics.AnalyticsRecord, sizeBytes, accumulatorTotal int, isLastItem bool) (int, []model.DBObject, [][]model.DBObject) { if (accumulatorTotal + sizeBytes) <= m.dbConf.MaxInsertBatchSizeBytes { accumulatorTotal += sizeBytes } else { @@ -483,7 +481,7 @@ func (m *MongoPump) accumulate(thisResultSet []id.DBObject, returnArray [][]id.D returnArray = append(returnArray, thisResultSet) } - thisResultSet = make([]id.DBObject, 0) + thisResultSet = make([]model.DBObject, 0) accumulatorTotal = sizeBytes } @@ -507,7 +505,7 @@ func (m *MongoPump) WriteUptimeData(data []interface{}) { return } - keys := make([]id.DBObject, len(data)) + keys := make([]model.DBObject, len(data)) for i, v := range data { decoded := analytics.UptimeReportData{} diff --git a/pumps/mongo_aggregate.go b/pumps/mongo_aggregate.go index f900f5313..8d413cba8 100644 --- a/pumps/mongo_aggregate.go +++ b/pumps/mongo_aggregate.go @@ -13,8 +13,7 @@ import ( "github.com/sirupsen/logrus" "github.com/TykTechnologies/storage/persistent" - "github.com/TykTechnologies/storage/persistent/dbm" - "github.com/TykTechnologies/storage/persistent/index" + "github.com/TykTechnologies/storage/persistent/model" "github.com/TykTechnologies/tyk-pump/analytics" ) @@ -244,8 +243,8 @@ func (m *MongoAggregatePump) ensureIndexes(collectionName string) error { var err error // CosmosDB does not support "expireAt" option if m.dbConf.MongoDBType != CosmosDB { - ttlIndex := index.Index{ - Keys: []dbm.DBM{{"expireAt": 1}}, + ttlIndex := model.Index{ + Keys: []model.DBM{{"expireAt": 1}}, TTL: 0, IsTTLIndex: true, Background: m.dbConf.MongoDBType == StandardMongo, @@ -256,8 +255,8 @@ func (m *MongoAggregatePump) ensureIndexes(collectionName string) error { } } - apiIndex := index.Index{ - Keys: []dbm.DBM{{"timestamp": 1}}, + apiIndex := model.Index{ + Keys: []model.DBM{{"timestamp": 1}}, Background: m.dbConf.MongoDBType == StandardMongo, } @@ -266,8 +265,8 @@ func (m *MongoAggregatePump) ensureIndexes(collectionName string) error { return err } - orgIndex := index.Index{ - Keys: []dbm.DBM{{"orgid": 1}}, + orgIndex := model.Index{ + Keys: []model.DBM{{"orgid": 1}}, Background: m.dbConf.MongoDBType == StandardMongo, } return m.store.CreateIndex(context.Background(), d, orgIndex) @@ -315,7 +314,7 @@ func (m *MongoAggregatePump) DoAggregatedWriting(ctx context.Context, filteredDa m.log.Error(indexCreateErr) } - query := dbm.DBM{ + query := model.DBM{ "orgid": filteredData.OrgID, "timestamp": filteredData.TimeStamp, } @@ -375,8 +374,8 @@ func (m *MongoAggregatePump) getLastDocumentTimestamp() (time.Time, error) { tableName: analytics.AgggregateMixedCollectionName, } - var result dbm.DBM - err := m.store.Query(context.Background(), d, &result, dbm.DBM{"_sort": "-$natural", "_limit": 1}) + var result model.DBM + err := m.store.Query(context.Background(), d, &result, model.DBM{"_sort": "-$natural", "_limit": 1}) if err != nil { return time.Time{}, err } diff --git a/pumps/mongo_aggregate_test.go b/pumps/mongo_aggregate_test.go index b79523929..a2571c229 100644 --- a/pumps/mongo_aggregate_test.go +++ b/pumps/mongo_aggregate_test.go @@ -7,8 +7,7 @@ import ( "testing" "time" - "github.com/TykTechnologies/storage/persistent/dbm" - 
"github.com/TykTechnologies/storage/persistent/id" + "github.com/TykTechnologies/storage/persistent/model" "github.com/TykTechnologies/tyk-pump/analytics" "github.com/TykTechnologies/tyk-pump/analytics/demo" "github.com/sirupsen/logrus" @@ -19,11 +18,11 @@ type dummyObject struct { tableName string } -func (dummyObject) GetObjectID() id.ObjectId { +func (dummyObject) GetObjectID() model.ObjectID { return "" } -func (dummyObject) SetObjectID(id.ObjectId) {} +func (dummyObject) SetObjectID(model.ObjectID) {} func (d dummyObject) TableName() string { return d.tableName @@ -111,7 +110,7 @@ func TestDoAggregatedWritingWithIgnoredAggregations(t *testing.T) { } // we build the query using the timestamp as we do in aggregated analytics - query := dbm.DBM{ + query := model.DBM{ "orgid": "123", "timestamp": time.Date(timeNow.Year(), timeNow.Month(), timeNow.Day(), timeNow.Hour(), 0, 0, 0, timeNow.Location()), } @@ -217,7 +216,7 @@ func TestAggregationTime(t *testing.T) { keys[0] = analytics.AnalyticsRecord{APIID: "api1", OrgID: "123", TimeStamp: timeNow, APIKey: "apikey1"} } - query := dbm.DBM{ + query := model.DBM{ "orgid": "123", } diff --git a/pumps/mongo_selective.go b/pumps/mongo_selective.go index c933bf9b2..3ef9f73be 100644 --- a/pumps/mongo_selective.go +++ b/pumps/mongo_selective.go @@ -11,9 +11,7 @@ import ( "gopkg.in/vmihailenco/msgpack.v2" "github.com/TykTechnologies/storage/persistent" - "github.com/TykTechnologies/storage/persistent/dbm" - "github.com/TykTechnologies/storage/persistent/id" - "github.com/TykTechnologies/storage/persistent/index" + "github.com/TykTechnologies/storage/persistent/model" "github.com/TykTechnologies/tyk-pump/analytics" ) @@ -143,8 +141,8 @@ func (m *MongoSelectivePump) ensureIndexes(collectionName string) error { tableName: collectionName, } - apiIndex := index.Index{ - Keys: []dbm.DBM{{"apiid": 1}}, + apiIndex := model.Index{ + Keys: []model.DBM{{"apiid": 1}}, Background: m.dbConf.MongoDBType == StandardMongo, } @@ -155,8 +153,8 @@ func (m *MongoSelectivePump) ensureIndexes(collectionName string) error { // CosmosDB does not support "expireAt" option if m.dbConf.MongoDBType != CosmosDB { - ttlIndex := index.Index{ - Keys: []dbm.DBM{{"expireAt": 1}}, + ttlIndex := model.Index{ + Keys: []model.DBM{{"expireAt": 1}}, IsTTLIndex: true, TTL: 0, Background: m.dbConf.MongoDBType == StandardMongo, @@ -168,9 +166,9 @@ func (m *MongoSelectivePump) ensureIndexes(collectionName string) error { } } - logBrowserIndex := index.Index{ + logBrowserIndex := model.Index{ Name: "logBrowserIndex", - Keys: []dbm.DBM{{"timestamp": -1}, {"apiid": 1}, {"apikey": 1}, {"responsecode": 1}}, + Keys: []model.DBM{{"timestamp": -1}, {"apiid": 1}, {"apikey": 1}, {"responsecode": 1}}, Background: m.dbConf.MongoDBType == StandardMongo, } @@ -226,10 +224,10 @@ func (m *MongoSelectivePump) WriteData(ctx context.Context, data []interface{}) } // AccumulateSet organizes analytics data into a set of chunks based on their size. -func (m *MongoSelectivePump) AccumulateSet(data []interface{}) [][]id.DBObject { +func (m *MongoSelectivePump) AccumulateSet(data []interface{}) [][]model.DBObject { accumulatorTotal := 0 - returnArray := make([][]id.DBObject, 0) - thisResultSet := make([]id.DBObject, 0) + returnArray := make([][]model.DBObject, 0) + thisResultSet := make([]model.DBObject, 0) // Process each item in the data array. 
for i, item := range data { @@ -278,7 +276,7 @@ func (m *MongoSelectivePump) getItemSizeBytes(thisItem *analytics.AnalyticsRecor // accumulate processes the given item and updates the accumulator total, result set, and return array. // It manages chunking the data into separate sets based on the max batch size limit, and appends the last item when necessary. -func (m *MongoSelectivePump) accumulate(thisResultSet []id.DBObject, returnArray [][]id.DBObject, thisItem *analytics.AnalyticsRecord, sizeBytes, accumulatorTotal int, isLastItem bool) (int, []id.DBObject, [][]id.DBObject) { +func (m *MongoSelectivePump) accumulate(thisResultSet []model.DBObject, returnArray [][]model.DBObject, thisItem *analytics.AnalyticsRecord, sizeBytes, accumulatorTotal int, isLastItem bool) (int, []model.DBObject, [][]model.DBObject) { // If the item size is invalid (negative), return the current state if sizeBytes < 0 { return accumulatorTotal, thisResultSet, returnArray @@ -296,7 +294,7 @@ func (m *MongoSelectivePump) accumulate(thisResultSet []id.DBObject, returnArray returnArray = append(returnArray, thisResultSet) } - thisResultSet = make([]id.DBObject, 0) + thisResultSet = make([]model.DBObject, 0) accumulatorTotal = sizeBytes } @@ -321,7 +319,7 @@ func (m *MongoSelectivePump) WriteUptimeData(data []interface{}) { return } - keys := make([]id.DBObject, len(data)) + keys := make([]model.DBObject, len(data)) for i, v := range data { decoded := analytics.UptimeReportData{} diff --git a/pumps/mongo_selective_test.go b/pumps/mongo_selective_test.go index 1093a6429..f5daecd0f 100644 --- a/pumps/mongo_selective_test.go +++ b/pumps/mongo_selective_test.go @@ -6,8 +6,7 @@ import ( "testing" "time" - "github.com/TykTechnologies/storage/persistent/dbm" - "github.com/TykTechnologies/storage/persistent/id" + "github.com/TykTechnologies/storage/persistent/model" "github.com/TykTechnologies/tyk-pump/analytics" "github.com/stretchr/testify/assert" "gopkg.in/vmihailenco/msgpack.v2" @@ -222,7 +221,7 @@ func TestEnsureIndexes(t *testing.T) { tableName: collectionName, } // Creating the collection - err := mPump.store.Migrate(context.Background(), []id.DBObject{obj}) + err := mPump.store.Migrate(context.Background(), []model.DBObject{obj}) assert.NoError(t, err) // Creating the indexes @@ -350,7 +349,7 @@ func TestWriteUptimeDataMongoSelective(t *testing.T) { }() dbRecords := []analytics.UptimeReportData{} - err = newPump.store.Query(context.Background(), &analytics.UptimeReportData{}, &dbRecords, dbm.DBM{}) + err = newPump.store.Query(context.Background(), &analytics.UptimeReportData{}, &dbRecords, model.DBM{}) assert.NoError(t, err) // check amount of rows in the table diff --git a/pumps/mongo_test.go b/pumps/mongo_test.go index 42a8499be..25ed6cecb 100644 --- a/pumps/mongo_test.go +++ b/pumps/mongo_test.go @@ -10,8 +10,7 @@ import ( "github.com/stretchr/testify/assert" "gopkg.in/vmihailenco/msgpack.v2" - "github.com/TykTechnologies/storage/persistent/dbm" - "github.com/TykTechnologies/storage/persistent/id" + "github.com/TykTechnologies/storage/persistent/model" "github.com/TykTechnologies/tyk-pump/analytics" ) @@ -171,17 +170,17 @@ func TestMongoPumpOmitIndexCreation(t *testing.T) { } } -func CreateCollectionIfNeeded(t *testing.T, mPump *MongoPump, dbObject id.DBObject) { +func CreateCollectionIfNeeded(t *testing.T, mPump *MongoPump, dbObject model.DBObject) { t.Helper() if !HasTable(t, mPump, dbObject) { - err := mPump.store.Migrate(context.Background(), []id.DBObject{dbObject}) + err := 
mPump.store.Migrate(context.Background(), []model.DBObject{dbObject}) if err != nil { t.Error("there shouldn't be an error migrating database", err) } } } -func HasTable(t *testing.T, mPump *MongoPump, dbObject id.DBObject) bool { +func HasTable(t *testing.T, mPump *MongoPump, dbObject model.DBObject) bool { t.Helper() hasTable, err := mPump.store.HasTable(context.Background(), dbObject.TableName()) if err != nil { @@ -390,7 +389,7 @@ func TestMongoPump_AccumulateSetIgnoreDocSize(t *testing.T) { record.Tags = []string{analytics.PredefinedTagGraphAnalytics} record.RawRequest = bloat record.RawResponse = bloat - record.ApiSchema = bloat + record.APISchema = bloat } dataSet[i] = record } @@ -528,7 +527,7 @@ func TestWriteUptimeData(t *testing.T) { assert.Equal(t, true, hasTable) dbRecords := []analytics.UptimeReportData{} - if err := newPump.store.Query(context.Background(), &analytics.UptimeReportData{}, &dbRecords, dbm.DBM{}); err != nil { + if err := newPump.store.Query(context.Background(), &analytics.UptimeReportData{}, &dbRecords, model.DBM{}); err != nil { t.Fatal("Error getting analytics records from Mongo") } diff --git a/pumps/random b/pumps/random new file mode 100644 index 000000000..e69de29bb diff --git a/serializer/protobuf.go b/serializer/protobuf.go index a90e2ae45..3e830c815 100644 --- a/serializer/protobuf.go +++ b/serializer/protobuf.go @@ -85,7 +85,7 @@ func (pb *ProtobufSerializer) TransformSingleRecordToProto(rec analytics.Analyti Alias: rec.Alias, TrackPath: rec.TrackPath, OauthID: rec.OauthID, - ApiSchema: rec.ApiSchema, + ApiSchema: rec.APISchema, } rec.TimestampToProto(&record) @@ -143,7 +143,7 @@ func (pb *ProtobufSerializer) TransformSingleProtoToAnalyticsRecord(rec analytic Tags: rec.Tags, Alias: rec.Alias, TrackPath: rec.TrackPath, - ApiSchema: rec.ApiSchema, + APISchema: rec.ApiSchema, } tmpRecord.TimeStampFromProto(rec) *record = tmpRecord From 04b648152dced6533654b34a6b1a9879f91feaff Mon Sep 17 00:00:00 2001 From: Matias <83959431+mativm02@users.noreply.github.com> Date: Mon, 24 Apr 2023 09:50:15 -0300 Subject: [PATCH 060/102] adding TT-7216 and TT-8229 to readme (#609) --- README.md | 23 ++++++++++++++++++----- 1 file changed, 18 insertions(+), 5 deletions(-) diff --git a/README.md b/README.md index a2cd19f59..724baf694 100644 --- a/README.md +++ b/README.md @@ -1270,15 +1270,15 @@ TYK_PMP_PUMPS_CSV_FILTERS_APIIDS=123,789 ### Timeouts You can configure a different timeout for each pump with the configuration option `timeout`. Its default value is 0 seconds, which means that the pump will wait for the writing operation forever. +In Mongo pumps, the default value is 10 seconds. If you want to disable the timeout, you can set the value to 0. Take into account that if you disable the timeout, the pump will wait for the writing operation forever, and it could block the pump execution. -###### JSON / Conf file Example ```json "mongo": { "type": "mongo", - "timeout":5, + "timeout": 5, "meta": { "collection_name": "tyk_analytics", - "mongo_url": "mongodb://username:password@{hostname:port},{hostname:port}/{db_name}" + "mongo_url": "mongodb://username:password@{hostname:port}/{db_name}" } } ``` @@ -1302,7 +1302,7 @@ In case that you have a configured timeout, but it still takes more seconds to w `max_record_size` defines maximum size (in bytes) for Raw Request and Raw Response logs, this value defaults to 0. Is not set then tyk-pump will not trim any data and will store the full information. This can also be set at a pump level. 
For example: -```{.json} +```json "csv": { "type": "csv", "max_record_size":1000, @@ -1317,7 +1317,7 @@ This can also be set at a pump level. For example: `ignore_fields` defines a list of analytics fields that will be ignored when writing to the pump. This can be used to avoid writing sensitive information to the Database, or data that you don't really need to have. Fields must be written using JSON tags. For example: -```{.json} +```json "csv": { "type": "csv", "ignore_fields":["api_id","api_version"], @@ -1327,6 +1327,19 @@ Fields must be written using JSON tags. For example: } ``` +### Decode Raw Request & Raw Response +`raw_request_decoded` and `raw_response_decoded` decode from base64 the raw request and raw response fields before writing to Pump. This is useful if you want to search for specific values in the raw request/response. Both are disabled by default. +```json +"csv": { + "type": "csv", + "raw_request_decoded": true, + "raw_response_decoded": true, + "meta": { + "csv_dir": "./" + } +} +``` + ## Compiling & Testing 1. Download dependent packages: From 0cc6569f81d638b535536c708f76b0324244e5a6 Mon Sep 17 00:00:00 2001 From: Sedky Shamalah Date: Tue, 25 Apr 2023 16:19:16 +0400 Subject: [PATCH 061/102] add the purge logging to resurface pump (#611) --- pumps/resurface.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/pumps/resurface.go b/pumps/resurface.go index a5d2daca1..e820e9cd3 100644 --- a/pumps/resurface.go +++ b/pumps/resurface.go @@ -233,5 +233,7 @@ func (rp *ResurfacePump) WriteData(ctx context.Context, data []interface{}) erro logger.SendHttpMessage(rp.logger, &resp, &req, decoded.TimeStamp.Unix()*1000, decoded.RequestTime, customFields) } + rp.log.Info("Purged ", len(data), " records...") + return nil } From b925e6f2426b0f7eae4643f61a42552adbfe2b10 Mon Sep 17 00:00:00 2001 From: Matias <83959431+mativm02@users.noreply.github.com> Date: Wed, 26 Apr 2023 10:05:42 -0300 Subject: [PATCH 062/102] [TT-8782] Skip raw_request_decoded and raw_response_decoded if Pump is one of Mongo or SQL (#614) * skipping decoding if pump is Mongo or SQL * modifying readme * overriding methods * linting * overriding setters --- README.md | 285 ++++++++++++++++++++-------------- pumps/graph_mongo.go | 12 ++ pumps/graph_mongo_test.go | 19 +++ pumps/mongo.go | 12 ++ pumps/mongo_aggregate.go | 12 ++ pumps/mongo_aggregate_test.go | 19 +++ pumps/mongo_selective.go | 12 ++ pumps/mongo_selective_test.go | 19 +++ pumps/mongo_test.go | 19 +++ pumps/sql.go | 32 ++-- pumps/sql_aggregate.go | 26 +++- pumps/sql_aggregate_test.go | 35 ++++- pumps/sql_test.go | 39 +++-- 13 files changed, 388 insertions(+), 153 deletions(-) diff --git a/README.md b/README.md index 724baf694..c32316330 100644 --- a/README.md +++ b/README.md @@ -16,37 +16,37 @@ Tyk Pump is a pluggable analytics purger to move Analytics generated by your Tyk The table below provides details on the fields within each `tyk_analytics` record. -| Analytics Data Field | Description | Remarks | Example | -| :--- | :--- | :--- | :--- | -| `Method` | Request method | | `GET`, `POST` | -| `Host` | Request `Host` header | Includes host and optional port number of the server to which the request was sent. | `tyk.io`, or `tyk.io:8080` if port is included | -| `Path` | Request path | Displayed in decoded form. | `/foo/bar` for `/foo%2Fbar` or `/foo/bar` | -| `RawPath` | Request path | Same value as `Path`. Does not provide the raw encoded path. 
| `/foo/bar` for `/foo%2Fbar` or `/foo/bar` | -| `ContentLength` | Request `Content-Length` header | The number of bytes in the request body. | `10` for request body `0123456789` | -| `UserAgent` | Request `User-Agent` header | | `curl/7.86.0` | -| `Day` | Request day | Based on `TimeStamp` field. | `16` for `2022-11-16T03:01:54Z` | -| `Month` | Request month | Based on `TimeStamp` field. | `11` for `2022-11-16T03:01:54Z` | -| `Year` | Request year | Based on `TimeStamp` field. | `2022` for `2022-11-16T03:01:54Z` | -| `Hour` | Request hour | Based on `TimeStamp` field. | `3` for `2022-11-16T03:01:54Z` | -| `ResponseCode` | Response code | Only contains the integer element of the response code. Can be generated by either the gateway or upstream server, depending on how the request is handled. | `200` for `200 OK` | -| `APIKey` | `Request authentication key` | Authentication key, as provided in request. If no API key is provided then gateway will substitute a default value. | Unhashed `auth_key`, hashed `6129dc1e8b64c6b4`, or `00000000` if no authentication provided. | -| `TimeStamp` | Request timestamp | Generated by the gateway, based on the time it receives the request from the client. | `2022-11-16T03:01:54.648+00:00` | -| `APIVersion` | Version of API Definition requested | Based on version configuration of context API definition. If API is unversioned then value is "Not Versioned". | Could be an alphanumeric value such as `1` or `b`. Is `Not Versioned` if not versioned. | -| `APIName` | Name of API Definition requested | | `Foo API` | -| `APIID` | Id of API Definition requested | | `727dad853a8a45f64ab981154d1ffdad` | -| `OrgID` | Organisation Id of API Definition requested | | `5e9d9544a1dcd60001d0ed20` | -| `OauthID` | Id of OAuth client | Value is empty string if not using OAuth, or OAuth client not present. | `my-oauth-client-id` | -| `RequestTime` | Duration of upstream roundtrip | Equal to value of `Latency.Total` field. | `3` for a 3ms roundtrip | -| `RawRequest` | Raw HTTP request | Base64 encoded copy of the request sent from the gateway to the upstream server. | `R0VUIC9nZXQgSFRUUC8xLjEKSG9zdDogdHlrLmlv` | -| `RawResponse` | Raw HTTP response | Base64 encoded copy of the response sent from the gateway to the client. | `SFRUUC8xLjEgMjAwIE9LCkNvbnRlbnQtTGVuZ3RoOiAxOQpEYXRlOiBXZWQsIDE2IE5vdiAyMDIyIDA2OjIxOjE2IEdNVApTZXJ2ZXI6IGd1bmljb3JuLzE5LjkuMAoKewogICJmb28iOiAiYmFyIgp9Cg==` | -| `IPAddress` | Client IP address | Taken from either `X-Real-IP` or `X-Forwarded-For` request headers, if set. Otherwise, determined by gateway based on request. | `172.18.0.1` | -| `Geo` | Client geolocation data | Calculated using MaxMind database, based on client IP address. | `{"country":{"isocode":"SG"},"city":{"geonameid":0,"names":{}},"location":{"latitude":0,"longitude":0,"timezone":""}}` | -| `Network` | Network statistics | Not currently used. | N/A | -| `Latency` | Latency statistics | Contains two fields; `upstream` is the roundtrip duration between the gateway sending the request to the upstream server and it receiving a response. `total` is the `upstream` value plus additional gateway-side functionality such as processing analytics data. | `{"total":3,"upstream":3}` | -| `Tags` | Session context tags | Can contain many tags which refer to many things, such as the gateway, API key, organisation, API definition etc. | `["key-00000000","org-5e9d9544a1dcd60001d0ed20","api-accbdd1b89e84ec97f4f16d4e3197d5c"]` | -| `Alias` | Session alias | Alias of the context authenticated identity. 
Blank if no alias set or request is unauthenticated. | `my-key-alias` | -| `TrackPath` | Tracked endpoint flag | Value is `true` if the requested endpoint is configured to be tracked, otherwise `false`. | `true` or `false` | -| `ExpireAt` | Future expiry date | Can be used to implement automated data expiry, if supported by storage. | `2022-11-23T07:26:25.762+00:00` | +| Analytics Data Field | Description | Remarks | Example | +| :------------------- | :------------------------------------------ | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | :------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `Method` | Request method | | `GET`, `POST` | +| `Host` | Request `Host` header | Includes host and optional port number of the server to which the request was sent. | `tyk.io`, or `tyk.io:8080` if port is included | +| `Path` | Request path | Displayed in decoded form. | `/foo/bar` for `/foo%2Fbar` or `/foo/bar` | +| `RawPath` | Request path | Same value as `Path`. Does not provide the raw encoded path. | `/foo/bar` for `/foo%2Fbar` or `/foo/bar` | +| `ContentLength` | Request `Content-Length` header | The number of bytes in the request body. | `10` for request body `0123456789` | +| `UserAgent` | Request `User-Agent` header | | `curl/7.86.0` | +| `Day` | Request day | Based on `TimeStamp` field. | `16` for `2022-11-16T03:01:54Z` | +| `Month` | Request month | Based on `TimeStamp` field. | `11` for `2022-11-16T03:01:54Z` | +| `Year` | Request year | Based on `TimeStamp` field. | `2022` for `2022-11-16T03:01:54Z` | +| `Hour` | Request hour | Based on `TimeStamp` field. | `3` for `2022-11-16T03:01:54Z` | +| `ResponseCode` | Response code | Only contains the integer element of the response code. Can be generated by either the gateway or upstream server, depending on how the request is handled. | `200` for `200 OK` | +| `APIKey` | `Request authentication key` | Authentication key, as provided in request. If no API key is provided then gateway will substitute a default value. | Unhashed `auth_key`, hashed `6129dc1e8b64c6b4`, or `00000000` if no authentication provided. | +| `TimeStamp` | Request timestamp | Generated by the gateway, based on the time it receives the request from the client. | `2022-11-16T03:01:54.648+00:00` | +| `APIVersion` | Version of API Definition requested | Based on version configuration of context API definition. If API is unversioned then value is "Not Versioned". | Could be an alphanumeric value such as `1` or `b`. Is `Not Versioned` if not versioned. | +| `APIName` | Name of API Definition requested | | `Foo API` | +| `APIID` | Id of API Definition requested | | `727dad853a8a45f64ab981154d1ffdad` | +| `OrgID` | Organisation Id of API Definition requested | | `5e9d9544a1dcd60001d0ed20` | +| `OauthID` | Id of OAuth client | Value is empty string if not using OAuth, or OAuth client not present. | `my-oauth-client-id` | +| `RequestTime` | Duration of upstream roundtrip | Equal to value of `Latency.Total` field. | `3` for a 3ms roundtrip | +| `RawRequest` | Raw HTTP request | Base64 encoded copy of the request sent from the gateway to the upstream server. 
| `R0VUIC9nZXQgSFRUUC8xLjEKSG9zdDogdHlrLmlv` | +| `RawResponse` | Raw HTTP response | Base64 encoded copy of the response sent from the gateway to the client. | `SFRUUC8xLjEgMjAwIE9LCkNvbnRlbnQtTGVuZ3RoOiAxOQpEYXRlOiBXZWQsIDE2IE5vdiAyMDIyIDA2OjIxOjE2IEdNVApTZXJ2ZXI6IGd1bmljb3JuLzE5LjkuMAoKewogICJmb28iOiAiYmFyIgp9Cg==` | +| `IPAddress` | Client IP address | Taken from either `X-Real-IP` or `X-Forwarded-For` request headers, if set. Otherwise, determined by gateway based on request. | `172.18.0.1` | +| `Geo` | Client geolocation data | Calculated using MaxMind database, based on client IP address. | `{"country":{"isocode":"SG"},"city":{"geonameid":0,"names":{}},"location":{"latitude":0,"longitude":0,"timezone":""}}` | +| `Network` | Network statistics | Not currently used. | N/A | +| `Latency` | Latency statistics | Contains two fields; `upstream` is the roundtrip duration between the gateway sending the request to the upstream server and it receiving a response. `total` is the `upstream` value plus additional gateway-side functionality such as processing analytics data. | `{"total":3,"upstream":3}` | +| `Tags` | Session context tags | Can contain many tags which refer to many things, such as the gateway, API key, organisation, API definition etc. | `["key-00000000","org-5e9d9544a1dcd60001d0ed20","api-accbdd1b89e84ec97f4f16d4e3197d5c"]` | +| `Alias` | Session alias | Alias of the context authenticated identity. Blank if no alias set or request is unauthenticated. | `my-key-alias` | +| `TrackPath` | Tracked endpoint flag | Value is `true` if the requested endpoint is configured to be tracked, otherwise `false`. | `true` or `false` | +| `ExpireAt` | Future expiry date | Can be used to implement automated data expiry, if supported by storage. | `2022-11-23T07:26:25.762+00:00` | # Pumps / Back ends supported: @@ -70,7 +70,7 @@ The table below provides details on the fields within each `tyk_analytics` recor # Configuration: -This will be your base config. We will then add 1 or more Pumps based off our selected data sinks. +This will be your base config. We will then add 1 or more Pumps based off our selected data sinks.
JSON / Conf File @@ -112,11 +112,11 @@ Create a `pump.conf` file: "max_record_size": 1000 } ``` +
Env Variables - ``` TYK_PMP_OMITCONFIGFILE=true @@ -133,14 +133,15 @@ TYK_PMP_PURGEDELAY=2 TYK_PMP_DONTPURGEUPTIMEDATA=true ``` +
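Judging from the samples shown in this README, the environment-variable form mirrors the JSON structure: each setting is `TYK_PMP_` followed by the upper-cased path of the field, with underscores inside a key removed and nesting levels joined by `_`. A hedged illustration of that mapping, using values that appear elsewhere in this document:

```
# "uptime_pump_config": { "collection_name": "tyk_uptime_analytics" }
TYK_PMP_UPTIMEPUMPCONFIG_COLLECTIONNAME=tyk_uptime_analytics

# "pumps": { "mongo": { "meta": { "collection_name": "tyk_analytics" } } }
TYK_PMP_PUMPS_MONGO_META_COLLECTIONNAME=tyk_analytics
```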
- + ## Base Configuration Fields Explained ### analytics_storage_config -This is the Tyk Pump's primary database which it scrapes Tyk Gateway analytics from. Normally this is `redis`. +This is the Tyk Pump's primary database which it scrapes Tyk Gateway analytics from. Normally this is `redis`. ```json "analytics_storage_config": { @@ -158,6 +159,7 @@ This is the Tyk Pump's primary database which it scrapes Tyk Gateway analytics f "redis_ssl_insecure_skip_verify": false }, ``` + `redis_use_ssl` - Setting this to true to use SSL when connecting to Redis `redis_ssl_insecure_skip_verify` - Set this to true to tell Pump to ignore Redis' cert validation @@ -172,12 +174,12 @@ This is the Tyk Pump's primary database which it scrapes Tyk Gateway analytics f ### Logs -`log_level` - Set the logger details for tyk-pump. The posible values are: `info`,`debug`,`error` and `warn`. By default, the log level is `info`. +`log_level` - Set the logger details for tyk-pump. The posible values are: `info`,`debug`,`error` and `warn`. By default, the log level is `info`. `log_format` - Set the logger format. The possible values are: `text` and `json`. By default, the log format is `text`. - ### Health Check + You can configure the health check endpoint and port for the Tyk Pump: - `health_check_endpoint_name` - The default is "health" @@ -208,6 +210,7 @@ The minimum required configurations for uptime pumps are: - `mongo_url` - The uptime pump mongo connection url. It is usually something like "mongodb://username:password@{hostname:port},{hostname:port}/{db_name}". ###### JSON / Conf File + ``` { "uptime_pump_config": { @@ -219,21 +222,24 @@ The minimum required configurations for uptime pumps are: ``` ###### Env Variables: + ``` -TYK_PMP_UPTIMEPUMPCONFIG_COLLECTIONNAME=tyk_uptime_analytics -TYK_PMP_UPTIMEPUMPCONFIG_MONGOURL=mongodb://tyk-mongo:27017/tyk_analytics -TYK_PMP_UPTIMEPUMPCONFIG_MAXINSERTBATCHSIZEBYTES=500000 -TYK_PMP_UPTIMEPUMPCONFIG_MAXDOCUMENTSIZEBYTES=200000 +TYK_PMP_UPTIMEPUMPCONFIG_COLLECTIONNAME=tyk_uptime_analytics +TYK_PMP_UPTIMEPUMPCONFIG_MONGOURL=mongodb://tyk-mongo:27017/tyk_analytics +TYK_PMP_UPTIMEPUMPCONFIG_MAXINSERTBATCHSIZEBYTES=500000 +TYK_PMP_UPTIMEPUMPCONFIG_MAXDOCUMENTSIZEBYTES=200000 TYK_PMP_UPTIMEPUMPCONFIG_LOGLEVEL=info ``` ## SQL Uptime Pump -*Supported in Tyk Pump v1.5.0+* + +_Supported in Tyk Pump v1.5.0+_ In `uptime_pump_config` you can configure a SQL uptime pump. To do that, you need to add the field `uptime_type` with `sql` value. -You can also use different types of SQL Uptime pumps, like `postgres` or `sqlite` using the `type` field. +You can also use different types of SQL Uptime pumps, like `postgres` or `sqlite` using the `type` field. 
###### JSON / Conf file Example + ``` "uptime_pump_config": { "uptime_type": "sql", @@ -245,6 +251,7 @@ You can also use different types of SQL Uptime pumps, like `postgres` or `sqlite ``` ###### Env Variables: + ``` TYK_PMP_UPTIMEPUMPCONFIG_UPTIMETYPE=sql TYK_PMP_UPTIMEPUMPCONFIG_TYPE=postgres @@ -258,6 +265,7 @@ TYK_PMP_UPTIMEPUMPCONFIG_LOGLEVEL=info Example of integrating with GrayLog: ###### JSON / Conf file Example + ``` "graylog": { "type": "graylog", @@ -284,6 +292,7 @@ Example of integrating with GrayLog: ``` ###### Env Variables: + ``` TYK_PMP_PUMPS_GRAYLOG_TYPE=graylog TYK_PMP_PUMPS_GRAYLOG_META_GRAYLOGHOST=10.60.6.15 @@ -292,12 +301,13 @@ TYK_PMP_PUMPS_GRAYLOG_META_TAGS=method,path,response_code,api_key,api_version,ap ``` ## Resurface.io + Resurface provides data-driven API security, by making each and every API call a durable transaction inside a purpose-built data lake. Use Resurface for attack and failure triage, root cause, threat and risk identification, and simply just knowing how your APIs are being used (and misused!). By continously scanning your own data lake, Resurface provides retroactive analysis. It identifies what's important in your API data, sending warnings and alerts in real-time for fast action. The only two fields necessary in the pump cofiguration are: - - `capture_url` corresponds to the Resurface database [capture endpoint URL](https://resurface.io/docs/#getting-capture-url). You might need to subsitute `localhost` for the corresponding hostname, if you're not running resurface locally. - - `rules` corresponds to an [active set of rules](https://resurface.io/logging-rules) that control what data is logged and how sensitive data is masked. The example below applies a predefined set of rules (`include debug`), but logging rules are easily customized to meet the needs of any application. +- `capture_url` corresponds to the Resurface database [capture endpoint URL](https://resurface.io/docs/#getting-capture-url). You might need to subsitute `localhost` for the corresponding hostname, if you're not running resurface locally. +- `rules` corresponds to an [active set of rules](https://resurface.io/logging-rules) that control what data is logged and how sensitive data is masked. The example below applies a predefined set of rules (`include debug`), but logging rules are easily customized to meet the needs of any application. **Note: Resurface requires Detailed Logging to be enabled in order to capture API call details in full.** @@ -315,6 +325,7 @@ The only two fields necessary in the pump cofiguration are: ``` ###### Env Variables + ``` TYK_PMP_PUMPS_RESURFACEIO_TYPE=resurfaceio TYK_PMP_PUMPS_RESURFACEIO_META_URL=http://localhost:7701/message @@ -324,7 +335,9 @@ TYK_PMP_PUMPS_RESURFACEIO_META_RULES="include debug" ## StatsD Example of integrating with StatsD: + ###### JSON / Conf file Example + ``` { "pumps": { @@ -355,8 +368,8 @@ Example of integrating with StatsD: By default, StatsD pump will put the analytic record method and path in your path field. From Pump 1.6+ you can set `separated_method` to true in your Statsd pump meta config in order to have the method attribute in a separated field. - ###### Env Variables: + ``` TYK_PMP_PUMPS_STATSD_TYPE=statsd TYK_PMP_PUMPS_STATSD_META_ADDRESS="localhost:8125" @@ -366,13 +379,14 @@ TYK_PMP_PUMPS_STATSD_META_SEPARATEDMETHOD=false ``` ## Mongo & Tyk Dashboard. -There are 3 mongo pumps. You may use one or multiple depending on the data you want. 
-The Tyk Dashboard uses various Mongo collections to store and visualize API traffic analytics. Please visit [this link](https://tyk.io/docs/tyk-pump/tyk-pump-configuration/tyk-pump-dashboard-config/) for steps on configuration. -Available Mongo instances are: Standard Mongo, DocumentDB (AWS), CosmosDB (Azure). All of them using the same configuration (CosmosDB does not support "expireAt" index, so it will be skipped) +There are 3 mongo pumps. You may use one or multiple depending on the data you want. +The Tyk Dashboard uses various Mongo collections to store and visualize API traffic analytics. Please visit [this link](https://tyk.io/docs/tyk-pump/tyk-pump-configuration/tyk-pump-dashboard-config/) for steps on configuration. +Available Mongo instances are: Standard Mongo, DocumentDB (AWS), CosmosDB (Azure). All of them using the same configuration (CosmosDB does not support "expireAt" index, so it will be skipped) ###### JSON / Conf File + ```.json { ... @@ -407,6 +421,7 @@ Available Mongo instances are: Standard Mongo, DocumentDB (AWS), CosmosDB (Azure ``` ###### Env Variables + ``` TYK_PMP_PUMPS_MONGO_TYPE=mongo TYK_PMP_PUMPS_MONGO_META_COLLECTIONNAME=tyk_analytics @@ -421,6 +436,7 @@ TYK_PMP_PUMPS_MONGOAGG_META_ENABLESELFHEALING=true ``` ###### Self Healing + By default, the maximum size of a document in MongoDB is 16MB. If we try to update a document that has grown to this size, an error is received. The Mongo Aggregate pump creates a new document in the database for each "aggregation period"; the length of that period is defined by `aggregation_time`. If, during that period (in minutes) the document grows beyond 16MB, the error will be received and no more records will be recorded until the end of the aggregation period (when a new document will be created). @@ -438,10 +454,12 @@ For example, if the `aggregation_time` is configured as 50 (minutes) but the doc Note that `store_analytics_per_minute` takes precedence over `aggregation_time` so if `store_analytics_per_minute` is equal to true, the value of `aggregation_time` will be equal to 1 and self healing will not operate. ## Mongo Graph Pump -As of Pump 1.7+, a new mongo is available called the `mongo_graph` pump. This pump is specifically for parsing + +As of Pump 1.7+, a new mongo is available called the `mongo_graph` pump. This pump is specifically for parsing GraphQL and UDG requests, tracking information like types requested, fields requested, specific graphql body errors etc. A sample config looks like this: + ```json { "pumps": { @@ -456,10 +474,12 @@ A sample config looks like this: ``` ## SQL Graph Pump + Similar to the Mongo graph pump, the `sql-graph` pump is a specialized pump for parsing and recording granular analytics for GraphQL and UDG requests. The difference, like the name says is this pump uses sql type databases as its storage db. Supported SQL databases are `sqlite`, `postgres`, `mysql`. A sample config looks like this: + ```json { "pumps": { @@ -476,7 +496,7 @@ A sample config looks like this: ``` `table_sharding` - This determines how the sql tables are created, if this is set to true, a new table is created for each day of records for the graph data. -The name format for each table is _. Defaults to false. +The name format for each table is *. Defaults to false. ## Elasticsearch Config @@ -497,12 +517,14 @@ The name format for each table is _. Defaults to false. `"disable_bulk"` - Disable batch writing. Defaults to false. `bulk_config`: Batch writing trigger configuration. 
Each option is an OR with eachother: - * `workers`: Number of workers. Defaults to 1. - * `flush_interval`: Specifies the time in seconds to flush the data and send it to ES. Default disabled. - * `bulk_actions`: Specifies the number of requests needed to flush the data and send it to ES. Defaults to 1000 requests. If it is needed, can be disabled with -1. - * `bulk_size`: Specifies the size (in bytes) needed to flush the data and send it to ES. Defaults to 5MB. If it is needed, can be disabled with -1. + +- `workers`: Number of workers. Defaults to 1. +- `flush_interval`: Specifies the time in seconds to flush the data and send it to ES. Default disabled. +- `bulk_actions`: Specifies the number of requests needed to flush the data and send it to ES. Defaults to 1000 requests. If it is needed, can be disabled with -1. +- `bulk_size`: Specifies the size (in bytes) needed to flush the data and send it to ES. Defaults to 5MB. If it is needed, can be disabled with -1. ###### Env Variables + ``` TYK_PMP_PUMPS_ELASTICSEARCH_TYPE=elasticsearch TYK_PMP_PUMPS_ELASTICSEARCH_META_INDEXNAME=tyk_analytics @@ -517,9 +539,10 @@ TYK_PMP_PUMPS_ELASTICSEARCH_META_BULKCONFIG_FLUSHINTERVAL=60 ``` ## Moesif Config + [Moesif](https://www.moesif.com/?language=tyk-api-gateway) is a user-centric API analytics and monitoring service for APIs. [More Info on Moesif for Tyk](https://www.moesif.com/solutions/track-api-program?language=tyk-api-gateway) -- `"application_id"` - Moesif Application Id. You can find your Moesif Application Id from [_Moesif Dashboard_](https://www.moesif.com/) -> _Top Right Menu_ -> _API Keys_ . Moesif recommends creating separate Application Ids for each environment such as Production, Staging, and Development to keep data isolated. +- `"application_id"` - Moesif Application Id. You can find your Moesif Application Id from [_Moesif Dashboard_](https://www.moesif.com/) -> _Top Right Menu_ -> _API Keys_ . Moesif recommends creating separate Application Ids for each environment such as Production, Staging, and Development to keep data isolated. - `"request_header_masks"` - (optional) An option to mask a specific request header field. Type: String Array `[] string` - `"request_body_masks"` - (optional) An option to mask a specific - request body field. Type: String Array `[] string` - `"response_header_masks"` - (optional) An option to mask a specific response header field. Type: String Array `[] string` @@ -532,17 +555,17 @@ TYK_PMP_PUMPS_ELASTICSEARCH_META_BULKCONFIG_FLUSHINTERVAL=60 - `"authorization_user_id_field"` - (optional) An optional field name use to parse the User from authorization header in Moesif. Type: String. Default value is `sub`. - `"enable_bulk"` - Set this to `true` to enable `bulk_config`. - `"bulk_config"`- (optional) Batch writing trigger configuration. - * `"event_queue_size"` - (optional) An optional field name which specify the maximum number of events to hold in queue before sending to Moesif. In case of network issues when not able to connect/send event to Moesif, skips adding new events to the queue to prevent memory overflow. Type: int. Default value is `10000`. - * `"batch_size"` - (optional) An optional field name which specify the maximum batch size when sending to Moesif. Type: int. Default value is `200`. - * `"timer_wake_up_seconds"` - (optional) An optional field which specifies a time (every n seconds) how often background thread runs to send events to moesif. Type: int. Default value is `2` seconds. 
+ - `"event_queue_size"` - (optional) An optional field name which specify the maximum number of events to hold in queue before sending to Moesif. In case of network issues when not able to connect/send event to Moesif, skips adding new events to the queue to prevent memory overflow. Type: int. Default value is `10000`. + - `"batch_size"` - (optional) An optional field name which specify the maximum batch size when sending to Moesif. Type: int. Default value is `200`. + - `"timer_wake_up_seconds"` - (optional) An optional field which specifies a time (every n seconds) how often background thread runs to send events to moesif. Type: int. Default value is `2` seconds. ###### Env Variables + ``` TYK_PMP_PUMPS_MOESIF_TYPE=moesif TYK_PMP_PUMPS_MOESIF_META_APPLICATIONID="" ``` - ## Hybrid RPC Config Hybrid Pump allows you to install Tyk Pump inside Multi-Cloud or MDCB Worker installations. You can configure Tyk Pump to send data to the source of your choice (i.e. ElasticSearch), and in parallel, forward analytics to the Tyk Cloud. Additionally, you can set the aggregated flag to send only aggregated analytics to MDCB or Tyk Cloud, in order to save network bandwidth between DCs. @@ -568,6 +591,7 @@ call_timeout - This is the timeout (in milliseconds) for RPC calls. rpc_pool_size - This is maximum number of connections to MDCB. ###### Env Variables + ``` TYK_PMP_PUMPS_HYBRID_TYPE=hybrid TYK_PMP_PUMPS_HYBRID_META_RPCKEY=5b5fd341e6355b5eb194765e @@ -583,24 +607,29 @@ TYK_PMP_PUMPS_HYBRID_META_RPCPOOLSIZE=30 ``` ## Prometheus + Prometheus is an open-source monitoring system with a dimensional data model, flexible query language, efficient time series database and modern alerting approach. `Note` - When run as docker image then `"listen_address": ":9090"` Tyk expose the following counters: + - tyk_http_status{code, api} - tyk_http_status_per_path{code, api, path, method} - tyk_http_status_per_key{code, key} - tyk_http_status_per_oauth_client{code, client_id} And the following Histogram for latencies: + - tyk_latency{type, api} Note: base metric families can be removed by configuring the `disabled_metrics` property. #### Custom Prometheus metrics + From Pump 1.6+ it's possible to add custom prometheus metrics using the `custom_metrics` configuration. For example: + ```json "prometheus": { "type": "prometheus", @@ -618,17 +647,19 @@ For example: } }, ``` + This will create a metric for HTTP status code and API name. There are 2 types of `metric_type`: `counter` and `histogram`. -If you are using `histogram`, its always going to use the `request_time` to observe, and you can also set the configuration option `buckets` where you can define the buckets into which observations are counted. +If you are using `histogram`, its always going to use the `request_time` to observe, and you can also set the configuration option `buckets` where you can define the buckets into which observations are counted. `buckets` type is an array of float64 and its default value is `[1, 2, 5, 7, 10, 15, 20, 25, 30, 40, 50, 60, 70, 80, 90, 100, 200, 300, 400, 500, 1000, 2000, 5000, 10000, 30000, 60000]`. The `labels` configuration determines the label name and value extracted from the analytic record. 
-The available values are: `["host","method", "path", "response_code", "api_key", "time_stamp", "api_version", "api_name", "api_id", "org_id", "oauth_id", "request_time", "ip_address", "alias"]` +The available values are: `["host","method", "path", "response_code", "api_key", "time_stamp", "api_version", "api_name", "api_id", "org_id", "oauth_id", "request_time", "ip_address", "alias"]` ###### JSON / Conf File + ```.json { ... @@ -639,12 +670,13 @@ The available values are: `["host","method", "path", "response_code", "api_key" "listen_address": "localhost:9090", "path": "/metrics" } - } + } } } ``` ###### Env Variables + ``` TYK_PMP_PUMPS_PROMETHEUS_TYPE=prometheus TYK_PMP_PUMPS_PROMETHEUS_META_ADDR=localhost:9090 @@ -665,6 +697,7 @@ TYK_PMP_PUMPS_PROMETHEUS_META_DISABLEDMETRICS=[] - `tags`: List of tags to be added to the metric. The possible options are listed in the below example If no tag is specified the fallback behavior is to use the below tags: + - `path` - `method` - `response_code` @@ -704,6 +737,7 @@ Note that this configuration can generate significant charges due to the unbound ``` On startup, you should see the loaded configs when initializing the dogstatsd pump + ``` [May 10 15:23:44] INFO dogstatsd: initializing pump [May 10 15:23:44] INFO dogstatsd: namespace: pump. @@ -712,8 +746,8 @@ On startup, you should see the loaded configs when initializing the dogstatsd pu [May 10 15:23:44] INFO dogstatsd: async_uds: true, write_timeout: 2s ``` - ###### Env Variables + ``` TYK_PMP_PUMPS_DOGSTATSD_TYPE=dogstatsd TYK_PMP_PUMPS_DOGSTATSD_META_ADDRESS=localhost:8125 @@ -727,10 +761,10 @@ TYK_PMP_PUMPS_DOGSTATSD_META_TAGS=method,response_code,api_version,api_name,api_ ## Splunk -Setting up Splunk with a *HTTP Event Collector* +Setting up Splunk with a _HTTP Event Collector_ - `collector_token`: address of the datadog agent including host & port -- `collector_url`: endpoint the Pump will send analytics too. Should look something like: +- `collector_url`: endpoint the Pump will send analytics too. Should look something like: `https://splunk:8088/services/collector/event` @@ -738,12 +772,10 @@ Setting up Splunk with a *HTTP Event Collector* - `obfuscate_api_keys`: (optional) Controls whether the pump client should hide the API key. In case you still need substring of the value, check the next option. Type: Boolean. Default value is `false`. - `obfuscate_api_keys_length`: (optional) Define the number of the characters from the end of the API key. The `obfuscate_api_keys` should be set to `true`. Type: Integer. Default value is `0`. - `fields`: (optional) Define which Analytics fields should participate in the Splunk event. Check the available fields in the example below. Type: String Array `[] string`. Default value is `["method", "path", "response_code", "api_key", "time_stamp", "api_version", "api_name", "api_id", "org_id", "oauth_id", "raw_request", "request_time", "raw_response", "ip_address"]` -- `ignore_tag_prefix_list`: (optional) Choose which tags to be ignored by the Splunk Pump. Keep in mind that the tag name and value are hyphenated. Type: Type: String Array `[] string`. Default value is `[]` +- `ignore_tag_prefix_list`: (optional) Choose which tags to be ignored by the Splunk Pump. Keep in mind that the tag name and value are hyphenated. Type: Type: String Array `[] string`. Default value is `[]` - `enable_batch`: If this is set to `true`, pump is going to send the analytics records in batch to Splunk. Type: Boolean. Default value is `false`. 
- `max_content_length`: Max content length in bytes to be sent in batch requests. It should match the `max_content_length` configured in Splunk. If the purged analytics records size don't reach the amount of bytes, they're send anyways in each `purge_loop`. Type: Integer. Default value is 838860800 (~ 800 MB), the same default value as Splunk config. - - ###### JSON / Conf File ```json @@ -828,7 +860,6 @@ Example simplest configuration just needs the token for sending data to your log } ``` - ###### Env Variables ``` @@ -844,22 +875,21 @@ More advanced fields: `meta.disk_threshold` - Set disk queue threshold, once the threshold is crossed the sender will not enqueue the received logs. Default value is `98` (percentage of disk). `meta.check_disk_space` - Set the sender to check if it crosses the maximum allowed disk usage. Default value is `true`. - ## Kafka Config -* `broker`: The list of brokers used to discover the partitions available on the kafka cluster. E.g. "localhost:9092" -* `use_ssl`: Enables SSL connection. -* `ssl_insecure_skip_verify`: Controls whether the pump client verifies the kafka server's certificate chain and host name. -* `client_id`: Unique identifier for client connections established with Kafka. -* `topic`: The topic that the writer will produce messages to. -* `timeout`: Timeout is the maximum amount of time will wait for a connect or write to complete. -* `compressed`: Enable "github.com/golang/snappy" codec to be used to compress Kafka messages. By default is false -* `meta_data`: Can be used to set custom metadata inside the kafka message -* `ssl_cert_file`: Can be used to set custom certificate file for authentication with kafka. -* `ssl_key_file`: Can be used to set custom key file for authentication with kafka. - +- `broker`: The list of brokers used to discover the partitions available on the kafka cluster. E.g. "localhost:9092" +- `use_ssl`: Enables SSL connection. +- `ssl_insecure_skip_verify`: Controls whether the pump client verifies the kafka server's certificate chain and host name. +- `client_id`: Unique identifier for client connections established with Kafka. +- `topic`: The topic that the writer will produce messages to. +- `timeout`: Timeout is the maximum amount of time will wait for a connect or write to complete. +- `compressed`: Enable "github.com/golang/snappy" codec to be used to compress Kafka messages. By default is false +- `meta_data`: Can be used to set custom metadata inside the kafka message +- `ssl_cert_file`: Can be used to set custom certificate file for authentication with kafka. +- `ssl_key_file`: Can be used to set custom key file for authentication with kafka. ###### JSON / Conf File + ```.json { ... @@ -885,6 +915,7 @@ More advanced fields: ``` ###### Env Variables + ``` TYK_PMP_PUMPS_KAFKA_TYPE=kafka TYK_PMP_PUMPS_KAFKA_META_BROKER=localhost:9092 @@ -897,21 +928,20 @@ TYK_PMP_PUMPS_KAFKA_META_COMPRESSED=true TYK_PMP_PUMPS_KAFKA_META_METADATA_KEY=value ``` - ## Influx2 Config + Supported in Tyk Pump v1.5.1+ This pump uses the official Go client library for InfluxDB 2.x. +Configuration options: -Configuration options: - -* `"organization"` - InfluxDB organization name. -* `"bucket"` - InfluxDB bucket where the analytic data is going to be stored. -* `"create_missing_bucket"` - Set this to true if you want to create the bucket if not exists. Defaults to false. 
-* `"new_bucket_config"` - If `"create_missing_bucket"`is true, you can configure the new bucket configuration under `"new_bucket_config"`: - * `"description"` - Description of the bucket. This is going to be visible in the Influx UI. - * `"retention_rules"`- This is a slice of retention rules for this bucket. An example of this would be: +- `"organization"` - InfluxDB organization name. +- `"bucket"` - InfluxDB bucket where the analytic data is going to be stored. +- `"create_missing_bucket"` - Set this to true if you want to create the bucket if not exists. Defaults to false. +- `"new_bucket_config"` - If `"create_missing_bucket"`is true, you can configure the new bucket configuration under `"new_bucket_config"`: + - `"description"` - Description of the bucket. This is going to be visible in the Influx UI. + - `"retention_rules"`- This is a slice of retention rules for this bucket. An example of this would be: ```.json "retention_rules":[ { @@ -921,10 +951,11 @@ Configuration options: ] ``` which would mean that the data in the bucket expires every 100000 seconds. -* `"token"` - Influx DB Auth token -* `"tags"` - Which elements should work as a tag for the time series. +- `"token"` - Influx DB Auth token +- `"tags"` - Which elements should work as a tag for the time series. ###### JSON / Conf File + ```.json "influx2": { "type": "influx2", @@ -952,6 +983,7 @@ Configuration options: ``` ###### Env Variables + ``` TYK_PMP_PUMPS_INFLUX_TYPE=influx2 TYK_PMP_PUMPS_INFLUX_META_ORGANIZATION=myorg @@ -965,8 +997,8 @@ TYK_PMP_PUMPS_INFLUX_META_FIELDS=request_time TYK_PMP_PUMPS_INFLUX_META_TAGS=path,response_code,api_key,api_version,api_name,api_id,raw_request,ip_address,org_id,oauth_id ``` - ## Syslog + Supported in Tyk Pump v1.0.0+ `"transport"` - Possible values are `udp, tcp, tls` in string form @@ -990,7 +1022,6 @@ When working with FluentD, you should provide a [FluentD Parser](https://docs.fl } ``` - ## Stdout `log_field_name` - Root name of the JSON object the analytics record is nested in @@ -998,6 +1029,7 @@ When working with FluentD, you should provide a [FluentD Parser](https://docs.fl `format` - Format of the analytics logs. Default is `text` if `json` is not explicitly specified. When JSON logging is used all pump logs to stdout will be JSON. ###### JSON / Conf File + ``` "stdout": { "type": "stdout", @@ -1009,6 +1041,7 @@ When working with FluentD, you should provide a [FluentD Parser](https://docs.fl ``` ###### Env Variables + ``` TYK_PMP_PUMPS_STDOUT_TYPE=stdout TYK_PMP_PUMPS_STDOUT_META_LOGFIELDNAME=tyk-analytics-record @@ -1016,9 +1049,10 @@ TYK_PMP_PUMPS_STDOUT_META_FORMAT=json ``` ## SQL Pump -*Supported in Tyk Pump v1.5.0+* -`type` - The supported and tested types are `sqlite` and `postgres`. +_Supported in Tyk Pump v1.5.0+_ + +`type` - The supported and tested types are `sqlite` and `postgres`. `connection_string` - Specifies the connection string to the database. For example, for `sqlite` it would usually work specifying the path/name of the database and for `postgres`, specifying the host, port, user, password and dbname. `log_level` - Specifies the SQL log verbosity. The possible values are: `info`,`error` and `warning`. By default, the value is `silent`, which means that it won't log any SQL query. `table_sharding` - Specifies if all the analytics records are going to be stored in one table or in multiple tables (one per day). By default, `false`. 
@@ -1026,6 +1060,7 @@ If `table_sharding` is `false`, all the records are going to be stored in `tyk_a `batch_size` - Specifies the amount of records that are going to be written each batch. Type int. By default, it writes 1000 records max per batch. ###### JSON / Conf File + ``` "sql": { "name": "sql", @@ -1038,6 +1073,7 @@ If `table_sharding` is `false`, all the records are going to be stored in `tyk_a ``` ###### Env Variables + ``` TYK_PMP_PUMPS_SQL_NAME=sql TYK_PMP_PUMPS_SQL_META_TYPE=postgres @@ -1045,20 +1081,21 @@ TYK_PMP_PUMPS_SQL_META_CONNECTIONSTRING="host=sql_host port=sql_port user=sql_us TYK_PMP_PUMPS_SQL_META_TABLESHARDING=false ``` - ## SQL Aggregate Pump -*Supported in Tyk Pump v1.5.0+* -`type` - The supported and tested types are `sqlite` and `postgres`. +_Supported in Tyk Pump v1.5.0+_ + +`type` - The supported and tested types are `sqlite` and `postgres`. `connection_string` - Specifies the connection string to the database. For example, for `sqlite` it would usually work specifying the path/name of the database and for `postgres`, specifying the host, port, user, password and dbname. `log_level` - Specifies the SQL log verbosity. The possible values are: `info`,`error` and `warning`. By default, the value is `silent`, which means that it won't log any SQL query. -`track_all_paths` - Specifies if it should store aggregated data for all the endpoints. By default, `false` which means that only store aggregated data for `tracked endpoints`. +`track_all_paths` - Specifies if it should store aggregated data for all the endpoints. By default, `false` which means that only store aggregated data for `tracked endpoints`. `ignore_tag_prefix_list` - Specifies prefixes of tags that should be ignored. `table_sharding` - Specifies if all the analytics records are going to be stored in one table or in multiple tables (one per day). By default, `false`. If `table_sharding` is `false`, all the records are going to be stored in `tyk_aggregated` table. Instead, if it's `true`, all the records of the day are going to be stored in `tyk_aggregated_YYYYMMDD` table, where `YYYYMMDD` is going to change depending on the date. `batch_size` - Specifies the amount of records that are going to be written each batch. Type int. By default, it writes 1000 records max per batch. ###### JSON / Conf File + ``` "sql_aggregate": { "name": "sql_aggregate", @@ -1071,17 +1108,19 @@ If `table_sharding` is `false`, all the records are going to be stored in `tyk_a ``` ###### Env Variables + ``` -TYK_PMP_PUMPS_SQLAGGREGATE_TYPE=sql_aggregate -TYK_PMP_PUMPS_SQLAGGREGATE_META_TYPE=postgres -TYK_PMP_PUMPS_SQLAGGREGATE_META_CONNECTIONSTRING=host=sql_host port=sql_port user=sql_usr dbname=dbname password=sql_pw -TYK_PMP_PUMPS_SQLAGGREGATE_META_TABLESHARDING=true +TYK_PMP_PUMPS_SQLAGGREGATE_TYPE=sql_aggregate +TYK_PMP_PUMPS_SQLAGGREGATE_META_TYPE=postgres +TYK_PMP_PUMPS_SQLAGGREGATE_META_CONNECTIONSTRING=host=sql_host port=sql_port user=sql_usr dbname=dbname password=sql_pw +TYK_PMP_PUMPS_SQLAGGREGATE_META_TABLESHARDING=true ``` ## Timestream Config #### Authentication & Prerequisite -We must authenticate ourselves by providing credentials to AWS. This pump uses the official AWS GO SDK, so instructions on how to authenticate can be found on [their documentation here](https://aws.github.io/aws-sdk-go-v2/docs/configuring-sdk/#specifying-credentials). + +We must authenticate ourselves by providing credentials to AWS. 
This pump uses the official AWS GO SDK, so instructions on how to authenticate can be found on [their documentation here](https://aws.github.io/aws-sdk-go-v2/docs/configuring-sdk/#specifying-credentials). #### Config Fields @@ -1093,17 +1132,18 @@ We must authenticate ourselves by providing credentials to AWS. This pump uses When you initialize a Timestream Pump, the SDK uses its default credential chain to find AWS credentials. This default credential chain looks for credentials in the following order: -- Environment variables. - - Static Credentials (`AWS_ACCESS_KEY_ID`, `AWS_SECRET_ACCESS_KEY`, `AWS_SESSION_TOKEN`) - - Web Identity Token (`AWS_WEB_IDENTITY_TOKEN_FILE`) +- Environment variables. + - Static Credentials (`AWS_ACCESS_KEY_ID`, `AWS_SECRET_ACCESS_KEY`, `AWS_SESSION_TOKEN`) + - Web Identity Token (`AWS_WEB_IDENTITY_TOKEN_FILE`) - Shared configuration files. - - SDK defaults to credentials file under `.aws` folder that is placed in the home folder on your computer. + - SDK defaults to credentials file under `.aws` folder that is placed in the home folder on your computer. - If your application uses an ECS task definition or RunTask API operation, IAM role for tasks. - If your application is running on an Amazon EC2 instance, IAM role for Amazon EC2. If no credentials are provided, Timestream Pump won't be able to connect. ###### JSON / Conf File + ```json "timestream": { "type": "timestream", @@ -1177,6 +1217,7 @@ If no credentials are provided, Timestream Pump won't be able to connect. ``` ###### Env Variables + ``` TYK_PMP_PUMPS_TIMESTREAM_TYPE=timestream TYK_PMP_PUMPS_TIMESTREAM_META_AWSREGION=us-east-1 @@ -1208,6 +1249,7 @@ TYK_PMP_PUMPS_TIMESTREAM_META_FIELDNAMEMAPPINGS_RATELIMIT_RESET=quota_renewal_ra Enable this Pump to have Tyk Pump create or modify a CSV file to track API Analytics. ###### JSON / Conf File + ``` "csv": { "type": "csv", @@ -1218,6 +1260,7 @@ Enable this Pump to have Tyk Pump create or modify a CSV file to track API Analy ``` ###### Env Variables + ``` TYK_PMP_PUMPS_CSV_TYPE=csv TYK_PMP_PUMPS_CSV_META_CSVDIR=./ @@ -1225,11 +1268,12 @@ TYK_PMP_PUMPS_CSV_META_CSVDIR=./ # Base Pump Configurations -The following configurations can be added to any Pump. Keep reading for an example. +The following configurations can be added to any Pump. Keep reading for an example. ### Filter Records You made add the following config field to each pump called `filters` and its structure is the following: + ```json "filters":{ "api_ids":[], @@ -1245,9 +1289,10 @@ The fields api_ids, org_ids and response_codes works as allow list (APIs and org The priority is always block list configurations over allow list. - Here we see how we can take a CSV Pump, and add a filters section to it: + ###### JSON / Conf file Example + ```json "csv": { "type": "csv", @@ -1261,15 +1306,16 @@ Here we see how we can take a CSV Pump, and add a filters section to it: ``` ###### Env variables + ```yaml -TYK_PMP_PUMPS_CSV_TYPE=csv +TYK_PMP_PUMPS_CSV_TYPE=csv TYK_PMP_PUMPS_CSV_META_CSVDIR=./bar TYK_PMP_PUMPS_CSV_FILTERS_APIIDS=123,789 ``` ### Timeouts -You can configure a different timeout for each pump with the configuration option `timeout`. Its default value is 0 seconds, which means that the pump will wait for the writing operation forever. +You can configure a different timeout for each pump with the configuration option `timeout`. Its default value is 0 seconds, which means that the pump will wait for the writing operation forever. In Mongo pumps, the default value is 10 seconds. 
If you want to disable the timeout, you can set the value to 0. Take into account that if you disable the timeout, the pump will wait for the writing operation forever, and it could block the pump execution. ```json @@ -1284,15 +1330,15 @@ In Mongo pumps, the default value is 10 seconds. If you want to disable the time ``` ###### Env variables + ```yaml TYK_PMP_PUMPS_MONGO_TYPE=mongo TYK_PMP_PUMPS_MONGO_TIMEOUT=5 -... ``` -In case that any pump doesn't have a configured timeout, and it takes more seconds to write than the value configured for the purge loop in the `purge_delay` config option, you will see the following warning message: `Pump PMP_NAME is taking more time than the value configured of purge_delay. You should try to set a timeout for this pump.`. +In case that any pump doesn't have a configured timeout, and it takes more seconds to write than the value configured for the purge loop in the `purge_delay` config option, you will see the following warning message: `Pump PMP_NAME is taking more time than the value configured of purge_delay. You should try to set a timeout for this pump.`. -In case that you have a configured timeout, but it still takes more seconds to write than the value configured for the purge loop in the `purge_delay` config option, you will see the following warning message: `Pump PMP_NAME is taking more time than the value configured of purge_delay. You should try lowering the timeout configured for this pump.`. +In case that you have a configured timeout, but it still takes more seconds to write than the value configured for the purge loop in the `purge_delay` config option, you will see the following warning message: `Pump PMP_NAME is taking more time than the value configured of purge_delay. You should try lowering the timeout configured for this pump.`. ### Omit Detailed Recording @@ -1302,6 +1348,7 @@ In case that you have a configured timeout, but it still takes more seconds to w `max_record_size` defines maximum size (in bytes) for Raw Request and Raw Response logs, this value defaults to 0. Is not set then tyk-pump will not trim any data and will store the full information. This can also be set at a pump level. For example: + ```json "csv": { "type": "csv", @@ -1328,7 +1375,10 @@ Fields must be written using JSON tags. For example: ``` ### Decode Raw Request & Raw Response + `raw_request_decoded` and `raw_response_decoded` decode from base64 the raw request and raw response fields before writing to Pump. This is useful if you want to search for specific values in the raw request/response. Both are disabled by default. +This setting is not available for Mongo and SQL pumps, since dashboard will decode the raw request/response. + ```json "csv": { "type": "csv", @@ -1361,6 +1411,7 @@ go test -v ./... ``` ## Demo Mode + You can run Tyk Pump in demo mode, which will generate fake analytics data and send it to the configured pumps. This is useful for testing and development. To enable demo mode, use the following flags: - `--demo=` - Enables demo mode and sets the organization ID to use for the demo data. **This is required to enable Demo Mode**. 
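As an editorial illustration only (not part of the patch above), a minimal way to start the demo mode described in that hunk might look like the sketch below; the organization ID is a placeholder and the binary name and default configuration lookup are assumptions:

```
./tyk-pump --demo="my-org-id"
```

With this flag set, Pump generates fake analytics records for that organization and writes them through whatever pumps are configured, so it is safest to point the configured pumps at a test datastore.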
diff --git a/pumps/graph_mongo.go b/pumps/graph_mongo.go index fd805c711..7b1ab42df 100644 --- a/pumps/graph_mongo.go +++ b/pumps/graph_mongo.go @@ -30,6 +30,18 @@ func (g *GraphMongoPump) GetName() string { return "MongoDB Graph Pump" } +func (g *GraphMongoPump) SetDecodingRequest(decoding bool) { + if decoding { + log.WithField("pump", g.GetName()).Warn("Decoding request is not supported for Graph Mongo pump") + } +} + +func (g *GraphMongoPump) SetDecodingResponse(decoding bool) { + if decoding { + log.WithField("pump", g.GetName()).Warn("Decoding response is not supported for Graph Mongo pump") + } +} + func (g *GraphMongoPump) Init(config interface{}) error { g.dbConf = &MongoConf{} g.log = log.WithField("prefix", mongoGraphPrefix) diff --git a/pumps/graph_mongo_test.go b/pumps/graph_mongo_test.go index 2b2ebe62c..30105ad54 100644 --- a/pumps/graph_mongo_test.go +++ b/pumps/graph_mongo_test.go @@ -365,3 +365,22 @@ func TestGraphMongoPump_Init(t *testing.T) { assert.Equal(t, 10*MiB, pump.dbConf.MaxInsertBatchSizeBytes) }) } + +func TestDecodeRequestAndDecodeResponseGraphMongo(t *testing.T) { + newPump := &GraphMongoPump{} + conf := defaultConf() + err := newPump.Init(conf) + assert.Nil(t, err) + + // checking if the default values are false + assert.False(t, newPump.GetDecodedRequest()) + assert.False(t, newPump.GetDecodedResponse()) + + // trying to set the values to true + newPump.SetDecodingRequest(true) + newPump.SetDecodingResponse(true) + + // checking if the values are still false as expected because this pump doesn't support decoding requests/responses + assert.False(t, newPump.GetDecodedRequest()) + assert.False(t, newPump.GetDecodedResponse()) +} diff --git a/pumps/mongo.go b/pumps/mongo.go index deb8105c4..af7738185 100644 --- a/pumps/mongo.go +++ b/pumps/mongo.go @@ -165,6 +165,18 @@ func (m *MongoPump) GetEnvPrefix() string { return m.dbConf.EnvPrefix } +func (m *MongoPump) SetDecodingRequest(decoding bool) { + if decoding { + log.WithField("pump", m.GetName()).Warn("Decoding request is not supported for Mongo pump") + } +} + +func (m *MongoPump) SetDecodingResponse(decoding bool) { + if decoding { + log.WithField("pump", m.GetName()).Warn("Decoding response is not supported for Mongo pump") + } +} + func (m *MongoPump) Init(config interface{}) error { m.dbConf = &MongoConf{} m.log = log.WithField("prefix", mongoPrefix) diff --git a/pumps/mongo_aggregate.go b/pumps/mongo_aggregate.go index 8d413cba8..b6bb89f2c 100644 --- a/pumps/mongo_aggregate.go +++ b/pumps/mongo_aggregate.go @@ -156,6 +156,18 @@ func (m *MongoAggregatePump) GetCollectionName(orgid string) (string, error) { return "z_tyk_analyticz_aggregate_" + orgid, nil } +func (m *MongoAggregatePump) SetDecodingRequest(decoding bool) { + if decoding { + log.WithField("pump", m.GetName()).Warn("Decoding request is not supported for Mongo Aggregate pump") + } +} + +func (m *MongoAggregatePump) SetDecodingResponse(decoding bool) { + if decoding { + log.WithField("pump", m.GetName()).Warn("Decoding response is not supported for Mongo Aggregate pump") + } +} + func (m *MongoAggregatePump) Init(config interface{}) error { m.dbConf = &MongoAggregateConf{} m.log = log.WithField("prefix", analytics.MongoAggregatePrefix) diff --git a/pumps/mongo_aggregate_test.go b/pumps/mongo_aggregate_test.go index a2571c229..1e50b74b6 100644 --- a/pumps/mongo_aggregate_test.go +++ b/pumps/mongo_aggregate_test.go @@ -483,3 +483,22 @@ func TestMongoAggregatePump_StoreAnalyticsPerMinute(t *testing.T) { // Checking if the aggregation time is set 
to 1. Doesn't matter if aggregation_time is equal to 45 or 1, the result should be always 1. assert.True(t, pmp1.dbConf.AggregationTime == 1) } + +func TestDecodeRequestAndDecodeResponseMongoAggregate(t *testing.T) { + newPump := &MongoAggregatePump{} + conf := defaultConf() + err := newPump.Init(conf) + assert.Nil(t, err) + + // checking if the default values are false + assert.False(t, newPump.GetDecodedRequest()) + assert.False(t, newPump.GetDecodedResponse()) + + // trying to set the values to true + newPump.SetDecodingRequest(true) + newPump.SetDecodingResponse(true) + + // checking if the values are still false as expected because this pump doesn't support decoding requests/responses + assert.False(t, newPump.GetDecodedRequest()) + assert.False(t, newPump.GetDecodedResponse()) +} diff --git a/pumps/mongo_selective.go b/pumps/mongo_selective.go index 3ef9f73be..aff49e246 100644 --- a/pumps/mongo_selective.go +++ b/pumps/mongo_selective.go @@ -59,6 +59,18 @@ func (m *MongoSelectivePump) GetCollectionName(orgid string) (string, error) { return "z_tyk_analyticz_" + orgid, nil } +func (m *MongoSelectivePump) SetDecodingRequest(decoding bool) { + if decoding { + log.WithField("pump", m.GetName()).Warn("Decoding request is not supported for Mongo Selective pump") + } +} + +func (m *MongoSelectivePump) SetDecodingResponse(decoding bool) { + if decoding { + log.WithField("pump", m.GetName()).Warn("Decoding response is not supported for Mongo Selective pump") + } +} + func (m *MongoSelectivePump) Init(config interface{}) error { m.dbConf = &MongoSelectiveConf{} m.log = log.WithField("prefix", mongoSelectivePrefix) diff --git a/pumps/mongo_selective_test.go b/pumps/mongo_selective_test.go index f5daecd0f..eee8ee41d 100644 --- a/pumps/mongo_selective_test.go +++ b/pumps/mongo_selective_test.go @@ -357,3 +357,22 @@ func TestWriteUptimeDataMongoSelective(t *testing.T) { }) } } + +func TestDecodeRequestAndDecodeResponseMongoSelective(t *testing.T) { + newPump := &MongoSelectivePump{} + conf := defaultConf() + err := newPump.Init(conf) + assert.Nil(t, err) + + // checking if the default values are false + assert.False(t, newPump.GetDecodedRequest()) + assert.False(t, newPump.GetDecodedResponse()) + + // trying to set the values to true + newPump.SetDecodingRequest(true) + newPump.SetDecodingResponse(true) + + // checking if the values are still false as expected because this pump doesn't support decoding requests/responses + assert.False(t, newPump.GetDecodedRequest()) + assert.False(t, newPump.GetDecodedResponse()) +} diff --git a/pumps/mongo_test.go b/pumps/mongo_test.go index 25ed6cecb..186f6a208 100644 --- a/pumps/mongo_test.go +++ b/pumps/mongo_test.go @@ -536,3 +536,22 @@ func TestWriteUptimeData(t *testing.T) { }) } } + +func TestDecodeRequestAndDecodeResponseMongo(t *testing.T) { + newPump := &MongoPump{} + conf := defaultConf() + err := newPump.Init(conf) + assert.Nil(t, err) + + // checking if the default values are false + assert.False(t, newPump.GetDecodedRequest()) + assert.False(t, newPump.GetDecodedResponse()) + + // trying to set the values to true + newPump.SetDecodingRequest(true) + newPump.SetDecodingResponse(true) + + // checking if the values are still false as expected because this pump doesn't support decoding requests/responses + assert.False(t, newPump.GetDecodedRequest()) + assert.False(t, newPump.GetDecodedResponse()) +} diff --git a/pumps/sql.go b/pumps/sql.go index 164eb0d71..8e2782ec9 100644 --- a/pumps/sql.go +++ b/pumps/sql.go @@ -101,9 +101,11 @@ func Dialect(cfg 
*SQLConf) (gorm.Dialector, error) { } } -var SQLPrefix = "SQL-pump" -var SQLDefaultENV = PUMPS_ENV_PREFIX + "_SQL" + PUMPS_ENV_META_PREFIX -var SQLDefaultQueryBatchSize = 1000 +var ( + SQLPrefix = "SQL-pump" + SQLDefaultENV = PUMPS_ENV_PREFIX + "_SQL" + PUMPS_ENV_META_PREFIX + SQLDefaultQueryBatchSize = 1000 +) func (c *SQLPump) New() Pump { newPump := SQLPump{} @@ -118,6 +120,18 @@ func (c *SQLPump) GetEnvPrefix() string { return c.SQLConf.EnvPrefix } +func (c *SQLPump) SetDecodingRequest(decoding bool) { + if decoding { + log.WithField("pump", c.GetName()).Warn("Decoding request is not supported for SQL pump") + } +} + +func (c *SQLPump) SetDecodingResponse(decoding bool) { + if decoding { + log.WithField("pump", c.GetName()).Warn("Decoding response is not supported for SQL pump") + } +} + func (c *SQLPump) Init(conf interface{}) error { c.SQLConf = &SQLConf{} if c.IsUptime { @@ -158,7 +172,6 @@ func (c *SQLPump) Init(conf interface{}) error { UseJSONTags: true, Logger: gorm_logger.Default.LogMode(logLevel), }) - if err != nil { c.log.Error(err) return err @@ -195,18 +208,18 @@ func (c *SQLPump) WriteData(ctx context.Context, data []interface{}) error { startIndex := 0 endIndex := dataLen - //We iterate dataLen +1 times since we're writing the data after the date change on sharding_table:true + // We iterate dataLen +1 times since we're writing the data after the date change on sharding_table:true for i := 0; i <= dataLen; i++ { if c.SQLConf.TableSharding { recDate := typedData[startIndex].TimeStamp.Format("20060102") var nextRecDate string - //if we're on i == dataLen iteration, it means that we're out of index range. We're going to use the last record date. + // if we're on i == dataLen iteration, it means that we're out of index range. We're going to use the last record date. if i == dataLen { nextRecDate = typedData[dataLen-1].TimeStamp.Format("20060102") } else { nextRecDate = typedData[i].TimeStamp.Format("20060102") - //if both dates are equal, we shouldn't write in the table yet. + // if both dates are equal, we shouldn't write in the table yet. if recDate == nextRecDate { continue } @@ -273,13 +286,13 @@ func (c *SQLPump) WriteUptimeData(data []interface{}) { if c.SQLConf.TableSharding { recDate := typedData[startIndex].TimeStamp.Format("20060102") var nextRecDate string - //if we're on i == dataLen iteration, it means that we're out of index range. We're going to use the last record date. + // if we're on i == dataLen iteration, it means that we're out of index range. We're going to use the last record date. if i == dataLen { nextRecDate = typedData[dataLen-1].TimeStamp.Format("20060102") } else { nextRecDate = typedData[i].TimeStamp.Format("20060102") - //if both dates are equal, we shouldn't write in the table yet. + // if both dates are equal, we shouldn't write in the table yet. 
if recDate == nextRecDate { continue } @@ -338,5 +351,4 @@ func (c *SQLPump) WriteUptimeData(data []interface{}) { } c.log.Debug("Purged ", len(data), " records...") - } diff --git a/pumps/sql_aggregate.go b/pumps/sql_aggregate.go index 56001a76d..5ed7797b9 100644 --- a/pumps/sql_aggregate.go +++ b/pumps/sql_aggregate.go @@ -39,8 +39,10 @@ type SQLAggregatePump struct { dialect gorm.Dialector } -var SQLAggregatePumpPrefix = "SQL-aggregate-pump" -var SQLAggregateDefaultENV = PUMPS_ENV_PREFIX + "_SQLAGGREGATE" + PUMPS_ENV_META_PREFIX +var ( + SQLAggregatePumpPrefix = "SQL-aggregate-pump" + SQLAggregateDefaultENV = PUMPS_ENV_PREFIX + "_SQLAGGREGATE" + PUMPS_ENV_META_PREFIX +) func (c *SQLAggregatePump) New() Pump { newPump := SQLAggregatePump{} @@ -55,6 +57,18 @@ func (c *SQLAggregatePump) GetEnvPrefix() string { return c.SQLConf.EnvPrefix } +func (c *SQLAggregatePump) SetDecodingRequest(decoding bool) { + if decoding { + log.WithField("pump", c.GetName()).Warn("Decoding request is not supported for SQL Aggregate pump") + } +} + +func (c *SQLAggregatePump) SetDecodingResponse(decoding bool) { + if decoding { + log.WithField("pump", c.GetName()).Warn("Decoding response is not supported for SQL Aggregate pump") + } +} + func (c *SQLAggregatePump) Init(conf interface{}) error { c.SQLConf = &SQLAggregatePumpConf{} c.log = log.WithField("prefix", SQLAggregatePumpPrefix) @@ -88,7 +102,6 @@ func (c *SQLAggregatePump) Init(conf interface{}) error { UseJSONTags: true, Logger: gorm_logger.Default.LogMode(logLevel), }) - if err != nil { c.log.Error(err) return err @@ -126,13 +139,13 @@ func (c *SQLAggregatePump) WriteData(ctx context.Context, data []interface{}) er if c.SQLConf.TableSharding { recDate := data[startIndex].(analytics.AnalyticsRecord).TimeStamp.Format("20060102") var nextRecDate string - //if we're on i == dataLen iteration, it means that we're out of index range. We're going to use the last record date. + // if we're on i == dataLen iteration, it means that we're out of index range. We're going to use the last record date. if i == dataLen { nextRecDate = data[dataLen-1].(analytics.AnalyticsRecord).TimeStamp.Format("20060102") } else { nextRecDate = data[i].(analytics.AnalyticsRecord).TimeStamp.Format("20060102") - //if both dates are equal, we shouldn't write in the table yet. + // if both dates are equal, we shouldn't write in the table yet. 
if recDate == nextRecDate { continue } @@ -202,7 +215,7 @@ func (c *SQLAggregatePump) DoAggregatedWriting(ctx context.Context, table, orgID ends = len(recs) } - //we use excluded as temp table since it's supported by our SQL storages https://www.postgresql.org/docs/9.5/sql-insert.html#SQL-ON-CONFLICT https://www.sqlite.org/lang_UPSERT.html + // we use excluded as temp table since it's supported by our SQL storages https://www.postgresql.org/docs/9.5/sql-insert.html#SQL-ON-CONFLICT https://www.sqlite.org/lang_UPSERT.html tx := c.db.WithContext(ctx).Clauses(clause.OnConflict{ Columns: []clause.Column{{Name: "id"}}, DoUpdates: clause.Assignments(analytics.OnConflictAssignments(table, "excluded")), @@ -214,5 +227,4 @@ func (c *SQLAggregatePump) DoAggregatedWriting(ctx context.Context, table, orgID } return nil - } diff --git a/pumps/sql_aggregate_test.go b/pumps/sql_aggregate_test.go index 8eef12a1f..a3da85333 100644 --- a/pumps/sql_aggregate_test.go +++ b/pumps/sql_aggregate_test.go @@ -27,12 +27,12 @@ func TestSQLAggregateInit(t *testing.T) { assert.NotNil(t, pmp.db) assert.Equal(t, "sqlite", pmp.db.Dialector.Name()) - //Checking with invalid type + // Checking with invalid type cfg["type"] = "invalid" pmp2 := SQLAggregatePump{} invalidDialectErr := pmp2.Init(cfg) assert.NotNil(t, invalidDialectErr) - //TODO check how to test postgres connection - it's going to requiere to have some postgres up + // TODO check how to test postgres connection - it's going to requiere to have some postgres up } @@ -144,7 +144,7 @@ func TestSQLAggregateWriteData(t *testing.T) { Record: analytics.AnalyticsRecord{OrgID: "1", APIID: "api1", TimeStamp: nowPlus1}, RecordsAmountToWrite: 3, RowsLen: 4, - HitsPerHour: 3, //since we're going to write in a new hour, it should mean a different aggregation. + HitsPerHour: 3, // since we're going to write in a new hour, it should mean a different aggregation. 
}, } @@ -161,7 +161,7 @@ func TestSQLAggregateWriteData(t *testing.T) { pmp.WriteData(context.TODO(), keys) table := analytics.AggregateSQLTable - //check if the table exists + // check if the table exists assert.Equal(t, true, pmp.db.Migrator().HasTable(table)) dbRecords := []analytics.SQLAnalyticsRecordAggregate{} @@ -169,17 +169,16 @@ func TestSQLAggregateWriteData(t *testing.T) { t.Fatal("Error getting analytics records from SQL") } - //check amount of rows in the table + // check amount of rows in the table assert.Equal(t, tests[testName].RowsLen, len(dbRecords)) - //iterate over the records and check total of hits + // iterate over the records and check total of hits for _, dbRecord := range dbRecords { if dbRecord.TimeStamp == tests[testName].Record.TimeStamp.Unix() && dbRecord.DimensionValue == "total" { assert.Equal(t, tests[testName].HitsPerHour, dbRecord.Hits) break } } - }) } } @@ -305,3 +304,25 @@ func TestSQLAggregateWriteDataValues(t *testing.T) { }) } } + +func TestDecodeRequestAndDecodeResponseSQLAggregate(t *testing.T) { + newPump := &SQLAggregatePump{} + cfg := make(map[string]interface{}) + cfg["type"] = "sqlite" + cfg["connection_string"] = "" + cfg["table_sharding"] = true + err := newPump.Init(cfg) + assert.Nil(t, err) + + // checking if the default values are false + assert.False(t, newPump.GetDecodedRequest()) + assert.False(t, newPump.GetDecodedResponse()) + + // trying to set the values to true + newPump.SetDecodingRequest(true) + newPump.SetDecodingResponse(true) + + // checking if the values are still false as expected because this pump doesn't support decoding requests/responses + assert.False(t, newPump.GetDecodedRequest()) + assert.False(t, newPump.GetDecodedResponse()) +} diff --git a/pumps/sql_test.go b/pumps/sql_test.go index cd29b6c5f..2f31764f5 100644 --- a/pumps/sql_test.go +++ b/pumps/sql_test.go @@ -27,12 +27,11 @@ func TestSQLInit(t *testing.T) { assert.NotNil(t, pmp.db) assert.Equal(t, "sqlite", pmp.db.Dialector.Name()) - //Checking with invalid type + // Checking with invalid type cfg["type"] = "invalid" pmp2 := SQLPump{} invalidDialectErr := pmp2.Init(cfg) assert.NotNil(t, invalidDialectErr) - } func TestSQLWriteData(t *testing.T) { @@ -69,7 +68,6 @@ func TestSQLWriteData(t *testing.T) { err := pmp.db.Table(table).Find(&dbRecords).Error assert.Nil(t, err) assert.Equal(t, 3, len(dbRecords)) - }) t.Run("table_content", func(t *testing.T) { @@ -85,7 +83,6 @@ func TestSQLWriteData(t *testing.T) { assert.Equal(t, keys[i].(analytics.AnalyticsRecord).OrgID, dbRecords[i].OrgID) } }) - } func TestSQLWriteDataSharded(t *testing.T) { @@ -149,7 +146,6 @@ func TestSQLWriteDataSharded(t *testing.T) { assert.Equal(t, data.RowsLen, len(dbRecords)) }) } - } func TestSQLWriteUptimeData(t *testing.T) { @@ -197,7 +193,7 @@ func TestSQLWriteUptimeData(t *testing.T) { Record: analytics.UptimeReportData{OrgID: "1", URL: "url1", TimeStamp: nowPlus1}, RecordsAmountToWrite: 3, RowsLen: 4, - HitsPerHour: 3, //since we're going to write in a new hour, it should mean a different aggregation. + HitsPerHour: 3, // since we're going to write in a new hour, it should mean a different aggregation. 
}, } @@ -207,7 +203,7 @@ func TestSQLWriteUptimeData(t *testing.T) { t.Run(testName, func(t *testing.T) { pmp := pmp keys := []interface{}{} - //encode the records in the way uptime pump consume them + // encode the records in the way uptime pump consume them for i := 0; i < tests[testName].RecordsAmountToWrite; i++ { encoded, _ := msgpack.Marshal(tests[testName].Record) keys = append(keys, string(encoded)) @@ -215,7 +211,7 @@ func TestSQLWriteUptimeData(t *testing.T) { pmp.WriteUptimeData(keys) table := analytics.UptimeSQLTable - //check if the table exists + // check if the table exists assert.Equal(t, true, pmp.db.Migrator().HasTable(table)) dbRecords := []analytics.UptimeReportAggregateSQL{} @@ -223,17 +219,16 @@ func TestSQLWriteUptimeData(t *testing.T) { t.Fatal("Error getting analytics records from SQL") } - //check amount of rows in the table + // check amount of rows in the table assert.Equal(t, tests[testName].RowsLen, len(dbRecords)) - //iterate over the records and check total of hits + // iterate over the records and check total of hits for _, dbRecord := range dbRecords { if dbRecord.TimeStamp == tests[testName].Record.TimeStamp.Unix() && dbRecord.DimensionValue == "total" { assert.Equal(t, tests[testName].HitsPerHour, dbRecord.Hits) break } } - }) } } @@ -298,7 +293,6 @@ func TestSQLWriteUptimeDataSharded(t *testing.T) { assert.Equal(t, data.RowsLen, len(dbRecords)) }) } - } func TestSQLWriteUptimeDataAggregations(t *testing.T) { @@ -346,5 +340,26 @@ func TestSQLWriteUptimeDataAggregations(t *testing.T) { assert.Equal(t, 2, dbRecords[0].ErrorTotal) assert.Equal(t, 14.0, dbRecords[0].RequestTime) assert.Equal(t, 70.0, dbRecords[0].TotalRequestTime) +} + +func TestDecodeRequestAndDecodeResponseSQL(t *testing.T) { + newPump := &SQLPump{} + cfg := make(map[string]interface{}) + cfg["type"] = "sqlite" + cfg["connection_string"] = "" + cfg["table_sharding"] = true + err := newPump.Init(cfg) + assert.Nil(t, err) + + // checking if the default values are false + assert.False(t, newPump.GetDecodedRequest()) + assert.False(t, newPump.GetDecodedResponse()) + + // trying to set the values to true + newPump.SetDecodingRequest(true) + newPump.SetDecodingResponse(true) + // checking if the values are still false as expected because this pump doesn't support decoding requests/responses + assert.False(t, newPump.GetDecodedRequest()) + assert.False(t, newPump.GetDecodedResponse()) } From 1fd5ba92499b53377930ed2eece518f0c430bb6d Mon Sep 17 00:00:00 2001 From: Matias <83959431+mativm02@users.noreply.github.com> Date: Thu, 27 Apr 2023 12:45:53 -0300 Subject: [PATCH 063/102] [TT-8793] Fixing Pump 1.8 bugs (#616) * writing records to the right collection * writing to the right collection when pump is mixed * updating to storage 1.0.2 --- go.mod | 2 +- go.sum | 2 ++ pumps/mongo_aggregate.go | 1 + pumps/mongo_selective.go | 7 ++++--- pumps/mongo_selective_test.go | 9 +++++++-- 5 files changed, 15 insertions(+), 6 deletions(-) diff --git a/go.mod b/go.mod index 9ea852f5b..2bd4a64a8 100644 --- a/go.mod +++ b/go.mod @@ -8,7 +8,7 @@ require ( github.com/TykTechnologies/gorpc v0.0.0-20210624160652-fe65bda0ccb9 github.com/TykTechnologies/graphql-go-tools v1.6.2-0.20230320143102-7a16078ce517 github.com/TykTechnologies/murmur3 v0.0.0-20230310161213-aad17efd5632 - github.com/TykTechnologies/storage v1.0.1 + github.com/TykTechnologies/storage v1.0.2 github.com/aws/aws-sdk-go-v2 v1.16.14 github.com/aws/aws-sdk-go-v2/config v1.9.0 github.com/aws/aws-sdk-go-v2/service/timestreamwrite v1.9.0 diff --git a/go.sum 
b/go.sum index 76afc6b58..dece24842 100644 --- a/go.sum +++ b/go.sum @@ -60,6 +60,8 @@ github.com/TykTechnologies/storage v0.0.0-20230410152719-1e659ae95643 h1:vFml52J github.com/TykTechnologies/storage v0.0.0-20230410152719-1e659ae95643/go.mod h1:+0S3KuNlLGBTMTSFREuZFm315zzXjuuCO4QSAPy+d3M= github.com/TykTechnologies/storage v1.0.1 h1:YI85mHMofwIrF0QgrRYqKKd2xuPO/lxGe+SR4w2kKkg= github.com/TykTechnologies/storage v1.0.1/go.mod h1:+0S3KuNlLGBTMTSFREuZFm315zzXjuuCO4QSAPy+d3M= +github.com/TykTechnologies/storage v1.0.2 h1:bWaLbpDmsjxT/8QVl9Fpuz1w1orqa/COvs1Gih+fvYE= +github.com/TykTechnologies/storage v1.0.2/go.mod h1:+0S3KuNlLGBTMTSFREuZFm315zzXjuuCO4QSAPy+d3M= github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM= github.com/agnivade/levenshtein v1.1.1 h1:QY8M92nrzkmr798gCo3kmMyqXFzdQVpxLlGPRBij0P8= github.com/agnivade/levenshtein v1.1.1/go.mod h1:veldBMzWxcCG2ZvUTKD2kJNRdCk5hVbJomOvKkmgYbo= diff --git a/pumps/mongo_aggregate.go b/pumps/mongo_aggregate.go index b6bb89f2c..5afe48de3 100644 --- a/pumps/mongo_aggregate.go +++ b/pumps/mongo_aggregate.go @@ -356,6 +356,7 @@ func (m *MongoAggregatePump) DoAggregatedWriting(ctx context.Context, filteredDa withTimeUpdate := analytics.AnalyticsRecordAggregate{ OrgID: filteredData.OrgID, + Mixed: mixed, } err = m.store.Upsert(ctx, &withTimeUpdate, query, avgUpdateDoc) diff --git a/pumps/mongo_selective.go b/pumps/mongo_selective.go index aff49e246..d34c1bbd8 100644 --- a/pumps/mongo_selective.go +++ b/pumps/mongo_selective.go @@ -217,12 +217,11 @@ func (m *MongoSelectivePump) WriteData(ctx context.Context, data []interface{}) } for colName, filteredData := range analyticsPerOrg { - for _, dataSet := range m.AccumulateSet(filteredData) { + for _, dataSet := range m.AccumulateSet(filteredData, colName) { indexCreateErr := m.ensureIndexes(colName) if indexCreateErr != nil { m.log.WithField("collection", colName).Error(indexCreateErr) } - err := m.store.Insert(context.Background(), dataSet...) if err != nil { m.log.WithField("collection", colName).Error("Problem inserting to mongo collection: ", err) @@ -236,7 +235,7 @@ func (m *MongoSelectivePump) WriteData(ctx context.Context, data []interface{}) } // AccumulateSet organizes analytics data into a set of chunks based on their size. 
-func (m *MongoSelectivePump) AccumulateSet(data []interface{}) [][]model.DBObject { +func (m *MongoSelectivePump) AccumulateSet(data []interface{}, collectionName string) [][]model.DBObject { accumulatorTotal := 0 returnArray := make([][]model.DBObject, 0) thisResultSet := make([]model.DBObject, 0) @@ -248,6 +247,8 @@ func (m *MongoSelectivePump) AccumulateSet(data []interface{}) [][]model.DBObjec continue } + thisItem.CollectionName = collectionName + sizeBytes := m.getItemSizeBytes(thisItem) accumulatorTotal, thisResultSet, returnArray = m.accumulate(thisResultSet, returnArray, thisItem, sizeBytes, accumulatorTotal, i == (len(data)-1)) } diff --git a/pumps/mongo_selective_test.go b/pumps/mongo_selective_test.go index eee8ee41d..a37cc747f 100644 --- a/pumps/mongo_selective_test.go +++ b/pumps/mongo_selective_test.go @@ -35,7 +35,7 @@ func TestMongoSelectivePump_AccumulateSet(t *testing.T) { expectedGraphRecordSkips++ } } - set := mPump.AccumulateSet(data) + set := mPump.AccumulateSet(data, analytics.SQLTable) // SQLTable = "tyk_analytics" recordsCount := 0 for _, setEntry := range set { @@ -271,7 +271,12 @@ func TestWriteData(t *testing.T) { assert.NoError(t, err) var results []analytics.AnalyticsRecord - err = mPump.store.Query(context.Background(), &analytics.AnalyticsRecord{}, &results, nil) + colName, colErr := mPump.GetCollectionName("abc") + assert.NoError(t, colErr) + d := dummyObject{ + tableName: colName, + } + err = mPump.store.Query(context.Background(), &d, &results, nil) assert.NoError(t, err) assert.Len(t, results, 3) assert.Equal(t, "123", results[0].APIID) From 2622025facdf0f6c77b4e4534335e4389360751d Mon Sep 17 00:00:00 2001 From: Matias <83959431+mativm02@users.noreply.github.com> Date: Fri, 28 Apr 2023 11:25:52 -0300 Subject: [PATCH 064/102] Changing conf 'driver_type' to 'driver' (#618) * changing conf 'driver_type' to 'driver' * updating readme --- README.md | 20 +++++++++++++++++++- pumps/influx2.go | 1 - pumps/mongo.go | 5 +++-- pumps/mongo_aggregate.go | 3 ++- pumps/mongo_aggregate_test.go | 10 ++++++++++ pumps/mongo_selective.go | 4 ++-- pumps/mongo_selective_test.go | 10 ++++++++++ pumps/mongo_test.go | 10 ++++++++++ 8 files changed, 56 insertions(+), 7 deletions(-) diff --git a/README.md b/README.md index c32316330..b9f5e2f69 100644 --- a/README.md +++ b/README.md @@ -136,7 +136,6 @@ TYK_PMP_DONTPURGEUPTIMEDATA=true - ## Base Configuration Fields Explained ### analytics_storage_config @@ -470,6 +469,7 @@ A sample config looks like this: "mongo_url": "mongodb://localhost:27017/tyk_analytics" } } + } } ``` @@ -1359,6 +1359,24 @@ This can also be set at a pump level. For example: } ``` +### Driver Type + +The `driver` setting defines the driver type to use for Mongo Pumps. It can be one of the following values: + +- `mongo-go` (default): Uses the official MongoDB driver. This driver supports Mongo versions greater or equal to v4. You can get more information about this driver [here](https://github.com/mongodb/mongo-go-driver). +- `mgo`: Uses the mgo driver. This driver is deprecated. This driver supports Mongo versions lower or equal to v4. You can get more information about this driver [here](https://github.com/go-mgo/mgo) + +```json +"mongo": { + "type": "mongo", + "meta": { + "mongo_url": "mongodb://tyk-mongo:27017/tyk_analytics", + "collection_name": "tyk_analytics", + "driver": "mongo-go" + } +} +``` + ### Ignore Fields `ignore_fields` defines a list of analytics fields that will be ignored when writing to the pump. 
This can be used to avoid writing sensitive information to the Database, or data that you don't really need to have. diff --git a/pumps/influx2.go b/pumps/influx2.go index 3576d54dd..0b7a61e54 100644 --- a/pumps/influx2.go +++ b/pumps/influx2.go @@ -155,7 +155,6 @@ func (i *Influx2Pump) connect() influxdb2.Client { func (i *Influx2Pump) createBucket(ctx context.Context, orgID *string) (*domain.Bucket, error) { bucketConf := i.dbConf.NewBucketConfig - fmt.Println(bucketConf) rp := "" schemaType := domain.SchemaTypeImplicit retentionRules := make(domain.RetentionRules, len(bucketConf.RetentionRules)) diff --git a/pumps/mongo.go b/pumps/mongo.go index af7738185..cfe96bc45 100644 --- a/pumps/mongo.go +++ b/pumps/mongo.go @@ -80,7 +80,7 @@ type BaseMongoConf struct { // Set the consistency mode for the session, it defaults to `Strong`. The valid values are: strong, monotonic, eventual. MongoSessionConsistency string `json:"mongo_session_consistency" mapstructure:"mongo_session_consistency"` // MongoDriverType is the type of the driver (library) to use. The valid values are: "mongo-go" and "mgo". - MongoDriverType string `json:"driver_type" mapstructure:"driver_type"` + MongoDriverType string `json:"driver" mapstructure:"driver"` } type dbObject struct { tableName string @@ -347,7 +347,8 @@ func (m *MongoPump) ensureIndexes(collectionName string) error { func (m *MongoPump) connect() { if m.dbConf.MongoDriverType == "" { - m.dbConf.MongoDriverType = persistent.Mgo + // Default to mongo-go + m.dbConf.MongoDriverType = persistent.OfficialMongo } store, err := persistent.NewPersistentStorage(&persistent.ClientOpts{ diff --git a/pumps/mongo_aggregate.go b/pumps/mongo_aggregate.go index 5afe48de3..8284416b2 100644 --- a/pumps/mongo_aggregate.go +++ b/pumps/mongo_aggregate.go @@ -216,7 +216,8 @@ func (m *MongoAggregatePump) connect() { var err error if m.dbConf.MongoDriverType == "" { - m.dbConf.MongoDriverType = "mgo" + // Default to mongo-go + m.dbConf.MongoDriverType = persistent.OfficialMongo } m.store, err = persistent.NewPersistentStorage(&persistent.ClientOpts{ diff --git a/pumps/mongo_aggregate_test.go b/pumps/mongo_aggregate_test.go index 1e50b74b6..5a8a726a6 100644 --- a/pumps/mongo_aggregate_test.go +++ b/pumps/mongo_aggregate_test.go @@ -7,6 +7,7 @@ import ( "testing" "time" + "github.com/TykTechnologies/storage/persistent" "github.com/TykTechnologies/storage/persistent/model" "github.com/TykTechnologies/tyk-pump/analytics" "github.com/TykTechnologies/tyk-pump/analytics/demo" @@ -502,3 +503,12 @@ func TestDecodeRequestAndDecodeResponseMongoAggregate(t *testing.T) { assert.False(t, newPump.GetDecodedRequest()) assert.False(t, newPump.GetDecodedResponse()) } + +func TestDefaultDriverAggregate(t *testing.T) { + newPump := &MongoAggregatePump{} + defaultConf := defaultConf() + defaultConf.MongoDriverType = "" + err := newPump.Init(defaultConf) + assert.Nil(t, err) + assert.Equal(t, persistent.OfficialMongo, newPump.dbConf.MongoDriverType) +} diff --git a/pumps/mongo_selective.go b/pumps/mongo_selective.go index d34c1bbd8..c8c35ea29 100644 --- a/pumps/mongo_selective.go +++ b/pumps/mongo_selective.go @@ -114,8 +114,8 @@ func (m *MongoSelectivePump) connect() { var err error if m.dbConf.MongoDriverType == "" { - // Default to mgo - m.dbConf.MongoDriverType = persistent.Mgo + // Default to mongo-go + m.dbConf.MongoDriverType = persistent.OfficialMongo } m.store, err = persistent.NewPersistentStorage(&persistent.ClientOpts{ diff --git a/pumps/mongo_selective_test.go b/pumps/mongo_selective_test.go 
index a37cc747f..70f560435 100644 --- a/pumps/mongo_selective_test.go +++ b/pumps/mongo_selective_test.go @@ -6,6 +6,7 @@ import ( "testing" "time" + "github.com/TykTechnologies/storage/persistent" "github.com/TykTechnologies/storage/persistent/model" "github.com/TykTechnologies/tyk-pump/analytics" "github.com/stretchr/testify/assert" @@ -381,3 +382,12 @@ func TestDecodeRequestAndDecodeResponseMongoSelective(t *testing.T) { assert.False(t, newPump.GetDecodedRequest()) assert.False(t, newPump.GetDecodedResponse()) } + +func TestDefaultDriverSelective(t *testing.T) { + newPump := &MongoSelectivePump{} + defaultConf := defaultConf() + defaultConf.MongoDriverType = "" + err := newPump.Init(defaultConf) + assert.Nil(t, err) + assert.Equal(t, persistent.OfficialMongo, newPump.dbConf.MongoDriverType) +} diff --git a/pumps/mongo_test.go b/pumps/mongo_test.go index 186f6a208..f888b28a9 100644 --- a/pumps/mongo_test.go +++ b/pumps/mongo_test.go @@ -10,6 +10,7 @@ import ( "github.com/stretchr/testify/assert" "gopkg.in/vmihailenco/msgpack.v2" + "github.com/TykTechnologies/storage/persistent" "github.com/TykTechnologies/storage/persistent/model" "github.com/TykTechnologies/tyk-pump/analytics" ) @@ -555,3 +556,12 @@ func TestDecodeRequestAndDecodeResponseMongo(t *testing.T) { assert.False(t, newPump.GetDecodedRequest()) assert.False(t, newPump.GetDecodedResponse()) } + +func TestDefaultDriver(t *testing.T) { + newPump := &MongoPump{} + defaultConf := defaultConf() + defaultConf.MongoDriverType = "" + err := newPump.Init(defaultConf) + assert.Nil(t, err) + assert.Equal(t, persistent.OfficialMongo, newPump.dbConf.MongoDriverType) +} From bab7af35b5d2432ab0611cc58b32152291d2b581 Mon Sep 17 00:00:00 2001 From: Matias <83959431+mativm02@users.noreply.github.com> Date: Thu, 4 May 2023 11:36:47 -0300 Subject: [PATCH 065/102] updating MongoDriverType comment (#622) --- pumps/mongo.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/pumps/mongo.go b/pumps/mongo.go index cfe96bc45..42a726650 100644 --- a/pumps/mongo.go +++ b/pumps/mongo.go @@ -79,7 +79,8 @@ type BaseMongoConf struct { OmitIndexCreation bool `json:"omit_index_creation" mapstructure:"omit_index_creation"` // Set the consistency mode for the session, it defaults to `Strong`. The valid values are: strong, monotonic, eventual. MongoSessionConsistency string `json:"mongo_session_consistency" mapstructure:"mongo_session_consistency"` - // MongoDriverType is the type of the driver (library) to use. The valid values are: "mongo-go" and "mgo". + // MongoDriverType is the type of the driver (library) to use. The valid values are: “mongo-go” and “mgo”. Default to “mongo-go”. + // You can find more details about the drivers [here](https://github.com/TykTechnologies/tyk-pump#driver-type). 
MongoDriverType string `json:"driver" mapstructure:"driver"` } type dbObject struct { From 6269449d37ac477e400aefbcf7802a0c2df4e812 Mon Sep 17 00:00:00 2001 From: Matias <83959431+mativm02@users.noreply.github.com> Date: Thu, 4 May 2023 12:40:16 -0300 Subject: [PATCH 066/102] Removing blank line from MongoDriverType comment (#624) * updating MongoDriverType comment * removing blank space --- pumps/mongo.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/pumps/mongo.go b/pumps/mongo.go index 42a726650..c43b27e49 100644 --- a/pumps/mongo.go +++ b/pumps/mongo.go @@ -79,8 +79,7 @@ type BaseMongoConf struct { OmitIndexCreation bool `json:"omit_index_creation" mapstructure:"omit_index_creation"` // Set the consistency mode for the session, it defaults to `Strong`. The valid values are: strong, monotonic, eventual. MongoSessionConsistency string `json:"mongo_session_consistency" mapstructure:"mongo_session_consistency"` - // MongoDriverType is the type of the driver (library) to use. The valid values are: “mongo-go” and “mgo”. Default to “mongo-go”. - // You can find more details about the drivers [here](https://github.com/TykTechnologies/tyk-pump#driver-type). + // MongoDriverType is the type of the driver (library) to use. The valid values are: “mongo-go” and “mgo”. Default to “mongo-go”. You can find more details about the drivers [here](https://github.com/TykTechnologies/tyk-pump#driver-type). MongoDriverType string `json:"driver" mapstructure:"driver"` } type dbObject struct { From b580100b3264f4277802567ecc7daa4bf70233df Mon Sep 17 00:00:00 2001 From: Matias <83959431+mativm02@users.noreply.github.com> Date: Fri, 5 May 2023 12:45:53 -0300 Subject: [PATCH 067/102] updating MongoDriverType comment (#626) --- pumps/mongo.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/pumps/mongo.go b/pumps/mongo.go index c43b27e49..1906cd6e8 100644 --- a/pumps/mongo.go +++ b/pumps/mongo.go @@ -79,7 +79,8 @@ type BaseMongoConf struct { OmitIndexCreation bool `json:"omit_index_creation" mapstructure:"omit_index_creation"` // Set the consistency mode for the session, it defaults to `Strong`. The valid values are: strong, monotonic, eventual. MongoSessionConsistency string `json:"mongo_session_consistency" mapstructure:"mongo_session_consistency"` - // MongoDriverType is the type of the driver (library) to use. The valid values are: “mongo-go” and “mgo”. Default to “mongo-go”. You can find more details about the drivers [here](https://github.com/TykTechnologies/tyk-pump#driver-type). + // MongoDriverType is the type of the driver (library) to use. The valid values are: “mongo-go” and “mgo”. + // Default to “mongo-go”. Check out this guide to [learn about different MongoDB drivers Tyk Pump support](https://github.com/TykTechnologies/tyk-pump#driver-type). 
MongoDriverType string `json:"driver" mapstructure:"driver"` } type dbObject struct { From 84331f1d54a37720ac1bd88bd586f47c75be0cf2 Mon Sep 17 00:00:00 2001 From: Asutosh <1187055+asutosh@users.noreply.github.com> Date: Wed, 10 May 2023 12:14:19 +0530 Subject: [PATCH 068/102] [TD-1648]: Updating sync automation - tyk-pump:master (#620) * Updating sync automation * Update sync-automation --------- Co-authored-by: Gromit --- .github/workflows/sync-automation.yml | 41 ++++++++++++++++----------- 1 file changed, 25 insertions(+), 16 deletions(-) diff --git a/.github/workflows/sync-automation.yml b/.github/workflows/sync-automation.yml index 8e355f72d..79489dfce 100644 --- a/.github/workflows/sync-automation.yml +++ b/.github/workflows/sync-automation.yml @@ -1,5 +1,5 @@ # Generated by: gromit policy -# Generated on: Thu Dec 15 07:12:58 UTC 2022 +# Generated on: Wed May 10 06:06:05 UTC 2023 name: Sync automation @@ -15,6 +15,7 @@ on: jobs: sync: runs-on: ubuntu-latest + container: tykio/gromit:v1.5 strategy: fail-fast: false @@ -31,6 +32,7 @@ jobs: - name: sync ${{matrix.branch}} from master id: sync-changes run: | + git config --global --add safe.directory "$GITHUB_WORKSPACE" git config --local user.email "policy@gromit" git config --local user.name "Bender" git fetch origin ${{ matrix.branch }} @@ -43,11 +45,20 @@ jobs: git restore --source master -- .github/workflows/release.yml rm -f .github/dependabot.yml git restore --source master -- .github/dependabot.yml - git add -A && git commit -m "[CI]: Syncing CI changes to ${{ matrix.branch }}" - git push origin $prbranch + git add -A echo "prbranch=$prbranch" >> $GITHUB_OUTPUT echo "::debug::Commit ${{ github.sha }} syncd for ${{matrix.branch}}" - exit 0 + echo "::debug::Generate releng bundle using latest gromit templates for ${{matrix.branch}}" + # get the tarball for the latest gromit master - it will have the latest version of + # the templates. + mkdir /tmp/gromit-src && gh api -H "Accept: application/vnd.github+json" /repos/TykTechnologies/gromit/tarball/master | tar --strip-components 1 -C /tmp/gromit-src -xzf - + gromit bundle gen --branch ${{ matrix.branch }} --bundle /tmp/gromit-src/policy/templates/releng --repo tyk-pump . + git add -A && git commit -m "[CI]: Syncing ${{ matrix.branch }}" + gromit git push . tyk-pump $prbranch --branch $prbranch + env: + GITHUB_TOKEN: ${{ secrets.ORG_GH_TOKEN }} + # Needed for gh CLI + GH_TOKEN: ${{ secrets.ORG_GH_TOKEN }} - name: Create PR from the branch. id: create-pr @@ -56,22 +67,20 @@ jobs: github-token: ${{ secrets.ORG_GH_TOKEN }} result-encoding: string script: | + const msg = `${{ github.event.head_commit.message }}`; + const title = `[CI] Sync ${context.repo.repo}:${{ matrix.branch }} ` + msg.split("\n")[0]; const response = await github.rest.pulls.create({ - title: '[CI] Sync automation: Syncing commits from master', + title: title, owner: context.repo.owner, repo: context.repo.repo, head: '${{ steps.sync-changes.outputs.prbranch }}', base: '${{ matrix.branch }}', - body: ` PR auto generated by the CI Sync automation. - Picks the CI changes from the commit ${{ github.sha }} - on master. - Please make any additional changes required before - merging. `}); - github.rest.pulls.requestReviewers({ - owner: context.repo.owner, - repo: context.repo.repo, - pull_number: response.data.number, - team_reviewers: ['TykTechnologies/devops'] }); + body: `PR auto generated by the CI Sync automation. + Picks the CI changes from the commit ${{ github.sha }} on master. 
+ Please make any additional changes required before merging. + ### _Commit message body of the picked commit:_ + ${msg} + `}); github.rest.issues.addLabels({ owner: context.repo.owner, repo: context.repo.repo, @@ -85,6 +94,6 @@ jobs: run: | gh pr merge $PULL --auto --squash --subject "[CI] Sync automation: Syncing commits from master" --body "Picking CI changes from the commit $COMMIT" env: - GITHUB_TOKEN: ${{ secrets.ORG_GH_TOKEN }} + GH_TOKEN: ${{ secrets.ORG_GH_TOKEN }} PULL: ${{ steps.create-pr.outputs.result }} COMMIT: ${{ github.sha }} From 3bd84f7ce89c60fb9ea3ad63541b38d8bc87951d Mon Sep 17 00:00:00 2001 From: Asutosh <1187055+asutosh@users.noreply.github.com> Date: Wed, 10 May 2023 12:26:50 +0530 Subject: [PATCH 069/102] [TD-1648]: Updating releng tyk-pump:master (#621) * Updating releng --------- Co-authored-by: Gromit --- .github/dependabot.yml | 2 +- .github/workflows/del-env.yml | 2 +- .github/workflows/release.yml | 19 +++++++++++++------ ci/Dockerfile.std | 2 +- ci/aws/byol.pkr.hcl | 2 +- ci/goreleaser/goreleaser-el7.yml | 2 +- ci/goreleaser/goreleaser.yml | 5 ++--- ci/install/before_install.sh | 2 +- ci/install/post_install.sh | 2 +- ci/install/post_remove.sh | 2 +- ci/install/post_trans.sh | 2 +- ci/terraform/outputs.tf | 2 +- 12 files changed, 25 insertions(+), 19 deletions(-) diff --git a/.github/dependabot.yml b/.github/dependabot.yml index f820c41c3..b9d2b23e3 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -1,6 +1,6 @@ # Generated by: gromit policy -# Generated on: Wed Dec 14 23:43:05 UTC 2022 +# Generated on: Wed May 10 06:24:08 UTC 2023 version: 2 updates: diff --git a/.github/workflows/del-env.yml b/.github/workflows/del-env.yml index e3f39f6f8..99549e193 100644 --- a/.github/workflows/del-env.yml +++ b/.github/workflows/del-env.yml @@ -1,6 +1,6 @@ # Generated by: gromit policy -# Generated on: Wed Dec 14 23:43:05 UTC 2022 +# Generated on: Wed May 10 06:24:08 UTC 2023 name: Retiring dev env diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 2f37c9cab..9c87fdf67 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -1,6 +1,6 @@ # Generated by: gromit policy -# Generated on: Thu Mar 23 17:02:26 UTC 2023 +# Generated on: Wed May 10 06:24:08 UTC 2023 # Distribution channels covered by this workflow @@ -38,9 +38,9 @@ jobs: golang_cross: [ 1.16 ] include: - golang_cross: 1.16 - goreleaser: 'ci/goreleaser/goreleaser.yml' - rpmvers: 'el/7 el/8' - debvers: 'ubuntu/xenial ubuntu/bionic debian/jessie ubuntu/focal debian/buster debian/bullseye' + goreleaser: 'ci/goreleaser/goreleaser.yml' + rpmvers: ' el/7 el/8 el/9' + debvers: 'ubuntu/xenial ubuntu/bionic ubuntu/focal ubuntu/jammy debian/jessie debian/buster debian/bullseye' outputs: tag: ${{ steps.targets.outputs.tag }} @@ -223,6 +223,15 @@ jobs: --title 'Failed to add new build for CD' \ --title-link 'https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}' + sbom: + needs: ci + uses: TykTechnologies/github-actions/.github/workflows/sbom.yaml@main + secrets: + TF_API_TOKEN: ${{ secrets.TF_API_TOKEN }} + DEPDASH_URL: ${{ secrets.DEPDASH_URL }} + DEPDASH_KEY: ${{ secrets.DEPDASH_KEY }} + ORG_GH_TOKEN: ${{ secrets.ORG_GH_TOKEN }} + upgrade-deb: if: startsWith(github.ref, 'refs/tags') && !github.event.pull_request.draft runs-on: ubuntu-latest @@ -234,7 +243,6 @@ jobs: - amd64 - arm64 distro: - - ubuntu:xenial - ubuntu:bionic - ubuntu:focal - debian:bullseye @@ -278,7 +286,6 @@ jobs: fail-fast: false matrix: distro: - - ubi7/ubi - 
ubi8/ubi steps: diff --git a/ci/Dockerfile.std b/ci/Dockerfile.std index 08c448f1a..462deed07 100644 --- a/ci/Dockerfile.std +++ b/ci/Dockerfile.std @@ -1,6 +1,6 @@ # Generated by: gromit policy -# Generated on: Thu Mar 23 17:02:26 UTC 2023 +# Generated on: Wed May 10 06:24:08 UTC 2023 FROM debian:bullseye-slim ARG TARGETARCH diff --git a/ci/aws/byol.pkr.hcl b/ci/aws/byol.pkr.hcl index b08dd7972..b8b5509f9 100644 --- a/ci/aws/byol.pkr.hcl +++ b/ci/aws/byol.pkr.hcl @@ -1,6 +1,6 @@ # Generated by: gromit policy -# Generated on: Wed Dec 14 23:43:05 UTC 2022 +# Generated on: Wed May 10 06:24:08 UTC 2023 packer { required_plugins { diff --git a/ci/goreleaser/goreleaser-el7.yml b/ci/goreleaser/goreleaser-el7.yml index a1783a3e1..eb244204b 100644 --- a/ci/goreleaser/goreleaser-el7.yml +++ b/ci/goreleaser/goreleaser-el7.yml @@ -1,5 +1,5 @@ # Generated by: gromit policy -# Generated on: Wed Dec 14 23:43:05 UTC 2022 +# Generated on: Wed May 10 06:24:08 UTC 2023 # Check the documentation at http://goreleaser.com # This project needs CGO_ENABLED=1 and the cross-compiler toolchains for diff --git a/ci/goreleaser/goreleaser.yml b/ci/goreleaser/goreleaser.yml index 007c251c2..c2338c84e 100644 --- a/ci/goreleaser/goreleaser.yml +++ b/ci/goreleaser/goreleaser.yml @@ -1,5 +1,5 @@ # Generated by: gromit policy -# Generated on: Wed Dec 14 23:43:05 UTC 2022 +# Generated on: Wed May 10 06:24:08 UTC 2023 # Check the documentation at http://goreleaser.com # This project needs CGO_ENABLED=1 and the cross-compiler toolchains for @@ -77,8 +77,7 @@ nfpms: vendor: "Tyk Technologies Ltd" homepage: "https://tyk.io" maintainer: "Tyk " - description: Archive analytics for the Tyk API Gateway - + description: Tyk Analytics Pump to move analytics data from Redis to any supported back end (multiple back ends can be written to at once). package_name: tyk-pump builds: - std diff --git a/ci/install/before_install.sh b/ci/install/before_install.sh index 8dc19105a..bc736fbbb 100755 --- a/ci/install/before_install.sh +++ b/ci/install/before_install.sh @@ -1,7 +1,7 @@ #!/bin/bash # Generated by: gromit policy -# Generated on: Wed Dec 14 23:43:05 UTC 2022 +# Generated on: Wed May 10 06:24:08 UTC 2023 echo "Creating user and group..." GROUPNAME="tyk" diff --git a/ci/install/post_install.sh b/ci/install/post_install.sh index da8368ee9..ce08381ca 100755 --- a/ci/install/post_install.sh +++ b/ci/install/post_install.sh @@ -2,7 +2,7 @@ # Generated by: gromit policy -# Generated on: Wed Dec 14 23:43:05 UTC 2022 +# Generated on: Wed May 10 06:24:08 UTC 2023 # If "True" the install directory ownership will be changed to "tyk:tyk" change_ownership="True" diff --git a/ci/install/post_remove.sh b/ci/install/post_remove.sh index e6d7ca3f4..e8745fabe 100755 --- a/ci/install/post_remove.sh +++ b/ci/install/post_remove.sh @@ -1,7 +1,7 @@ #!/bin/sh # Generated by: gromit policy -# Generated on: Wed Dec 14 23:43:05 UTC 2022 +# Generated on: Wed May 10 06:24:08 UTC 2023 cleanRemove() { diff --git a/ci/install/post_trans.sh b/ci/install/post_trans.sh index 94235be3d..213b898a3 100644 --- a/ci/install/post_trans.sh +++ b/ci/install/post_trans.sh @@ -1,7 +1,7 @@ #!/bin/sh # Generated by: gromit policy -# Generated on: Wed Dec 14 23:43:05 UTC 2022 +# Generated on: Wed May 10 06:24:08 UTC 2023 if command -V systemctl >/dev/null 2>&1; then if [ ! 
-f /lib/systemd/system/tyk-pump.service ]; then diff --git a/ci/terraform/outputs.tf b/ci/terraform/outputs.tf index b4bfe7e6f..c57a49df4 100644 --- a/ci/terraform/outputs.tf +++ b/ci/terraform/outputs.tf @@ -1,6 +1,6 @@ # Generated by: gromit policy -# Generated on: Wed Dec 14 23:43:05 UTC 2022 +# Generated on: Wed May 10 06:24:08 UTC 2023 From 3cecf3c56df143290919c70648afe1c3442c17f3 Mon Sep 17 00:00:00 2001 From: Asutosh <1187055+asutosh@users.noreply.github.com> Date: Wed, 10 May 2023 17:22:52 +0530 Subject: [PATCH 070/102] Update sync automation workflow (#631) --- .github/workflows/sync-automation.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/sync-automation.yml b/.github/workflows/sync-automation.yml index 79489dfce..12626ad6f 100644 --- a/.github/workflows/sync-automation.yml +++ b/.github/workflows/sync-automation.yml @@ -1,5 +1,5 @@ # Generated by: gromit policy -# Generated on: Wed May 10 06:06:05 UTC 2023 +# Generated on: Wed May 10 11:44:13 UTC 2023 name: Sync automation @@ -67,7 +67,7 @@ jobs: github-token: ${{ secrets.ORG_GH_TOKEN }} result-encoding: string script: | - const msg = `${{ github.event.head_commit.message }}`; + const msg = ${{ toJSON(github.event.head_commit.message) }}; const title = `[CI] Sync ${context.repo.repo}:${{ matrix.branch }} ` + msg.split("\n")[0]; const response = await github.rest.pulls.create({ title: title, From ccd57f959d714ff8c4517286467b006bb4c134ed Mon Sep 17 00:00:00 2001 From: Sredny M Date: Wed, 10 May 2023 19:44:32 -0400 Subject: [PATCH 071/102] TT-8905 changed the default mongo driver to mgo (#632) * changed the default mongo driver to mgo * change log msg to point mgo --- pumps/mongo.go | 4 ++-- pumps/mongo_aggregate.go | 4 ++-- pumps/mongo_aggregate_test.go | 2 +- pumps/mongo_selective.go | 4 ++-- pumps/mongo_selective_test.go | 2 +- pumps/mongo_test.go | 2 +- 6 files changed, 9 insertions(+), 9 deletions(-) diff --git a/pumps/mongo.go b/pumps/mongo.go index 1906cd6e8..dbb3296ac 100644 --- a/pumps/mongo.go +++ b/pumps/mongo.go @@ -348,8 +348,8 @@ func (m *MongoPump) ensureIndexes(collectionName string) error { func (m *MongoPump) connect() { if m.dbConf.MongoDriverType == "" { - // Default to mongo-go - m.dbConf.MongoDriverType = persistent.OfficialMongo + // Default to mgo + m.dbConf.MongoDriverType = persistent.Mgo } store, err := persistent.NewPersistentStorage(&persistent.ClientOpts{ diff --git a/pumps/mongo_aggregate.go b/pumps/mongo_aggregate.go index 8284416b2..271e1d610 100644 --- a/pumps/mongo_aggregate.go +++ b/pumps/mongo_aggregate.go @@ -216,8 +216,8 @@ func (m *MongoAggregatePump) connect() { var err error if m.dbConf.MongoDriverType == "" { - // Default to mongo-go - m.dbConf.MongoDriverType = persistent.OfficialMongo + // Default to mgo + m.dbConf.MongoDriverType = persistent.Mgo } m.store, err = persistent.NewPersistentStorage(&persistent.ClientOpts{ diff --git a/pumps/mongo_aggregate_test.go b/pumps/mongo_aggregate_test.go index 5a8a726a6..78d8dbd20 100644 --- a/pumps/mongo_aggregate_test.go +++ b/pumps/mongo_aggregate_test.go @@ -510,5 +510,5 @@ func TestDefaultDriverAggregate(t *testing.T) { defaultConf.MongoDriverType = "" err := newPump.Init(defaultConf) assert.Nil(t, err) - assert.Equal(t, persistent.OfficialMongo, newPump.dbConf.MongoDriverType) + assert.Equal(t, persistent.Mgo, newPump.dbConf.MongoDriverType) } diff --git a/pumps/mongo_selective.go b/pumps/mongo_selective.go index c8c35ea29..d34c1bbd8 100644 --- a/pumps/mongo_selective.go +++ 
b/pumps/mongo_selective.go @@ -114,8 +114,8 @@ func (m *MongoSelectivePump) connect() { var err error if m.dbConf.MongoDriverType == "" { - // Default to mongo-go - m.dbConf.MongoDriverType = persistent.OfficialMongo + // Default to mgo + m.dbConf.MongoDriverType = persistent.Mgo } m.store, err = persistent.NewPersistentStorage(&persistent.ClientOpts{ diff --git a/pumps/mongo_selective_test.go b/pumps/mongo_selective_test.go index 70f560435..5580c1209 100644 --- a/pumps/mongo_selective_test.go +++ b/pumps/mongo_selective_test.go @@ -389,5 +389,5 @@ func TestDefaultDriverSelective(t *testing.T) { defaultConf.MongoDriverType = "" err := newPump.Init(defaultConf) assert.Nil(t, err) - assert.Equal(t, persistent.OfficialMongo, newPump.dbConf.MongoDriverType) + assert.Equal(t, persistent.Mgo, newPump.dbConf.MongoDriverType) } diff --git a/pumps/mongo_test.go b/pumps/mongo_test.go index f888b28a9..a4b59cae6 100644 --- a/pumps/mongo_test.go +++ b/pumps/mongo_test.go @@ -563,5 +563,5 @@ func TestDefaultDriver(t *testing.T) { defaultConf.MongoDriverType = "" err := newPump.Init(defaultConf) assert.Nil(t, err) - assert.Equal(t, persistent.OfficialMongo, newPump.dbConf.MongoDriverType) + assert.Equal(t, persistent.Mgo, newPump.dbConf.MongoDriverType) } From 7f274767b56bca6fff2c9a66b9fe71c6897f04b9 Mon Sep 17 00:00:00 2001 From: Matias <83959431+mativm02@users.noreply.github.com> Date: Thu, 11 May 2023 10:13:47 -0300 Subject: [PATCH 072/102] Adding Direct Connection (#634) --- README.md | 17 +++++++++++++++++ go.mod | 2 +- go.sum | 2 ++ pumps/mongo.go | 7 +++++++ pumps/mongo_aggregate.go | 1 + pumps/mongo_selective.go | 1 + 6 files changed, 29 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index b9f5e2f69..b6460c59f 100644 --- a/README.md +++ b/README.md @@ -1377,6 +1377,23 @@ The `driver` setting defines the driver type to use for Mongo Pumps. It can be o } ``` +### Direct Connection + +`MongoDirectConnection` informs whether to establish connections only with the specified seed servers or to obtain information for the whole cluster and establish connections with further servers too. If true, the client will only connect to the host provided in the ConnectionString and won't attempt to discover other hosts in the cluster. Useful when network restrictions prevent discovery, such as with SSH tunneling. Default is `false`. +You can get more info from the [official MongoDB driver docs](https://www.mongodb.com/docs/drivers/go/current/fundamentals/connection/#direct-connection). + +```json +"mongo": { + "type": "mongo", + "meta": { + "mongo_url": "mongodb://tyk-mongo:27017/tyk_analytics", + "collection_name": "tyk_analytics", + "driver": "mongo-go", + "mongo_direct_connection": true + } +} +``` + ### Ignore Fields `ignore_fields` defines a list of analytics fields that will be ignored when writing to the pump. This can be used to avoid writing sensitive information to the Database, or data that you don't really need to have. 
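For illustration, a minimal sketch of how `ignore_fields` might be set on a Mongo pump, reusing the connection values from the example above — the field names shown (`api_key`, `raw_request`, `raw_response`) are assumed examples of analytics-record field tags rather than a verified list:

```json
"mongo": {
  "type": "mongo",
  "meta": {
    "mongo_url": "mongodb://tyk-mongo:27017/tyk_analytics",
    "collection_name": "tyk_analytics",
    "ignore_fields": ["api_key", "raw_request", "raw_response"]
  }
}
```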
diff --git a/go.mod b/go.mod index 2bd4a64a8..b3dca20be 100644 --- a/go.mod +++ b/go.mod @@ -8,7 +8,7 @@ require ( github.com/TykTechnologies/gorpc v0.0.0-20210624160652-fe65bda0ccb9 github.com/TykTechnologies/graphql-go-tools v1.6.2-0.20230320143102-7a16078ce517 github.com/TykTechnologies/murmur3 v0.0.0-20230310161213-aad17efd5632 - github.com/TykTechnologies/storage v1.0.2 + github.com/TykTechnologies/storage v1.0.3 github.com/aws/aws-sdk-go-v2 v1.16.14 github.com/aws/aws-sdk-go-v2/config v1.9.0 github.com/aws/aws-sdk-go-v2/service/timestreamwrite v1.9.0 diff --git a/go.sum b/go.sum index dece24842..47a0c1fe7 100644 --- a/go.sum +++ b/go.sum @@ -62,6 +62,8 @@ github.com/TykTechnologies/storage v1.0.1 h1:YI85mHMofwIrF0QgrRYqKKd2xuPO/lxGe+S github.com/TykTechnologies/storage v1.0.1/go.mod h1:+0S3KuNlLGBTMTSFREuZFm315zzXjuuCO4QSAPy+d3M= github.com/TykTechnologies/storage v1.0.2 h1:bWaLbpDmsjxT/8QVl9Fpuz1w1orqa/COvs1Gih+fvYE= github.com/TykTechnologies/storage v1.0.2/go.mod h1:+0S3KuNlLGBTMTSFREuZFm315zzXjuuCO4QSAPy+d3M= +github.com/TykTechnologies/storage v1.0.3 h1:UMEKTA9C8ir636qUS3aAfbBH/KKegH+KmC81LYLnSUg= +github.com/TykTechnologies/storage v1.0.3/go.mod h1:+0S3KuNlLGBTMTSFREuZFm315zzXjuuCO4QSAPy+d3M= github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM= github.com/agnivade/levenshtein v1.1.1 h1:QY8M92nrzkmr798gCo3kmMyqXFzdQVpxLlGPRBij0P8= github.com/agnivade/levenshtein v1.1.1/go.mod h1:veldBMzWxcCG2ZvUTKD2kJNRdCk5hVbJomOvKkmgYbo= diff --git a/pumps/mongo.go b/pumps/mongo.go index dbb3296ac..3a084e2bb 100644 --- a/pumps/mongo.go +++ b/pumps/mongo.go @@ -82,6 +82,12 @@ type BaseMongoConf struct { // MongoDriverType is the type of the driver (library) to use. The valid values are: “mongo-go” and “mgo”. // Default to “mongo-go”. Check out this guide to [learn about different MongoDB drivers Tyk Pump support](https://github.com/TykTechnologies/tyk-pump#driver-type). MongoDriverType string `json:"driver" mapstructure:"driver"` + // MongoDirectConnection informs whether to establish connections only with the specified seed servers, + // or to obtain information for the whole cluster and establish connections with further servers too. + // If true, the client will only connect to the host provided in the ConnectionString + // and won't attempt to discover other hosts in the cluster. Useful when network restrictions + // prevent discovery, such as with SSH tunneling. Default is false. 
+ MongoDirectConnection bool `json:"mongo_direct_connection" mapstructure:"mongo_direct_connection"` } type dbObject struct { tableName string @@ -362,6 +368,7 @@ func (m *MongoPump) connect() { SessionConsistency: m.dbConf.MongoSessionConsistency, ConnectionTimeout: m.timeout, Type: m.dbConf.MongoDriverType, + DirectConnection: m.dbConf.MongoDirectConnection, }) if err != nil { m.log.Fatal("Failed to connect: ", err) diff --git a/pumps/mongo_aggregate.go b/pumps/mongo_aggregate.go index 271e1d610..97362447f 100644 --- a/pumps/mongo_aggregate.go +++ b/pumps/mongo_aggregate.go @@ -230,6 +230,7 @@ func (m *MongoAggregatePump) connect() { SessionConsistency: m.dbConf.MongoSessionConsistency, ConnectionTimeout: m.timeout, Type: m.dbConf.MongoDriverType, + DirectConnection: m.dbConf.MongoDirectConnection, }) if err != nil { m.log.Fatal("Failed to connect to mongo: ", err) diff --git a/pumps/mongo_selective.go b/pumps/mongo_selective.go index d34c1bbd8..1f0d78650 100644 --- a/pumps/mongo_selective.go +++ b/pumps/mongo_selective.go @@ -128,6 +128,7 @@ func (m *MongoSelectivePump) connect() { SessionConsistency: m.dbConf.MongoSessionConsistency, ConnectionTimeout: m.timeout, Type: m.dbConf.MongoDriverType, + DirectConnection: m.dbConf.MongoDirectConnection, }) if err != nil { m.log.Fatal("Failed to connect to mongo: ", err) From 8173c7e0208dc9acd0cc19b264226700a4124b82 Mon Sep 17 00:00:00 2001 From: Tomas Buchaillot Date: Mon, 15 May 2023 12:53:17 -0300 Subject: [PATCH 073/102] TT-876 Fix/prometheus cardinality (#636) * Fix: Tyk prometheus pump separator issue * linting --------- Co-authored-by: Kaushal Prajapati Co-authored-by: mativm02 --- pumps/prometheus.go | 36 +++++++++----- pumps/prometheus_test.go | 101 +++++++++++++++++++++++++-------------- 2 files changed, 91 insertions(+), 46 deletions(-) diff --git a/pumps/prometheus.go b/pumps/prometheus.go index 3e140b7e8..b1b64adec 100644 --- a/pumps/prometheus.go +++ b/pumps/prometheus.go @@ -72,7 +72,7 @@ type PrometheusMetric struct { counterVec *prometheus.CounterVec histogramVec *prometheus.HistogramVec - counterMap map[string]uint64 + counterMap map[string]counterStruct histogramMap map[string]histogramCounter aggregatedObservations bool @@ -82,6 +82,12 @@ type PrometheusMetric struct { type histogramCounter struct { totalRequestTime uint64 hits uint64 + labelValues []string +} + +type counterStruct struct { + labelValues []string + count uint64 } const ( @@ -304,7 +310,7 @@ func (pm *PrometheusMetric) InitVec() error { }, pm.Labels, ) - pm.counterMap = make(map[string]uint64) + pm.counterMap = make(map[string]counterStruct) prometheus.MustRegister(pm.counterVec) case histogramType: bkts := pm.Buckets @@ -387,7 +393,17 @@ func (pm *PrometheusMetric) GetLabelsValues(decoded analytics.AnalyticsRecord) [ func (pm *PrometheusMetric) Inc(values ...string) error { switch pm.MetricType { case counterType: - pm.counterMap[strings.Join(values, "--")] += 1 + // We use a map to store the counter values, the unique key is the label values joined by "--" + key := strings.Join(values, "--") + if currentValue, ok := pm.counterMap[key]; ok { + currentValue.count++ + pm.counterMap[key] = currentValue + } else { + pm.counterMap[key] = counterStruct{ + count: 1, + labelValues: values, + } + } default: return errors.New("invalid metric type:" + pm.MetricType) } @@ -412,6 +428,7 @@ func (pm *PrometheusMetric) Observe(requestTime int64, values ...string) error { pm.histogramMap[key] = histogramCounter{ hits: 1, totalRequestTime: uint64(requestTime), + labelValues: 
labelValues, } } } else { @@ -431,17 +448,14 @@ func (pm *PrometheusMetric) Observe(requestTime int64, values ...string) error { func (pm *PrometheusMetric) Expose() error { switch pm.MetricType { case counterType: - for key, value := range pm.counterMap { - - labelsValue := strings.Split(key, "--") - pm.counterVec.WithLabelValues(labelsValue...).Add(float64(value)) + for _, value := range pm.counterMap { + pm.counterVec.WithLabelValues(value.labelValues...).Add(float64(value.count)) } - pm.counterMap = make(map[string]uint64) + pm.counterMap = make(map[string]counterStruct) case histogramType: if pm.aggregatedObservations { - for key, value := range pm.histogramMap { - labelsValue := strings.Split(key, "--") - pm.histogramVec.WithLabelValues(labelsValue...).Observe(value.getAverageRequestTime()) + for _, value := range pm.histogramMap { + pm.histogramVec.WithLabelValues(value.labelValues...).Observe(value.getAverageRequestTime()) } pm.histogramMap = make(map[string]histogramCounter) } diff --git a/pumps/prometheus_test.go b/pumps/prometheus_test.go index 4d89dc2a0..043fa5f55 100644 --- a/pumps/prometheus_test.go +++ b/pumps/prometheus_test.go @@ -351,7 +351,7 @@ func TestPrometheusCounterMetric(t *testing.T) { metric *PrometheusMetric analyticsRecords []analytics.AnalyticsRecord expectedMetricsAmount int - expectedMetrics map[string]uint64 + expectedMetrics map[string]counterStruct }{ { testName: "HTTP status codes per API", @@ -368,10 +368,10 @@ func TestPrometheusCounterMetric(t *testing.T) { {APIID: "api_2", ResponseCode: 404}, }, expectedMetricsAmount: 3, - expectedMetrics: map[string]uint64{ - "500--api_1": 2, - "200--api_1": 1, - "404--api_2": 1, + expectedMetrics: map[string]counterStruct{ + "500--api_1": {labelValues: []string{"500", "api_1"}, count: 2}, + "200--api_1": {labelValues: []string{"200", "api_1"}, count: 1}, + "404--api_2": {labelValues: []string{"404", "api_2"}, count: 1}, }, }, { @@ -391,12 +391,12 @@ func TestPrometheusCounterMetric(t *testing.T) { {APIID: "api_2", ResponseCode: 200, Path: "test", Method: "GET"}, }, expectedMetricsAmount: 5, - expectedMetrics: map[string]uint64{ - "500--api_1--test--GET": 2, - "500--api_1--test--POST": 1, - "500--api_1--test2--GET": 1, - "200--api_1--test2--GET": 1, - "200--api_2--test--GET": 1, + expectedMetrics: map[string]counterStruct{ + "500--api_1--test--GET": {labelValues: []string{"500", "api_1", "test", "GET"}, count: 2}, + "500--api_1--test--POST": {labelValues: []string{"500", "api_1", "test", "POST"}, count: 1}, + "500--api_1--test2--GET": {labelValues: []string{"500", "api_1", "test2", "GET"}, count: 1}, + "200--api_1--test2--GET": {labelValues: []string{"200", "api_1", "test2", "GET"}, count: 1}, + "200--api_2--test--GET": {labelValues: []string{"200", "api_2", "test", "GET"}, count: 1}, }, }, { @@ -415,10 +415,10 @@ func TestPrometheusCounterMetric(t *testing.T) { {APIID: "api_2", ResponseCode: 200, APIKey: "key1"}, }, expectedMetricsAmount: 3, - expectedMetrics: map[string]uint64{ - "500--key1": 2, - "200--key1": 2, - "500--key2": 1, + expectedMetrics: map[string]counterStruct{ + "500--key1": {labelValues: []string{"500", "key1"}, count: 2}, + "200--key1": {labelValues: []string{"200", "key1"}, count: 2}, + "500--key2": {labelValues: []string{"500", "key2"}, count: 1}, }, }, { @@ -437,10 +437,10 @@ func TestPrometheusCounterMetric(t *testing.T) { {APIID: "api_2", ResponseCode: 200, OauthID: "oauth1"}, }, expectedMetricsAmount: 3, - expectedMetrics: map[string]uint64{ - "500--oauth1": 2, - "200--oauth1": 2, - 
"500--oauth2": 1, + expectedMetrics: map[string]counterStruct{ + "500--oauth1": {labelValues: []string{"500", "oauth1"}, count: 2}, + "200--oauth1": {labelValues: []string{"200", "oauth1"}, count: 2}, + "500--oauth2": {labelValues: []string{"500", "oauth2"}, count: 1}, }, }, { @@ -459,11 +459,11 @@ func TestPrometheusCounterMetric(t *testing.T) { {APIID: "api_2", ResponseCode: 500, Alias: "alias1"}, }, expectedMetricsAmount: 4, - expectedMetrics: map[string]uint64{ - "500--api_1--alias1": 2, - "500--api_1--alias2": 1, - "200--api_1--alias1": 1, - "500--api_2--alias1": 1, + expectedMetrics: map[string]counterStruct{ + "500--api_1--alias1": {labelValues: []string{"500", "api_1", "alias1"}, count: 2}, + "500--api_1--alias2": {labelValues: []string{"500", "api_1", "alias2"}, count: 1}, + "200--api_1--alias1": {labelValues: []string{"200", "api_1", "alias1"}, count: 1}, + "500--api_2--alias1": {labelValues: []string{"500", "api_2", "alias1"}, count: 1}, }, }, } @@ -520,8 +520,16 @@ func TestPrometheusHistogramMetric(t *testing.T) { }, expectedMetricsAmount: 2, expectedMetrics: map[string]histogramCounter{ - "total--api_1": {hits: 3, totalRequestTime: 300}, - "total--api_2": {hits: 1, totalRequestTime: 323}, + "total--api_1": { + hits: 3, + totalRequestTime: 300, + labelValues: []string{"total", "api_1"}, + }, + "total--api_2": { + hits: 1, + totalRequestTime: 323, + labelValues: []string{"total", "api_2"}, + }, }, expectedAverages: map[string]float64{ "total--api_1": 100, @@ -565,19 +573,42 @@ func TestPrometheusHistogramMetric(t *testing.T) { {APIID: "api_2", Method: "GET", Path: "ping", RequestTime: 10}, {APIID: "api_2", Method: "GET", Path: "ping", RequestTime: 20}, {APIID: "api_2", Method: "GET", Path: "health", RequestTime: 400}, + {APIID: "api--3", Method: "GET", Path: "health", RequestTime: 300}, }, - expectedMetricsAmount: 4, + expectedMetricsAmount: 5, expectedMetrics: map[string]histogramCounter{ - "total--api_1--GET--test": {hits: 2, totalRequestTime: 200}, - "total--api_1--POST--test": {hits: 1, totalRequestTime: 200}, - "total--api_2--GET--ping": {hits: 2, totalRequestTime: 30}, - "total--api_2--GET--health": {hits: 1, totalRequestTime: 400}, + "total--api_1--GET--test": { + hits: 2, + totalRequestTime: 200, + labelValues: []string{"total", "api_1", "GET", "test"}, + }, + "total--api_1--POST--test": { + hits: 1, + totalRequestTime: 200, + labelValues: []string{"total", "api_1", "POST", "test"}, + }, + "total--api_2--GET--ping": { + hits: 2, + totalRequestTime: 30, + labelValues: []string{"total", "api_2", "GET", "ping"}, + }, + "total--api_2--GET--health": { + hits: 1, + totalRequestTime: 400, + labelValues: []string{"total", "api_2", "GET", "health"}, + }, + "total--api--3--GET--health": { + hits: 1, + totalRequestTime: 300, + labelValues: []string{"total", "api--3", "GET", "health"}, + }, }, expectedAverages: map[string]float64{ - "total--api_1--GET--test": 100, - "total--api_1--POST--test": 200, - "total--api_2--GET--ping": 15, - "total--api_2--GET--health": 400, + "total--api_1--GET--test": 100, + "total--api_1--POST--test": 200, + "total--api_2--GET--ping": 15, + "total--api_2--GET--health": 400, + "total--api--3--GET--health": 300, }, }, } From 0ed84b9d1a5e7c6ec8f844c7f059c8b1d9a7465a Mon Sep 17 00:00:00 2001 From: Kofo Okesola Date: Wed, 17 May 2023 08:13:49 +0100 Subject: [PATCH 074/102] fix: include grap records in mongo pump (#638) --- pumps/mongo.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/pumps/mongo.go b/pumps/mongo.go index 
3a084e2bb..613d3fd44 100644 --- a/pumps/mongo.go +++ b/pumps/mongo.go @@ -385,7 +385,7 @@ func (m *MongoPump) WriteData(ctx context.Context, data []interface{}) error { m.log.Debug("Attempting to write ", len(data), " records...") - accumulateSet := m.AccumulateSet(data, false) + accumulateSet := m.AccumulateSet(data, true) errCh := make(chan error, len(accumulateSet)) for _, dataSet := range accumulateSet { @@ -430,7 +430,7 @@ func (m *MongoPump) AccumulateSet(data []interface{}, isForGraphRecords bool) [] for i, item := range data { // Process the current item and determine if it should be skipped - thisItem, skip := m.processItem(item, isForGraphRecords) + thisItem, skip := m.shouldProcessItem(item, isForGraphRecords) if skip { continue } @@ -455,9 +455,9 @@ func (m *MongoPump) AccumulateSet(data []interface{}, isForGraphRecords bool) [] return returnArray } -// processItem checks if the item should be processed based on its ResponseCode and if it's a graph record. +// shouldProcessItem checks if the item should be processed based on its ResponseCode and if it's a graph record. // It returns the processed item and a boolean indicating if the item should be skipped. -func (m *MongoPump) processItem(item interface{}, isForGraphRecords bool) (*analytics.AnalyticsRecord, bool) { +func (m *MongoPump) shouldProcessItem(item interface{}, isForGraphRecords bool) (*analytics.AnalyticsRecord, bool) { thisItem, ok := item.(analytics.AnalyticsRecord) if !ok { m.log.Error("Couldn't convert item to analytics.AnalyticsRecord") From 2b40aebd36eb71ed55ea39be26098ca4503d28c8 Mon Sep 17 00:00:00 2001 From: Matias <83959431+mativm02@users.noreply.github.com> Date: Wed, 17 May 2023 13:46:16 -0300 Subject: [PATCH 075/102] Updating to storage v1.0.4 (#639) --- go.mod | 2 +- go.sum | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/go.mod b/go.mod index b3dca20be..f4e8a0beb 100644 --- a/go.mod +++ b/go.mod @@ -8,7 +8,7 @@ require ( github.com/TykTechnologies/gorpc v0.0.0-20210624160652-fe65bda0ccb9 github.com/TykTechnologies/graphql-go-tools v1.6.2-0.20230320143102-7a16078ce517 github.com/TykTechnologies/murmur3 v0.0.0-20230310161213-aad17efd5632 - github.com/TykTechnologies/storage v1.0.3 + github.com/TykTechnologies/storage v1.0.4 github.com/aws/aws-sdk-go-v2 v1.16.14 github.com/aws/aws-sdk-go-v2/config v1.9.0 github.com/aws/aws-sdk-go-v2/service/timestreamwrite v1.9.0 diff --git a/go.sum b/go.sum index 47a0c1fe7..80c715022 100644 --- a/go.sum +++ b/go.sum @@ -64,6 +64,8 @@ github.com/TykTechnologies/storage v1.0.2 h1:bWaLbpDmsjxT/8QVl9Fpuz1w1orqa/COvs1 github.com/TykTechnologies/storage v1.0.2/go.mod h1:+0S3KuNlLGBTMTSFREuZFm315zzXjuuCO4QSAPy+d3M= github.com/TykTechnologies/storage v1.0.3 h1:UMEKTA9C8ir636qUS3aAfbBH/KKegH+KmC81LYLnSUg= github.com/TykTechnologies/storage v1.0.3/go.mod h1:+0S3KuNlLGBTMTSFREuZFm315zzXjuuCO4QSAPy+d3M= +github.com/TykTechnologies/storage v1.0.4 h1:Pb12Nyh1vQjkadOI4wpU/gQzE5LRNKkj1IZ80xVQjt8= +github.com/TykTechnologies/storage v1.0.4/go.mod h1:+0S3KuNlLGBTMTSFREuZFm315zzXjuuCO4QSAPy+d3M= github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM= github.com/agnivade/levenshtein v1.1.1 h1:QY8M92nrzkmr798gCo3kmMyqXFzdQVpxLlGPRBij0P8= github.com/agnivade/levenshtein v1.1.1/go.mod h1:veldBMzWxcCG2ZvUTKD2kJNRdCk5hVbJomOvKkmgYbo= From 37855036c58b9545717fddd71ae55f16fff5751c Mon Sep 17 00:00:00 2001 From: Tomas Buchaillot Date: Fri, 19 May 2023 16:11:54 +0200 Subject: [PATCH 076/102] TT-8942 Change default RPC pool size (#642) * 
Change default RPC pool size * add TestHybridConfigParsing test case --- pumps/hybrid.go | 9 +++++---- pumps/hybrid_test.go | 20 ++++++++++++++++++++ 2 files changed, 25 insertions(+), 4 deletions(-) diff --git a/pumps/hybrid.go b/pumps/hybrid.go index bcc211807..b78313c65 100644 --- a/pumps/hybrid.go +++ b/pumps/hybrid.go @@ -77,7 +77,7 @@ type HybridPumpConf struct { // Hybrid pump RPC calls timeout in seconds. Defaults to `10` seconds. CallTimeout int `mapstructure:"call_timeout"` - // Hybrid pump connection pool size + // Hybrid pump connection pool size. Defaults to `5`. RPCPoolSize int `mapstructure:"rpc_pool_size"` // aggregationTime is to specify the frequency of the aggregation in minutes if `aggregated` is set to `true`. aggregationTime int @@ -107,6 +107,10 @@ func (conf *HybridPumpConf) CheckDefaults() { conf.aggregationTime = 1 } } + + if conf.RPCPoolSize == 0 { + conf.RPCPoolSize = 5 + } } func (p *HybridPump) GetName() string { @@ -187,9 +191,6 @@ func (p *HybridPump) connectRPC() error { p.clientSingleton.OnConnect = p.onConnectFunc p.clientSingleton.Conns = p.hybridConfig.RPCPoolSize - if p.clientSingleton.Conns == 0 { - p.clientSingleton.Conns = 20 - } p.clientSingleton.Dial = getDialFn(connID, p.hybridConfig) diff --git a/pumps/hybrid_test.go b/pumps/hybrid_test.go index 340bccc36..bcbed2b3c 100644 --- a/pumps/hybrid_test.go +++ b/pumps/hybrid_test.go @@ -388,6 +388,7 @@ func TestHybridConfigCheckDefaults(t *testing.T) { expectedConfig: &HybridPumpConf{ CallTimeout: DefaultRPCCallTimeout, Aggregated: false, + RPCPoolSize: 5, }, }, { @@ -401,6 +402,7 @@ func TestHybridConfigCheckDefaults(t *testing.T) { Aggregated: true, StoreAnalyticsPerMinute: true, aggregationTime: 1, + RPCPoolSize: 5, }, }, @@ -415,6 +417,7 @@ func TestHybridConfigCheckDefaults(t *testing.T) { Aggregated: true, StoreAnalyticsPerMinute: false, aggregationTime: 60, + RPCPoolSize: 5, }, }, { @@ -424,6 +427,19 @@ func TestHybridConfigCheckDefaults(t *testing.T) { }, expectedConfig: &HybridPumpConf{ CallTimeout: 20, + RPCPoolSize: 5, + }, + }, + + { + testName: "custom rpc_pool_size", + givenConfig: &HybridPumpConf{ + CallTimeout: 20, + RPCPoolSize: 20, + }, + expectedConfig: &HybridPumpConf{ + CallTimeout: 20, + RPCPoolSize: 20, }, }, } @@ -464,6 +480,7 @@ func TestHybridConfigParsing(t *testing.T) { APIKey: "testapikey", Aggregated: true, aggregationTime: 60, + RPCPoolSize: 5, }, }, { @@ -483,6 +500,7 @@ func TestHybridConfigParsing(t *testing.T) { APIKey: "testapikey", Aggregated: true, aggregationTime: 60, + RPCPoolSize: 5, }, }, @@ -498,6 +516,7 @@ func TestHybridConfigParsing(t *testing.T) { "aggregated": true, "store_analytics_per_minute": true, "track_all_paths": true, + "rpc_pool_size": 20, }, expectedConfig: &HybridPumpConf{ ConnectionString: svAddress, @@ -508,6 +527,7 @@ func TestHybridConfigParsing(t *testing.T) { StoreAnalyticsPerMinute: true, aggregationTime: 1, TrackAllPaths: true, + RPCPoolSize: 20, }, }, } From 93b8aee7645a514854728798403befc290b7e557 Mon Sep 17 00:00:00 2001 From: Matias <83959431+mativm02@users.noreply.github.com> Date: Mon, 22 May 2023 12:03:50 -0300 Subject: [PATCH 077/102] Updating to storage v1.0.5 (#646) --- go.mod | 2 +- go.sum | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/go.mod b/go.mod index f4e8a0beb..2b020eb2e 100644 --- a/go.mod +++ b/go.mod @@ -8,7 +8,7 @@ require ( github.com/TykTechnologies/gorpc v0.0.0-20210624160652-fe65bda0ccb9 github.com/TykTechnologies/graphql-go-tools v1.6.2-0.20230320143102-7a16078ce517 
github.com/TykTechnologies/murmur3 v0.0.0-20230310161213-aad17efd5632 - github.com/TykTechnologies/storage v1.0.4 + github.com/TykTechnologies/storage v1.0.5 github.com/aws/aws-sdk-go-v2 v1.16.14 github.com/aws/aws-sdk-go-v2/config v1.9.0 github.com/aws/aws-sdk-go-v2/service/timestreamwrite v1.9.0 diff --git a/go.sum b/go.sum index 80c715022..71fc495d9 100644 --- a/go.sum +++ b/go.sum @@ -66,6 +66,8 @@ github.com/TykTechnologies/storage v1.0.3 h1:UMEKTA9C8ir636qUS3aAfbBH/KKegH+KmC8 github.com/TykTechnologies/storage v1.0.3/go.mod h1:+0S3KuNlLGBTMTSFREuZFm315zzXjuuCO4QSAPy+d3M= github.com/TykTechnologies/storage v1.0.4 h1:Pb12Nyh1vQjkadOI4wpU/gQzE5LRNKkj1IZ80xVQjt8= github.com/TykTechnologies/storage v1.0.4/go.mod h1:+0S3KuNlLGBTMTSFREuZFm315zzXjuuCO4QSAPy+d3M= +github.com/TykTechnologies/storage v1.0.5 h1:lfMljPueySAW7Mpc70g1/qC5n2LKNcKgQs+Xw30apP8= +github.com/TykTechnologies/storage v1.0.5/go.mod h1:+0S3KuNlLGBTMTSFREuZFm315zzXjuuCO4QSAPy+d3M= github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM= github.com/agnivade/levenshtein v1.1.1 h1:QY8M92nrzkmr798gCo3kmMyqXFzdQVpxLlGPRBij0P8= github.com/agnivade/levenshtein v1.1.1/go.mod h1:veldBMzWxcCG2ZvUTKD2kJNRdCk5hVbJomOvKkmgYbo= From 2d3c2961cbe516192df734a7ce50d578c25c7754 Mon Sep 17 00:00:00 2001 From: Kofo Okesola Date: Thu, 25 May 2023 14:24:53 +0100 Subject: [PATCH 078/102] [TT-8884]: added write data test for mongo pump and remove constraint (#645) * added write data test for mongo pump and remove constraint Added a test to cover the write data function of the mongo pump, and removed a bug where he mongo pump only recorded graph records * run fmt --- pumps/mongo.go | 7 ++- pumps/mongo_test.go | 105 +++++++++++++++++++++++++++++++++++++++----- 2 files changed, 98 insertions(+), 14 deletions(-) diff --git a/pumps/mongo.go b/pumps/mongo.go index 613d3fd44..52a49a133 100644 --- a/pumps/mongo.go +++ b/pumps/mongo.go @@ -385,7 +385,7 @@ func (m *MongoPump) WriteData(ctx context.Context, data []interface{}) error { m.log.Debug("Attempting to write ", len(data), " records...") - accumulateSet := m.AccumulateSet(data, true) + accumulateSet := m.AccumulateSet(data, false) errCh := make(chan error, len(accumulateSet)) for _, dataSet := range accumulateSet { @@ -457,7 +457,7 @@ func (m *MongoPump) AccumulateSet(data []interface{}, isForGraphRecords bool) [] // shouldProcessItem checks if the item should be processed based on its ResponseCode and if it's a graph record. // It returns the processed item and a boolean indicating if the item should be skipped. 
-func (m *MongoPump) shouldProcessItem(item interface{}, isForGraphRecords bool) (*analytics.AnalyticsRecord, bool) { +func (m *MongoPump) shouldProcessItem(item interface{}, isForGraphRecords bool) (records *analytics.AnalyticsRecord, shouldSKip bool) { thisItem, ok := item.(analytics.AnalyticsRecord) if !ok { m.log.Error("Couldn't convert item to analytics.AnalyticsRecord") @@ -468,10 +468,9 @@ func (m *MongoPump) shouldProcessItem(item interface{}, isForGraphRecords bool) } isGraphRecord := thisItem.IsGraphRecord() - if isGraphRecord != isForGraphRecords { + if isForGraphRecords && !isGraphRecord { return &thisItem, true } - return &thisItem, false } diff --git a/pumps/mongo_test.go b/pumps/mongo_test.go index a4b59cae6..1d1d19008 100644 --- a/pumps/mongo_test.go +++ b/pumps/mongo_test.go @@ -7,6 +7,10 @@ import ( "testing" "time" + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/assert" "gopkg.in/vmihailenco/msgpack.v2" @@ -319,17 +323,10 @@ func TestMongoPump_AccumulateSet(t *testing.T) { mPump.log = log.WithField("prefix", mongoPrefix) data := recordsGenerator(numRecords) - expectedGraphRecordSkips := 0 - for _, recordData := range data { - record := recordData.(analytics.AnalyticsRecord) - if record.IsGraphRecord() { - expectedGraphRecordSkips++ - } - } // assumed from sizeBytes in AccumulateSet const dataSize = 1024 - totalData := dataSize * (numRecords - expectedGraphRecordSkips) + totalData := dataSize * (numRecords) set := mPump.AccumulateSet(data, false) @@ -357,7 +354,7 @@ func TestMongoPump_AccumulateSet(t *testing.T) { 100, )) - t.Run("should skip all graph analytics records", run( + t.Run("should include all graph analytics records", run( func(numRecords int) []interface{} { data := make([]interface{}, 0) for i := 0; i < numRecords; i++ { @@ -369,7 +366,7 @@ func TestMongoPump_AccumulateSet(t *testing.T) { } return data }, - 50, + 100, )) } @@ -565,3 +562,91 @@ func TestDefaultDriver(t *testing.T) { assert.Nil(t, err) assert.Equal(t, persistent.Mgo, newPump.dbConf.MongoDriverType) } + +func TestMongoPump_WriteData(t *testing.T) { + sampleRecord := analytics.AnalyticsRecord{ + Method: "GET", + Host: "localhost:9000", + Path: "/test", + Day: 1, + Month: 1, + Year: 2023, + ResponseCode: 200, + APIKey: "testkey", + TimeStamp: time.Date(2023, 1, 1, 0, 0, 0, 0, time.UTC), + APIName: "testapi", + APIID: "testapi", + OrgID: "testorg", + Geo: analytics.GeoData{ + City: analytics.City{ + Names: map[string]string{}, + }, + }, + Tags: []string{}, + } + + run := func(recordGenerator func(count int) []analytics.AnalyticsRecord) func(t *testing.T) { + return func(t *testing.T) { + pump := &MongoPump{} + conf := defaultConf() + pump.dbConf = &conf + pump.log = log.WithField("prefix", mongoPrefix) + + pump.connect() + + t.Cleanup(func() { + if err := pump.store.DropDatabase(context.Background()); err != nil { + pump.log.WithError(err).Warn("error dropping collection") + } + }) + + data := recordGenerator(100) + interfaceRecords := make([]interface{}, len(data)) + for i, d := range data { + interfaceRecords[i] = d + } + + err := pump.WriteData(context.Background(), interfaceRecords) + require.NoError(t, err) + + var results []analytics.AnalyticsRecord + + // Using the same collection name as the default pump config + d := dbObject{ + tableName: pump.dbConf.CollectionName, + } + err = pump.store.Query(context.Background(), d, &results, nil) + + assert.Nil(t, err) + + // ensure the length and content 
are the same + assert.Equal(t, len(data), len(results)) + if diff := cmp.Diff(data, results, cmpopts.IgnoreFields(analytics.AnalyticsRecord{}, "id", "APISchema")); diff != "" { + t.Error(diff) + } + } + } + + t.Run("should write all records", run(func(count int) []analytics.AnalyticsRecord { + records := make([]analytics.AnalyticsRecord, count) + for i := range records { + records[i] = sampleRecord + } + return records + })) + + t.Run("should write graph records as well", run(func(count int) []analytics.AnalyticsRecord { + records := make([]analytics.AnalyticsRecord, count) + for i := range records { + record := sampleRecord + if i%2 == 0 { + record.RawRequest = rawGQLRequest + record.RawResponse = rawGQLResponse + record.APISchema = schema + record.Tags = []string{analytics.PredefinedTagGraphAnalytics} + } + records[i] = record + } + return records + })) +} From 54d0455505f8c864e9742ce608c9428560e6a101 Mon Sep 17 00:00:00 2001 From: Matias <83959431+mativm02@users.noreply.github.com> Date: Mon, 29 May 2023 10:00:16 -0300 Subject: [PATCH 079/102] [TT-8999] Overriding pumps with the same name (#648) * overriding pumps with the same name * linting * adding more test cases * moving unit test function to the top of the file * separating overriding test --- config.go | 9 ++++++- config_test.go | 65 +++++++++++++++++++++++++++++++++++++++++++++++--- 2 files changed, 70 insertions(+), 4 deletions(-) diff --git a/config.go b/config.go index ab347ffaf..cbeb407c5 100644 --- a/config.go +++ b/config.go @@ -251,6 +251,13 @@ func LoadConfig(filePath *string, configStruct *TykPumpConfiguration) { log.Error("Couldn't unmarshal configuration: ", marshalErr) } + toUpperMap := make(map[string]PumpConfig) + for pumpName := range configStruct.Pumps { + upperPumpName := strings.ToUpper(pumpName) + toUpperMap[upperPumpName] = configStruct.Pumps[pumpName] + } + configStruct.Pumps = toUpperMap + shouldOmit, omitEnvExist := os.LookupEnv(ENV_PREVIX + "_OMITCONFIGFILE") if configStruct.OmitConfigFile || (omitEnvExist && strings.ToLower(shouldOmit) == "true") { *configStruct = TykPumpConfiguration{} @@ -289,7 +296,7 @@ func (cfg *TykPumpConfiguration) LoadPumpsByEnv() error { } //The name of the pump is always going to be the first keyword after the PUMPS_ENV_PREFIX - pmpName := envSplit[0] + pmpName := strings.ToUpper(envSplit[0]) osPumpsEnvNames[pmpName] = true } diff --git a/config_test.go b/config_test.go index 0840019e9..931317309 100644 --- a/config_test.go +++ b/config_test.go @@ -2,13 +2,74 @@ package main import ( "os" + "strings" "testing" "github.com/stretchr/testify/assert" ) -func TestConfigEnv(t *testing.T) { +func TestToUpperPumps(t *testing.T) { + pumpNames := []string{"test1", "test2", "TEST3", "Test4", "test3"} // index 4 must override index 2 + + initialConfig := &TykPumpConfiguration{ + Pumps: map[string]PumpConfig{ + pumpNames[0]: { + Type: "mongo", + Name: "mongo-pump", + Meta: map[string]interface{}{ + "meta_env_prefix": "test", + }, + }, + pumpNames[1]: { + Type: "sql", + Name: "sql-pump", + Meta: map[string]interface{}{ + "meta_env_prefix": "test2", + }, + }, + pumpNames[2]: { + Type: "mongo", + }, + pumpNames[3]: { + Type: "sql", + }, + pumpNames[4]: { + Type: "sql", + }, + }, + } + defaultPath := "" + LoadConfig(&defaultPath, initialConfig) + assert.Equal(t, len(pumpNames)-1, len(initialConfig.Pumps)) + assert.Equal(t, initialConfig.Pumps[strings.ToUpper(pumpNames[0])].Type, "mongo") + assert.Equal(t, initialConfig.Pumps[strings.ToUpper(pumpNames[1])].Type, "sql") + assert.Equal(t, 
initialConfig.Pumps[strings.ToUpper(pumpNames[3])].Type, "sql") + assert.Equal(t, initialConfig.Pumps[strings.ToUpper(pumpNames[0])].Name, "mongo-pump") + assert.Equal(t, initialConfig.Pumps[strings.ToUpper(pumpNames[1])].Name, "sql-pump") + assert.Equal(t, initialConfig.Pumps[strings.ToUpper(pumpNames[0])].Meta["meta_env_prefix"], "test") + assert.Equal(t, initialConfig.Pumps[strings.ToUpper(pumpNames[1])].Meta["meta_env_prefix"], "test2") + // Check if the pumps with lower case are empty (don't appear in the map) + assert.Equal(t, initialConfig.Pumps[pumpNames[0]], PumpConfig{}) + assert.Equal(t, initialConfig.Pumps[pumpNames[1]], PumpConfig{}) + + // Checking if the index 4 overrides the index 2 (the original value was 'mongo') + assert.Equal(t, initialConfig.Pumps[strings.ToUpper(pumpNames[2])].Type, "sql") +} + +func TestLoadExampleConf(t *testing.T) { + defaultPath := "./pump.example.conf" + initialConfig := &TykPumpConfiguration{} + LoadConfig(&defaultPath, initialConfig) + assert.NotZero(t, len(initialConfig.Pumps)) + for k, pump := range initialConfig.Pumps { + assert.NotNil(t, pump) + // Checking if the key of the map is equal to the pump type but upper case + assert.Equal(t, k, strings.ToUpper(pump.Type)) + } +} + +func TestConfigEnv(t *testing.T) { pumpNameCSV := "CSV" pumpNameTest := "TEST" @@ -60,7 +121,6 @@ func TestConfigEnv(t *testing.T) { } func TestIgnoreConfig(t *testing.T) { - config := TykPumpConfiguration{ PurgeDelay: 10, } @@ -77,5 +137,4 @@ func TestIgnoreConfig(t *testing.T) { LoadConfig(&defaultPath, &config) assert.Equal(t, 30, config.PurgeDelay, "TYK_OMITCONFIGFILE should not have unset the configuation") - } From 8ebd118a0e624c37fb3955b60051a7606314bab8 Mon Sep 17 00:00:00 2001 From: Matias <83959431+mativm02@users.noreply.github.com> Date: Thu, 1 Jun 2023 09:25:28 -0300 Subject: [PATCH 080/102] [TT-8999] - Converting pump name to lower case when looking for it (#651) * converting pump name to lower case when looking for it * improving unit tests --- pumps/pump.go | 3 ++- pumps/pump_test.go | 24 ++++++++++++++---------- 2 files changed, 16 insertions(+), 11 deletions(-) diff --git a/pumps/pump.go b/pumps/pump.go index 41f783fee..58de2a6e9 100644 --- a/pumps/pump.go +++ b/pumps/pump.go @@ -4,6 +4,7 @@ import ( "context" "errors" "fmt" + "strings" "github.com/TykTechnologies/tyk-pump/analytics" "github.com/kelseyhightower/envconfig" @@ -45,7 +46,7 @@ type UptimePump interface { func GetPumpByName(name string) (Pump, error) { - if pump, ok := AvailablePumps[name]; ok && pump != nil { + if pump, ok := AvailablePumps[strings.ToLower(name)]; ok && pump != nil { return pump, nil } diff --git a/pumps/pump_test.go b/pumps/pump_test.go index c4362b344..11e4f0c65 100644 --- a/pumps/pump_test.go +++ b/pumps/pump_test.go @@ -2,21 +2,25 @@ package pumps import ( "testing" + + "github.com/stretchr/testify/assert" ) func TestGetPumpByName(t *testing.T) { - name := "dummy" - pmpType, err := GetPumpByName(name) + dummyType, err := GetPumpByName("dummy") + assert.NoError(t, err) + assert.Equal(t, dummyType, &DummyPump{}) - if err != nil || pmpType == nil { - t.Fail() - } + invalidPump, err := GetPumpByName("xyz") + assert.Error(t, err) + assert.Nil(t, invalidPump) - name2 := "xyz" - pmpType2, err2 := GetPumpByName(name2) + mongoPump, err := GetPumpByName("MONGO") + assert.NoError(t, err) + assert.Equal(t, mongoPump, &MongoPump{}) - if err2 == nil || pmpType2 != nil { - t.Fail() - } + sqlPump, err := GetPumpByName("SqL") + assert.NoError(t, err) + assert.Equal(t, sqlPump, 
&SQLPump{}) } From 376711f8cb5aaa02083f993380ccac2bde64003e Mon Sep 17 00:00:00 2001 From: caroltyk <97617859+caroltyk@users.noreply.github.com> Date: Mon, 12 Jun 2023 13:15:28 +0100 Subject: [PATCH 081/102] Update README.md (#655) --- README.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index b6460c59f..1aa196d65 100644 --- a/README.md +++ b/README.md @@ -588,7 +588,7 @@ group_id - This is the “zone” that this instance inhabits, e.g. the DC it li call_timeout - This is the timeout (in milliseconds) for RPC calls. -rpc_pool_size - This is maximum number of connections to MDCB. +rpc_pool_size - This is maximum number of connections to MDCB. Default is 5. ###### Env Variables @@ -1363,8 +1363,8 @@ This can also be set at a pump level. For example: The `driver` setting defines the driver type to use for Mongo Pumps. It can be one of the following values: -- `mongo-go` (default): Uses the official MongoDB driver. This driver supports Mongo versions greater or equal to v4. You can get more information about this driver [here](https://github.com/mongodb/mongo-go-driver). -- `mgo`: Uses the mgo driver. This driver is deprecated. This driver supports Mongo versions lower or equal to v4. You can get more information about this driver [here](https://github.com/go-mgo/mgo) +- `mongo-go`: Uses the official MongoDB driver. This driver supports Mongo versions greater or equal to v4. You can get more information about this driver [here](https://github.com/mongodb/mongo-go-driver). +- `mgo` (default): Uses the mgo driver. This driver is deprecated. This driver supports Mongo versions lower or equal to v4. You can get more information about this driver [here](https://github.com/go-mgo/mgo) ```json "mongo": { From 6820131bfd3cb02ba0371900ee571d7296c1cfb4 Mon Sep 17 00:00:00 2001 From: Matias <83959431+mativm02@users.noreply.github.com> Date: Mon, 12 Jun 2023 09:30:48 -0300 Subject: [PATCH 082/102] [TT-9126] Fix error log when omit_configfile option is enabled (#654) * loading config file only if we shouldn't omit it * fixing flaky test * linting * removing cfg.OmitConfigFile check --- config.go | 29 +++++++++-------- config_test.go | 87 ++++++++++++++++++++++---------------------------- 2 files changed, 54 insertions(+), 62 deletions(-) diff --git a/config.go b/config.go index cbeb407c5..492879fdf 100644 --- a/config.go +++ b/config.go @@ -240,15 +240,16 @@ type TykPumpConfiguration struct { } func LoadConfig(filePath *string, configStruct *TykPumpConfiguration) { + if !configStruct.shouldOmitConfigFile() { + configuration, err := ioutil.ReadFile(*filePath) + if err != nil { + log.Error("Couldn't load configuration file: ", err) + } - configuration, err := ioutil.ReadFile(*filePath) - if err != nil { - log.Error("Couldn't load configuration file: ", err) - } - - marshalErr := json.Unmarshal(configuration, &configStruct) - if marshalErr != nil { - log.Error("Couldn't unmarshal configuration: ", marshalErr) + marshalErr := json.Unmarshal(configuration, &configStruct) + if marshalErr != nil { + log.Error("Couldn't unmarshal configuration: ", marshalErr) + } } toUpperMap := make(map[string]PumpConfig) @@ -258,11 +259,6 @@ func LoadConfig(filePath *string, configStruct *TykPumpConfiguration) { } configStruct.Pumps = toUpperMap - shouldOmit, omitEnvExist := os.LookupEnv(ENV_PREVIX + "_OMITCONFIGFILE") - if configStruct.OmitConfigFile || (omitEnvExist && strings.ToLower(shouldOmit) == "true") { - *configStruct = TykPumpConfiguration{} - } - overrideErr := 
envconfig.Process(ENV_PREVIX, configStruct) if overrideErr != nil { log.Error("Failed to process environment variables after file load: ", overrideErr) @@ -270,10 +266,15 @@ func LoadConfig(filePath *string, configStruct *TykPumpConfiguration) { errLoadEnvPumps := configStruct.LoadPumpsByEnv() if errLoadEnvPumps != nil { - log.Fatal("error loading pumps env vars:", err) + log.Fatal("error loading pumps env vars:", errLoadEnvPumps) } } +func (cfg *TykPumpConfiguration) shouldOmitConfigFile() bool { + shouldOmit, omitEnvExist := os.LookupEnv(ENV_PREVIX + "_OMITCONFIGFILE") + return omitEnvExist && strings.EqualFold(shouldOmit, "true") +} + func (cfg *TykPumpConfiguration) LoadPumpsByEnv() error { if len(cfg.Pumps) == 0 { cfg.Pumps = make(map[string]PumpConfig) diff --git a/config_test.go b/config_test.go index 931317309..afba56ec4 100644 --- a/config_test.go +++ b/config_test.go @@ -10,50 +10,27 @@ import ( func TestToUpperPumps(t *testing.T) { pumpNames := []string{"test1", "test2", "TEST3", "Test4", "test3"} // index 4 must override index 2 - initialConfig := &TykPumpConfiguration{ - Pumps: map[string]PumpConfig{ - pumpNames[0]: { - Type: "mongo", - Name: "mongo-pump", - Meta: map[string]interface{}{ - "meta_env_prefix": "test", - }, - }, - pumpNames[1]: { - Type: "sql", - Name: "sql-pump", - Meta: map[string]interface{}{ - "meta_env_prefix": "test2", - }, - }, - pumpNames[2]: { - Type: "mongo", - }, - pumpNames[3]: { - Type: "sql", - }, - pumpNames[4]: { - Type: "sql", - }, - }, + Pumps: make(map[string]PumpConfig), } + initialConfig.Pumps[pumpNames[0]] = PumpConfig{Type: "mongo"} + initialConfig.Pumps[pumpNames[1]] = PumpConfig{Type: "sql"} + initialConfig.Pumps[pumpNames[2]] = PumpConfig{Type: "mongo-aggregate"} + initialConfig.Pumps[pumpNames[3]] = PumpConfig{Type: "csv"} + initialConfig.Pumps[pumpNames[4]] = PumpConfig{Type: "sql-aggregate"} + defaultPath := "" LoadConfig(&defaultPath, initialConfig) assert.Equal(t, len(pumpNames)-1, len(initialConfig.Pumps)) assert.Equal(t, initialConfig.Pumps[strings.ToUpper(pumpNames[0])].Type, "mongo") assert.Equal(t, initialConfig.Pumps[strings.ToUpper(pumpNames[1])].Type, "sql") - assert.Equal(t, initialConfig.Pumps[strings.ToUpper(pumpNames[3])].Type, "sql") - assert.Equal(t, initialConfig.Pumps[strings.ToUpper(pumpNames[0])].Name, "mongo-pump") - assert.Equal(t, initialConfig.Pumps[strings.ToUpper(pumpNames[1])].Name, "sql-pump") - assert.Equal(t, initialConfig.Pumps[strings.ToUpper(pumpNames[0])].Meta["meta_env_prefix"], "test") - assert.Equal(t, initialConfig.Pumps[strings.ToUpper(pumpNames[1])].Meta["meta_env_prefix"], "test2") + assert.Equal(t, initialConfig.Pumps[strings.ToUpper(pumpNames[3])].Type, "csv") // Check if the pumps with lower case are empty (don't appear in the map) assert.Equal(t, initialConfig.Pumps[pumpNames[0]], PumpConfig{}) assert.Equal(t, initialConfig.Pumps[pumpNames[1]], PumpConfig{}) // Checking if the index 4 overrides the index 2 (the original value was 'mongo') - assert.Equal(t, initialConfig.Pumps[strings.ToUpper(pumpNames[2])].Type, "sql") + assert.Equal(t, initialConfig.Pumps[strings.ToUpper(pumpNames[2])].Type, "sql-aggregate") } func TestLoadExampleConf(t *testing.T) { @@ -121,20 +98,34 @@ func TestConfigEnv(t *testing.T) { } func TestIgnoreConfig(t *testing.T) { - config := TykPumpConfiguration{ - PurgeDelay: 10, - } - os.Setenv(ENV_PREVIX+"_OMITCONFIGFILE", "true") - defaultPath := "" - LoadConfig(&defaultPath, &config) - - assert.Equal(t, 0, config.PurgeDelay, "TYK_OMITCONFIGFILE should have unset the 
configuation") - - os.Unsetenv(ENV_PREVIX + "_OMITCONFIGFILE") - - config = TykPumpConfiguration{} - config.PurgeDelay = 30 - LoadConfig(&defaultPath, &config) - - assert.Equal(t, 30, config.PurgeDelay, "TYK_OMITCONFIGFILE should not have unset the configuation") + defaultPath := "pump.example.conf" + + t.Run("Ignoring the config file", func(t *testing.T) { + initialConfig := TykPumpConfiguration{PurgeDelay: 5} + os.Setenv(ENV_PREVIX+"_OMITCONFIGFILE", "true") + defer os.Unsetenv(ENV_PREVIX + "_OMITCONFIGFILE") + LoadConfig(&defaultPath, &initialConfig) + assert.Equal(t, 5, initialConfig.PurgeDelay, "TYK_OMITCONFIGFILE set to true shouldn't have unset the configuration") + }) + + t.Run("Not ignoring the config file", func(t *testing.T) { + initialConfig := TykPumpConfiguration{PurgeDelay: 5} + os.Setenv(ENV_PREVIX+"_OMITCONFIGFILE", "false") + defer os.Unsetenv(ENV_PREVIX + "_OMITCONFIGFILE") + LoadConfig(&defaultPath, &initialConfig) + assert.Equal(t, 10, initialConfig.PurgeDelay, "TYK_OMITCONFIGFILE set to false should overwrite the configuration") + }) + + t.Run("Environment variable not set", func(t *testing.T) { + initialConfig := TykPumpConfiguration{PurgeDelay: 5} + LoadConfig(&defaultPath, &initialConfig) + assert.Equal(t, 10, initialConfig.PurgeDelay, "TYK_OMITCONFIGFILE not set should overwrite the configuration") + }) + + t.Run("Config file does not exist", func(t *testing.T) { + initialConfig := TykPumpConfiguration{PurgeDelay: 5} + nonexistentPath := "nonexistent_config.json" + LoadConfig(&nonexistentPath, &initialConfig) + assert.Equal(t, 5, initialConfig.PurgeDelay, "Nonexistent config file should not affect the configuration") + }) } From 0fc371308139b1cc004d21c74beb35d89b170e46 Mon Sep 17 00:00:00 2001 From: Matias <83959431+mativm02@users.noreply.github.com> Date: Thu, 15 Jun 2023 10:36:02 -0300 Subject: [PATCH 083/102] [TT-8846] Updating to Go 1.19 (#652) * Updating to Go 1.19 * remove formatting golangci.yaml file * reverting unnecessary changes * Fix test names, amend syntax changes * Add historical branches * changing linter go's version --------- Co-authored-by: Esteban Ricardo Mirizio --- .github/workflows/ci-test.yml | 2 +- .github/workflows/linter.yaml | 4 +- .github/workflows/release.yml | 6 +- .golangci.yaml | 4 +- go.mod | 116 ++++++++++++++++++-- go.sum | 20 ---- repo-policy/historical-branches.auto.tfvars | 10 ++ repo-policy/main.tf | 30 +++-- 8 files changed, 143 insertions(+), 49 deletions(-) create mode 100644 repo-policy/historical-branches.auto.tfvars diff --git a/.github/workflows/ci-test.yml b/.github/workflows/ci-test.yml index 3094c3635..697d5f41c 100644 --- a/.github/workflows/ci-test.yml +++ b/.github/workflows/ci-test.yml @@ -21,7 +21,7 @@ jobs: matrix: redis-version: [5] mongodb-version: [4.2] - go: [1.16] + go: [1.19] steps: - name: Checkout Tyk Pump diff --git a/.github/workflows/linter.yaml b/.github/workflows/linter.yaml index 0f47aab4a..633effb19 100644 --- a/.github/workflows/linter.yaml +++ b/.github/workflows/linter.yaml @@ -9,7 +9,7 @@ on: - main pull_request: workflow_dispatch: - + env: TYK_PMP_ANALYTICSSTORAGETYPE: redis TYK_PMP_ANALYTICSSTORAGECONFIG_TYPE: redis @@ -22,7 +22,7 @@ jobs: strategy: fail-fast: false matrix: - go: [1.16, 1.17] + go: [1.18, 1.19] with: go: ${{ matrix.go }} redis: 5 diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 9c87fdf67..4856cbd62 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -35,9 +35,9 @@ jobs: strategy: fail-fast: false matrix: - 
golang_cross: [ 1.16 ] + golang_cross: [1.19-bullseye] include: - - golang_cross: 1.16 + - golang_cross: 1.19-bullseye goreleaser: 'ci/goreleaser/goreleaser.yml' rpmvers: ' el/7 el/8 el/9' debvers: 'ubuntu/xenial ubuntu/bionic ubuntu/focal ubuntu/jammy debian/jessie debian/buster debian/bullseye' @@ -387,5 +387,3 @@ jobs: export VERSION=${{ needs.goreleaser.outputs.tag }} packer validate -var-file=${{ matrix.flavour }}.vars.json byol.pkr.hcl packer build -var-file=${{ matrix.flavour }}.vars.json byol.pkr.hcl - - diff --git a/.golangci.yaml b/.golangci.yaml index 95b3b33cc..89978c797 100644 --- a/.golangci.yaml +++ b/.golangci.yaml @@ -65,7 +65,7 @@ linters-settings: extra-rules: true # Select the Go version to target. - lang-version: "1.17" + lang-version: "1.19" goimports: auto-fix: false @@ -102,7 +102,7 @@ linters-settings: # https://staticcheck.io/docs/options#checks checks: ["all"] # Select the Go version to target. - go: "1.17" + go: "1.19" whitespace: auto-fix: true diff --git a/go.mod b/go.mod index 2b020eb2e..a3a178518 100644 --- a/go.mod +++ b/go.mod @@ -1,10 +1,9 @@ module github.com/TykTechnologies/tyk-pump -go 1.16 +go 1.19 require ( github.com/DataDog/datadog-go v4.7.0+incompatible - github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d // indirect github.com/TykTechnologies/gorpc v0.0.0-20210624160652-fe65bda0ccb9 github.com/TykTechnologies/graphql-go-tools v1.6.2-0.20230320143102-7a16078ce517 github.com/TykTechnologies/murmur3 v0.0.0-20230310161213-aad17efd5632 @@ -12,12 +11,9 @@ require ( github.com/aws/aws-sdk-go-v2 v1.16.14 github.com/aws/aws-sdk-go-v2/config v1.9.0 github.com/aws/aws-sdk-go-v2/service/timestreamwrite v1.9.0 - github.com/beeker1121/goque v0.0.0-20170321141813-4044bc29b280 // indirect - github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 // indirect github.com/buger/jsonparser v1.1.1 github.com/cenkalti/backoff/v4 v4.0.2 github.com/fatih/structs v1.1.0 - github.com/go-ole/go-ole v1.2.4 // indirect github.com/go-redis/redis/v8 v8.3.1 github.com/gocraft/health v0.0.0-20170925182251-8675af27fef0 github.com/gofrs/uuid v3.3.0+incompatible @@ -26,13 +22,10 @@ require ( github.com/gorilla/mux v1.8.0 github.com/influxdata/influxdb v1.8.3 github.com/influxdata/influxdb-client-go/v2 v2.6.0 - github.com/jehiah/go-strftime v0.0.0-20151206194810-2efbe75097a5 // indirect github.com/kelseyhightower/envconfig v1.4.0 - github.com/lintianzhi/graylogd v0.0.0-20180503131252-dc68342f04dc // indirect github.com/logzio/logzio-go v0.0.0-20200316143903-ac8fc0e2910e github.com/mitchellh/mapstructure v1.3.1 github.com/moesif/moesifapi-go v1.0.6 - github.com/olivere/elastic v6.2.31+incompatible // indirect github.com/olivere/elastic/v7 v7.0.28 github.com/oschwald/maxminddb-golang v1.5.0 github.com/pkg/errors v0.9.1 @@ -41,13 +34,9 @@ require ( github.com/resurfaceio/logger-go/v3 v3.2.1 github.com/robertkowalski/graylog-golang v0.0.0-20151121031040-e5295cfa2827 github.com/segmentio/analytics-go v0.0.0-20160711225931-bdb0aeca8a99 - github.com/segmentio/backo-go v0.0.0-20160424052352-204274ad699c // indirect github.com/segmentio/kafka-go v0.3.6 - github.com/shirou/gopsutil v3.20.11+incompatible // indirect github.com/sirupsen/logrus v1.8.1 github.com/stretchr/testify v1.8.1 - github.com/syndtr/goleveldb v0.0.0-20190318030020-c3a204f8e965 // indirect - github.com/xtgo/uuid v0.0.0-20140804021211-a0b114877d4c // indirect golang.org/x/net v0.0.0-20220722155237-a158d28d115b google.golang.org/protobuf v1.28.1 gopkg.in/alecthomas/kingpin.v2 v2.2.6 @@ -61,5 +50,108 @@ 
require ( gorm.io/gorm v1.21.10 ) +require ( + github.com/Masterminds/goutils v1.1.1 // indirect + github.com/Masterminds/semver v1.5.0 // indirect + github.com/Masterminds/sprig v2.22.0+incompatible // indirect + github.com/Microsoft/go-winio v0.5.2 // indirect + github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d // indirect + github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 // indirect + github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d // indirect + github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d // indirect + github.com/aws/aws-sdk-go-v2/credentials v1.5.0 // indirect + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.7.0 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.2 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.0.2 // indirect + github.com/aws/aws-sdk-go-v2/internal/ini v1.2.5 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.3.3 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.4.0 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.5.0 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.8.0 // indirect + github.com/aws/smithy-go v1.13.2 // indirect + github.com/beeker1121/goque v0.0.0-20170321141813-4044bc29b280 // indirect + github.com/beorn7/perks v1.0.1 // indirect + github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 // indirect + github.com/cespare/xxhash/v2 v2.1.2 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/deepmap/oapi-codegen v1.8.2 // indirect + github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect + github.com/eclipse/paho.mqtt.golang v1.2.0 // indirect + github.com/go-ole/go-ole v1.2.4 // indirect + github.com/go-sql-driver/mysql v1.5.0 // indirect + github.com/golang/snappy v0.0.3 // indirect + github.com/google/uuid v1.1.2 // indirect + github.com/hashicorp/golang-lru v0.5.4 // indirect + github.com/helloeave/json v1.15.3 // indirect + github.com/huandu/xstrings v1.2.1 // indirect + github.com/imdario/mergo v0.3.8 // indirect + github.com/influxdata/line-protocol v0.0.0-20200327222509-2487e7298839 // indirect + github.com/jackc/chunkreader/v2 v2.0.1 // indirect + github.com/jackc/pgconn v1.7.0 // indirect + github.com/jackc/pgio v1.0.0 // indirect + github.com/jackc/pgpassfile v1.0.0 // indirect + github.com/jackc/pgproto3/v2 v2.0.5 // indirect + github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b // indirect + github.com/jackc/pgtype v1.5.0 // indirect + github.com/jackc/pgx/v4 v4.9.0 // indirect + github.com/jehiah/go-strftime v0.0.0-20151206194810-2efbe75097a5 // indirect + github.com/jensneuse/abstractlogger v0.0.4 // indirect + github.com/jensneuse/byte-template v0.0.0-20200214152254-4f3cf06e5c68 // indirect + github.com/jensneuse/pipeline v0.0.0-20200117120358-9fb4de085cd6 // indirect + github.com/jinzhu/inflection v1.0.0 // indirect + github.com/jinzhu/now v1.1.2 // indirect + github.com/joho/godotenv v1.4.0 // indirect + github.com/josharian/intern v1.0.0 // indirect + github.com/klauspost/compress v1.13.6 // indirect + github.com/lintianzhi/graylogd v0.0.0-20180503131252-dc68342f04dc // indirect + github.com/mailru/easyjson v0.7.7 // indirect + github.com/mattn/go-sqlite3 v1.14.3 // indirect + github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect + github.com/mitchellh/copystructure v1.0.0 // indirect + github.com/mitchellh/reflectwalk v1.0.0 // indirect + github.com/montanaflynn/stats 
v0.0.0-20171201202039-1bf9dbcd8cbe // indirect + github.com/nats-io/nats.go v1.11.1-0.20210623165838-4b75fc59ae30 // indirect + github.com/nats-io/nkeys v0.3.0 // indirect + github.com/nats-io/nuid v1.0.1 // indirect + github.com/olivere/elastic v6.2.31+incompatible // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/prometheus/client_model v0.2.0 // indirect + github.com/prometheus/common v0.26.0 // indirect + github.com/prometheus/procfs v0.6.0 // indirect + github.com/qri-io/jsonpointer v0.1.1 // indirect + github.com/qri-io/jsonschema v0.2.1 // indirect + github.com/r3labs/sse/v2 v2.8.1 // indirect + github.com/segmentio/backo-go v0.0.0-20160424052352-204274ad699c // indirect + github.com/shirou/gopsutil v3.20.11+incompatible // indirect + github.com/syndtr/goleveldb v0.0.0-20190318030020-c3a204f8e965 // indirect + github.com/tidwall/gjson v1.11.0 // indirect + github.com/tidwall/match v1.1.1 // indirect + github.com/tidwall/pretty v1.2.0 // indirect + github.com/tidwall/sjson v1.0.4 // indirect + github.com/xdg-go/pbkdf2 v1.0.0 // indirect + github.com/xdg-go/scram v1.1.1 // indirect + github.com/xdg-go/stringprep v1.0.3 // indirect + github.com/xdg/scram v1.0.3 // indirect + github.com/xdg/stringprep v1.0.3 // indirect + github.com/xtgo/uuid v0.0.0-20140804021211-a0b114877d4c // indirect + github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d // indirect + go.mongodb.org/mongo-driver v1.11.2 // indirect + go.opentelemetry.io/otel v0.13.0 // indirect + go.uber.org/atomic v1.9.0 // indirect + go.uber.org/multierr v1.6.0 // indirect + go.uber.org/zap v1.18.1 // indirect + golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d // indirect + golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 // indirect + golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab // indirect + golang.org/x/text v0.3.7 // indirect + golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect + google.golang.org/appengine v1.6.5 // indirect + gopkg.in/cenkalti/backoff.v1 v1.1.0 // indirect + gopkg.in/mgo.v2 v2.0.0-20190816093944-a6b53ec6cb22 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect + nhooyr.io/websocket v1.8.7 // indirect +) + //replace gorm.io/gorm => ../gorm replace gorm.io/gorm => github.com/TykTechnologies/gorm v1.20.7-0.20210910090358-06148e82dc85 diff --git a/go.sum b/go.sum index 71fc495d9..d34a3920b 100644 --- a/go.sum +++ b/go.sum @@ -50,22 +50,6 @@ github.com/TykTechnologies/graphql-go-tools v1.6.2-0.20230320143102-7a16078ce517 github.com/TykTechnologies/graphql-go-tools v1.6.2-0.20230320143102-7a16078ce517/go.mod h1:ZiFZcrue3+n2mHH+KLHRipbYVULkgy3Myko5S7IIs74= github.com/TykTechnologies/murmur3 v0.0.0-20230310161213-aad17efd5632 h1:T5NWziFusj8au5nxAqMMh/bZyX9CAyYnBkaMSsfH6BA= github.com/TykTechnologies/murmur3 v0.0.0-20230310161213-aad17efd5632/go.mod h1:UsPYgOFBpNzDXLEti7MKOwHLpVSqdzuNGkVFPspQmnQ= -github.com/TykTechnologies/storage v0.0.0-20230330163006-a30b9b9f5c67 h1:KCdNrHczqffUg0Yg4ueN77zEyZJ9h86gF4zCNqZJy2Q= -github.com/TykTechnologies/storage v0.0.0-20230330163006-a30b9b9f5c67/go.mod h1:+0S3KuNlLGBTMTSFREuZFm315zzXjuuCO4QSAPy+d3M= -github.com/TykTechnologies/storage v0.0.0-20230405205718-1a2c0ac56c2a h1:b09GAESqHDrUcLyNPWT8K5DZuclc2x8wv3/MK5LGoXQ= -github.com/TykTechnologies/storage v0.0.0-20230405205718-1a2c0ac56c2a/go.mod h1:+0S3KuNlLGBTMTSFREuZFm315zzXjuuCO4QSAPy+d3M= -github.com/TykTechnologies/storage v0.0.0-20230410132731-c13ef37ecbd9 h1:MN+4v/nbMzqc9IyxpjG/e4yBwhJfBNRkEOB+L8u481w= 
-github.com/TykTechnologies/storage v0.0.0-20230410132731-c13ef37ecbd9/go.mod h1:+0S3KuNlLGBTMTSFREuZFm315zzXjuuCO4QSAPy+d3M= -github.com/TykTechnologies/storage v0.0.0-20230410152719-1e659ae95643 h1:vFml52JVqB1yOMUyq10o5JytEfC93KattU/xTfzxAlM= -github.com/TykTechnologies/storage v0.0.0-20230410152719-1e659ae95643/go.mod h1:+0S3KuNlLGBTMTSFREuZFm315zzXjuuCO4QSAPy+d3M= -github.com/TykTechnologies/storage v1.0.1 h1:YI85mHMofwIrF0QgrRYqKKd2xuPO/lxGe+SR4w2kKkg= -github.com/TykTechnologies/storage v1.0.1/go.mod h1:+0S3KuNlLGBTMTSFREuZFm315zzXjuuCO4QSAPy+d3M= -github.com/TykTechnologies/storage v1.0.2 h1:bWaLbpDmsjxT/8QVl9Fpuz1w1orqa/COvs1Gih+fvYE= -github.com/TykTechnologies/storage v1.0.2/go.mod h1:+0S3KuNlLGBTMTSFREuZFm315zzXjuuCO4QSAPy+d3M= -github.com/TykTechnologies/storage v1.0.3 h1:UMEKTA9C8ir636qUS3aAfbBH/KKegH+KmC81LYLnSUg= -github.com/TykTechnologies/storage v1.0.3/go.mod h1:+0S3KuNlLGBTMTSFREuZFm315zzXjuuCO4QSAPy+d3M= -github.com/TykTechnologies/storage v1.0.4 h1:Pb12Nyh1vQjkadOI4wpU/gQzE5LRNKkj1IZ80xVQjt8= -github.com/TykTechnologies/storage v1.0.4/go.mod h1:+0S3KuNlLGBTMTSFREuZFm315zzXjuuCO4QSAPy+d3M= github.com/TykTechnologies/storage v1.0.5 h1:lfMljPueySAW7Mpc70g1/qC5n2LKNcKgQs+Xw30apP8= github.com/TykTechnologies/storage v1.0.5/go.mod h1:+0S3KuNlLGBTMTSFREuZFm315zzXjuuCO4QSAPy+d3M= github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM= @@ -135,12 +119,10 @@ github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx2 github.com/buger/jsonparser v1.1.1 h1:2PnMjfWD7wBILjqQbt530v576A/cAbQvEW9gGIpYMUs= github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0= github.com/c-bata/go-prompt v0.2.2/go.mod h1:VzqtzE2ksDBcdln8G7mk2RX9QyGjH+OVqOCSiVIqS34= -github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/cenkalti/backoff/v4 v4.0.2 h1:JIufpQLbh4DkbQoii76ItQIUFzevQSqOLZca4eamEDs= github.com/cenkalti/backoff/v4 v4.0.2/go.mod h1:eEew/i+1Q6OrCDZh3WiXYv3+nJwBASZ8Bog/87DQnVg= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= @@ -358,7 +340,6 @@ github.com/influxdata/promql/v2 v2.12.0/go.mod h1:fxOPu+DY0bqCTCECchSRtWfc+0X19y github.com/influxdata/roaring v0.4.13-0.20180809181101-fc520f41fab6/go.mod h1:bSgUQ7q5ZLSO+bKBGqJiCBGAl+9DxyW63zLTujjUlOE= github.com/influxdata/tdigest v0.0.0-20181121200506-bf2b5ad3c0a9/go.mod h1:Js0mqiSBE6Ffsg94weZZ2c+v/ciT8QRHFOap7EKDrR0= github.com/influxdata/usage-client v0.0.0-20160829180054-6d3895376368/go.mod h1:Wbbw6tYNvwa5dlB6304Sd+82Z3f7PmVZHVKU637d4po= -github.com/jackc/chunkreader v1.0.0 h1:4s39bBR8ByfqH+DKm8rQA3E1LHZWB9XWcrz8fqaZbe0= github.com/jackc/chunkreader v1.0.0/go.mod h1:RT6O25fNZIuasFJRyZ4R/Y2BbhasbmZXF9QQ7T3kePo= github.com/jackc/chunkreader/v2 v2.0.0/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= github.com/jackc/chunkreader/v2 v2.0.1 h1:i+RDz65UE+mmpjTfyz0MoVTnzeYxroil2G82ki7MGG8= @@ -377,7 +358,6 @@ github.com/jackc/pgmock v0.0.0-20190831213851-13a1b77aafa2 h1:JVX6jT/XfzNqIjye47 
github.com/jackc/pgmock v0.0.0-20190831213851-13a1b77aafa2/go.mod h1:fGZlG77KXmcq05nJLRkk0+p82V8B8Dw8KN2/V9c/OAE= github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= -github.com/jackc/pgproto3 v1.1.0 h1:FYYE4yRw+AgI8wXIinMlNjBbp/UitDJwfj5LqqewP1A= github.com/jackc/pgproto3 v1.1.0/go.mod h1:eR5FA3leWg7p9aeAqi37XOTgTIbkABlvcPB3E5rlc78= github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190420180111-c116219b62db/go.mod h1:bhq50y+xrl9n5mRYyCBFKkpRVTLYJVWeCc+mEAI3yXA= github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190609003834-432c2951c711/go.mod h1:uH0AWtUmuShn0bcesswc4aBTWGvw0cAxIJp+6OB//Wg= diff --git a/repo-policy/historical-branches.auto.tfvars b/repo-policy/historical-branches.auto.tfvars new file mode 100644 index 000000000..786bc9d56 --- /dev/null +++ b/repo-policy/historical-branches.auto.tfvars @@ -0,0 +1,10 @@ +# This file contains the branches that are no longer active with respect to releng +# Branches here are required for the gpac bundle to work but it is not necessary to clutter the gromit +# config file or main.tf with these. +historical_branches = [ +{ branch = "release-1.7", + reviewers = "0", + convos = "false", + source_branch = "master", + required_tests = ["1.15","Go 1.16 tests"]} +] diff --git a/repo-policy/main.tf b/repo-policy/main.tf index 5d22e9127..25b8fd37b 100644 --- a/repo-policy/main.tf +++ b/repo-policy/main.tf @@ -12,7 +12,6 @@ terraform { required_providers { github = { source = "integrations/github" - version = "5.16.0" } } } @@ -21,6 +20,20 @@ provider "github" { owner = "TykTechnologies" } +# Copypasta from modules/github-repos/variables.tf +# FIXME: Unmodularise the github-repos module +variable "historical_branches" { + type = list(object({ + branch = string # Name of the branch + source_branch = optional(string) # Source of the branch, needed when creating it + reviewers = number # Min number of reviews needed + required_tests = list(string) # Workflows that need to pass before merging + convos = bool # Should conversations be resolved before merging + + })) + description = "List of branches managed by terraform" +} + module "tyk-pump" { source = "./modules/github-repos" repo = "tyk-pump" @@ -32,14 +45,15 @@ module "tyk-pump" { vulnerability_alerts = true squash_merge_commit_message = "COMMIT_MESSAGES" squash_merge_commit_title = "COMMIT_OR_PR_TITLE" - release_branches = [ + release_branches = concat(var.historical_branches, [ { branch = "master", - reviewers = "2", + reviewers = "1", convos = "false", - required_tests = ["1.16","Go 1.16 tests"]}, -{ branch = "release-1.7", + required_tests = ["1.19-bullseye","Go 1.19 tests"]}, +{ branch = "release-1.8", reviewers = "0", convos = "false", - required_tests = ["1.15","Go 1.16 tests"]}, -] -} \ No newline at end of file + source_branch = "master", + required_tests = ["1.16","Go 1.16 tests"]}, +]) +} From 6571f356191d64725857c34a3daf486256c15ba7 Mon Sep 17 00:00:00 2001 From: Esteban Ricardo Mirizio Date: Thu, 15 Jun 2023 16:35:12 -0300 Subject: [PATCH 084/102] add (#665) --- repo-policy/main.tf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/repo-policy/main.tf b/repo-policy/main.tf index 25b8fd37b..737861a4b 100644 --- a/repo-policy/main.tf +++ b/repo-policy/main.tf @@ -54,6 +54,6 @@ module "tyk-pump" { reviewers = "0", convos = "false", source_branch = "master", - required_tests = ["1.16","Go 1.16 tests"]}, + required_tests = ["1.19-bullseye","Go 1.19 tests"]}, ]) } From 
254bc0a90be4928c5133bda489f6c2a91e3299fb Mon Sep 17 00:00:00 2001 From: Matias <83959431+mativm02@users.noreply.github.com> Date: Fri, 16 Jun 2023 12:09:51 -0300 Subject: [PATCH 085/102] Fixing TestToUpperPumps flaky test (#658) * overriding pump type through env vars * removing unused pump name --- config_test.go | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/config_test.go b/config_test.go index afba56ec4..a4b1b0562 100644 --- a/config_test.go +++ b/config_test.go @@ -9,7 +9,7 @@ import ( ) func TestToUpperPumps(t *testing.T) { - pumpNames := []string{"test1", "test2", "TEST3", "Test4", "test3"} // index 4 must override index 2 + pumpNames := []string{"test1", "test2", "tEst3", "Test4"} initialConfig := &TykPumpConfiguration{ Pumps: make(map[string]PumpConfig), } @@ -17,17 +17,19 @@ func TestToUpperPumps(t *testing.T) { initialConfig.Pumps[pumpNames[1]] = PumpConfig{Type: "sql"} initialConfig.Pumps[pumpNames[2]] = PumpConfig{Type: "mongo-aggregate"} initialConfig.Pumps[pumpNames[3]] = PumpConfig{Type: "csv"} - initialConfig.Pumps[pumpNames[4]] = PumpConfig{Type: "sql-aggregate"} + os.Setenv(ENV_PREVIX+"_PUMPS_TEST3_TYPE", "sql-aggregate") + defer os.Unsetenv(ENV_PREVIX + "_PUMPS_TEST3_TYPE") defaultPath := "" LoadConfig(&defaultPath, initialConfig) - assert.Equal(t, len(pumpNames)-1, len(initialConfig.Pumps)) + assert.Equal(t, len(pumpNames), len(initialConfig.Pumps)) assert.Equal(t, initialConfig.Pumps[strings.ToUpper(pumpNames[0])].Type, "mongo") assert.Equal(t, initialConfig.Pumps[strings.ToUpper(pumpNames[1])].Type, "sql") assert.Equal(t, initialConfig.Pumps[strings.ToUpper(pumpNames[3])].Type, "csv") // Check if the pumps with lower case are empty (don't appear in the map) assert.Equal(t, initialConfig.Pumps[pumpNames[0]], PumpConfig{}) assert.Equal(t, initialConfig.Pumps[pumpNames[1]], PumpConfig{}) + assert.Equal(t, initialConfig.Pumps[pumpNames[3]], PumpConfig{}) // Checking if the index 4 overrides the index 2 (the original value was 'mongo') assert.Equal(t, initialConfig.Pumps[strings.ToUpper(pumpNames[2])].Type, "sql-aggregate") From 4b2b078206ba763578dfd3cb7616ad73e90f9ed6 Mon Sep 17 00:00:00 2001 From: Matias <83959431+mativm02@users.noreply.github.com> Date: Fri, 16 Jun 2023 12:19:05 -0300 Subject: [PATCH 086/102] Updating struct comments (#666) * Updating comments * adding comments to 'name' and 'type' --- config.go | 14 +++++++++----- pumps/mongo.go | 6 +++--- 2 files changed, 12 insertions(+), 8 deletions(-) diff --git a/config.go b/config.go index 492879fdf..c669b2e37 100644 --- a/config.go +++ b/config.go @@ -19,9 +19,14 @@ const PUMPS_ENV_PREFIX = pumps.PUMPS_ENV_PREFIX const PUMPS_ENV_META_PREFIX = pumps.PUMPS_ENV_META_PREFIX type PumpConfig struct { - // Deprecated. + // The name of the pump. This is used to identify the pump in the logs. + // Deprecated, use `type` instead. Name string `json:"name"` // Sets the pump type. This is needed when the pump key does not equal to the pump name type. + // Current valid types are: `mongo`, `mongo-pump-selective`, `mongo-pump-aggregate`, `csv`, + // `elasticsearch`, `influx`, `influx2`, `moesif`, `statsd`, `segment`, `graylog`, `splunk`, `hybrid`, `prometheus`, + // `logzio`, `dogstatsd`, `kafka`, `syslog`, `sql`, `sql_aggregate`, `stdout`, `timestream`, `mongo-graph`, + // `sql-graph`, `sql-graph-aggregate`, `resurfaceio`. 
Type string `json:"type"` // This feature adds a new configuration field in each pump called filters and its structure is // the following: @@ -90,18 +95,17 @@ type PumpConfig struct { // } // ``` MaxRecordSize int `json:"max_record_size"` - Meta map[string]interface{} `json:"meta"` // TODO: convert this to json.RawMessage and use regular json.Unmarshal + Meta map[string]interface{} `json:"meta"` // IgnoreFields defines a list of analytics fields that will be ignored when writing to the pump. // This can be used to avoid writing sensitive information to the Database, or data that you don't really need to have. // The field names must be the same as the JSON tags of the analytics record fields. // For example: `["api_key", "api_version"]`. IgnoreFields []string `json:"ignore_fields"` - // Setting this to True allows the Raw Request to be decoded from base 64 - // for all pumps. This is set to false by default. + // Setting this to true allows the Raw Request to be decoded from base 64 for all pumps. This is set to false by default. DecodeRawRequest bool `json:"raw_request_decoded"` - // Setting this to True allows the Raw Response to be decoded from base 64 for all pumps. This is set to False by default. + // Setting this to true allows the Raw Response to be decoded from base 64 for all pumps. This is set to false by default. DecodeRawResponse bool `json:"raw_response_decoded"` } diff --git a/pumps/mongo.go b/pumps/mongo.go index 52a49a133..0f8df5976 100644 --- a/pumps/mongo.go +++ b/pumps/mongo.go @@ -72,7 +72,7 @@ type BaseMongoConf struct { // Path to the PEM file which contains both client certificate and private key. This is // required for Mutual TLS. MongoSSLPEMKeyfile string `json:"mongo_ssl_pem_keyfile" mapstructure:"mongo_ssl_pem_keyfile"` - // Specifies the mongo DB Type. If it's 0, it means that you are using standard mongo db, if it's 1 it means you are using AWS Document DB, if it's 2, it means you are using CosmosDB. + // Specifies the mongo DB Type. If it's 0, it means that you are using standard mongo db. If it's 1 it means you are using AWS Document DB. If it's 2, it means you are using CosmosDB. // Defaults to Standard mongo (0). MongoDBType MongoType `json:"mongo_db_type" mapstructure:"mongo_db_type"` // Set to true to disable the default tyk index creation. @@ -80,7 +80,7 @@ type BaseMongoConf struct { // Set the consistency mode for the session, it defaults to `Strong`. The valid values are: strong, monotonic, eventual. MongoSessionConsistency string `json:"mongo_session_consistency" mapstructure:"mongo_session_consistency"` // MongoDriverType is the type of the driver (library) to use. The valid values are: “mongo-go” and “mgo”. - // Default to “mongo-go”. Check out this guide to [learn about different MongoDB drivers Tyk Pump support](https://github.com/TykTechnologies/tyk-pump#driver-type). + // Default to “mgo”. Check out this guide to [learn about MongoDB drivers supported by Tyk Pump](https://github.com/TykTechnologies/tyk-pump#driver-type). MongoDriverType string `json:"driver" mapstructure:"driver"` // MongoDirectConnection informs whether to establish connections only with the specified seed servers, // or to obtain information for the whole cluster and establish connections with further servers too. @@ -128,7 +128,7 @@ type MongoConf struct { // Specifies the mongo collection name. CollectionName string `json:"collection_name" mapstructure:"collection_name"` - // Maximum insert batch size for mongo selective pump. 
If the batch we are writing surpass this value, it will be send in multiple batchs. + // Maximum insert batch size for mongo selective pump. If the batch we are writing surpasses this value, it will be sent in multiple batches. // Defaults to 10Mb. MaxInsertBatchSizeBytes int `json:"max_insert_batch_size_bytes" mapstructure:"max_insert_batch_size_bytes"` // Maximum document size. If the document exceed this value, it will be skipped. From 332f7b61b69527bb3a991de63d796dbdca8cce7e Mon Sep 17 00:00:00 2001 From: Matias <83959431+mativm02@users.noreply.github.com> Date: Mon, 19 Jun 2023 11:07:44 -0300 Subject: [PATCH 087/102] Adding comment to 'meta' field (#668) * Updating comments * adding comments to 'name' and 'type' * adding comment to Meta field * changing IgnoreFields position * fixing False syntax error * fixing True syntax error --- config.go | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/config.go b/config.go index c669b2e37..0c9331824 100644 --- a/config.go +++ b/config.go @@ -94,17 +94,17 @@ type PumpConfig struct { // } // } // ``` - MaxRecordSize int `json:"max_record_size"` - Meta map[string]interface{} `json:"meta"` + MaxRecordSize int `json:"max_record_size"` // IgnoreFields defines a list of analytics fields that will be ignored when writing to the pump. // This can be used to avoid writing sensitive information to the Database, or data that you don't really need to have. // The field names must be the same as the JSON tags of the analytics record fields. // For example: `["api_key", "api_version"]`. IgnoreFields []string `json:"ignore_fields"` - + // Meta is a map of configuration values that are specific to each pump. For example, the + // `csv` pump requires a `csv_dir` value to be set, that need to be set in the `meta` map. + Meta map[string]interface{} `json:"meta"` // Setting this to true allows the Raw Request to be decoded from base 64 for all pumps. This is set to false by default. DecodeRawRequest bool `json:"raw_request_decoded"` - // Setting this to true allows the Raw Response to be decoded from base 64 for all pumps. This is set to false by default. DecodeRawResponse bool `json:"raw_response_decoded"` } @@ -235,11 +235,11 @@ type TykPumpConfiguration struct { // Enable debugging of Tyk Pump by exposing profiling information, the same as the gateway https://tyk.io/docs/troubleshooting/tyk-gateway/profiling/ HTTPProfile bool `json:"enable_http_profiler"` - // Setting this to True allows the Raw Request to be decoded from base 64 + // Setting this to true allows the Raw Request to be decoded from base 64 // for all pumps. This is set to false by default. DecodeRawRequest bool `json:"raw_request_decoded"` - // Setting this to True allows the Raw Response to be decoded from base 64 for all pumps. This is set to False by default. + // Setting this to true allows the Raw Response to be decoded from base 64 for all pumps. This is set to false by default. 
DecodeRawResponse bool `json:"raw_response_decoded"` } From ee66e7b27f0dd371e5560e1545c395dc8b2f20f8 Mon Sep 17 00:00:00 2001 From: Esteban Ricardo Mirizio Date: Tue, 4 Jul 2023 09:01:11 -0300 Subject: [PATCH 088/102] pin point goreleaser version (#677) --- .github/workflows/release.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 4856cbd62..8f0626d1b 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -104,10 +104,10 @@ jobs: *.txt.sig *.txt - - uses: goreleaser/goreleaser-action@v3 + - uses: goreleaser/goreleaser-action@v4 with: - version: latest - args: release --rm-dist -f ${{ matrix.goreleaser }} + version: 1.18.2 + args: release --clean -f ${{ matrix.goreleaser }} ${{ !startsWith(github.ref, 'refs/tags/') && ' --snapshot' || '' }} env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} CGO_ENABLED: 0 From c70cf3afae62699fc3c1eec618951b2114c11935 Mon Sep 17 00:00:00 2001 From: Sredny M Date: Tue, 18 Jul 2023 16:36:52 -0400 Subject: [PATCH 089/102] TT-9334 upgraded influx to v1.8.10 (#682) * upgraded influx to v1.8.10 * go mod tidy --- go.mod | 12 ++++++------ go.sum | 35 +++++++++++++++++++++++++++-------- 2 files changed, 33 insertions(+), 14 deletions(-) diff --git a/go.mod b/go.mod index a3a178518..81f086367 100644 --- a/go.mod +++ b/go.mod @@ -20,7 +20,7 @@ require ( github.com/golang/protobuf v1.5.2 github.com/google/go-cmp v0.5.9 github.com/gorilla/mux v1.8.0 - github.com/influxdata/influxdb v1.8.3 + github.com/influxdata/influxdb v1.8.10 github.com/influxdata/influxdb-client-go/v2 v2.6.0 github.com/kelseyhightower/envconfig v1.4.0 github.com/logzio/logzio-go v0.0.0-20200316143903-ac8fc0e2910e @@ -80,11 +80,11 @@ require ( github.com/go-ole/go-ole v1.2.4 // indirect github.com/go-sql-driver/mysql v1.5.0 // indirect github.com/golang/snappy v0.0.3 // indirect - github.com/google/uuid v1.1.2 // indirect + github.com/google/uuid v1.2.0 // indirect github.com/hashicorp/golang-lru v0.5.4 // indirect github.com/helloeave/json v1.15.3 // indirect - github.com/huandu/xstrings v1.2.1 // indirect - github.com/imdario/mergo v0.3.8 // indirect + github.com/huandu/xstrings v1.3.2 // indirect + github.com/imdario/mergo v0.3.12 // indirect github.com/influxdata/line-protocol v0.0.0-20200327222509-2487e7298839 // indirect github.com/jackc/chunkreader/v2 v2.0.1 // indirect github.com/jackc/pgconn v1.7.0 // indirect @@ -107,8 +107,8 @@ require ( github.com/mailru/easyjson v0.7.7 // indirect github.com/mattn/go-sqlite3 v1.14.3 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect - github.com/mitchellh/copystructure v1.0.0 // indirect - github.com/mitchellh/reflectwalk v1.0.0 // indirect + github.com/mitchellh/copystructure v1.1.1 // indirect + github.com/mitchellh/reflectwalk v1.0.1 // indirect github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe // indirect github.com/nats-io/nats.go v1.11.1-0.20210623165838-4b75fc59ae30 // indirect github.com/nats-io/nkeys v0.3.0 // indirect diff --git a/go.sum b/go.sum index d34a3920b..c40e33e45 100644 --- a/go.sum +++ b/go.sum @@ -30,6 +30,7 @@ github.com/DataDog/datadog-go v4.7.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3 github.com/Masterminds/goutils v1.1.0/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI= github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= 
+github.com/Masterminds/semver v1.4.2/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww= github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= github.com/Masterminds/sprig v2.22.0+incompatible h1:z4yfnGrZ7netVz+0EDJ0Wi+5VZCSYp4Z0m2dk6cEM60= @@ -108,6 +109,7 @@ github.com/beeker1121/goque v0.0.0-20170321141813-4044bc29b280 h1:ZgW7EEoTQvz27w github.com/beeker1121/goque v0.0.0-20170321141813-4044bc29b280/go.mod h1:L6dOWBhDOnxUVQsb0wkLve0VCnt2xJW/MI8pdRX4ANw= github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= +github.com/benbjohnson/tmpl v1.1.0/go.mod h1:N7W0NUGWuG26caFrID5sE4tvyLaKVp1fbV3Vr+MCul8= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= @@ -115,7 +117,6 @@ github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6r github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 h1:DDGfHa7BWjL4YnC6+E63dPcxHo2sUxDIu8g3QgEJdRY= github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= github.com/bmizerany/pat v0.0.0-20170815010413-6226ea591a40/go.mod h1:8rLXio+WjiTceGBHIoTvn60HIbs7Hm7bcHjyrSqYB9c= -github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps= github.com/buger/jsonparser v1.1.1 h1:2PnMjfWD7wBILjqQbt530v576A/cAbQvEW9gGIpYMUs= github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0= github.com/c-bata/go-prompt v0.2.2/go.mod h1:VzqtzE2ksDBcdln8G7mk2RX9QyGjH+OVqOCSiVIqS34= @@ -159,6 +160,7 @@ github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/deepmap/oapi-codegen v1.8.2 h1:SegyeYGcdi0jLLrpbCMoJxnUUn8GBXHsvr4rbzjuhfU= github.com/deepmap/oapi-codegen v1.8.2/go.mod h1:YLgSKSDv/bZQB7N4ws6luhozi3cEdRktEqrX88CvjIw= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dgrijalva/jwt-go/v4 v4.0.0-preview1/go.mod h1:+hnT3ywWDTAFrW5aE+u2Sa/wT555ZqwoCS+pk3p6ry4= github.com/dgryski/go-bitstream v0.0.0-20180413035011-3522498ce2c8/go.mod h1:VMaSuZ+SZcx/wljOQKvp5srsbCiKDEb6K2wC4+PiBmQ= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= @@ -166,6 +168,7 @@ github.com/dgryski/trifles v0.0.0-20200323201526-dd97f9abfb48/go.mod h1:if7Fbed8 github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= +github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/eapache/go-resiliency v1.2.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 h1:YEetp8/yCZMuEPMUDHG0CW/brkkEp8mzqk2+ODEitlw= github.com/eapache/go-xerial-snappy 
v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= @@ -241,6 +244,7 @@ github.com/gofrs/uuid v3.3.0+incompatible h1:8K4tyRfvU1CYPgJsveYFQMhpFd/wXNM7iK6 github.com/gofrs/uuid v3.3.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= github.com/golang/geo v0.0.0-20190916061304-5b978397cfec/go.mod h1:QZ0nwyI2jOfgRAoBvP+ab5aRr7c9x7lhGEJrKvBwjWI= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= @@ -297,8 +301,9 @@ github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.2.0 h1:qJYtXnJRWmpe7m/3XlyhrsLrEURqHRM2kxzoxXqyUDs= +github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= @@ -319,23 +324,26 @@ github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T github.com/helloeave/json v1.15.3 h1:roUxUEGhsSvhuhi80c4qmLiW633d5uf0mkzUGzBMfX8= github.com/helloeave/json v1.15.3/go.mod h1:uTHhuUsgnrpm9cc7Gi3tfIUwgf1dq/7+uLfpUFLBFEQ= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/huandu/xstrings v1.2.1 h1:v6IdmkCnDhJG/S0ivr58PeIfg+tyhqQYy4YsCsQ0Pdc= github.com/huandu/xstrings v1.2.1/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= +github.com/huandu/xstrings v1.3.2 h1:L18LIDzqlW6xN2rEkpdV8+oL/IXWJ1APd+vsdYy4Wdw= +github.com/huandu/xstrings v1.3.2/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/imdario/mergo v0.3.8 h1:CGgOkSJeqMRmt0D9XLWExdT4m4F1vd3FV3VPt+0VxkQ= github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU= +github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/influxdata/flux v0.65.1/go.mod h1:J754/zds0vvpfwuq7Gc2wRdVwEodfpCFM7mYlOw2LqY= -github.com/influxdata/influxdb v1.8.3 
h1:WEypI1BQFTT4teLM+1qkEcvUi0dAvopAI/ir0vAiBg8= -github.com/influxdata/influxdb v1.8.3/go.mod h1:JugdFhsvvI8gadxOI6noqNeeBHvWNTbfYGtiAn+2jhI= +github.com/influxdata/influxdb v1.8.10 h1:a4wLNWRE9ooQnq0jCfKtowX1SWtQsxYp4hW3QHEXaTA= +github.com/influxdata/influxdb v1.8.10/go.mod h1:X3tAnsLazhWHxc4fsAkyMMd/pNhvzxiafq4VVE9y/bY= github.com/influxdata/influxdb-client-go/v2 v2.6.0 h1:bIOaGTgvvv1Na2hG+nIvqyv7PK2UiU2WrJN1ck1ykyM= github.com/influxdata/influxdb-client-go/v2 v2.6.0/go.mod h1:Y/0W1+TZir7ypoQZYd2IrnVOKB3Tq6oegAQeSVN/+EU= github.com/influxdata/influxql v1.1.1-0.20200828144457-65d3ef77d385/go.mod h1:gHp9y86a/pxhjJ+zMjNXiQAA197Xk9wLxaz+fGG+kWk= github.com/influxdata/line-protocol v0.0.0-20180522152040-32c6aa80de5e/go.mod h1:4kt73NQhadE3daL3WhR5EJ/J2ocX0PZzwxQ0gXJ7oFE= github.com/influxdata/line-protocol v0.0.0-20200327222509-2487e7298839 h1:W9WBk7wlPfJLvMCdtV4zPulc4uCPrlywQOmbFOhgQNU= github.com/influxdata/line-protocol v0.0.0-20200327222509-2487e7298839/go.mod h1:xaLFMmpvUxqXtVkUJfg9QmT88cDaCJ3ZKgdZ78oO8Qo= +github.com/influxdata/pkg-config v0.2.8/go.mod h1:EMS7Ll0S4qkzDk53XS3Z72/egBsPInt+BeRxb0WeSwk= github.com/influxdata/promql/v2 v2.12.0/go.mod h1:fxOPu+DY0bqCTCECchSRtWfc+0X19ybifQhZoQNF5D8= github.com/influxdata/roaring v0.4.13-0.20180809181101-fc520f41fab6/go.mod h1:bSgUQ7q5ZLSO+bKBGqJiCBGAl+9DxyW63zLTujjUlOE= github.com/influxdata/tdigest v0.0.0-20181121200506-bf2b5ad3c0a9/go.mod h1:Js0mqiSBE6Ffsg94weZZ2c+v/ciT8QRHFOap7EKDrR0= @@ -436,6 +444,7 @@ github.com/kelseyhightower/envconfig v1.4.0 h1:Im6hONhd3pLkfDFsbRgu68RDNkGF1r3dv github.com/kelseyhightower/envconfig v1.4.0/go.mod h1:cccZRl6mQpaq41TPp5QxidR+Sa3axMbJDNb//FQX6Gg= github.com/kevinmbeaulieu/eq-go v1.0.0/go.mod h1:G3S8ajA56gKBZm4UB9AOyoOS37JO3roToPzKNM8dtdM= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.4.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= github.com/klauspost/compress v1.9.8/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= @@ -509,14 +518,16 @@ github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0j github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/minio/highwayhash v1.0.1 h1:dZ6IIu8Z14VlC0VpfKofAhCy74wu/Qb5gcn52yWoz/0= github.com/minio/highwayhash v1.0.1/go.mod h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLTk+kldvVxY= -github.com/mitchellh/copystructure v1.0.0 h1:Laisrj+bAB6b/yJwB5Bt3ITZhGJdqmxquMKeZ+mmkFQ= github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= +github.com/mitchellh/copystructure v1.1.1 h1:Bp6x9R1Wn16SIz3OfeDr0b7RnCG2OB66Y7PQyC/cvq4= +github.com/mitchellh/copystructure v1.1.1/go.mod h1:EBArHfARyrSWO/+Wyr9zwEkc6XMFB9XyNgFNmRkZZU4= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.3.1 h1:cCBH2gTD2K0OtLlv/Y5H01VQCqmlDxz30kS5Y5bqfLA= github.com/mitchellh/mapstructure v1.3.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/mitchellh/reflectwalk v1.0.0 h1:9D+8oIskB4VJBN5SFlmc27fSlIBZaov1Wpk/IfikLNY= github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= 
+github.com/mitchellh/reflectwalk v1.0.1 h1:FVzMWA5RllMAKIdUSC8mdWo3XtwoecrH79BY70sEEpE= +github.com/mitchellh/reflectwalk v1.0.1/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/moby/sys/mountinfo v0.5.0/go.mod h1:3bMD3Rg+zkqx8MRYPi7Pyb0Ie97QEBmdxbhnCLlSvSU= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= @@ -749,6 +760,7 @@ github.com/xtgo/uuid v0.0.0-20140804021211-a0b114877d4c h1:3lbZUMbMiGUW/LMkfsEAB github.com/xtgo/uuid v0.0.0-20140804021211-a0b114877d4c/go.mod h1:UrdRz5enIKZ63MEE3IF9l2/ebyx59GyGgPi+tICQdmM= github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d h1:splanxYIlg+5LfHAM6xpdFEAYOk8iySO56hMFq6uLyA= github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= @@ -765,6 +777,7 @@ go.opentelemetry.io/otel v0.13.0/go.mod h1:dlSNewoRYikTkotEnxdmuBHgzT+k/idJSfDv/ go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/atomic v1.5.1/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= @@ -773,6 +786,7 @@ go.uber.org/goleak v1.1.10 h1:z+mqJhf6ss6BSfSM671tgKyZBFPTTJM+HLxnhPC3wu0= go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= +go.uber.org/multierr v1.4.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= @@ -799,6 +813,7 @@ golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20201112155050-0c6587e931a9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210314154223-e6e6c4f2bb5b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d h1:sK3txAijHtOK88l68nt020reeT1ZdKLIYetKl95FzVY= @@ -830,6 +845,7 @@ golang.org/x/mobile 
v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCc golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= @@ -852,6 +868,7 @@ golang.org/x/net v0.0.0-20191116160921-f9c825593386/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= @@ -991,7 +1008,9 @@ golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200108203644-89082a384178/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E= golang.org/x/tools v0.1.12 h1:VveCTK38A2rkS8ZqFY25HIDFscX5X9OoEhJd3quQmXU= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= From af48e86edef933d4497c054a2be917236ce84466 Mon Sep 17 00:00:00 2001 From: Sredny M Date: Fri, 21 Jul 2023 09:16:38 -0400 Subject: [PATCH 090/102] TT-9490 change name from APISchema to ApiSchema (#681) * change name from APISchema to ApiSchema --- analytics/aggregate_test.go | 4 ++-- analytics/analytics.go | 4 ++-- analytics/analytics_test.go | 2 +- analytics/graph_record.go | 2 +- analytics/graph_record_test.go | 6 +++--- go.mod | 6 +++--- go.sum | 10 ++++++---- pumps/graph_mongo.go | 2 +- pumps/graph_mongo_test.go | 2 +- pumps/graph_sql_aggregate_test.go | 4 ++-- pumps/graph_sql_test.go | 4 ++-- pumps/mongo_test.go | 6 +++--- serializer/protobuf.go | 4 ++-- 13 files changed, 29 insertions(+), 27 deletions(-) diff --git a/analytics/aggregate_test.go b/analytics/aggregate_test.go index b97ee099d..cd99c8d1f 100644 --- a/analytics/aggregate_test.go +++ b/analytics/aggregate_test.go @@ -98,7 +98,7 @@ func TestAggregateGraphData(t *testing.T) { RawPath: 
"/", APIName: "test-api", APIID: "test-api", - APISchema: base64.StdEncoding.EncodeToString([]byte(sampleSchema)), + ApiSchema: base64.StdEncoding.EncodeToString([]byte(sampleSchema)), Tags: []string{PredefinedTagGraphAnalytics}, ResponseCode: 200, Day: 1, @@ -285,7 +285,7 @@ func TestAggregateGraphData_Dimension(t *testing.T) { RawPath: "/", APIName: "test-api", APIID: "test-api", - APISchema: base64.StdEncoding.EncodeToString([]byte(sampleSchema)), + ApiSchema: base64.StdEncoding.EncodeToString([]byte(sampleSchema)), Tags: []string{PredefinedTagGraphAnalytics}, ResponseCode: 200, Day: 1, diff --git a/analytics/analytics.go b/analytics/analytics.go index 403006be7..bc8029668 100644 --- a/analytics/analytics.go +++ b/analytics/analytics.go @@ -72,7 +72,7 @@ type AnalyticsRecord struct { Alias string `json:"alias"` TrackPath bool `json:"track_path" gorm:"column:trackpath"` ExpireAt time.Time `bson:"expireAt" json:"expireAt"` - APISchema string `json:"api_schema" bson:"-" gorm:"-:all"` + ApiSchema string `json:"api_schema" bson:"-" gorm:"-:all"` //nolint CollectionName string `json:"-" bson:"-" gorm:"-:all"` } @@ -237,7 +237,7 @@ func (a *AnalyticsRecord) GetLineValues() []string { fields = append(fields, a.Alias) fields = append(fields, strconv.FormatBool(a.TrackPath)) fields = append(fields, a.ExpireAt.String()) - fields = append(fields, a.APISchema) + fields = append(fields, a.ApiSchema) return fields } diff --git a/analytics/analytics_test.go b/analytics/analytics_test.go index 380a59e61..49b40da56 100644 --- a/analytics/analytics_test.go +++ b/analytics/analytics_test.go @@ -161,7 +161,7 @@ func TestAnalyticsRecord_GetLineValues(t *testing.T) { APIVersion: "v1", APIName: "api_name", TimeStamp: time.Now(), - APISchema: "http", + ApiSchema: "http", } fields := rec.GetLineValues() diff --git a/analytics/graph_record.go b/analytics/graph_record.go index 1b6706853..a867e7660 100644 --- a/analytics/graph_record.go +++ b/analytics/graph_record.go @@ -171,7 +171,7 @@ func (a *AnalyticsRecord) ToGraphRecord() GraphRecord { record.HasErrors = true } - record.parseRequest(a.RawRequest, a.APISchema) + record.parseRequest(a.RawRequest, a.ApiSchema) record.parseResponse(a.RawResponse) diff --git a/analytics/graph_record_test.go b/analytics/graph_record_test.go index 57cbb2b26..8b851226e 100644 --- a/analytics/graph_record_test.go +++ b/analytics/graph_record_test.go @@ -129,7 +129,7 @@ func TestAnalyticsRecord_ToGraphRecord(t *testing.T) { RawPath: "/", APIName: "test-api", APIID: "test-api", - APISchema: base64.StdEncoding.EncodeToString([]byte(sampleSchema)), + ApiSchema: base64.StdEncoding.EncodeToString([]byte(sampleSchema)), ResponseCode: 200, Day: 1, Month: 1, @@ -195,7 +195,7 @@ func TestAnalyticsRecord_ToGraphRecord(t *testing.T) { return g }, modifyRecord: func(a AnalyticsRecord) AnalyticsRecord { - a.APISchema = base64.StdEncoding.EncodeToString([]byte(subgraphSchema)) + a.ApiSchema = base64.StdEncoding.EncodeToString([]byte(subgraphSchema)) return a }, }, @@ -381,7 +381,7 @@ func TestAnalyticsRecord_ToGraphRecord(t *testing.T) { request: `{"query":"query main {\ncharacters {\ninfo\n}\n}\n\nquery second {\nlistCharacters{\ninfo\n}\n}","variables":null,"operationName":""}`, response: `{"errors":[{"message":"no operation specified"}]}`, modifyRecord: func(a AnalyticsRecord) AnalyticsRecord { - a.APISchema = "this isn't a base64 is it?" + a.ApiSchema = "this isn't a base64 is it?" 
return a }, expected: func() GraphRecord { diff --git a/go.mod b/go.mod index 81f086367..17f003a61 100644 --- a/go.mod +++ b/go.mod @@ -27,7 +27,7 @@ require ( github.com/mitchellh/mapstructure v1.3.1 github.com/moesif/moesifapi-go v1.0.6 github.com/olivere/elastic/v7 v7.0.28 - github.com/oschwald/maxminddb-golang v1.5.0 + github.com/oschwald/maxminddb-golang v1.11.0 github.com/pkg/errors v0.9.1 github.com/prometheus/client_golang v1.11.0 github.com/quipo/statsd v0.0.0-20160923160612-75b7afedf0d2 @@ -36,7 +36,7 @@ require ( github.com/segmentio/analytics-go v0.0.0-20160711225931-bdb0aeca8a99 github.com/segmentio/kafka-go v0.3.6 github.com/sirupsen/logrus v1.8.1 - github.com/stretchr/testify v1.8.1 + github.com/stretchr/testify v1.8.4 golang.org/x/net v0.0.0-20220722155237-a158d28d115b google.golang.org/protobuf v1.28.1 gopkg.in/alecthomas/kingpin.v2 v2.2.6 @@ -142,7 +142,7 @@ require ( go.uber.org/zap v1.18.1 // indirect golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d // indirect golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 // indirect - golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab // indirect + golang.org/x/sys v0.9.0 // indirect golang.org/x/text v0.3.7 // indirect golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect google.golang.org/appengine v1.6.5 // indirect diff --git a/go.sum b/go.sum index c40e33e45..66a9e7d31 100644 --- a/go.sum +++ b/go.sum @@ -591,8 +591,8 @@ github.com/opentracing/opentracing-go v1.0.3-0.20180606204148-bd9c31933947/go.mo github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= github.com/ory/dockertest v3.3.5+incompatible/go.mod h1:1vX4m9wsvi00u5bseYwXaSnhNrne+V0E6LAcBILJdPs= -github.com/oschwald/maxminddb-golang v1.5.0 h1:rmyoIV6z2/s9TCJedUuDiKht2RN12LWJ1L7iRGtWY64= -github.com/oschwald/maxminddb-golang v1.5.0/go.mod h1:3jhIUymTJ5VREKyIhWm66LJiQt04F0UCDdodShpjWsY= +github.com/oschwald/maxminddb-golang v1.11.0 h1:aSXMqYR/EPNjGE8epgqwDay+P30hCBZIveY0WZbAWh0= +github.com/oschwald/maxminddb-golang v1.11.0/go.mod h1:YmVI+H0zh3ySFR3w+oz8PCfglAFj3PuCmui13+P9zDg= github.com/paulbellamy/ratecounter v0.2.0/go.mod h1:Hfx1hDpSGoqxkVVpBi/IlYD7kChlfo5C6hzIHwPqfFE= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/pelletier/go-toml v1.6.0/go.mod h1:5N711Q9dKgbdkxHL+MEfF31hpT7l0S0s/t2kKREewys= @@ -707,8 +707,9 @@ github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/syndtr/goleveldb v0.0.0-20190318030020-c3a204f8e965 h1:V/AztY/q2oW5ghho7YMgUJQkKvSACHRxpeDyT5DxpIo= github.com/syndtr/goleveldb v0.0.0-20190318030020-c3a204f8e965/go.mod h1:9OrXJhf154huy1nPWmuSrkgjPUtUNhA+Zmy+6AESzuA= @@ 
-956,8 +957,9 @@ golang.org/x/sys v0.0.0-20220405210540-1e041c57c461/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220422013727-9388b58f7150/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab h1:2QkjZIsXupsJbJIdSjjUOgWK3aEtzyuh2mPt3l/CkeU= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.9.0 h1:KS/R3tvhPqvJvwcKfnBHJwwthS11LRhmM5D59eEXa0s= +golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= diff --git a/pumps/graph_mongo.go b/pumps/graph_mongo.go index 7b1ab42df..a22d7fcc1 100644 --- a/pumps/graph_mongo.go +++ b/pumps/graph_mongo.go @@ -116,7 +116,7 @@ func (g *GraphMongoPump) WriteData(ctx context.Context, data []interface{}) erro gr analytics.GraphRecord err error ) - if r.RawRequest == "" || r.RawResponse == "" || r.APISchema == "" { + if r.RawRequest == "" || r.RawResponse == "" || r.ApiSchema == "" { g.log.Warn("skipping record parsing") gr = analytics.GraphRecord{AnalyticsRecord: *r} } else { diff --git a/pumps/graph_mongo_test.go b/pumps/graph_mongo_test.go index 30105ad54..4e6bd752a 100644 --- a/pumps/graph_mongo_test.go +++ b/pumps/graph_mongo_test.go @@ -307,7 +307,7 @@ func TestGraphMongoPump_WriteData(t *testing.T) { Path: "POST", RawRequest: base64.StdEncoding.EncodeToString([]byte(cr.rawRequest)), RawResponse: base64.StdEncoding.EncodeToString([]byte(cr.rawResponse)), - APISchema: base64.StdEncoding.EncodeToString([]byte(cr.schema)), + ApiSchema: base64.StdEncoding.EncodeToString([]byte(cr.schema)), Tags: cr.tags, } if cr.responseCode != 0 { diff --git a/pumps/graph_sql_aggregate_test.go b/pumps/graph_sql_aggregate_test.go index 55e0a3cfd..7ebfd4d2d 100644 --- a/pumps/graph_sql_aggregate_test.go +++ b/pumps/graph_sql_aggregate_test.go @@ -187,7 +187,7 @@ func TestSqlGraphAggregatePump_WriteData(t *testing.T) { RawPath: "/", APIName: "test-api", APIID: "test-api", - APISchema: base64.StdEncoding.EncodeToString([]byte(sampleSchema)), + ApiSchema: base64.StdEncoding.EncodeToString([]byte(sampleSchema)), Tags: []string{analytics.PredefinedTagGraphAnalytics}, ResponseCode: 200, Day: 1, @@ -456,7 +456,7 @@ func TestGraphSQLAggregatePump_WriteData_Sharded(t *testing.T) { RawPath: "/", APIName: "test-api", APIID: "test-api", - APISchema: base64.StdEncoding.EncodeToString([]byte(sampleSchema)), + ApiSchema: base64.StdEncoding.EncodeToString([]byte(sampleSchema)), Tags: []string{analytics.PredefinedTagGraphAnalytics}, ResponseCode: 200, Day: 1, diff --git a/pumps/graph_sql_test.go b/pumps/graph_sql_test.go index 04b9d5d6f..6e4eb4ff8 100644 --- a/pumps/graph_sql_test.go +++ b/pumps/graph_sql_test.go @@ -253,7 +253,7 @@ func TestGraphSQLPump_WriteData(t *testing.T) { } if !item.isHTTP { r.RawRequest = convToBase64(rawGQLRequest) - r.APISchema = convToBase64(schema) + r.ApiSchema = convToBase64(schema) } else { r.RawRequest = convToBase64(rawHTTPReq) r.RawResponse = 
convToBase64(rawHTTPResponse) @@ -323,7 +323,7 @@ func TestGraphSQLPump_Sharded(t *testing.T) { Path: "/test-api", RawRequest: convToBase64(rawGQLRequest), RawResponse: convToBase64(rawGQLResponse), - APISchema: convToBase64(schema), + ApiSchema: convToBase64(schema), Tags: []string{analytics.PredefinedTagGraphAnalytics}, APIName: "test-api", ResponseCode: 200, diff --git a/pumps/mongo_test.go b/pumps/mongo_test.go index 1d1d19008..e8fdc16f2 100644 --- a/pumps/mongo_test.go +++ b/pumps/mongo_test.go @@ -387,7 +387,7 @@ func TestMongoPump_AccumulateSetIgnoreDocSize(t *testing.T) { record.Tags = []string{analytics.PredefinedTagGraphAnalytics} record.RawRequest = bloat record.RawResponse = bloat - record.APISchema = bloat + record.ApiSchema = bloat } dataSet[i] = record } @@ -621,7 +621,7 @@ func TestMongoPump_WriteData(t *testing.T) { // ensure the length and content are the same assert.Equal(t, len(data), len(results)) - if diff := cmp.Diff(data, results, cmpopts.IgnoreFields(analytics.AnalyticsRecord{}, "id", "APISchema")); diff != "" { + if diff := cmp.Diff(data, results, cmpopts.IgnoreFields(analytics.AnalyticsRecord{}, "id", "ApiSchema")); diff != "" { t.Error(diff) } } @@ -642,7 +642,7 @@ func TestMongoPump_WriteData(t *testing.T) { if i%2 == 0 { record.RawRequest = rawGQLRequest record.RawResponse = rawGQLResponse - record.APISchema = schema + record.ApiSchema = schema record.Tags = []string{analytics.PredefinedTagGraphAnalytics} } records[i] = record diff --git a/serializer/protobuf.go b/serializer/protobuf.go index 3e830c815..a90e2ae45 100644 --- a/serializer/protobuf.go +++ b/serializer/protobuf.go @@ -85,7 +85,7 @@ func (pb *ProtobufSerializer) TransformSingleRecordToProto(rec analytics.Analyti Alias: rec.Alias, TrackPath: rec.TrackPath, OauthID: rec.OauthID, - ApiSchema: rec.APISchema, + ApiSchema: rec.ApiSchema, } rec.TimestampToProto(&record) @@ -143,7 +143,7 @@ func (pb *ProtobufSerializer) TransformSingleProtoToAnalyticsRecord(rec analytic Tags: rec.Tags, Alias: rec.Alias, TrackPath: rec.TrackPath, - APISchema: rec.ApiSchema, + ApiSchema: rec.ApiSchema, } tmpRecord.TimeStampFromProto(rec) *record = tmpRecord From 1a6d80bde0f4225efb24120f93e477589bf964be Mon Sep 17 00:00:00 2001 From: Sredny M Date: Tue, 25 Jul 2023 20:06:02 -0400 Subject: [PATCH 091/102] update regex to blur mongo credentials (#686) --- pumps/mongo.go | 6 +++--- pumps/mongo_test.go | 5 +++++ 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/pumps/mongo.go b/pumps/mongo.go index 0f8df5976..479b087e9 100644 --- a/pumps/mongo.go +++ b/pumps/mongo.go @@ -112,9 +112,9 @@ func createDBObject(tableName string) dbObject { } func (b *BaseMongoConf) GetBlurredURL() string { - // mongo uri match with regex ^(mongodb:(?:\/{2})?)((\w+?):(\w+?)@|:?@?)(\S+?):(\d+)(\/(\S+?))?(\?replicaSet=(\S+?))?$ - // but we need only a segment, so regex explanation: https://regex101.com/r/8Uzwtw/1 - regex := `^(mongodb:(?:\/{2})?)((...+?):(...+?)@)` + // mongo uri match with regex ^(mongodb\S*(+srv)*:(?:\/{2})?)((\w+?):(\w+?)@|:?@?)(\S+?):(\d+)(\/(\S+?))?(\?replicaSet=(\S+?))?$ + // but we need only a segment, so regex explanation: https://regex101.com/r/C4GQvi/1 + regex := `^(mongodb\S*(srv)*:(?:\/{2})?)((...+?):(...+?)@)` re := regexp.MustCompile(regex) blurredUrl := re.ReplaceAllString(b.MongoURL, "***:***@") diff --git a/pumps/mongo_test.go b/pumps/mongo_test.go index e8fdc16f2..00a63a452 100644 --- a/pumps/mongo_test.go +++ b/pumps/mongo_test.go @@ -459,6 +459,11 @@ func TestGetBlurredURL(t *testing.T) { givenURL: 
"mongodb://UserName:Password@sample-cluster-instance.cluster-corlsfccjozr.us-east-1.docdb.amazonaws.com:27017?replicaSet=rs0&ssl_ca_certs=rds-combined-ca-bundle.pem", expectedBlurredURL: "***:***@sample-cluster-instance.cluster-corlsfccjozr.us-east-1.docdb.amazonaws.com:27017?replicaSet=rs0&ssl_ca_certs=rds-combined-ca-bundle.pem", }, + { + testName: "DNS seed list connection", + givenURL: "mongodb+srv://admin:pass@server.example.com/?connectTimeoutMS=300000", + expectedBlurredURL: "***:***@server.example.com/?connectTimeoutMS=300000", + }, } for _, tc := range tcs { From 9cfce6e6d2a0f3109953ddd3bff444b9e9cb7bde Mon Sep 17 00:00:00 2001 From: Sredny M Date: Tue, 25 Jul 2023 20:47:22 -0400 Subject: [PATCH 092/102] updated prometheus to 1.16 (#688) --- go.mod | 27 ++++++++++----------- go.sum | 74 ++++++++++++++++++++++++---------------------------------- 2 files changed, 45 insertions(+), 56 deletions(-) diff --git a/go.mod b/go.mod index 17f003a61..6653bb6b9 100644 --- a/go.mod +++ b/go.mod @@ -17,7 +17,7 @@ require ( github.com/go-redis/redis/v8 v8.3.1 github.com/gocraft/health v0.0.0-20170925182251-8675af27fef0 github.com/gofrs/uuid v3.3.0+incompatible - github.com/golang/protobuf v1.5.2 + github.com/golang/protobuf v1.5.3 github.com/google/go-cmp v0.5.9 github.com/gorilla/mux v1.8.0 github.com/influxdata/influxdb v1.8.10 @@ -29,7 +29,7 @@ require ( github.com/olivere/elastic/v7 v7.0.28 github.com/oschwald/maxminddb-golang v1.11.0 github.com/pkg/errors v0.9.1 - github.com/prometheus/client_golang v1.11.0 + github.com/prometheus/client_golang v1.16.0 github.com/quipo/statsd v0.0.0-20160923160612-75b7afedf0d2 github.com/resurfaceio/logger-go/v3 v3.2.1 github.com/robertkowalski/graylog-golang v0.0.0-20151121031040-e5295cfa2827 @@ -37,8 +37,8 @@ require ( github.com/segmentio/kafka-go v0.3.6 github.com/sirupsen/logrus v1.8.1 github.com/stretchr/testify v1.8.4 - golang.org/x/net v0.0.0-20220722155237-a158d28d115b - google.golang.org/protobuf v1.28.1 + golang.org/x/net v0.7.0 + google.golang.org/protobuf v1.30.0 gopkg.in/alecthomas/kingpin.v2 v2.2.6 gopkg.in/olivere/elastic.v3 v3.0.56 gopkg.in/olivere/elastic.v5 v5.0.85 @@ -57,7 +57,7 @@ require ( github.com/Microsoft/go-winio v0.5.2 // indirect github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d // indirect github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 // indirect - github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d // indirect + github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 // indirect github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d // indirect github.com/aws/aws-sdk-go-v2/credentials v1.5.0 // indirect github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.7.0 // indirect @@ -72,7 +72,7 @@ require ( github.com/beeker1121/goque v0.0.0-20170321141813-4044bc29b280 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 // indirect - github.com/cespare/xxhash/v2 v2.1.2 // indirect + github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/deepmap/oapi-codegen v1.8.2 // indirect github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect @@ -106,7 +106,7 @@ require ( github.com/lintianzhi/graylogd v0.0.0-20180503131252-dc68342f04dc // indirect github.com/mailru/easyjson v0.7.7 // indirect github.com/mattn/go-sqlite3 v1.14.3 // indirect - github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect + 
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect github.com/mitchellh/copystructure v1.1.1 // indirect github.com/mitchellh/reflectwalk v1.0.1 // indirect github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe // indirect @@ -115,12 +115,13 @@ require ( github.com/nats-io/nuid v1.0.1 // indirect github.com/olivere/elastic v6.2.31+incompatible // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/prometheus/client_model v0.2.0 // indirect - github.com/prometheus/common v0.26.0 // indirect - github.com/prometheus/procfs v0.6.0 // indirect + github.com/prometheus/client_model v0.3.0 // indirect + github.com/prometheus/common v0.42.0 // indirect + github.com/prometheus/procfs v0.10.1 // indirect github.com/qri-io/jsonpointer v0.1.1 // indirect github.com/qri-io/jsonschema v0.2.1 // indirect github.com/r3labs/sse/v2 v2.8.1 // indirect + github.com/rogpeppe/go-internal v1.11.0 // indirect github.com/segmentio/backo-go v0.0.0-20160424052352-204274ad699c // indirect github.com/shirou/gopsutil v3.20.11+incompatible // indirect github.com/syndtr/goleveldb v0.0.0-20190318030020-c3a204f8e965 // indirect @@ -141,11 +142,11 @@ require ( go.uber.org/multierr v1.6.0 // indirect go.uber.org/zap v1.18.1 // indirect golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d // indirect - golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 // indirect + golang.org/x/sync v0.2.0 // indirect golang.org/x/sys v0.9.0 // indirect - golang.org/x/text v0.3.7 // indirect + golang.org/x/text v0.7.0 // indirect golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect - google.golang.org/appengine v1.6.5 // indirect + google.golang.org/appengine v1.6.7 // indirect gopkg.in/cenkalti/backoff.v1 v1.1.0 // indirect gopkg.in/mgo.v2 v2.0.0-20190816093944-a6b53ec6cb22 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect diff --git a/go.sum b/go.sum index 66a9e7d31..0bb908c71 100644 --- a/go.sum +++ b/go.sum @@ -61,9 +61,8 @@ github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuy github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 h1:JYp7IbQjafoB+tBA3gMyHYHrpOtNuDiK/uB5uXxq5wM= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d h1:UQZhZ2O0vMHr2cI+DC1Mbh0TJxzA3RcLoMsFw+aXw7E= -github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= +github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 h1:s6gZFSlWYmbqAuRjVTiNNhvNRfY2Wxp9nhfyel4rklc= +github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= github.com/apache/arrow/go/arrow v0.0.0-20191024131854-af6fa24be0db/go.mod h1:VTxUBvSJ3s3eHAg65PNgrsn5BtqCRPdmyXh6rAfdxN0= github.com/arbovm/levenshtein v0.0.0-20160628152529-48b4e1c0c4d0/go.mod h1:t2tdKJDJF9BV14lnkjHmOQgcvEKgtqs5a1N3LNdJhGE= @@ -126,8 +125,9 @@ github.com/cenkalti/backoff/v4 v4.0.2/go.mod h1:eEew/i+1Q6OrCDZh3WiXYv3+nJwBASZ8 github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod 
h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= +github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/checkpoint-restore/go-criu/v5 v5.3.0/go.mod h1:E/eQpaFtUKGOOSEBZgmKAcn+zUUwWxqcaKZlF54wK8E= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= @@ -202,11 +202,8 @@ github.com/go-chi/chi/v5 v5.0.0/go.mod h1:BBug9lr0cqtdAhsu6R4AAdvufI0/XBzAQSsUqJ github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= -github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-ole/go-ole v1.2.4 h1:nNBDSCOigTSiarFpYE9J/KtEA1IOW4CNeqT9TQDqCxI= github.com/go-ole/go-ole v1.2.4/go.mod h1:XCwSNxSkXRo4vlyPy93sltvi/qJq0jqQhjqQNIwKuxM= github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= @@ -270,8 +267,9 @@ github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QD github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= +github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.3 h1:fHPg5GQYlCeLIPB9BZqMVR5nR9A+IM5zcgeTdjMYmLA= @@ -426,18 +424,14 @@ github.com/joho/godotenv v1.4.0 h1:3l4+N6zfMWnkbPEXKng2o2/MR5mSwTrBih4ZEkkz1lg= github.com/joho/godotenv v1.4.0/go.mod h1:f4LDr5Voq0i2e/R5DDNOoa2zzDfwtkZa6DnEwAbqwq4= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= -github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= 
github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.11 h1:uVUAXhF2To8cbw/3xN3pxj6kk7TYKs98NIrTqPlMWAQ= -github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/jsternberg/zap-logfmt v1.0.0/go.mod h1:uvPs/4X51zdkcm5jXl5SYoN+4RK21K8mysFmDaM/h+o= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= github.com/jwilder/encoding v0.0.0-20170811194829-b4e1701a28ef/go.mod h1:Ct9fl0F6iIOGgxJ5npU/IUOhOhqlVrGjyIZc8/MagT0= github.com/kelseyhightower/envconfig v1.4.0 h1:Im6hONhd3pLkfDFsbRgu68RDNkGF1r3dvMUtDTo2cv8= @@ -458,12 +452,11 @@ github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6/go.mod h1:+ZoRqAPR github.com/klauspost/pgzip v1.0.2-0.20170402124221-0bf5dcad4ada/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= @@ -514,8 +507,9 @@ github.com/mattn/go-sqlite3 v1.11.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsO github.com/mattn/go-sqlite3 v1.14.3 h1:j7a/xn1U6TKA/PHHxqZuzh64CdtRc7rU9M+AvkOl5bA= github.com/mattn/go-sqlite3 v1.14.3/go.mod h1:WVKg1VTActs4Qso6iwGbiFih2UIHo0ENGwNd0Lj+XmI= github.com/mattn/go-tty v0.0.0-20180907095812-13ff1204f104/go.mod h1:XPvLUNfbS4fJH25nqRHfWLMa1ONC8Amw+mIA639KxkE= -github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= +github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= 
github.com/minio/highwayhash v1.0.1 h1:dZ6IIu8Z14VlC0VpfKofAhCy74wu/Qb5gcn52yWoz/0= github.com/minio/highwayhash v1.0.1/go.mod h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLTk+kldvVxY= github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= @@ -533,8 +527,8 @@ github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/moesif/moesifapi-go v1.0.6 h1:r3ppy6p5jxzdauziRI3lMtcjDpVH/zW2an2rYXLkNWE= github.com/moesif/moesifapi-go v1.0.6/go.mod h1:wRGgVy0QeiCgnjFEiD13HD2Aa7reI8nZXtCnddNnZGs= github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe h1:iruDEfMl2E6fbMZ9s0scYfZQ84/6SPL6zC8ACM2oIL0= @@ -542,7 +536,6 @@ github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJ github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ= github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae/go.mod h1:qAyveg+e4CE+eKJXWVjKXM4ck2QobLqTDytGJbLLhJg= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/nats-io/jwt v1.2.2 h1:w3GMTO969dFg+UOKTmmyuu7IGdusK+7Ytlt//OYH/uU= github.com/nats-io/jwt v1.2.2/go.mod h1:/xX356yQA6LuXI9xWW7mZNpxgF2mBmGecH+Fj34sP5Q= github.com/nats-io/jwt/v2 v2.0.2 h1:ejVCLO8gu6/4bOKIHQpmB5UhhUJfAQw55yvLWpfmKjI= @@ -610,24 +603,21 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= -github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= -github.com/prometheus/client_golang v1.11.0 h1:HNkLOAEQMIDv/K+04rukrLx6ch7msSRwf3/SASFAGtQ= -github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= +github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8= +github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= -github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= 
+github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4= +github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= -github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= -github.com/prometheus/common v0.26.0 h1:iMAkS2TDoNWnKM+Kopnx/8tnEStIfpYA0ur0xQzzhMQ= -github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= +github.com/prometheus/common v0.42.0 h1:EKsfXEYo4JpWMHH5cg+KOUWeuJSov1Id8zGR8eeI1YM= +github.com/prometheus/common v0.42.0/go.mod h1:xBwqVerjNdUDjgODMpudtOMwlOwf2SaTr1yjz4b7Zbc= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= -github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4= -github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/prometheus/procfs v0.10.1 h1:kYK1Va/YMlutzCGazswoHKo//tZVlFpKYh+PymziUAg= +github.com/prometheus/procfs v0.10.1/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM= github.com/qri-io/jsonpointer v0.1.1 h1:prVZBZLL6TW5vsSB9fFHFAMBLI4b0ri5vribQlTJiBA= github.com/qri-io/jsonpointer v0.1.1/go.mod h1:DnJPaYgiKu56EuDp8TU5wFLdZIcAnb/uH9v37ZaMV64= github.com/qri-io/jsonschema v0.2.1 h1:NNFoKms+kut6ABPf6xiKNM5214jzxAhDBrPHCJ97Wg0= @@ -643,6 +633,8 @@ github.com/retailnext/hllpp v1.0.1-0.20180308014038-101a6d2f8b52/go.mod h1:RDpi1 github.com/robertkowalski/graylog-golang v0.0.0-20151121031040-e5295cfa2827 h1:D2Xs0bSuqpKnUOOlK4yu6lloeOs4+oD+pjbOfsxgWu0= github.com/robertkowalski/graylog-golang v0.0.0-20151121031040-e5295cfa2827/go.mod h1:jONcYFk83vUF1lv0aERAwaFtDM9wUW4BMGmlnpLJyZY= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= +github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA= github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU= github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc= @@ -673,7 +665,6 @@ github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeV github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE= github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= @@ -871,7 +862,6 @@ golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= 
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= @@ -885,8 +875,9 @@ golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220722155237-a158d28d115b h1:PxfKdU9lEEDYjdIzOtC4qFWgkU2rGHdKlKowJSMN9h0= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.7.0 h1:rJrUqqhjsgNp7KqAIc25s9pZnjU7TUcSY7HcVZjdn1g= +golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -899,10 +890,10 @@ golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 h1:uVc8UZUe6tr40fFVnUP5Oj+veunVezqYl9z7DYw9xzw= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.2.0 h1:PUR+T4wwASmuSTYdKjYHI5TD22Wy5ogLU5qZCOLxBrI= +golang.org/x/sync v0.2.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -930,21 +921,17 @@ golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys 
v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200107162124-548cf772de50/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200826173525-f9321e4c35a6/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -970,8 +957,9 @@ golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -1040,8 +1028,9 @@ google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9Ywl google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= -google.golang.org/appengine v1.6.5 h1:tycE03LOZYQNhDpS27tcQdAzLCVMaj7QT2SXxebnpCM= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= +google.golang.org/appengine 
v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= @@ -1078,8 +1067,8 @@ google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp0 google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= -google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng= +google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/cenkalti/backoff.v1 v1.1.0 h1:Arh75ttbsvlpVA7WtVpH4u9h6Zl46xuptxqLxPiSo4Y= @@ -1107,7 +1096,6 @@ gopkg.in/vmihailenco/msgpack.v2 v2.9.1/go.mod h1:/3Dn1Npt9+MYyLpYYXjInO/5jvMLamn gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= From 1145397f4ab10904287b77f48edc2f6fb6759cf2 Mon Sep 17 00:00:00 2001 From: Alok G Singh Date: Wed, 26 Jul 2023 17:11:50 +0800 Subject: [PATCH 093/102] Auto generated from templates by gromit (#692) Co-authored-by: Gromit --- .github/dependabot.yml | 2 +- .github/workflows/release.yml | 144 +++++++++++----------------------- ci/Dockerfile.std | 2 +- ci/aws/byol.pkr.hcl | 2 +- ci/goreleaser/goreleaser.yml | 127 +++++++++++++----------------- ci/install/before_install.sh | 2 +- ci/install/post_install.sh | 2 +- ci/install/post_remove.sh | 2 +- ci/install/post_trans.sh | 2 +- 9 files changed, 108 insertions(+), 177 deletions(-) diff --git a/.github/dependabot.yml b/.github/dependabot.yml index b9d2b23e3..bffb4d534 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -1,6 +1,6 @@ # Generated by: gromit policy -# Generated on: Wed May 10 06:24:08 UTC 2023 +# Generated on: Wed Jul 26 08:25:03 UTC 2023 version: 2 updates: diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 8f0626d1b..f6509fa51 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -1,15 +1,12 @@ - # Generated by: gromit policy -# Generated on: Wed May 10 06:24:08 UTC 2023 - +# Generated on: Wed Jul 26 08:25:03 UTC 2023 # Distribution channels covered by this workflow # - Ubuntu and Debian -# - RHEL/OL -# - tarballs +# - RHEL and AL # - docker hub # - devenv ECR -# - AWS mktplace +# - AWS mktplace (not active atm) # - Cloudsmith name: Release @@ 
-35,11 +32,11 @@ jobs: strategy: fail-fast: false matrix: - golang_cross: [1.19-bullseye] + golang_cross: [ 1.19-bullseye ] include: - golang_cross: 1.19-bullseye goreleaser: 'ci/goreleaser/goreleaser.yml' - rpmvers: ' el/7 el/8 el/9' + rpmvers: 'el/7 el/8 el/9 amazon/2 amazon/2023' debvers: 'ubuntu/xenial ubuntu/bionic ubuntu/focal ubuntu/jammy debian/jessie debian/buster debian/bullseye' outputs: tag: ${{ steps.targets.outputs.tag }} @@ -106,7 +103,7 @@ jobs: - uses: goreleaser/goreleaser-action@v4 with: - version: 1.18.2 + version: latest args: release --clean -f ${{ matrix.goreleaser }} ${{ !startsWith(github.ref, 'refs/tags/') && ' --snapshot' || '' }} env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} @@ -119,7 +116,6 @@ jobs: DEBVERS: ${{ matrix.debvers }} RPMVERS: ${{ matrix.rpmvers }} PACKAGECLOUD_TOKEN: ${{ secrets.PACKAGECLOUD_TOKEN }} - - uses: actions/upload-artifact@v3 with: name: deb @@ -139,37 +135,21 @@ jobs: needs: - goreleaser runs-on: ubuntu-latest + permissions: + id-token: write # This is required for requesting the JWT + contents: read # This is required for actions/checkout steps: - name: Shallow checkout of tyk-pump uses: actions/checkout@v3 with: fetch-depth: 1 - - name: Setup Terraform - uses: hashicorp/setup-terraform@v2 - with: - cli_config_credentials_token: ${{ secrets.TF_API_TOKEN }} - terraform_wrapper: false - - name: Get AWS creds from Terraform remote state - id: aws-creds - run: | - cd ci/terraform - terraform init -input=false - terraform refresh 2>&1 >/dev/null - eval $(terraform output -json tyk-pump | jq -r 'to_entries[] | [.key,.value] | join("=")') - region=$(terraform output region | xargs) - [ -z "$key" -o -z "$secret" -o -z "$region" ] && exit 1 - echo "secret=$secret" >> $GITHUB_OUTPUT - echo "key=$key" >> $GITHUB_OUTPUT - echo "region=$region" >> $GITHUB_OUTPUT - - - name: Configure AWS credentials for use - uses: aws-actions/configure-aws-credentials@v1 + - uses: aws-actions/configure-aws-credentials@v2 with: - aws-access-key-id: ${{ steps.aws-creds.outputs.key }} - aws-secret-access-key: ${{ steps.aws-creds.outputs.secret }} - aws-region: ${{ steps.aws-creds.outputs.region }} + role-to-assume: arn:aws:iam::754489498669:role/ecr_rw_tyk + role-session-name: cipush + aws-region: eu-central-1 - name: Login to Amazon ECR id: login-ecr @@ -179,50 +159,36 @@ jobs: with: name: deb + - name: Docker metadata + id: metadata + uses: docker/metadata-action@v4 + with: + images: ${{ steps.login-ecr.outputs.registry }}/tyk-pump + flavor: | + latest=false + prefix=v + tags: | + type=semver,pattern=v{{version}} + type=semver,pattern=v{{major}}.{{minor}} + type=semver,pattern=v{{major}} + type=ref,event=branch + - uses: docker/setup-qemu-action@v2 - uses: docker/setup-buildx-action@v2 - name: CI build - uses: docker/build-push-action@v3 + uses: docker/build-push-action@v4 with: push: true context: "." 
file: ci/Dockerfile.std platforms: linux/amd64,linux/arm64 tags: | + ${{ steps.metadata.outputs.tags }} ${{ steps.login-ecr.outputs.registry }}/tyk-pump:${{ needs.goreleaser.outputs.tag }} ${{ steps.login-ecr.outputs.registry }}/tyk-pump:${{ github.sha }} - - name: Tell gromit about new build - id: gromit - run: | - # Remember to remove the true when TD-626 is fixed - curl -fsSL -H "Authorization: ${{secrets.GROMIT_TOKEN}}" 'https://domu-kun.cloud.tyk.io/gromit/newbuild' \ - -X POST -d '{ "repo": "${{ github.repository}}", "ref": "${{ github.ref }}", "sha": "${{ github.sha }}" }' || true - - - name: Tell integration channel - if: ${{ failure() }} - run: | - colour=bad - pretext=":boom: Could not add new build $${{ github.ref }} from ${{ github.repository }} to CD. Please review this run and correct it if needed. See https://github.com/TykTechnologies/tyk-ci/wiki/IntegrationEnvironment for what this is about." - curl https://raw.githubusercontent.com/rockymadden/slack-cli/master/src/slack -o /tmp/slack && chmod +x /tmp/slack - /tmp/slack chat send \ - --actions '{"type": "button", "style": "primary", "text": "See log", "url": "https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}"}' \ - --author 'Bender' \ - --author-icon 'https://hcoop.net/~alephnull/bender/bender-arms.jpg' \ - --author-link 'https://github.com/TykTechnologies/tyk-ci' \ - --channel '#service-integration' \ - --color $colour \ - --fields '{"title": "Repo", "value": "${{ github.repository }}", "short": false}' \ - --footer 'github-actions' \ - --footer-icon 'https://assets-cdn.github.com/images/modules/logos_page/Octocat.png' \ - --image 'https://assets-cdn.github.com/images/modules/logos_page/Octocat.png' \ - --pretext "$pretext" \ - --text 'Commit message: ${{ github.event.head_commit.message }}' \ - --title 'Failed to add new build for CD' \ - --title-link 'https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}' - sbom: needs: ci uses: TykTechnologies/github-actions/.github/workflows/sbom.yaml@main @@ -232,8 +198,12 @@ jobs: DEPDASH_KEY: ${{ secrets.DEPDASH_KEY }} ORG_GH_TOKEN: ${{ secrets.ORG_GH_TOKEN }} + upgrade-deb: - if: startsWith(github.ref, 'refs/tags') && !github.event.pull_request.draft + if: startsWith(github.ref, 'refs/tags') + services: + httpbin.org: + image: kennethreitz/httpbin runs-on: ubuntu-latest needs: goreleaser strategy: @@ -243,6 +213,7 @@ jobs: - amd64 - arm64 distro: + - ubuntu:jammy - ubuntu:bionic - ubuntu:focal - debian:bullseye @@ -271,7 +242,7 @@ jobs: ' > Dockerfile - name: install on ${{ matrix.distro }} - uses: docker/build-push-action@v3 + uses: docker/build-push-action@v4 with: context: "." 
platforms: linux/${{ matrix.arch }} @@ -279,13 +250,17 @@ jobs: push: false upgrade-rpm: - if: startsWith(github.ref, 'refs/tags') && !github.event.pull_request.draft + if: startsWith(github.ref, 'refs/tags') + services: + httpbin.org: + image: kennethreitz/httpbin needs: goreleaser runs-on: ubuntu-latest strategy: fail-fast: false matrix: distro: + - ubi9/ubi - ubi8/ubi steps: @@ -303,7 +278,7 @@ jobs: run: | echo 'FROM registry.access.redhat.com/${{ matrix.distro }} COPY tyk-pump*.x86_64.rpm /tyk-pump.rpm - RUN yum install -y curl + RUN yum install --allowerasing -y curl RUN curl -fsSL https://packagecloud.io/install/repositories/tyk/tyk-pump/script.rpm.sh | bash && yum install -y tyk-pump-1.6.0-1 RUN curl https://keyserver.tyk.io/tyk.io.rpm.signing.key.2020 -o tyk-pump.key && rpm --import tyk-pump.key RUN rpm --checksig tyk-pump.rpm @@ -311,14 +286,14 @@ jobs: ' > Dockerfile - name: install on ${{ matrix.distro }} - uses: docker/build-push-action@v3 + uses: docker/build-push-action@v4 with: context: "." file: Dockerfile push: false smoke-tests: - if: startsWith(github.ref, 'refs/tags') && !github.event.pull_request.draft + if: startsWith(github.ref, 'refs/tags') needs: - goreleaser runs-on: ubuntu-latest @@ -330,6 +305,8 @@ jobs: - name: Run tests shell: bash + env: + GITHUB_TAG: ${{ github.ref }} run: | set -eaxo pipefail if [ ! -d smoke-tests ]; then @@ -358,32 +335,3 @@ jobs: fi done - # AWS updates only for stable releases - aws-mktplace-byol: - if: ( 'a' == 'b' ) - runs-on: ubuntu-latest - needs: - - smoke-tests - strategy: - matrix: - flavour: - - al2 - - rhel - - steps: - - name: Checkout tyk-pump - uses: actions/checkout@v3 - with: - fetch-depth: 1 - - - uses: actions/download-artifact@v3 - with: - name: rpm - path: aws - - - name: Packer build - working-directory: ./ci/aws - run: | - export VERSION=${{ needs.goreleaser.outputs.tag }} - packer validate -var-file=${{ matrix.flavour }}.vars.json byol.pkr.hcl - packer build -var-file=${{ matrix.flavour }}.vars.json byol.pkr.hcl diff --git a/ci/Dockerfile.std b/ci/Dockerfile.std index 462deed07..cce463d35 100644 --- a/ci/Dockerfile.std +++ b/ci/Dockerfile.std @@ -1,6 +1,6 @@ # Generated by: gromit policy -# Generated on: Wed May 10 06:24:08 UTC 2023 +# Generated on: Wed Jul 26 08:25:03 UTC 2023 FROM debian:bullseye-slim ARG TARGETARCH diff --git a/ci/aws/byol.pkr.hcl b/ci/aws/byol.pkr.hcl index b8b5509f9..167d9afdf 100644 --- a/ci/aws/byol.pkr.hcl +++ b/ci/aws/byol.pkr.hcl @@ -1,6 +1,6 @@ # Generated by: gromit policy -# Generated on: Wed May 10 06:24:08 UTC 2023 +# Generated on: Wed Jul 26 08:25:03 UTC 2023 packer { required_plugins { diff --git a/ci/goreleaser/goreleaser.yml b/ci/goreleaser/goreleaser.yml index c2338c84e..d7666a2b4 100644 --- a/ci/goreleaser/goreleaser.yml +++ b/ci/goreleaser/goreleaser.yml @@ -1,5 +1,5 @@ # Generated by: gromit policy -# Generated on: Wed May 10 06:24:08 UTC 2023 +# Generated on: Wed Jul 26 08:25:03 UTC 2023 # Check the documentation at http://goreleaser.com # This project needs CGO_ENABLED=1 and the cross-compiler toolchains for @@ -17,60 +17,64 @@ builds: binary: tyk-pump dockers: -# Build tykio/tyk-pump-docker-pub, docker.tyk.io/tyk-pump/tyk-pump (amd64) -- ids: - - std - image_templates: - - "tykio/tyk-pump-docker-pub:{{ .Tag }}-amd64" - - "docker.tyk.io/tyk-pump/tyk-pump:{{ .Tag }}" - build_flag_templates: - - "--build-arg=PORTS=80" - - "--platform=linux/amd64" - - "--label=org.opencontainers.image.created={{.Date}}" - - "--label=org.opencontainers.image.title={{.ProjectName}}" - - 
"--label=org.opencontainers.image.revision={{.FullCommit}}" - - "--label=org.opencontainers.image.version={{.Version}}" - use: buildx - goarch: amd64 - goos: linux - dockerfile: ci/Dockerfile.std - extra_files: - - "ci/install/" - - "README.md" - - "LICENSE.md" - - "pump.example.conf" -# Build tykio/tyk-pump-docker-pub, docker.tyk.io/tyk-pump/tyk-pump (arm64) -- ids: - - std - image_templates: - - "tykio/tyk-pump-docker-pub:{{ .Tag }}-arm64" - - "docker.tyk.io/tyk-pump/tyk-pump:{{ .Tag }}-arm64" - build_flag_templates: - - "--build-arg=PORTS=80" - - "--platform=linux/arm64" - - "--label=org.opencontainers.image.created={{.Date}}" - - "--label=org.opencontainers.image.title={{.ProjectName}}" - - "--label=org.opencontainers.image.revision={{.FullCommit}}" - - "--label=org.opencontainers.image.version={{.Version}}" - use: buildx - goarch: arm64 - goos: linux - dockerfile: ci/Dockerfile.std - extra_files: - - "ci/install/" - - "README.md" - - "LICENSE.md" - - "pump.example.conf" - + # Build tykio/tyk-pump-docker-pub, docker.tyk.io/tyk-pump/tyk-pump (amd64) + - ids: + - std + image_templates: + - "tykio/tyk-pump-docker-pub:{{ .Tag }}-amd64" + - "docker.tyk.io/tyk-pump/tyk-pump:{{ .Tag }}-amd64" + build_flag_templates: + - "--build-arg=PORTS=80" + - "--platform=linux/amd64" + - "--label=org.opencontainers.image.created={{.Date}}" + - "--label=org.opencontainers.image.title={{.ProjectName}}" + - "--label=org.opencontainers.image.revision={{.FullCommit}}" + - "--label=org.opencontainers.image.version={{.Version}}" + use: buildx + goarch: amd64 + goos: linux + dockerfile: ci/Dockerfile.std + extra_files: + - "ci/install/" + - "README.md" + - "LICENSE.md" + - "pump.example.conf" + # Build tykio/tyk-pump-docker-pub, docker.tyk.io/tyk-pump/tyk-pump (arm64) + - ids: + - std + image_templates: + - "tykio/tyk-pump-docker-pub:{{ .Tag }}-arm64" + - "docker.tyk.io/tyk-pump/tyk-pump:{{ .Tag }}-arm64" + build_flag_templates: + - "--build-arg=PORTS=80" + - "--platform=linux/arm64" + - "--label=org.opencontainers.image.created={{.Date}}" + - "--label=org.opencontainers.image.title={{.ProjectName}}" + - "--label=org.opencontainers.image.revision={{.FullCommit}}" + - "--label=org.opencontainers.image.version={{.Version}}" + use: buildx + goarch: arm64 + goos: linux + dockerfile: ci/Dockerfile.std + extra_files: + - "ci/install/" + - "README.md" + - "LICENSE.md" + - "pump.example.conf" + docker_manifests: - name_template: tykio/tyk-pump-docker-pub:{{ .Tag }} image_templates: - - tykio/tyk-pump-docker-pub:{{ .Tag }}-amd64 - - tykio/tyk-pump-docker-pub:{{ .Tag }}-arm64 + - tykio/tyk-pump-docker-pub:{{ .Tag }}-amd64 + - tykio/tyk-pump-docker-pub:{{ .Tag }}-arm64 - name_template: tykio/tyk-pump-docker-pub:v{{ .Major }}.{{ .Minor }}{{.Prerelease}} image_templates: - - tykio/tyk-pump-docker-pub:{{ .Tag }}-amd64 - - tykio/tyk-pump-docker-pub:{{ .Tag }}-arm64 + - tykio/tyk-pump-docker-pub:{{ .Tag }}-amd64 + - tykio/tyk-pump-docker-pub:{{ .Tag }}-arm64 + - name_template: docker.tyk.io/tyk-pump/tyk-pump:{{ .Tag }} + image_templates: + - docker.tyk.io/tyk-pump/tyk-pump:{{ .Tag }}-amd64 + - docker.tyk.io/tyk-pump/tyk-pump:{{ .Tag }}-arm64 nfpms: - id: std @@ -79,6 +83,7 @@ nfpms: maintainer: "Tyk " description: Tyk Analytics Pump to move analytics data from Redis to any supported back end (multiple back ends can be written to at once). 
package_name: tyk-pump + file_name_template: "{{ .ConventionalFileName }}" builds: - std formats: @@ -103,17 +108,6 @@ nfpms: postinstall: "ci/install/post_install.sh" postremove: "ci/install/post_remove.sh" bindir: "/opt/tyk-pump" - overrides: - rpm: - file_name_template: '{{ .PackageName }}-{{ replace .Version "-" "~" }}-1.{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }}{{ if .Mips }}_{{ .Mips }}{{ end }}' - replacements: - amd64: x86_64 - arm: aarch64 - arm64: aarch64 - deb: - file_name_template: '{{ .PackageName }}_{{ replace .Version "-" "~" }}_{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }}{{ if .Mips }}_{{ .Mips }}{{ end }}' - replacements: - arm: arm64 rpm: scripts: posttrans: ci/install/post_trans.sh @@ -133,17 +127,6 @@ publishers: - DEBVERS={{ .Env.DEBVERS }} cmd: /pc.sh {{ .ArtifactPath }} -archives: -- id: std-linux - builds: - - std - files: - - README.md - - "ci/install/*" - - "LICENSE.md" - - CHANGELOG.md - - pump.example.conf - checksum: disable: true diff --git a/ci/install/before_install.sh b/ci/install/before_install.sh index bc736fbbb..6b30c85a1 100755 --- a/ci/install/before_install.sh +++ b/ci/install/before_install.sh @@ -1,7 +1,7 @@ #!/bin/bash # Generated by: gromit policy -# Generated on: Wed May 10 06:24:08 UTC 2023 +# Generated on: Wed Jul 26 08:25:03 UTC 2023 echo "Creating user and group..." GROUPNAME="tyk" diff --git a/ci/install/post_install.sh b/ci/install/post_install.sh index ce08381ca..ea03d859c 100755 --- a/ci/install/post_install.sh +++ b/ci/install/post_install.sh @@ -2,7 +2,7 @@ # Generated by: gromit policy -# Generated on: Wed May 10 06:24:08 UTC 2023 +# Generated on: Wed Jul 26 08:25:03 UTC 2023 # If "True" the install directory ownership will be changed to "tyk:tyk" change_ownership="True" diff --git a/ci/install/post_remove.sh b/ci/install/post_remove.sh index e8745fabe..016299b1f 100755 --- a/ci/install/post_remove.sh +++ b/ci/install/post_remove.sh @@ -1,7 +1,7 @@ #!/bin/sh # Generated by: gromit policy -# Generated on: Wed May 10 06:24:08 UTC 2023 +# Generated on: Wed Jul 26 08:25:03 UTC 2023 cleanRemove() { diff --git a/ci/install/post_trans.sh b/ci/install/post_trans.sh index 213b898a3..0ccb92d6d 100644 --- a/ci/install/post_trans.sh +++ b/ci/install/post_trans.sh @@ -1,7 +1,7 @@ #!/bin/sh # Generated by: gromit policy -# Generated on: Wed May 10 06:24:08 UTC 2023 +# Generated on: Wed Jul 26 08:25:03 UTC 2023 if command -V systemctl >/dev/null 2>&1; then if [ ! 
-f /lib/systemd/system/tyk-pump.service ]; then From 01176880ca5485320bf33a465de01063a265dc5a Mon Sep 17 00:00:00 2001 From: Matias <83959431+mativm02@users.noreply.github.com> Date: Wed, 26 Jul 2023 09:15:25 -0300 Subject: [PATCH 094/102] [TT-9464] Updating storage to v1.0.7 (#689) * Updating storage to v1.0.7 * go mod tidy --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 6653bb6b9..1e6c52ea7 100644 --- a/go.mod +++ b/go.mod @@ -7,7 +7,7 @@ require ( github.com/TykTechnologies/gorpc v0.0.0-20210624160652-fe65bda0ccb9 github.com/TykTechnologies/graphql-go-tools v1.6.2-0.20230320143102-7a16078ce517 github.com/TykTechnologies/murmur3 v0.0.0-20230310161213-aad17efd5632 - github.com/TykTechnologies/storage v1.0.5 + github.com/TykTechnologies/storage v1.0.7 github.com/aws/aws-sdk-go-v2 v1.16.14 github.com/aws/aws-sdk-go-v2/config v1.9.0 github.com/aws/aws-sdk-go-v2/service/timestreamwrite v1.9.0 diff --git a/go.sum b/go.sum index 0bb908c71..9e21f1ae5 100644 --- a/go.sum +++ b/go.sum @@ -51,8 +51,8 @@ github.com/TykTechnologies/graphql-go-tools v1.6.2-0.20230320143102-7a16078ce517 github.com/TykTechnologies/graphql-go-tools v1.6.2-0.20230320143102-7a16078ce517/go.mod h1:ZiFZcrue3+n2mHH+KLHRipbYVULkgy3Myko5S7IIs74= github.com/TykTechnologies/murmur3 v0.0.0-20230310161213-aad17efd5632 h1:T5NWziFusj8au5nxAqMMh/bZyX9CAyYnBkaMSsfH6BA= github.com/TykTechnologies/murmur3 v0.0.0-20230310161213-aad17efd5632/go.mod h1:UsPYgOFBpNzDXLEti7MKOwHLpVSqdzuNGkVFPspQmnQ= -github.com/TykTechnologies/storage v1.0.5 h1:lfMljPueySAW7Mpc70g1/qC5n2LKNcKgQs+Xw30apP8= -github.com/TykTechnologies/storage v1.0.5/go.mod h1:+0S3KuNlLGBTMTSFREuZFm315zzXjuuCO4QSAPy+d3M= +github.com/TykTechnologies/storage v1.0.7 h1:lPkqSI5w15Eysh7JuOPCk/ajhgC4ibdHRhuiuoQQv2w= +github.com/TykTechnologies/storage v1.0.7/go.mod h1:+0S3KuNlLGBTMTSFREuZFm315zzXjuuCO4QSAPy+d3M= github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM= github.com/agnivade/levenshtein v1.1.1 h1:QY8M92nrzkmr798gCo3kmMyqXFzdQVpxLlGPRBij0P8= github.com/agnivade/levenshtein v1.1.1/go.mod h1:veldBMzWxcCG2ZvUTKD2kJNRdCk5hVbJomOvKkmgYbo= From b538a89fd515d9f417d2d13e659399908982e8f2 Mon Sep 17 00:00:00 2001 From: Tomas Buchaillot Date: Mon, 31 Jul 2023 16:35:12 +0200 Subject: [PATCH 095/102] Fix pump CI (#695) * removing go 1.18 from CI * trying test-params gha branch * trying gha improvements * using main branch on workflows --- .github/workflows/linter.yaml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/.github/workflows/linter.yaml b/.github/workflows/linter.yaml index 633effb19..9b15717a7 100644 --- a/.github/workflows/linter.yaml +++ b/.github/workflows/linter.yaml @@ -22,17 +22,19 @@ jobs: strategy: fail-fast: false matrix: - go: [1.18, 1.19] + go: [1.19] with: go: ${{ matrix.go }} redis: 5 mongo: 4.2 + test-options: ""-count=1 -failfast -v"" golangci: needs: gotest if: ${{ always() }} uses: TykTechnologies/github-actions/.github/workflows/golangci.yaml@main with: main_branch: "master" + go_version: 1.19 sonarcloud: if: ${{ always() }} needs: [gotest, golangci] From 91dd8a0143d1fbb005cd2908cecc91c36cc74970 Mon Sep 17 00:00:00 2001 From: Matias <83959431+mativm02@users.noreply.github.com> Date: Tue, 1 Aug 2023 09:40:24 -0300 Subject: [PATCH 096/102] [TT-9360] Changing Timeout from time.Duration to int (#696) * Updating storage to v1.0.7 * Changing Timeout from time.Duration to int * changing timeout type to interface * linting * handling numbers 
in format string --- pumps/kafka.go | 33 ++++++++++++++++++++++++++++----- 1 file changed, 28 insertions(+), 5 deletions(-) diff --git a/pumps/kafka.go b/pumps/kafka.go index a133068a2..ad2756a99 100644 --- a/pumps/kafka.go +++ b/pumps/kafka.go @@ -4,6 +4,8 @@ import ( "context" "crypto/tls" "encoding/json" + "os" + "strconv" "time" "github.com/TykTechnologies/tyk-pump/analytics" @@ -39,8 +41,8 @@ type KafkaConf struct { ClientId string `json:"client_id" mapstructure:"client_id"` // The topic that the writer will produce messages to. Topic string `json:"topic" mapstructure:"topic"` - // Timeout is the maximum amount of time will wait for a connect or write to complete. - Timeout time.Duration `json:"timeout" mapstructure:"timeout"` + // Timeout is the maximum amount of seconds to wait for a connect or write to complete. + Timeout interface{} `json:"timeout" mapstructure:"timeout"` // Enable "github.com/golang/snappy" codec to be used to compress Kafka messages. By default // is `false`. Compressed bool `json:"compressed" mapstructure:"compressed"` @@ -90,6 +92,10 @@ func (k *KafkaPump) Init(config interface{}) error { } processPumpEnvVars(k, k.log, k.kafkaConf, kafkaDefaultENV) + // This interface field is not reached by envconfig library, that's why we manually check it + if os.Getenv("TYK_PMP_PUMPS_KAFKA_META_TIMEOUT") != "" { + k.kafkaConf.Timeout = os.Getenv("TYK_PMP_PUMPS_KAFKA_META_TIMEOUT") + } var tlsConfig *tls.Config if k.kafkaConf.UseSSL { @@ -137,9 +143,26 @@ func (k *KafkaPump) Init(config interface{}) error { k.log.WithField("SASL-Mechanism", k.kafkaConf.SASLMechanism).Warn("Tyk pump doesn't support this SASL mechanism.") } + // Timeout is an interface type to allow both time.Duration and float values + var timeout time.Duration + switch v := k.kafkaConf.Timeout.(type) { + case string: + timeout, err = time.ParseDuration(v) // i.e: when timeout is '1s' + if err != nil { + floatValue, floatErr := strconv.ParseFloat(v, 64) // i.e: when timeout is '1' + if floatErr != nil { + k.log.Fatal("Failed to parse timeout: ", floatErr) + } else { + timeout = time.Duration(floatValue * float64(time.Second)) + } + } + case float64: + timeout = time.Duration(v) * time.Second // i.e: when timeout is 1 + } + //Kafka writer connection config dialer := &kafka.Dialer{ - Timeout: k.kafkaConf.Timeout * time.Second, + Timeout: timeout, ClientID: k.kafkaConf.ClientId, TLS: tlsConfig, SASLMechanism: mechanism, @@ -149,8 +172,8 @@ func (k *KafkaPump) Init(config interface{}) error { k.writerConfig.Topic = k.kafkaConf.Topic k.writerConfig.Balancer = &kafka.LeastBytes{} k.writerConfig.Dialer = dialer - k.writerConfig.WriteTimeout = k.kafkaConf.Timeout * time.Second - k.writerConfig.ReadTimeout = k.kafkaConf.Timeout * time.Second + k.writerConfig.WriteTimeout = timeout + k.writerConfig.ReadTimeout = timeout if k.kafkaConf.Compressed { k.writerConfig.CompressionCodec = snappy.NewCompressionCodec() } From a1fad74e91448d2fc17b931cbb22eca2bbbb5c87 Mon Sep 17 00:00:00 2001 From: Matias <83959431+mativm02@users.noreply.github.com> Date: Tue, 8 Aug 2023 09:28:05 -0300 Subject: [PATCH 097/102] [TT-9464] Updating storage to v1.0.8 (#698) * Updating storage to v1.0.7 * go mod tidy * Updating storage to v1.0.8 --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 1e6c52ea7..668bf524b 100644 --- a/go.mod +++ b/go.mod @@ -7,7 +7,7 @@ require ( github.com/TykTechnologies/gorpc v0.0.0-20210624160652-fe65bda0ccb9 github.com/TykTechnologies/graphql-go-tools 
v1.6.2-0.20230320143102-7a16078ce517 github.com/TykTechnologies/murmur3 v0.0.0-20230310161213-aad17efd5632 - github.com/TykTechnologies/storage v1.0.7 + github.com/TykTechnologies/storage v1.0.8 github.com/aws/aws-sdk-go-v2 v1.16.14 github.com/aws/aws-sdk-go-v2/config v1.9.0 github.com/aws/aws-sdk-go-v2/service/timestreamwrite v1.9.0 diff --git a/go.sum b/go.sum index 9e21f1ae5..d2b7328b1 100644 --- a/go.sum +++ b/go.sum @@ -51,8 +51,8 @@ github.com/TykTechnologies/graphql-go-tools v1.6.2-0.20230320143102-7a16078ce517 github.com/TykTechnologies/graphql-go-tools v1.6.2-0.20230320143102-7a16078ce517/go.mod h1:ZiFZcrue3+n2mHH+KLHRipbYVULkgy3Myko5S7IIs74= github.com/TykTechnologies/murmur3 v0.0.0-20230310161213-aad17efd5632 h1:T5NWziFusj8au5nxAqMMh/bZyX9CAyYnBkaMSsfH6BA= github.com/TykTechnologies/murmur3 v0.0.0-20230310161213-aad17efd5632/go.mod h1:UsPYgOFBpNzDXLEti7MKOwHLpVSqdzuNGkVFPspQmnQ= -github.com/TykTechnologies/storage v1.0.7 h1:lPkqSI5w15Eysh7JuOPCk/ajhgC4ibdHRhuiuoQQv2w= -github.com/TykTechnologies/storage v1.0.7/go.mod h1:+0S3KuNlLGBTMTSFREuZFm315zzXjuuCO4QSAPy+d3M= +github.com/TykTechnologies/storage v1.0.8 h1:MBs6hk5oLOmr2qK5/rl+dYO6iDMez6u3QkwOCL6K8n8= +github.com/TykTechnologies/storage v1.0.8/go.mod h1:+0S3KuNlLGBTMTSFREuZFm315zzXjuuCO4QSAPy+d3M= github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM= github.com/agnivade/levenshtein v1.1.1 h1:QY8M92nrzkmr798gCo3kmMyqXFzdQVpxLlGPRBij0P8= github.com/agnivade/levenshtein v1.1.1/go.mod h1:veldBMzWxcCG2ZvUTKD2kJNRdCk5hVbJomOvKkmgYbo= From 3bf1f8539f147adce0c5c879a852d78ae4bcabef Mon Sep 17 00:00:00 2001 From: Kofo Okesola Date: Wed, 23 Aug 2023 08:58:32 +0100 Subject: [PATCH 098/102] [TT-9855]: fix index creation error on graph sql pump creation (#715) * fix index creation error * fix ci * fix lint --- analytics/graph_record.go | 14 +++++++++++++- pumps/graph_sql.go | 1 + 2 files changed, 14 insertions(+), 1 deletion(-) diff --git a/analytics/graph_record.go b/analytics/graph_record.go index a867e7660..160353713 100644 --- a/analytics/graph_record.go +++ b/analytics/graph_record.go @@ -20,6 +20,13 @@ import ( "github.com/TykTechnologies/storage/persistent/model" ) +// GraphSQLTableName should be defined before SQL migration is called on the GraphRecord +// the reason this approach is used to define the table name is due to gorm's inability to +// read values from the fields of the GraphRecord/AnalyticsRecord struct when it is migrating, due to that +// a single static value is going to be returned as TableName and it will be used as the prefix for index/relationship creation no matter the +// value passed to db.Table() +var GraphSQLTableName string + type GraphRecord struct { Types map[string][]string `gorm:"types"` @@ -32,8 +39,13 @@ type GraphRecord struct { HasErrors bool `gorm:"has_errors"` } +// TableName is used by both the sql orm and mongo driver the table name and collection name used for operations on this model +// the conditional return is to ensure the right value is used for both the sql and mongo operations func (g *GraphRecord) TableName() string { - return g.AnalyticsRecord.TableName() + if GraphSQLTableName == "" { + return g.AnalyticsRecord.TableName() + } + return GraphSQLTableName } // GetObjectID is a dummy function to satisfy the interface diff --git a/pumps/graph_sql.go b/pumps/graph_sql.go index 89b415ba2..0253f35eb 100644 --- a/pumps/graph_sql.go +++ b/pumps/graph_sql.go @@ -87,6 +87,7 @@ func (g *GraphSQLPump) Init(conf interface{}) error { if name := 
g.Conf.TableName; name != "" { g.tableName = name } + analytics.GraphSQLTableName = g.tableName if !g.Conf.TableSharding { if err := g.db.Table(g.tableName).AutoMigrate(&analytics.GraphRecord{}); err != nil { g.log.WithError(err).Error("error migrating graph analytics table") From 69f5f4a7d1b7edc436c9e943c404f5242cedc363 Mon Sep 17 00:00:00 2001 From: Tomas Buchaillot Date: Wed, 23 Aug 2023 15:54:53 +0200 Subject: [PATCH 099/102] TT-9873 Fix prometheus tracking path (#716) * Fix prometheus tracking path * suggested fixes --- pumps/prometheus.go | 13 +++++++-- pumps/prometheus_test.go | 59 +++++++++++++++++++++++++++++++++++++++- 2 files changed, 69 insertions(+), 3 deletions(-) diff --git a/pumps/prometheus.go b/pumps/prometheus.go index b1b64adec..8613685a5 100644 --- a/pumps/prometheus.go +++ b/pumps/prometheus.go @@ -42,6 +42,9 @@ type PrometheusConf struct { AggregateObservations bool `json:"aggregate_observations" mapstructure:"aggregate_observations"` // Metrics to exclude from exposition. Currently, excludes only the base metrics. DisabledMetrics []string `json:"disabled_metrics" mapstructure:"disabled_metrics"` + // Specifies if it should expose aggregated metrics for all the endpoints. By default, `false` + // which means that all APIs endpoints will be counted as 'unknown' unless the API use track endpoint plugin. + TrackAllPaths bool `json:"track_all_paths" mapstructure:"track_all_paths"` // Custom Prometheus metrics. CustomMetrics CustomMetrics `json:"custom_metrics" mapstructure:"custom_metrics"` } @@ -91,8 +94,9 @@ type counterStruct struct { } const ( - counterType = "counter" - histogramType = "histogram" + counterType = "counter" + histogramType = "histogram" + prometheusUnknownPath = "unknown" ) var ( @@ -245,6 +249,11 @@ func (p *PrometheusPump) WriteData(ctx context.Context, data []interface{}) erro default: } record := item.(analytics.AnalyticsRecord) + + if !(p.conf.TrackAllPaths || record.TrackPath) { + record.Path = prometheusUnknownPath + } + // we loop through all the metrics available. 
for _, metric := range p.allMetrics { if metric.enabled { diff --git a/pumps/prometheus_test.go b/pumps/prometheus_test.go index 043fa5f55..383b1c61a 100644 --- a/pumps/prometheus_test.go +++ b/pumps/prometheus_test.go @@ -352,6 +352,7 @@ func TestPrometheusCounterMetric(t *testing.T) { analyticsRecords []analytics.AnalyticsRecord expectedMetricsAmount int expectedMetrics map[string]counterStruct + trackAllPaths bool }{ { testName: "HTTP status codes per API", @@ -375,7 +376,8 @@ func TestPrometheusCounterMetric(t *testing.T) { }, }, { - testName: "HTTP status codes per API path and method", + testName: "HTTP status codes per API path and method - trackign all paths", + trackAllPaths: true, metric: &PrometheusMetric{ Name: "tyk_http_status_per_path", Help: "HTTP status codes per API path and method", @@ -399,6 +401,57 @@ func TestPrometheusCounterMetric(t *testing.T) { "200--api_2--test--GET": {labelValues: []string{"200", "api_2", "test", "GET"}, count: 1}, }, }, + { + testName: "HTTP status codes per API path and method - tracking some paths", + trackAllPaths: false, + metric: &PrometheusMetric{ + Name: "tyk_http_status_per_path", + Help: "HTTP status codes per API path and method", + MetricType: counterType, + Labels: []string{"code", "api", "path", "method"}, + }, + analyticsRecords: []analytics.AnalyticsRecord{ + {APIID: "api_1", ResponseCode: 500, Path: "test", Method: "GET", TrackPath: true}, + {APIID: "api_1", ResponseCode: 500, Path: "test2", Method: "GET"}, + {APIID: "api_1", ResponseCode: 500, Path: "test", Method: "GET", TrackPath: true}, + {APIID: "api_1", ResponseCode: 500, Path: "test", Method: "POST", TrackPath: true}, + {APIID: "api_1", ResponseCode: 200, Path: "test2", Method: "GET"}, + {APIID: "api_2", ResponseCode: 200, Path: "test", Method: "GET"}, + }, + expectedMetricsAmount: 5, + expectedMetrics: map[string]counterStruct{ + "500--api_1--test--GET": {labelValues: []string{"500", "api_1", "test", "GET"}, count: 2}, + "500--api_1--test--POST": {labelValues: []string{"500", "api_1", "test", "POST"}, count: 1}, + "500--api_1--unknown--GET": {labelValues: []string{"500", "api_1", "unknown", "GET"}, count: 1}, + "200--api_1--unknown--GET": {labelValues: []string{"200", "api_1", "unknown", "GET"}, count: 1}, + "200--api_2--unknown--GET": {labelValues: []string{"200", "api_2", "unknown", "GET"}, count: 1}, + }, + }, + { + testName: "HTTP status codes per API path and method - not tracking paths", + trackAllPaths: false, + metric: &PrometheusMetric{ + Name: "tyk_http_status_per_path", + Help: "HTTP status codes per API path and method", + MetricType: counterType, + Labels: []string{"code", "api", "path", "method"}, + }, + analyticsRecords: []analytics.AnalyticsRecord{ + {APIID: "api_1", ResponseCode: 500, Path: "test", Method: "GET"}, + {APIID: "api_1", ResponseCode: 500, Path: "test2", Method: "GET"}, + {APIID: "api_1", ResponseCode: 500, Path: "test", Method: "GET"}, + {APIID: "api_1", ResponseCode: 500, Path: "test", Method: "POST"}, + {APIID: "api_1", ResponseCode: 200, Path: "test2", Method: "GET"}, + {APIID: "api_2", ResponseCode: 200, Path: "test", Method: "GET"}, + }, + expectedMetricsAmount: 4, + expectedMetrics: map[string]counterStruct{ + "500--api_1--unknown--GET": {labelValues: []string{"500", "api_1", "unknown", "GET"}, count: 3}, + "500--api_1--unknown--POST": {labelValues: []string{"500", "api_1", "unknown", "POST"}, count: 1}, + "200--api_1--unknown--GET": {labelValues: []string{"200", "api_1", "unknown", "GET"}, count: 1}, + "200--api_2--unknown--GET": 
{labelValues: []string{"200", "api_2", "unknown", "GET"}, count: 1}, + }, + }, { testName: "HTTP status codes per API key", metric: &PrometheusMetric{ @@ -474,6 +527,10 @@ func TestPrometheusCounterMetric(t *testing.T) { assert.Nil(t, err) defer prometheus.Unregister(tc.metric.counterVec) for _, record := range tc.analyticsRecords { + if !(tc.trackAllPaths || record.TrackPath) { + record.Path = "unknown" + } + labelValues := tc.metric.GetLabelsValues(record) assert.Equal(t, len(tc.metric.Labels), len(labelValues)) From 657436f2970628e1aaeac5f01c2887f4e21f044f Mon Sep 17 00:00:00 2001 From: Tomas Buchaillot Date: Tue, 29 Aug 2023 10:21:57 +0200 Subject: [PATCH 100/102] [TT-9468] New SQL Aggregate indexes (#694) * fixing SQL Aggregate indexes * TT-9490 change name from APISchema to ApiSchema (#681) * change name from APISchema to ApiSchema * update regex to blur mongo credentials (#686) * updated prometheus to 1.16 (#688) * Auto generated from templates by gromit (#692) Co-authored-by: Gromit * [TT-9464] Updating storage to v1.0.7 (#689) * Updating storage to v1.0.7 * go mod tidy * fixing SQL Aggregate indexes * removing unused func * linting * adding waiting time for sqlite * using background only on postgres * adding comment * updating comments * restoring linter.yaml * adding index created on non existing table, not background scenario * adding dbType var from config * taking care of side scenario * controlling bg chan * adding omit_index_creation --------- Co-authored-by: Sredny M Co-authored-by: Alok G Singh Co-authored-by: Gromit Co-authored-by: Matias <83959431+mativm02@users.noreply.github.com> --- analytics/aggregate.go | 8 +- go.mod | 14 +-- go.sum | 65 ++++++------ pumps/sql_aggregate.go | 104 +++++++++++++++++++- pumps/sql_aggregate_test.go | 190 +++++++++++++++++++++++++++++++++++- 5 files changed, 331 insertions(+), 50 deletions(-) diff --git a/analytics/aggregate.go b/analytics/aggregate.go index c9ce8ffe3..bea95bd56 100644 --- a/analytics/aggregate.go +++ b/analytics/aggregate.go @@ -139,10 +139,10 @@ type SQLAnalyticsRecordAggregate struct { Counter `json:"counter" gorm:"embedded"` - TimeStamp int64 `json:"timestamp" gorm:"index:dimension, priority:1"` - OrgID string `json:"org_id" gorm:"index:dimension, priority:2"` - Dimension string `json:"dimension" gorm:"index:dimension, priority:3"` - DimensionValue string `json:"dimension_value" gorm:"index:dimension, priority:4"` + TimeStamp int64 `json:"timestamp"` + OrgID string `json:"org_id"` + Dimension string `json:"dimension"` + DimensionValue string `json:"dimension_value"` Code `json:"code" gorm:"embedded"` } diff --git a/go.mod b/go.mod index 668bf524b..774492a02 100644 --- a/go.mod +++ b/go.mod @@ -16,7 +16,7 @@ require ( github.com/fatih/structs v1.1.0 github.com/go-redis/redis/v8 v8.3.1 github.com/gocraft/health v0.0.0-20170925182251-8675af27fef0 - github.com/gofrs/uuid v3.3.0+incompatible + github.com/gofrs/uuid v4.0.0+incompatible github.com/golang/protobuf v1.5.3 github.com/google/go-cmp v0.5.9 github.com/gorilla/mux v1.8.0 @@ -45,9 +45,9 @@ require ( gopkg.in/olivere/elastic.v6 v6.2.31 gopkg.in/vmihailenco/msgpack.v2 v2.9.1 gorm.io/driver/mysql v1.0.3 - gorm.io/driver/postgres v1.0.5 + gorm.io/driver/postgres v1.2.0 gorm.io/driver/sqlite v1.1.3 - gorm.io/gorm v1.21.10 + gorm.io/gorm v1.21.16 ) require ( @@ -87,13 +87,13 @@ require ( github.com/imdario/mergo v0.3.12 // indirect github.com/influxdata/line-protocol v0.0.0-20200327222509-2487e7298839 // indirect github.com/jackc/chunkreader/v2 v2.0.1 // indirect - 
github.com/jackc/pgconn v1.7.0 // indirect + github.com/jackc/pgconn v1.10.0 // indirect github.com/jackc/pgio v1.0.0 // indirect github.com/jackc/pgpassfile v1.0.0 // indirect - github.com/jackc/pgproto3/v2 v2.0.5 // indirect + github.com/jackc/pgproto3/v2 v2.1.1 // indirect github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b // indirect - github.com/jackc/pgtype v1.5.0 // indirect - github.com/jackc/pgx/v4 v4.9.0 // indirect + github.com/jackc/pgtype v1.8.1 // indirect + github.com/jackc/pgx/v4 v4.13.0 // indirect github.com/jehiah/go-strftime v0.0.0-20151206194810-2efbe75097a5 // indirect github.com/jensneuse/abstractlogger v0.0.4 // indirect github.com/jensneuse/byte-template v0.0.0-20200214152254-4f3cf06e5c68 // indirect diff --git a/go.sum b/go.sum index d2b7328b1..9cc9c15b9 100644 --- a/go.sum +++ b/go.sum @@ -33,6 +33,8 @@ github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy86 github.com/Masterminds/semver v1.4.2/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww= github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= +github.com/Masterminds/semver/v3 v3.1.1 h1:hLg3sBzpNErnxhQtUy/mmLR2I9foDujNK030IGemrRc= +github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= github.com/Masterminds/sprig v2.22.0+incompatible h1:z4yfnGrZ7netVz+0EDJ0Wi+5VZCSYp4Z0m2dk6cEM60= github.com/Masterminds/sprig v2.22.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o= github.com/Microsoft/go-winio v0.5.2 h1:a9IhgEQBCUEk6QCdml9CiJGhAws+YwffDHEMp1VMrpA= @@ -202,8 +204,10 @@ github.com/go-chi/chi/v5 v5.0.0/go.mod h1:BBug9lr0cqtdAhsu6R4AAdvufI0/XBzAQSsUqJ github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-ole/go-ole v1.2.4 h1:nNBDSCOigTSiarFpYE9J/KtEA1IOW4CNeqT9TQDqCxI= github.com/go-ole/go-ole v1.2.4/go.mod h1:XCwSNxSkXRo4vlyPy93sltvi/qJq0jqQhjqQNIwKuxM= github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= @@ -236,9 +240,9 @@ github.com/gocraft/health v0.0.0-20170925182251-8675af27fef0 h1:pKjeDsx7HGGbjr7V github.com/gocraft/health v0.0.0-20170925182251-8675af27fef0/go.mod h1:rWibcVfwbUxi/QXW84U7vNTcIcZFd6miwbt8ritxh/Y= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/godbus/dbus/v5 v5.0.6/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= -github.com/gofrs/uuid v3.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= -github.com/gofrs/uuid v3.3.0+incompatible h1:8K4tyRfvU1CYPgJsveYFQMhpFd/wXNM7iK6rR7UHz84= github.com/gofrs/uuid v3.3.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= +github.com/gofrs/uuid v4.0.0+incompatible h1:1SD/1F5pU8p29ybwgQSwpQk+mwdRrXCYuPhW6m+TnJw= +github.com/gofrs/uuid v4.0.0+incompatible/go.mod 
h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= @@ -353,15 +357,17 @@ github.com/jackc/chunkreader/v2 v2.0.1/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgO github.com/jackc/pgconn v0.0.0-20190420214824-7e0022ef6ba3/go.mod h1:jkELnwuX+w9qN5YIfX0fl88Ehu4XC3keFuOJJk9pcnA= github.com/jackc/pgconn v0.0.0-20190824142844-760dd75542eb/go.mod h1:lLjNuW/+OfW9/pnVKPazfWOgNfH2aPem8YQ7ilXGvJE= github.com/jackc/pgconn v0.0.0-20190831204454-2fabfa3c18b7/go.mod h1:ZJKsE/KZfsUgOEh9hBm+xYTstcNHg7UPMVJqRfQxq4s= -github.com/jackc/pgconn v1.4.0/go.mod h1:Y2O3ZDF0q4mMacyWV3AstPJpeHXWGEetiFttmq5lahk= -github.com/jackc/pgconn v1.5.0/go.mod h1:QeD3lBfpTFe8WUnPZWN5KY/mB8FGMIYRdd8P8Jr0fAI= -github.com/jackc/pgconn v1.5.1-0.20200601181101-fa742c524853/go.mod h1:QeD3lBfpTFe8WUnPZWN5KY/mB8FGMIYRdd8P8Jr0fAI= -github.com/jackc/pgconn v1.7.0 h1:pwjzcYyfmz/HQOQlENvG1OcDqauTGaqlVahq934F0/U= -github.com/jackc/pgconn v1.7.0/go.mod h1:sF/lPpNEMEOp+IYhyQGdAvrG20gWf6A1tKlr0v7JMeA= +github.com/jackc/pgconn v1.8.0/go.mod h1:1C2Pb36bGIP9QHGBYCjnyhqu7Rv3sGshaQUvmfGIB/o= +github.com/jackc/pgconn v1.9.0/go.mod h1:YctiPyvzfU11JFxoXokUOOKQXQmDMoJL9vJzHH8/2JY= +github.com/jackc/pgconn v1.9.1-0.20210724152538-d89c8390a530/go.mod h1:4z2w8XhRbP1hYxkpTuBjTS3ne3J48K83+u0zoyvg2pI= +github.com/jackc/pgconn v1.10.0 h1:4EYhlDVEMsJ30nNj0mmgwIUXoq7e9sMJrVC2ED6QlCU= +github.com/jackc/pgconn v1.10.0/go.mod h1:4z2w8XhRbP1hYxkpTuBjTS3ne3J48K83+u0zoyvg2pI= github.com/jackc/pgio v1.0.0 h1:g12B9UwVnzGhueNavwioyEEpAmqMe1E/BN9ES+8ovkE= github.com/jackc/pgio v1.0.0/go.mod h1:oP+2QK2wFfUWgr+gxjoBH9KGBb31Eio69xUb0w5bYf8= -github.com/jackc/pgmock v0.0.0-20190831213851-13a1b77aafa2 h1:JVX6jT/XfzNqIjye4717ITLaNwV9mWbJx0dLCpcRzdA= github.com/jackc/pgmock v0.0.0-20190831213851-13a1b77aafa2/go.mod h1:fGZlG77KXmcq05nJLRkk0+p82V8B8Dw8KN2/V9c/OAE= +github.com/jackc/pgmock v0.0.0-20201204152224-4fe30f7445fd/go.mod h1:hrBW0Enj2AZTNpt/7Y5rr2xe/9Mn757Wtb2xeBzPv2c= +github.com/jackc/pgmock v0.0.0-20210724152146-4ad1a8207f65 h1:DadwsjnMwFjfWc9y5Wi/+Zz7xoE5ALHsRQlOctkOiHc= +github.com/jackc/pgmock v0.0.0-20210724152146-4ad1a8207f65/go.mod h1:5R2h2EEX+qri8jOWMbJCtaPWkrrNc7OHwsp2TCqp7ak= github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= github.com/jackc/pgproto3 v1.1.0/go.mod h1:eR5FA3leWg7p9aeAqi37XOTgTIbkABlvcPB3E5rlc78= @@ -369,33 +375,26 @@ github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190420180111-c116219b62db/go.mod github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190609003834-432c2951c711/go.mod h1:uH0AWtUmuShn0bcesswc4aBTWGvw0cAxIJp+6OB//Wg= github.com/jackc/pgproto3/v2 v2.0.0-rc3/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM= github.com/jackc/pgproto3/v2 v2.0.0-rc3.0.20190831210041-4c03ce451f29/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM= -github.com/jackc/pgproto3/v2 v2.0.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= -github.com/jackc/pgproto3/v2 v2.0.5 h1:NUbEWPmCQZbMmYlTjVoNPhc0CfnYyz2bfUAh6A5ZVJM= -github.com/jackc/pgproto3/v2 v2.0.5/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= -github.com/jackc/pgservicefile v0.0.0-20200307190119-3430c5407db8/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E= +github.com/jackc/pgproto3/v2 v2.0.6/go.mod 
h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= +github.com/jackc/pgproto3/v2 v2.1.1 h1:7PQ/4gLoqnl87ZxL7xjO0DR5gYuviDCZxQJsUlFW1eI= +github.com/jackc/pgproto3/v2 v2.1.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b h1:C8S2+VttkHFdOOCXJe+YGfa4vHYwlt4Zx+IVXQ97jYg= github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E= github.com/jackc/pgtype v0.0.0-20190421001408-4ed0de4755e0/go.mod h1:hdSHsc1V01CGwFsrv11mJRHWJ6aifDLfdV3aVjFF0zg= github.com/jackc/pgtype v0.0.0-20190824184912-ab885b375b90/go.mod h1:KcahbBH1nCMSo2DXpzsoWOAfFkdEtEJpPbVLq8eE+mc= github.com/jackc/pgtype v0.0.0-20190828014616-a8802b16cc59/go.mod h1:MWlu30kVJrUS8lot6TQqcg7mtthZ9T0EoIBFiJcmcyw= -github.com/jackc/pgtype v1.2.0/go.mod h1:5m2OfMh1wTK7x+Fk952IDmI4nw3nPrvtQdM0ZT4WpC0= -github.com/jackc/pgtype v1.3.1-0.20200510190516-8cd94a14c75a/go.mod h1:vaogEUkALtxZMCH411K+tKzNpwzCKU+AnPzBKZ+I+Po= -github.com/jackc/pgtype v1.3.1-0.20200606141011-f6355165a91c/go.mod h1:cvk9Bgu/VzJ9/lxTO5R5sf80p0DiucVtN7ZxvaC4GmQ= -github.com/jackc/pgtype v1.5.0 h1:jzBqRk2HFG2CV4AIwgCI2PwTgm6UUoCAK2ofHHRirtc= -github.com/jackc/pgtype v1.5.0/go.mod h1:JCULISAZBFGrHaOXIIFiyfzW5VY0GRitRr8NeJsrdig= +github.com/jackc/pgtype v1.8.1-0.20210724151600-32e20a603178/go.mod h1:C516IlIV9NKqfsMCXTdChteoXmwgUceqaLfjg2e3NlM= +github.com/jackc/pgtype v1.8.1 h1:9k0IXtdJXHJbyAWQgbWr1lU+MEhPXZz6RIXxfR5oxXs= +github.com/jackc/pgtype v1.8.1/go.mod h1:LUMuVrfsFfdKGLw+AFFVv6KtHOFMwRgDDzBt76IqCA4= github.com/jackc/pgx/v4 v4.0.0-20190420224344-cc3461e65d96/go.mod h1:mdxmSJJuR08CZQyj1PVQBHy9XOp5p8/SHH6a0psbY9Y= github.com/jackc/pgx/v4 v4.0.0-20190421002000-1b8f0016e912/go.mod h1:no/Y67Jkk/9WuGR0JG/JseM9irFbnEPbuWV2EELPNuM= github.com/jackc/pgx/v4 v4.0.0-pre1.0.20190824185557-6972a5742186/go.mod h1:X+GQnOEnf1dqHGpw7JmHqHc1NxDoalibchSk9/RWuDc= -github.com/jackc/pgx/v4 v4.5.0/go.mod h1:EpAKPLdnTorwmPUUsqrPxy5fphV18j9q3wrfRXgo+kA= -github.com/jackc/pgx/v4 v4.6.1-0.20200510190926-94ba730bb1e9/go.mod h1:t3/cdRQl6fOLDxqtlyhe9UWgfIi9R8+8v8GKV5TRA/o= -github.com/jackc/pgx/v4 v4.6.1-0.20200606145419-4e5062306904/go.mod h1:ZDaNWkt9sW1JMiNn0kdYBaLelIhw7Pg4qd+Vk6tw7Hg= -github.com/jackc/pgx/v4 v4.9.0 h1:6STjDqppM2ROy5p1wNDcsC7zJTjSHeuCsguZmXyzx7c= -github.com/jackc/pgx/v4 v4.9.0/go.mod h1:MNGWmViCgqbZck9ujOOBN63gK9XVGILXWCvKLGKmnms= +github.com/jackc/pgx/v4 v4.12.1-0.20210724153913-640aa07df17c/go.mod h1:1QD0+tgSXP7iUjYm9C1NxKhny7lq6ee99u/z+IHFcgs= +github.com/jackc/pgx/v4 v4.13.0 h1:JCjhT5vmhMAf/YwBHLvrBn4OGdIQBiFG6ym8Zmdx570= +github.com/jackc/pgx/v4 v4.13.0/go.mod h1:9P4X524sErlaxj0XSGZk7s+LD0eOyu1ZDUrrpznYDF0= github.com/jackc/puddle v0.0.0-20190413234325-e4ced69a3a2b/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= github.com/jackc/puddle v0.0.0-20190608224051-11cab39313c9/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= -github.com/jackc/puddle v1.1.0/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= -github.com/jackc/puddle v1.1.1/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= -github.com/jackc/puddle v1.1.2/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= +github.com/jackc/puddle v1.1.3/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= github.com/jcmturner/aescts/v2 v2.0.0/go.mod h1:AiaICIRyfYg35RUkr8yESTqvSy7csK90qZ5xfvvsoNs= github.com/jcmturner/dnsutils/v2 v2.0.0/go.mod h1:b0TnjGOvI/n42bZa+hmXL+kFJZsFT7G4t3HTlQ184QM= github.com/jcmturner/gofork v1.0.0/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o= @@ 
-469,7 +468,7 @@ github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgx github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= -github.com/lib/pq v1.3.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.10.2/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/lib/pq v1.10.6 h1:jbk+ZieJ0D7EVGJYpL9QTz7/YW6UHbmdnZWYyK5cdBs= github.com/lib/pq v1.10.6/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/lintianzhi/graylogd v0.0.0-20180503131252-dc68342f04dc h1:7f0qjuEBw/5vUrP2lyIUgAihl0A6H0E79kswNy6edeE= @@ -659,8 +658,8 @@ github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNX github.com/shirou/gopsutil v3.20.11+incompatible h1:LJr4ZQK4mPpIV5gOa4jCOKOGb4ty4DZO54I4FGqIpto= github.com/shirou/gopsutil v3.20.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4= -github.com/shopspring/decimal v0.0.0-20200227202807-02e2044944cc h1:jUIKcSPO9MoMJBbEoyE/RJoE8vz7Mb8AjvifMMwSyvY= -github.com/shopspring/decimal v0.0.0-20200227202807-02e2044944cc/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= +github.com/shopspring/decimal v1.2.0 h1:abSATXmQEYyShuxI4/vyW3tV1MrKAJzCZ/0zLUXYbsQ= +github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= @@ -796,17 +795,18 @@ golang.org/x/crypto v0.0.0-20190506204251-e1dfcc566284/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191227163750-53104e6ec876/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201112155050-0c6587e931a9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20201203163018-be400aefbc4c/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210314154223-e6e6c4f2bb5b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto 
v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d h1:sK3txAijHtOK88l68nt020reeT1ZdKLIYetKl95FzVY= golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= @@ -913,7 +913,6 @@ golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -955,6 +954,7 @@ golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= @@ -997,6 +997,7 @@ golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200108203644-89082a384178/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= @@ -1107,8 +1108,8 @@ gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gorm.io/driver/mysql v1.0.3 h1:+JKBYPfn1tygR1/of/Fh2T8iwuVwzt+PEJmKaXzMQXg= gorm.io/driver/mysql v1.0.3/go.mod h1:twGxftLBlFgNVNakL7F+P/x9oYqoymG3YYT8cAfI9oI= -gorm.io/driver/postgres v1.0.5 h1:raX6ezL/ciUmaYTvOq48jq1GE95aMC0CmxQYbxQ4Ufw= -gorm.io/driver/postgres v1.0.5/go.mod h1:qrD92UurYzNctBMVCJ8C3VQEjffEuphycXtxOudXNCA= +gorm.io/driver/postgres v1.2.0 h1:2k0EYyqii7sfWVM7yomw6a82Jt5wjuQUpWmD6fI9fGI= +gorm.io/driver/postgres v1.2.0/go.mod 
h1:c/8rVZUl30/ZyaQtAobsLRbBTubskhCrkWZDwZe1KfI= gorm.io/driver/sqlite v1.1.3 h1:BYfdVuZB5He/u9dt4qDpZqiqDJ6KhPqs5QUqsr/Eeuc= gorm.io/driver/sqlite v1.1.3/go.mod h1:AKDgRWk8lcSQSw+9kxCJnX/yySj8G3rdwYlU57cB45c= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/pumps/sql_aggregate.go b/pumps/sql_aggregate.go index 5ed7797b9..ec4935620 100644 --- a/pumps/sql_aggregate.go +++ b/pumps/sql_aggregate.go @@ -29,6 +29,8 @@ type SQLAggregatePumpConf struct { // Determines if the aggregations should be made per minute instead of per hour. StoreAnalyticsPerMinute bool `json:"store_analytics_per_minute" mapstructure:"store_analytics_per_minute"` IgnoreAggregationsList []string `json:"ignore_aggregations" mapstructure:"ignore_aggregations"` + // Set to true to disable the default tyk index creation. + OmitIndexCreation bool `json:"omit_index_creation" mapstructure:"omit_index_creation"` } type SQLAggregatePump struct { @@ -37,6 +39,9 @@ type SQLAggregatePump struct { db *gorm.DB dbType string dialect gorm.Dialector + + // this channel is used to signal that the background index creation has finished - this is used for testing + backgroundIndexCreated chan bool } var ( @@ -44,6 +49,11 @@ var ( SQLAggregateDefaultENV = PUMPS_ENV_PREFIX + "_SQLAGGREGATE" + PUMPS_ENV_META_PREFIX ) +const ( + oldAggregatedIndexName = "dimension" + newAggregatedIndexName = "idx_dimension" +) + func (c *SQLAggregatePump) New() Pump { newPump := SQLAggregatePump{} return &newPump @@ -97,6 +107,8 @@ func (c *SQLAggregatePump) Init(conf interface{}) error { c.log.Error(errDialect) return errDialect } + c.dbType = c.SQLConf.Type + db, err := gorm.Open(dialect, &gorm.Config{ AutoEmbedd: true, UseJSONTags: true, @@ -106,9 +118,27 @@ func (c *SQLAggregatePump) Init(conf interface{}) error { c.log.Error(err) return err } + c.db = db + if !c.SQLConf.TableSharding { - c.db.Table(analytics.AggregateSQLTable).AutoMigrate(&analytics.SQLAnalyticsRecordAggregate{}) + // if table doesn't exist, create it + if err := c.ensureTable(analytics.AggregateSQLTable); err != nil { + return err + } + + // we can run the index creation in background only for postgres since it supports CONCURRENTLY + shouldRunOnBackground := false + if c.dbType == "postgres" { + shouldRunOnBackground = true + c.backgroundIndexCreated = make(chan bool, 1) + } + + // if index doesn't exist, create it + if err := c.ensureIndex(analytics.AggregateSQLTable, shouldRunOnBackground); err != nil { + c.log.Error(err) + return err + } } if c.SQLConf.BatchSize == 0 { @@ -119,6 +149,71 @@ func (c *SQLAggregatePump) Init(conf interface{}) error { return nil } +// ensureIndex creates the new optimized index for tyk_aggregated. 
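A note on the ensureIndex helper whose body continues just below: the DDL it issues is a single composite index over the aggregation dimensions (replacing the gorm index tags removed from SQLAnalyticsRecordAggregate in analytics/aggregate.go earlier in this patch), and on PostgreSQL the CONCURRENTLY option is added and the call is pushed to a goroutine so Init does not block while a large existing table is indexed. A rough sketch of how the statement is put together, using only the table and index names shown in this patch (buildIndexStatement itself is an illustrative stand-in, not the pump's method):

    package main

    import "fmt"

    const newAggregatedIndexName = "idx_dimension"

    // buildIndexStatement sketches the DDL ensureIndex executes: a composite
    // index over the aggregation dimensions, created CONCURRENTLY on PostgreSQL
    // so the table is not locked for the duration of the build.
    func buildIndexStatement(dbType, tableName string) string {
        option := ""
        if dbType == "postgres" {
            option = "CONCURRENTLY"
        }
        return fmt.Sprintf(
            "CREATE INDEX %s IF NOT EXISTS %s ON %s (dimension, timestamp, org_id, dimension_value)",
            option, newAggregatedIndexName, tableName,
        )
    }

    func main() {
        fmt.Println(buildIndexStatement("postgres", "tyk_aggregated"))
        fmt.Println(buildIndexStatement("sqlite", "tyk_aggregated"))
    }
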
+// it uses CONCURRENTLY to avoid locking the table for a long time - postgresql.org/docs/current/sql-createindex.html#SQL-CREATEINDEX-CONCURRENTLY +// if background is true, it will run the index creation in a goroutine +// if not, it will block until it finishes +func (c *SQLAggregatePump) ensureIndex(tableName string, background bool) error { + if c.SQLConf.OmitIndexCreation { + c.log.Info("omit_index_creation set to true, omitting index creation..") + return nil + } + + if !c.db.Migrator().HasIndex(tableName, newAggregatedIndexName) { + createIndexFn := func(c *SQLAggregatePump) error { + option := "" + if c.dbType == "postgres" { + option = "CONCURRENTLY" + } + + err := c.db.Table(tableName).Exec(fmt.Sprintf("CREATE INDEX %s IF NOT EXISTS %s ON %s (dimension, timestamp, org_id, dimension_value)", option, newAggregatedIndexName, tableName)).Error + if err != nil { + c.log.Errorf("error creating index for table %s : %s", tableName, err.Error()) + return err + } + + if background { + c.backgroundIndexCreated <- true + } + + c.log.Info("Index ", newAggregatedIndexName, " for table ", tableName, " created successfully") + + return nil + } + + if background { + c.log.Info("Creating index for table ", tableName, " on background...") + + go func(c *SQLAggregatePump) { + if err := createIndexFn(c); err != nil { + c.log.Error(err) + } + }(c) + + return nil + } + + c.log.Info("Creating index for table ", tableName, "...") + return createIndexFn(c) + } + c.log.Info(newAggregatedIndexName, " already exists.") + + return nil +} + +// ensureTable creates the table if it doesn't exist +func (c *SQLAggregatePump) ensureTable(tableName string) error { + if !c.db.Migrator().HasTable(tableName) { + c.db = c.db.Table(tableName) + + if err := c.db.Migrator().CreateTable(&analytics.SQLAnalyticsRecordAggregate{}); err != nil { + c.log.Error("error creating table", err) + return err + } + } + return nil +} + // WriteData aggregates and writes the passed data to SQL database. When table sharding is enabled, startIndex and endIndex // are found by checking timestamp of the records. The main for loop iterates and finds the index where a new day starts. // Then, the data is passed to AggregateData function and written to database day by day on different tables. 
However, @@ -155,8 +250,11 @@ func (c *SQLAggregatePump) WriteData(ctx context.Context, data []interface{}) er table = analytics.AggregateSQLTable + "_" + recDate c.db = c.db.Table(table) - if !c.db.Migrator().HasTable(table) { - c.db.AutoMigrate(&analytics.SQLAnalyticsRecordAggregate{}) + if errTable := c.ensureTable(table); errTable != nil { + return errTable + } + if err := c.ensureIndex(table, false); err != nil { + return err } } else { i = dataLen // write all records at once for non-sharded case, stop for loop after 1 iteration diff --git a/pumps/sql_aggregate_test.go b/pumps/sql_aggregate_test.go index a3da85333..48dc1b2aa 100644 --- a/pumps/sql_aggregate_test.go +++ b/pumps/sql_aggregate_test.go @@ -2,12 +2,15 @@ package pumps import ( "context" + "errors" "net/http" "testing" "time" "github.com/TykTechnologies/tyk-pump/analytics" "github.com/stretchr/testify/assert" + "gorm.io/gorm" + "gorm.io/gorm/logger" ) func TestSQLAggregateInit(t *testing.T) { @@ -26,6 +29,9 @@ func TestSQLAggregateInit(t *testing.T) { assert.NotNil(t, pmp.db) assert.Equal(t, "sqlite", pmp.db.Dialector.Name()) + assert.Equal(t, true, pmp.db.Migrator().HasTable(analytics.AggregateSQLTable)) + + assert.Equal(t, true, pmp.db.Migrator().HasIndex(analytics.AggregateSQLTable, newAggregatedIndexName)) // Checking with invalid type cfg["type"] = "invalid" @@ -33,7 +39,6 @@ func TestSQLAggregateInit(t *testing.T) { invalidDialectErr := pmp2.Init(cfg) assert.NotNil(t, invalidDialectErr) // TODO check how to test postgres connection - it's going to requiere to have some postgres up - } func TestSQLAggregateWriteData_Sharded(t *testing.T) { @@ -47,6 +52,8 @@ func TestSQLAggregateWriteData_Sharded(t *testing.T) { t.Fatal("SQL Pump Aggregate couldn't be initialized with err: ", err) } + // wait until the index is created for sqlite to avoid locking + keys := make([]interface{}, 8) now := time.Now() nowPlus1 := time.Now().AddDate(0, 0, 1) @@ -100,7 +107,7 @@ func TestSQLAggregateWriteData_Sharded(t *testing.T) { } func TestSQLAggregateWriteData(t *testing.T) { - pmp := SQLAggregatePump{} + pmp := &SQLAggregatePump{} cfg := make(map[string]interface{}) cfg["type"] = "sqlite" cfg["batch_size"] = 2000 @@ -113,6 +120,9 @@ func TestSQLAggregateWriteData(t *testing.T) { pmp.db.Migrator().DropTable(analytics.AggregateSQLTable) }(table) + err = pmp.ensureIndex(analytics.AggregateSQLTable, false) + assert.Nil(t, err) + now := time.Now() nowPlus1 := time.Now().Add(1 * time.Hour) @@ -270,7 +280,7 @@ func TestSQLAggregateWriteDataValues(t *testing.T) { // Configure and Initialise pump first dbRecords := []analytics.SQLAnalyticsRecordAggregate{} - pmp := SQLAggregatePump{} + pmp := &SQLAggregatePump{} cfg := make(map[string]interface{}) cfg["type"] = "sqlite" cfg["batch_size"] = 1 @@ -279,7 +289,7 @@ func TestSQLAggregateWriteDataValues(t *testing.T) { if err != nil { t.Fatal("SQL Pump Aggregate couldn't be initialized with err: ", err) } - defer func(pmp SQLAggregatePump) { + defer func(pmp *SQLAggregatePump) { err := pmp.db.Migrator().DropTable(analytics.AggregateSQLTable) if err != nil { t.Error(err) @@ -326,3 +336,175 @@ func TestDecodeRequestAndDecodeResponseSQLAggregate(t *testing.T) { assert.False(t, newPump.GetDecodedRequest()) assert.False(t, newPump.GetDecodedResponse()) } + +func TestEnsureIndex(t *testing.T) { + //nolint:govet + tcs := []struct { + testName string + givenTableName string + expectedErr error + pmpSetupFn func(tableName string) *SQLAggregatePump + givenRunInBackground bool + shouldHaveIndex bool + }{ + { + 
testName: "index created correctly, not background", + pmpSetupFn: func(tableName string) *SQLAggregatePump { + pmp := &SQLAggregatePump{} + cfg := &SQLAggregatePumpConf{} + cfg.Type = "sqlite" + cfg.ConnectionString = "" + pmp.SQLConf = cfg + + pmp.log = log.WithField("prefix", "sql-aggregate-pump") + dialect, errDialect := Dialect(&pmp.SQLConf.SQLConf) + if errDialect != nil { + return nil + } + db, err := gorm.Open(dialect, &gorm.Config{ + AutoEmbedd: true, + UseJSONTags: true, + Logger: logger.Default.LogMode(logger.Info), + }) + if err != nil { + return nil + } + pmp.db = db + + if err := pmp.ensureTable(tableName); err != nil { + return nil + } + + return pmp + }, + givenTableName: "test", + givenRunInBackground: false, + expectedErr: nil, + shouldHaveIndex: true, + }, + { + testName: "index created correctly, background", + pmpSetupFn: func(tableName string) *SQLAggregatePump { + pmp := &SQLAggregatePump{} + cfg := &SQLAggregatePumpConf{} + cfg.Type = "sqlite" + cfg.ConnectionString = "" + pmp.SQLConf = cfg + + pmp.log = log.WithField("prefix", "sql-aggregate-pump") + dialect, errDialect := Dialect(&pmp.SQLConf.SQLConf) + if errDialect != nil { + return nil + } + db, err := gorm.Open(dialect, &gorm.Config{ + AutoEmbedd: true, + UseJSONTags: true, + Logger: logger.Default.LogMode(logger.Info), + }) + if err != nil { + return nil + } + pmp.db = db + + pmp.backgroundIndexCreated = make(chan bool, 1) + + if err := pmp.ensureTable(tableName); err != nil { + return nil + } + + return pmp + }, + givenTableName: "test2", + givenRunInBackground: true, + expectedErr: nil, + shouldHaveIndex: true, + }, + { + testName: "index created on non existing table, not background", + pmpSetupFn: func(tableName string) *SQLAggregatePump { + pmp := &SQLAggregatePump{} + cfg := &SQLAggregatePumpConf{} + cfg.Type = "sqlite" + cfg.ConnectionString = "" + pmp.SQLConf = cfg + + pmp.log = log.WithField("prefix", "sql-aggregate-pump") + dialect, errDialect := Dialect(&pmp.SQLConf.SQLConf) + if errDialect != nil { + return nil + } + db, err := gorm.Open(dialect, &gorm.Config{ + AutoEmbedd: true, + UseJSONTags: true, + Logger: logger.Default.LogMode(logger.Info), + }) + if err != nil { + return nil + } + pmp.db = db + + return pmp + }, + givenTableName: "test3", + givenRunInBackground: false, + expectedErr: errors.New("no such table: main.test3"), + shouldHaveIndex: false, + }, + { + testName: "omit_index_creation enabled", + pmpSetupFn: func(tableName string) *SQLAggregatePump { + pmp := &SQLAggregatePump{} + cfg := &SQLAggregatePumpConf{} + cfg.Type = "sqlite" + cfg.ConnectionString = "" + cfg.OmitIndexCreation = true + pmp.SQLConf = cfg + + pmp.log = log.WithField("prefix", "sql-aggregate-pump") + dialect, errDialect := Dialect(&pmp.SQLConf.SQLConf) + if errDialect != nil { + return nil + } + db, err := gorm.Open(dialect, &gorm.Config{ + AutoEmbedd: true, + UseJSONTags: true, + Logger: logger.Default.LogMode(logger.Info), + }) + if err != nil { + return nil + } + pmp.db = db + + if err := pmp.ensureTable(tableName); err != nil { + return nil + } + return pmp + }, + givenTableName: "test3", + givenRunInBackground: false, + expectedErr: nil, + shouldHaveIndex: false, + }, + } + + for _, tc := range tcs { + t.Run(tc.testName, func(t *testing.T) { + pmp := tc.pmpSetupFn(tc.givenTableName) + assert.NotNil(t, pmp) + + actualErr := pmp.ensureIndex(tc.givenTableName, tc.givenRunInBackground) + + if actualErr == nil { + if tc.givenRunInBackground { + // wait for the background index creation to finish + 
<-pmp.backgroundIndexCreated + } else { + hasIndex := pmp.db.Table(tc.givenTableName).Migrator().HasIndex(tc.givenTableName, newAggregatedIndexName) + assert.Equal(t, tc.shouldHaveIndex, hasIndex) + } + } else { + assert.Equal(t, tc.expectedErr.Error(), actualErr.Error()) + } + }) + } +} From 092d669872ecf04861f29637d96533949d35eb54 Mon Sep 17 00:00:00 2001 From: Matias <83959431+mativm02@users.noreply.github.com> Date: Fri, 8 Sep 2023 13:07:37 -0300 Subject: [PATCH 101/102] updating TrackAllPaths comment (#722) --- pumps/prometheus.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pumps/prometheus.go b/pumps/prometheus.go index 8613685a5..b133476d1 100644 --- a/pumps/prometheus.go +++ b/pumps/prometheus.go @@ -43,7 +43,7 @@ type PrometheusConf struct { // Metrics to exclude from exposition. Currently, excludes only the base metrics. DisabledMetrics []string `json:"disabled_metrics" mapstructure:"disabled_metrics"` // Specifies if it should expose aggregated metrics for all the endpoints. By default, `false` - // which means that all APIs endpoints will be counted as 'unknown' unless the API use track endpoint plugin. + // which means that all APIs endpoints will be counted as 'unknown' unless the API uses the track endpoint plugin. TrackAllPaths bool `json:"track_all_paths" mapstructure:"track_all_paths"` // Custom Prometheus metrics. CustomMetrics CustomMetrics `json:"custom_metrics" mapstructure:"custom_metrics"` From 7af2a45bde2e28e127dc71b6ab13b31436cf082a Mon Sep 17 00:00:00 2001 From: Kofo Okesola Date: Mon, 18 Sep 2023 09:31:19 +0100 Subject: [PATCH 102/102] [TT-100053]: aggregate graph aggregate records by api_id (#725) * aggregate graph iaggregate records by api_id * fixed tests * fix dimensions test * fixed sharded table * fix goang ci lint --- analytics/aggregate.go | 20 +++++++++++++++++--- analytics/aggregate_test.go | 35 ++++++++++++----------------------- pumps/graph_sql_aggregate.go | 21 +++++++++++---------- 3 files changed, 40 insertions(+), 36 deletions(-) diff --git a/analytics/aggregate.go b/analytics/aggregate.go index bea95bd56..5577dda3b 100644 --- a/analytics/aggregate.go +++ b/analytics/aggregate.go @@ -147,6 +147,20 @@ type SQLAnalyticsRecordAggregate struct { Code `json:"code" gorm:"embedded"` } +type GraphSQLAnalyticsRecordAggregate struct { + ID string `gorm:"primaryKey"` + + OrgID string `json:"org_id"` + Dimension string `json:"dimension"` + DimensionValue string `json:"dimension_value"` + APIID string `json:"api_id"` + + Counter `json:"counter" gorm:"embedded"` + Code `json:"code" gorm:"embedded"` + + TimeStamp int64 `json:"timestamp"` +} + type Code struct { Code1x int `json:"1x" gorm:"1x"` Code200 int `json:"200" gorm:"200"` @@ -611,6 +625,7 @@ func replaceUnsupportedChars(path string) string { return result } +// AggregateGraphData collects the graph records into a map of GraphRecordAggregate to apiID func AggregateGraphData(data []interface{}, dbIdentifier string, aggregationTime int) map[string]GraphRecordAggregate { aggregateMap := make(map[string]GraphRecordAggregate) @@ -619,14 +634,13 @@ func AggregateGraphData(data []interface{}, dbIdentifier string, aggregationTime if !ok { continue } - if !record.IsGraphRecord() { continue } graphRec := record.ToGraphRecord() - aggregate, found := aggregateMap[record.OrgID] + aggregate, found := aggregateMap[record.APIID] if !found { aggregate = NewGraphRecordAggregate() @@ -676,7 +690,7 @@ func AggregateGraphData(data []interface{}, dbIdentifier string, aggregationTime 
aggregate.RootFields[field].Identifier = field aggregate.RootFields[field].HumanIdentifier = field } - aggregateMap[record.OrgID] = aggregate + aggregateMap[record.APIID] = aggregate } return aggregateMap } diff --git a/analytics/aggregate_test.go b/analytics/aggregate_test.go index cd99c8d1f..77ec7624e 100644 --- a/analytics/aggregate_test.go +++ b/analytics/aggregate_test.go @@ -90,6 +90,8 @@ func TestTrimTag(t *testing.T) { } func TestAggregateGraphData(t *testing.T) { + query := `{"query":"query{\n characters(filter: {\n \n }){\n info{\n count\n }\n }\n}"}` + rawResponse := `{"data":{"characters":{"info":{"count":758}}}}` sampleRecord := AnalyticsRecord{ TimeStamp: time.Date(2022, 1, 1, 0, 0, 0, 0, time.UTC), Method: "POST", @@ -109,6 +111,8 @@ func TestAggregateGraphData(t *testing.T) { APIKey: "test-key", TrackPath: true, OauthID: "test-id", + RawRequest: base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf(requestTemplate, len(query), query))), + RawResponse: base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf(responseTemplate, len(rawResponse), rawResponse))), } compareFields := func(r *require.Assertions, expected, actual map[string]*Counter) { @@ -133,16 +137,12 @@ func TestAggregateGraphData(t *testing.T) { records := make([]interface{}, 3) for i := range records { record := sampleRecord - query := `{"query":"query{\n characters(filter: {\n \n }){\n info{\n count\n }\n }\n}"}` - response := `{"data":{"characters":{"info":{"count":758}}}}` - record.RawRequest = base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf(requestTemplate, len(query), query))) - record.RawResponse = base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf(responseTemplate, len(response), response))) records[i] = record } return records }, expectedAggregate: map[string]GraphRecordAggregate{ - "test-org": { + "test-api": { Types: map[string]*Counter{ "Characters": {Hits: 3, ErrorTotal: 0, Success: 3}, "Info": {Hits: 3, ErrorTotal: 0, Success: 3}, @@ -163,10 +163,6 @@ func TestAggregateGraphData(t *testing.T) { records := make([]interface{}, 3) for i := range records { record := sampleRecord - query := `{"query":"query{\n characters(filter: {\n \n }){\n info{\n count\n }\n }\n}"}` - response := `{"data":{"characters":{"info":{"count":758}}}}` - record.RawRequest = base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf(requestTemplate, len(query), query))) - record.RawResponse = base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf(responseTemplate, len(response), response))) if i == 1 { record.Tags = []string{} } @@ -175,7 +171,7 @@ func TestAggregateGraphData(t *testing.T) { return records }, expectedAggregate: map[string]GraphRecordAggregate{ - "test-org": { + "test-api": { Types: map[string]*Counter{ "Characters": {Hits: 2, ErrorTotal: 0, Success: 2}, "Info": {Hits: 2, ErrorTotal: 0, Success: 2}, @@ -196,19 +192,16 @@ func TestAggregateGraphData(t *testing.T) { records := make([]interface{}, 3) for i := range records { record := sampleRecord - query := `{"query":"query{\n characters(filter: {\n \n }){\n info{\n count\n }\n }\n}"}` - response := `{"data":{"characters":{"info":{"count":758}}}}` if i == 1 { - response = graphErrorResponse + response := graphErrorResponse + record.RawResponse = base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf(responseTemplate, len(response), response))) } - record.RawRequest = base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf(requestTemplate, len(query), query))) - record.RawResponse = base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf(responseTemplate, len(response), 
response))) records[i] = record } return records }, expectedAggregate: map[string]GraphRecordAggregate{ - "test-org": { + "test-api": { Types: map[string]*Counter{ "Characters": {Hits: 3, ErrorTotal: 1, Success: 2}, "Info": {Hits: 3, ErrorTotal: 1, Success: 2}, @@ -229,10 +222,6 @@ func TestAggregateGraphData(t *testing.T) { records := make([]interface{}, 5) for i := range records { record := sampleRecord - query := `{"query":"query{\n characters(filter: {\n \n }){\n info{\n count\n }\n }\n}"}` - response := `{"data":{"characters":{"info":{"count":758}}}}` - record.RawRequest = base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf(requestTemplate, len(query), query))) - record.RawResponse = base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf(responseTemplate, len(response), response))) if i == 2 || i == 4 { record.ResponseCode = 500 } @@ -241,7 +230,7 @@ func TestAggregateGraphData(t *testing.T) { return records }, expectedAggregate: map[string]GraphRecordAggregate{ - "test-org": { + "test-api": { Types: map[string]*Counter{ "Characters": {Hits: 5, ErrorTotal: 2, Success: 3}, "Info": {Hits: 5, ErrorTotal: 2, Success: 3}, @@ -325,7 +314,7 @@ func TestAggregateGraphData_Dimension(t *testing.T) { r := require.New(t) aggregated := AggregateGraphData(records, "", 1) r.Len(aggregated, 1) - aggre := aggregated["test-org"] + aggre := aggregated["test-api"] dimensions := aggre.Dimensions() fmt.Println(dimensions) for d, values := range responsesCheck { @@ -337,7 +326,7 @@ func TestAggregateGraphData_Dimension(t *testing.T) { } } if !found { - t.Errorf("item missing from dimensions: NameL %s, Value: %s, Hits:3", d, v) + t.Errorf("item missing from dimensions: Name: %s, Value: %s, Hits:3", d, v) } } } diff --git a/pumps/graph_sql_aggregate.go b/pumps/graph_sql_aggregate.go index cd9ef78f7..5df39ad76 100644 --- a/pumps/graph_sql_aggregate.go +++ b/pumps/graph_sql_aggregate.go @@ -68,7 +68,7 @@ func (s *GraphSQLAggregatePump) Init(conf interface{}) error { } s.db = db if !s.SQLConf.TableSharding { - if err := s.db.Table(analytics.AggregateGraphSQLTable).AutoMigrate(&analytics.SQLAnalyticsRecordAggregate{}); err != nil { + if err := s.db.Table(analytics.AggregateGraphSQLTable).AutoMigrate(&analytics.GraphSQLAnalyticsRecordAggregate{}); err != nil { s.log.WithError(err).Warn("error migrating table") } } @@ -115,7 +115,7 @@ func (s *GraphSQLAggregatePump) WriteData(ctx context.Context, data []interface{ table = analytics.AggregateGraphSQLTable + "_" + recDate s.db = s.db.Table(table) if !s.db.Migrator().HasTable(table) { - if err := s.db.AutoMigrate(&analytics.SQLAnalyticsRecordAggregate{}); err != nil { + if err := s.db.AutoMigrate(&analytics.GraphSQLAnalyticsRecordAggregate{}); err != nil { s.log.WithError(err).Warn("error running auto migration") } } @@ -132,11 +132,11 @@ func (s *GraphSQLAggregatePump) WriteData(ctx context.Context, data []interface{ aggregationTime = 60 } - analyticsPerOrg := analytics.AggregateGraphData(data[startIndex:endIndex], "", aggregationTime) + analyticsPerAPI := analytics.AggregateGraphData(data[startIndex:endIndex], "", aggregationTime) - for orgID := range analyticsPerOrg { - ag := analyticsPerOrg[orgID] - err := s.DoAggregatedWriting(ctx, table, orgID, &ag) + for apiID := range analyticsPerAPI { + ag := analyticsPerAPI[apiID] + err := s.DoAggregatedWriting(ctx, table, ag.OrgID, apiID, &ag) if err != nil { s.log.WithError(err).Error("error writing record") return err @@ -150,14 +150,15 @@ func (s *GraphSQLAggregatePump) WriteData(ctx context.Context, data []interface{ 
return nil } -func (s *GraphSQLAggregatePump) DoAggregatedWriting(ctx context.Context, table, orgID string, ag *analytics.GraphRecordAggregate) error { - recs := []analytics.SQLAnalyticsRecordAggregate{} +func (s *GraphSQLAggregatePump) DoAggregatedWriting(ctx context.Context, table, orgID, apiID string, ag *analytics.GraphRecordAggregate) error { + var recs []analytics.GraphSQLAnalyticsRecordAggregate dimensions := ag.Dimensions() for _, d := range dimensions { - rec := analytics.SQLAnalyticsRecordAggregate{ - ID: hex.EncodeToString([]byte(fmt.Sprintf("%v", ag.TimeStamp.Unix()) + orgID + d.Name + d.Value)), + rec := analytics.GraphSQLAnalyticsRecordAggregate{ + ID: hex.EncodeToString([]byte(fmt.Sprintf("%v", ag.TimeStamp.Unix()) + apiID + d.Name + d.Value)), OrgID: orgID, + APIID: apiID, TimeStamp: ag.TimeStamp.Unix(), Counter: *d.Counter, Dimension: d.Name,
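To make the intent of this last patch concrete: AggregateGraphData now buckets graph records by APIID rather than OrgID, and DoAggregatedWriting folds the apiID into the generated row ID, so graph aggregates for two APIs belonging to the same organisation no longer collapse into a single per-org row. A toy sketch of the new grouping key, assuming a pared-down record type (groupByAPI and graphRecord are illustrative only):

    package main

    import "fmt"

    // graphRecord is a minimal stand-in carrying only the fields the
    // grouping key depends on.
    type graphRecord struct {
        OrgID string
        APIID string
    }

    // groupByAPI mirrors the new keying: one aggregate bucket per APIID,
    // where previously records were bucketed per OrgID.
    func groupByAPI(records []graphRecord) map[string]int {
        buckets := make(map[string]int)
        for _, r := range records {
            buckets[r.APIID]++ // hits per API, even when the org is shared
        }
        return buckets
    }

    func main() {
        recs := []graphRecord{
            {OrgID: "org-1", APIID: "api-a"},
            {OrgID: "org-1", APIID: "api-b"},
            {OrgID: "org-1", APIID: "api-a"},
        }
        fmt.Println(groupByAPI(recs)) // map[api-a:2 api-b:1]
    }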