From ede986e2c32df31a8f056a4c7e3f066193b5ec18 Mon Sep 17 00:00:00 2001
From: Bryan Aguilar
Date: Tue, 15 Aug 2023 08:51:55 -0700
Subject: [PATCH 1/8] Update collector versions

---
 go.mod         | 271 ++++++++++-----------
 go.sum         | 594 ++++++++++++++++++++++-----------------------
 testbed/go.mod | 297 ++++++++++++-----------
 testbed/go.sum | 646 ++++++++++++++++++++++++-------------------------
 4 files changed, 900 insertions(+), 908 deletions(-)

diff --git a/go.mod b/go.mod
index 64552c3db..168c7ee19 100644
--- a/go.mod
+++ b/go.mod
@@ -3,66 +3,66 @@ module github.com/aws-observability/aws-otel-collector
 
 go 1.19
 
 require (
- github.com/open-telemetry/opentelemetry-collector-contrib/confmap/provider/s3provider v0.82.0
- github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsemfexporter v0.82.0
- github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsxrayexporter v0.82.0
- github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter v0.82.0
- github.com/open-telemetry/opentelemetry-collector-contrib/exporter/dynatraceexporter v0.82.0
- github.com/open-telemetry/opentelemetry-collector-contrib/exporter/fileexporter v0.82.0
- github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter v0.82.0
- github.com/open-telemetry/opentelemetry-collector-contrib/exporter/loadbalancingexporter v0.82.0
- github.com/open-telemetry/opentelemetry-collector-contrib/exporter/logzioexporter v0.82.0
- github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusexporter v0.82.0
- github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter v0.82.0
- github.com/open-telemetry/opentelemetry-collector-contrib/exporter/sapmexporter v0.82.0
- github.com/open-telemetry/opentelemetry-collector-contrib/exporter/signalfxexporter v0.82.0
- github.com/open-telemetry/opentelemetry-collector-contrib/extension/awsproxy v0.82.0
- github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension v0.82.0
- github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/ecsobserver v0.82.0
- github.com/open-telemetry/opentelemetry-collector-contrib/extension/pprofextension v0.82.0
- github.com/open-telemetry/opentelemetry-collector-contrib/extension/sigv4authextension v0.82.0
- github.com/open-telemetry/opentelemetry-collector-contrib/processor/attributesprocessor v0.82.0
- github.com/open-telemetry/opentelemetry-collector-contrib/processor/cumulativetodeltaprocessor v0.82.0
- github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatorateprocessor v0.82.0
- github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor v0.82.0
- github.com/open-telemetry/opentelemetry-collector-contrib/processor/groupbytraceprocessor v0.82.0
- github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sattributesprocessor v0.82.0
- github.com/open-telemetry/opentelemetry-collector-contrib/processor/metricsgenerationprocessor v0.82.0
- github.com/open-telemetry/opentelemetry-collector-contrib/processor/metricstransformprocessor v0.82.0
- github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor v0.82.0
- github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor v0.82.0
- github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourceprocessor v0.82.0
- github.com/open-telemetry/opentelemetry-collector-contrib/processor/spanprocessor v0.82.0
- github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor v0.82.0
- github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver v0.82.0
- github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awsecscontainermetricsreceiver v0.82.0
- github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awsxrayreceiver v0.82.0
- github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver v0.82.0
- github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver v0.82.0
- github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.82.0
- github.com/open-telemetry/opentelemetry-collector-contrib/receiver/statsdreceiver v0.82.0
- github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver v0.82.0
+ github.com/open-telemetry/opentelemetry-collector-contrib/confmap/provider/s3provider v0.83.0
+ github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsemfexporter v0.83.0
+ github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsxrayexporter v0.83.0
+ github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter v0.83.0
+ github.com/open-telemetry/opentelemetry-collector-contrib/exporter/dynatraceexporter v0.83.0
+ github.com/open-telemetry/opentelemetry-collector-contrib/exporter/fileexporter v0.83.0
+ github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter v0.83.0
+ github.com/open-telemetry/opentelemetry-collector-contrib/exporter/loadbalancingexporter v0.83.0
+ github.com/open-telemetry/opentelemetry-collector-contrib/exporter/logzioexporter v0.83.0
+ github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusexporter v0.83.0
+ github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter v0.83.0
+ github.com/open-telemetry/opentelemetry-collector-contrib/exporter/sapmexporter v0.83.0
+ github.com/open-telemetry/opentelemetry-collector-contrib/exporter/signalfxexporter v0.83.0
+ github.com/open-telemetry/opentelemetry-collector-contrib/extension/awsproxy v0.83.0
+ github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension v0.83.0
+ github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/ecsobserver v0.83.0
+ github.com/open-telemetry/opentelemetry-collector-contrib/extension/pprofextension v0.83.0
+ github.com/open-telemetry/opentelemetry-collector-contrib/extension/sigv4authextension v0.83.0
+ github.com/open-telemetry/opentelemetry-collector-contrib/processor/attributesprocessor v0.83.0
+ github.com/open-telemetry/opentelemetry-collector-contrib/processor/cumulativetodeltaprocessor v0.83.0
+ github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatorateprocessor v0.83.0
+ github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor v0.83.0
+ github.com/open-telemetry/opentelemetry-collector-contrib/processor/groupbytraceprocessor v0.83.0
+ github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sattributesprocessor v0.83.0
+ github.com/open-telemetry/opentelemetry-collector-contrib/processor/metricsgenerationprocessor v0.83.0
+ github.com/open-telemetry/opentelemetry-collector-contrib/processor/metricstransformprocessor v0.83.0
+ github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor v0.83.0
+ github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor v0.83.0
+ github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourceprocessor v0.83.0
+ github.com/open-telemetry/opentelemetry-collector-contrib/processor/spanprocessor v0.83.0
+ github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor v0.83.0
+ github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver v0.83.0
+ github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awsecscontainermetricsreceiver v0.83.0
+ github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awsxrayreceiver v0.83.0
+ github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver v0.83.0
+ github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver v0.83.0
+ github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.83.0
+ github.com/open-telemetry/opentelemetry-collector-contrib/receiver/statsdreceiver v0.83.0
+ github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver v0.83.0
 github.com/opencontainers/runc v1.1.8
 github.com/pkg/errors v0.9.1
 github.com/spf13/cobra v1.7.0
 github.com/spf13/pflag v1.0.5
 github.com/stretchr/testify v1.8.4
- go.opentelemetry.io/collector v0.82.0
- go.opentelemetry.io/collector/component v0.82.0
- go.opentelemetry.io/collector/confmap v0.82.0
- go.opentelemetry.io/collector/exporter v0.82.0
- go.opentelemetry.io/collector/exporter/loggingexporter v0.82.0
- go.opentelemetry.io/collector/exporter/otlpexporter v0.82.0
- go.opentelemetry.io/collector/exporter/otlphttpexporter v0.82.0
- go.opentelemetry.io/collector/extension v0.82.0
- go.opentelemetry.io/collector/extension/ballastextension v0.82.0
- go.opentelemetry.io/collector/extension/zpagesextension v0.82.0
+ go.opentelemetry.io/collector v0.83.0
+ go.opentelemetry.io/collector/component v0.83.0
+ go.opentelemetry.io/collector/confmap v0.83.0
+ go.opentelemetry.io/collector/exporter v0.83.0
+ go.opentelemetry.io/collector/exporter/loggingexporter v0.83.0
+ go.opentelemetry.io/collector/exporter/otlpexporter v0.83.0
+ go.opentelemetry.io/collector/exporter/otlphttpexporter v0.83.0
+ go.opentelemetry.io/collector/extension v0.83.0
+ go.opentelemetry.io/collector/extension/ballastextension v0.83.0
+ go.opentelemetry.io/collector/extension/zpagesextension v0.83.0
 go.opentelemetry.io/collector/featuregate v1.0.0-rcv0014
- go.opentelemetry.io/collector/processor v0.82.0
- go.opentelemetry.io/collector/processor/batchprocessor v0.82.0
- go.opentelemetry.io/collector/processor/memorylimiterprocessor v0.82.0
- go.opentelemetry.io/collector/receiver v0.82.0
- go.opentelemetry.io/collector/receiver/otlpreceiver v0.82.0
+ go.opentelemetry.io/collector/processor v0.83.0
+ go.opentelemetry.io/collector/processor/batchprocessor v0.83.0
+ go.opentelemetry.io/collector/processor/memorylimiterprocessor v0.83.0
+ go.opentelemetry.io/collector/receiver v0.83.0
+ go.opentelemetry.io/collector/receiver/otlpreceiver v0.83.0
 go.uber.org/multierr v1.11.0
 go.uber.org/zap v1.25.0
 golang.org/x/sys v0.11.0
@@ -70,47 +70,48 @@ require (
- cloud.google.com/go/compute v1.20.1 // indirect
+ cloud.google.com/go/compute v1.23.0 // indirect
 cloud.google.com/go/compute/metadata v0.2.4-0.20230617002413-005d2dfb6b68 // indirect
 contrib.go.opencensus.io/exporter/prometheus v0.4.2 // indirect
 github.com/Azure/azure-sdk-for-go v65.0.0+incompatible // indirect
 github.com/Azure/go-autorest v14.2.0+incompatible // indirect
 github.com/Azure/go-autorest/autorest v0.11.28 // indirect
- github.com/Azure/go-autorest/autorest/adal v0.9.22 // indirect
+ github.com/Azure/go-autorest/autorest/adal v0.9.23 // indirect
 github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect
 github.com/Azure/go-autorest/autorest/to v0.4.0 // indirect
 github.com/Azure/go-autorest/autorest/validation v0.3.1 // indirect
 github.com/Azure/go-autorest/logger v0.2.1 // indirect
 github.com/Azure/go-autorest/tracing v0.6.0 // indirect
 github.com/DataDog/agent-payload/v5 v5.0.89 // indirect
- github.com/DataDog/datadog-agent/pkg/obfuscate v0.47.0-rc.3 // indirect
- github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.47.0-rc.3 // indirect
+ github.com/DataDog/datadog-agent/pkg/obfuscate v0.48.0-beta.1 // indirect
+ github.com/DataDog/datadog-agent/pkg/proto v0.48.0-beta.1 // indirect
+ github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.48.0-beta.1 // indirect
 github.com/DataDog/datadog-agent/pkg/trace v0.48.0-devel // indirect
- github.com/DataDog/datadog-agent/pkg/util/cgroups v0.47.0-rc.3 // indirect
- github.com/DataDog/datadog-agent/pkg/util/log v0.47.0-rc.3 // indirect
- github.com/DataDog/datadog-agent/pkg/util/pointer v0.47.0-rc.3 // indirect
- github.com/DataDog/datadog-agent/pkg/util/scrubber v0.47.0-rc.3 // indirect
+ github.com/DataDog/datadog-agent/pkg/util/cgroups v0.48.0-beta.1 // indirect
+ github.com/DataDog/datadog-agent/pkg/util/log v0.48.0-beta.1 // indirect
+ github.com/DataDog/datadog-agent/pkg/util/pointer v0.48.0-beta.1 // indirect
+ github.com/DataDog/datadog-agent/pkg/util/scrubber v0.48.0-beta.1 // indirect
 github.com/DataDog/datadog-api-client-go/v2 v2.14.0 // indirect
 github.com/DataDog/datadog-go/v5 v5.1.1 // indirect
- github.com/DataDog/go-tuf v0.3.0--fix-localmeta-fork // indirect
+ github.com/DataDog/go-tuf v1.0.1-0.5.2 // indirect
 github.com/DataDog/gohai v0.0.0-20220718130825-1776f9beb9cc // indirect
- github.com/DataDog/opentelemetry-mapping-go/pkg/inframetadata v0.5.2 // indirect
- github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.5.2 // indirect
- github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/logs v0.5.2 // indirect
- github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics v0.5.2 // indirect
- github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.5.2 // indirect
+ github.com/DataDog/opentelemetry-mapping-go/pkg/inframetadata v0.7.0 // indirect
+ github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.7.0 // indirect
+ github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/logs v0.7.0 // indirect
+ github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics v0.7.0 // indirect
+ github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.7.0 // indirect
 github.com/DataDog/sketches-go v1.4.2 // indirect
 github.com/DataDog/zstd v1.5.2 // indirect
 github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.18.0 // indirect
+ github.com/IBM/sarama v1.40.1 // indirect
 github.com/Microsoft/go-winio v0.6.1 // indirect
- github.com/Shopify/sarama v1.38.1 // indirect
 github.com/Showmax/go-fqdn v1.0.0 // indirect
 github.com/alecthomas/participle/v2 v2.0.0 // indirect
 github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 // indirect
- github.com/antonmedv/expr v1.12.5 // indirect
+ github.com/antonmedv/expr v1.13.0 // indirect
 github.com/apache/thrift v0.18.1 // indirect
 github.com/armon/go-metrics v0.4.1 // indirect
- github.com/aws/aws-sdk-go v1.44.320 // indirect
+ github.com/aws/aws-sdk-go v1.44.323 // indirect
 github.com/aws/aws-sdk-go-v2 v1.20.1 // indirect
 github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.0.0 // indirect
 github.com/aws/aws-sdk-go-v2/config v1.18.33 // indirect
@@ -143,7 +144,7 @@ require (
 github.com/cyphar/filepath-securejoin v0.2.3 // indirect
 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
 github.com/dennwc/varint v1.0.0 // indirect
- github.com/digitalocean/godo v1.97.0 // indirect
+ github.com/digitalocean/godo v1.98.0 // indirect
 github.com/docker/distribution v2.8.2+incompatible // indirect
 github.com/docker/docker v24.0.5+incompatible // indirect
 github.com/docker/go-connections v0.4.1-0.20210727194412-58542c764a11 // indirect
@@ -189,12 +190,12 @@ require (
 github.com/google/uuid v1.3.0 // indirect
 github.com/googleapis/enterprise-certificate-proxy v0.2.5 // indirect
 github.com/googleapis/gax-go/v2 v2.12.0 // indirect
- github.com/gophercloud/gophercloud v1.2.0 // indirect
+ github.com/gophercloud/gophercloud v1.3.0 // indirect
 github.com/gorilla/mux v1.8.0 // indirect
 github.com/gorilla/websocket v1.5.0 // indirect
 github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd // indirect
- github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect
- github.com/hashicorp/consul/api v1.23.0 // indirect
+ github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.2 // indirect
+ github.com/hashicorp/consul/api v1.24.0 // indirect
 github.com/hashicorp/cronexpr v1.1.1 // indirect
 github.com/hashicorp/errwrap v1.1.0 // indirect
 github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
@@ -204,14 +205,14 @@ require (
 github.com/hashicorp/go-retryablehttp v0.7.2 // indirect
 github.com/hashicorp/go-rootcerts v1.0.2 // indirect
 github.com/hashicorp/go-uuid v1.0.3 // indirect
- github.com/hashicorp/golang-lru v0.6.0 // indirect
- github.com/hashicorp/nomad/api v0.0.0-20230308192510-48e7d70fcd4b // indirect
+ github.com/hashicorp/golang-lru v1.0.2 // indirect
+ github.com/hashicorp/nomad/api v0.0.0-20230418003350-3067191c5197 // indirect
 github.com/hashicorp/serf v0.10.1 // indirect
- github.com/hetznercloud/hcloud-go v1.41.0 // indirect
+ github.com/hetznercloud/hcloud-go v1.42.0 // indirect
 github.com/iancoleman/strcase v0.3.0 // indirect
 github.com/imdario/mergo v0.3.15 // indirect
 github.com/inconshreveable/mousetrap v1.1.0 // indirect
- github.com/ionos-cloud/sdk-go/v6 v6.1.4 // indirect
+ github.com/ionos-cloud/sdk-go/v6 v6.1.6 // indirect
 github.com/jaegertracing/jaeger v1.41.0 // indirect
 github.com/jcmturner/aescts/v2 v2.0.0 // indirect
 github.com/jcmturner/dnsutils/v2 v2.0.0 // indirect
@@ -228,13 +229,13 @@ require (
 github.com/knadh/koanf/v2 v2.0.1 // indirect
 github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b // indirect
 github.com/lightstep/go-expohisto v1.0.0 // indirect
- github.com/linode/linodego v1.14.1 // indirect
+ github.com/linode/linodego v1.16.1 // indirect
 github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c // indirect
 github.com/mailru/easyjson v0.7.7 // indirect
 github.com/mattn/go-colorable v0.1.13 // indirect
 github.com/mattn/go-isatty v0.0.17 // indirect
 github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
- github.com/miekg/dns v1.1.51 // indirect
+ github.com/miekg/dns v1.1.53 // indirect
 github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible // indirect
 github.com/mitchellh/copystructure v1.2.0 // indirect
 github.com/mitchellh/go-homedir v1.1.0 // indirect
@@ -248,43 +249,43 @@ require (
 github.com/mrunalp/fileutils v0.5.0 // indirect
 github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
 github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/awsutil v0.82.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/containerinsight v0.82.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/cwlogs v0.82.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/ecsutil v0.82.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/k8s v0.82.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/metrics v0.82.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/proxy v0.82.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/xray v0.82.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.82.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.82.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter v0.82.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8sconfig v0.82.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/internal/kubelet v0.82.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/internal/metadataproviders v0.82.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/internal/sharedcomponent v0.82.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/internal/splunk v0.82.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/pkg/batchperresourceattr v0.82.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/pkg/batchpersignal v0.82.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/pkg/experimentalmetricmetadata v0.82.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl v0.82.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.82.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourcetotelemetry v0.82.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.82.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus v0.82.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheusremotewrite v0.82.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/signalfx v0.82.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin v0.82.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/awsutil v0.83.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/containerinsight v0.83.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/cwlogs v0.83.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/ecsutil v0.83.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/k8s v0.83.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/metrics v0.83.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/proxy v0.83.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/xray v0.83.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.83.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.83.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter v0.83.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8sconfig v0.83.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/internal/kubelet v0.83.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/internal/metadataproviders v0.83.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/internal/sharedcomponent v0.83.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/internal/splunk v0.83.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/pkg/batchperresourceattr v0.83.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/pkg/batchpersignal v0.83.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/pkg/experimentalmetricmetadata v0.83.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl v0.83.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.83.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourcetotelemetry v0.83.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.83.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus v0.83.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheusremotewrite v0.83.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/signalfx v0.83.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin v0.83.0 // indirect
 github.com/opencontainers/go-digest v1.0.0 // indirect
- github.com/opencontainers/image-spec v1.1.0-rc2 // indirect
- github.com/opencontainers/runtime-spec v1.0.3-0.20220909204839-494a5a6aca78 // indirect
+ github.com/opencontainers/image-spec v1.1.0-rc4 // indirect
+ github.com/opencontainers/runtime-spec v1.1.0-rc.3 // indirect
 github.com/opencontainers/selinux v1.10.0 // indirect
 github.com/openshift/api v3.9.0+incompatible // indirect
 github.com/openshift/client-go v0.0.0-20210521082421-73d9475a9142 // indirect
 github.com/opentracing/opentracing-go v1.2.0 // indirect
- github.com/openzipkin/zipkin-go v0.4.1 // indirect
+ github.com/openzipkin/zipkin-go v0.4.2 // indirect
 github.com/outcaste-io/ristretto v0.2.1 // indirect
- github.com/ovh/go-ovh v1.3.0 // indirect
+ github.com/ovh/go-ovh v1.4.1 // indirect
 github.com/patrickmn/go-cache v2.1.0+incompatible // indirect
 github.com/philhofer/fwd v1.1.2 // indirect
 github.com/pierrec/lz4/v4 v4.1.17 // indirect
@@ -295,14 +296,14 @@ require (
 github.com/prometheus/common v0.44.0 // indirect
 github.com/prometheus/common/sigv4 v0.1.0 // indirect
 github.com/prometheus/procfs v0.10.1 // indirect
- github.com/prometheus/prometheus v0.43.1 // indirect
+ github.com/prometheus/prometheus v0.44.0 // indirect
 github.com/prometheus/statsd_exporter v0.22.7 // indirect
 github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect
 github.com/rs/cors v1.9.0 // indirect
- github.com/scaleway/scaleway-sdk-go v1.0.0-beta.14 // indirect
+ github.com/scaleway/scaleway-sdk-go v1.0.0-beta.15 // indirect
 github.com/seccomp/libseccomp-golang v0.9.2-0.20220502022130-f33da4d89646 // indirect
- github.com/secure-systems-lab/go-securesystemslib v0.5.0 // indirect
- github.com/shirou/gopsutil/v3 v3.23.6 // indirect
+ github.com/secure-systems-lab/go-securesystemslib v0.7.0 // indirect
+ github.com/shirou/gopsutil/v3 v3.23.7 // indirect
 github.com/shoenig/go-m1cpu v0.1.6 // indirect
 github.com/signalfx/com_signalfx_metrics_protobuf v0.0.3 // indirect
 github.com/signalfx/gohistogram v0.0.0-20160107210732-1ccfd2ff5083 // indirect
@@ -331,20 +332,20 @@ require (
 github.com/xdg-go/stringprep v1.0.4 // indirect
 github.com/yusufpapurcu/wmi v1.2.3 // indirect
 go.opencensus.io v0.24.0 // indirect
- go.opentelemetry.io/collector/config/configauth v0.82.0 // indirect
- go.opentelemetry.io/collector/config/configcompression v0.82.0 // indirect
- go.opentelemetry.io/collector/config/configgrpc v0.82.0 // indirect
- go.opentelemetry.io/collector/config/confighttp v0.82.0 // indirect
- go.opentelemetry.io/collector/config/confignet v0.82.0 // indirect
- go.opentelemetry.io/collector/config/configopaque v0.82.0 // indirect
- go.opentelemetry.io/collector/config/configtelemetry v0.82.0 // indirect
- go.opentelemetry.io/collector/config/configtls v0.82.0 // indirect
- go.opentelemetry.io/collector/config/internal v0.82.0 // indirect
- go.opentelemetry.io/collector/connector v0.82.0 // indirect
- go.opentelemetry.io/collector/consumer v0.82.0 // indirect
- go.opentelemetry.io/collector/extension/auth v0.82.0 // indirect
+ go.opentelemetry.io/collector/config/configauth v0.83.0 // indirect
+ go.opentelemetry.io/collector/config/configcompression v0.83.0 // indirect
+ go.opentelemetry.io/collector/config/configgrpc v0.83.0 // indirect
+ go.opentelemetry.io/collector/config/confighttp v0.83.0 // indirect
+ go.opentelemetry.io/collector/config/confignet v0.83.0 // indirect
+ go.opentelemetry.io/collector/config/configopaque v0.83.0 // indirect
+ go.opentelemetry.io/collector/config/configtelemetry v0.83.0 // indirect
+ go.opentelemetry.io/collector/config/configtls v0.83.0 // indirect
+ go.opentelemetry.io/collector/config/internal v0.83.0 // indirect
+ go.opentelemetry.io/collector/connector v0.83.0 // indirect
+ go.opentelemetry.io/collector/consumer v0.83.0 // indirect
+ go.opentelemetry.io/collector/extension/auth v0.83.0 // indirect
 go.opentelemetry.io/collector/pdata v1.0.0-rcv0014 // indirect
- go.opentelemetry.io/collector/semconv v0.82.0 // indirect
+ go.opentelemetry.io/collector/semconv v0.83.0 // indirect
 go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.42.1-0.20230612162650-64be7e574a17 // indirect
 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.42.0 // indirect
 go.opentelemetry.io/contrib/propagators/b3 v1.17.0 // indirect
@@ -369,17 +370,17 @@ require (
 golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1 // indirect
 golang.org/x/mod v0.12.0 // indirect
 golang.org/x/net v0.14.0 // indirect
- golang.org/x/oauth2 v0.10.0 // indirect
+ golang.org/x/oauth2 v0.11.0 // indirect
 golang.org/x/term v0.11.0 // indirect
 golang.org/x/text v0.12.0 // indirect
 golang.org/x/time v0.3.0 // indirect
 golang.org/x/tools v0.12.0 // indirect
 gonum.org/v1/gonum v0.13.0 // indirect
- google.golang.org/api v0.134.0 // indirect
+ google.golang.org/api v0.136.0 // indirect
 google.golang.org/appengine v1.6.7 // indirect
- google.golang.org/genproto v0.0.0-20230706204954-ccb25ca9f130 // indirect
- google.golang.org/genproto/googleapis/api v0.0.0-20230706204954-ccb25ca9f130 // indirect
- google.golang.org/genproto/googleapis/rpc v0.0.0-20230720185612-659f7aaaa771 // indirect
+ google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5 // indirect
+ google.golang.org/genproto/googleapis/api v0.0.0-20230803162519-f966b187b2e5 // indirect
+ google.golang.org/genproto/googleapis/rpc v0.0.0-20230807174057-1744710a1577 // indirect
 google.golang.org/grpc v1.57.0 // indirect
 google.golang.org/protobuf v1.31.0 // indirect
 gopkg.in/inf.v0 v0.9.1 // indirect
@@ -394,7 +395,7 @@ require (
 k8s.io/klog/v2 v2.90.1 // indirect
 k8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f // indirect
 k8s.io/utils v0.0.0-20230308161112-d77c459e9343 // indirect
- sigs.k8s.io/controller-runtime v0.15.0 // indirect
+ sigs.k8s.io/controller-runtime v0.15.1 // indirect
 sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
 sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect
 sigs.k8s.io/yaml v1.3.0 // indirect
diff --git a/go.sum b/go.sum
index 3d8bc6fa8..dbd548160 100644
--- a/go.sum
+++ b/go.sum
@@ -41,8 +41,8 @@ cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJW
 cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M=
 cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz/FMzPu0s=
 cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU=
-cloud.google.com/go/compute v1.20.1 h1:6aKEtlUiwEpJzM001l0yFkpXmUVXaN8W+fbkb2AZNbg=
-cloud.google.com/go/compute v1.20.1/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM=
+cloud.google.com/go/compute v1.23.0 h1:tP41Zoavr8ptEqaW6j+LQOnyBBhO7OkOMAGrgLopTwY=
+cloud.google.com/go/compute v1.23.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM=
 cloud.google.com/go/compute/metadata v0.2.4-0.20230617002413-005d2dfb6b68 h1:aRVqY1p2IJaBGStWMsQMpkAa83cPkCDLl80eOj0Rbz4=
 cloud.google.com/go/compute/metadata v0.2.4-0.20230617002413-005d2dfb6b68/go.mod h1:1a3eRNYX12fs5UABBIXS8HXVvQbX9hRB/RkEBPORpe8=
 cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
@@ -76,8 +76,8 @@ github.com/Azure/go-autorest/autorest v0.11.28 h1:ndAExarwr5Y+GaHE6VCaY1kyS/HwwG
 github.com/Azure/go-autorest/autorest v0.11.28/go.mod h1:MrkzG3Y3AH668QyF9KRk5neJnGgmhQ6krbhR8Q5eMvA=
 github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A=
 github.com/Azure/go-autorest/autorest/adal v0.9.18/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ=
-github.com/Azure/go-autorest/autorest/adal v0.9.22 h1:/GblQdIudfEM3AWWZ0mrYJQSd7JS4S/Mbzh6F0ov0Xc=
-github.com/Azure/go-autorest/autorest/adal v0.9.22/go.mod h1:XuAbAEUv2Tta//+voMI038TrJBqjKam0me7qR+L8Cmk=
+github.com/Azure/go-autorest/autorest/adal v0.9.23 h1:Yepx8CvFxwNKpH6ja7RZ+sKX+DWYNldbLiALMC3BTz8=
+github.com/Azure/go-autorest/autorest/adal v0.9.23/go.mod h1:5pcMqFkdPhviJdlEy3kC/v1ZLnQl0MH6XA5YCcMhy4c=
 github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw=
 github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74=
 github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
@@ -97,40 +97,42 @@ github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym
 github.com/DATA-DOG/go-sqlmock v1.3.3/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM=
 github.com/DataDog/agent-payload/v5 v5.0.89 h1:uuLWf/exyNYBnheG9OH2dOWZpCJvaEHX3W9CAd8KarU=
 github.com/DataDog/agent-payload/v5 v5.0.89/go.mod h1:oQZi1VZp1e3QvlSUX4iphZCpJaFepUxWq0hNXxihKBM=
-github.com/DataDog/datadog-agent/pkg/obfuscate v0.47.0-rc.3 h1:imU7+gtggz9YPG/wJdrtLmL+bvafq+08oivQ8VRO4g8=
-github.com/DataDog/datadog-agent/pkg/obfuscate v0.47.0-rc.3/go.mod h1:e933RWa4kAWuHi5jpzEuOiULlv21HcCFEVIYegmaB5c=
-github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.47.0-rc.3 h1:LFckPfptq8yevWp5TSAbHxGv4LqaEWyRyEQAj+ioKl0=
-github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.47.0-rc.3/go.mod h1:7uPrckBTIabtHAuoJnQes2XuDmopCImBPhM+o66DvA0=
+github.com/DataDog/datadog-agent/pkg/obfuscate v0.48.0-beta.1 h1:g7kb8NGjApkncwuXjkEpYHjYj08hqklvjqB3Gs2uPpQ=
+github.com/DataDog/datadog-agent/pkg/obfuscate v0.48.0-beta.1/go.mod h1:e933RWa4kAWuHi5jpzEuOiULlv21HcCFEVIYegmaB5c=
+github.com/DataDog/datadog-agent/pkg/proto v0.48.0-beta.1 h1:Htxj/RE55AeDZ+OE6+x+kJQz3toGWzR40Baq0Dknv8U=
+github.com/DataDog/datadog-agent/pkg/proto v0.48.0-beta.1/go.mod h1:O3WwGRPZxs4BpB2ccUvIIPprhscWBRpudJT6mC+7sr8=
+github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.48.0-beta.1 h1:We9Y6+kwCnSOQilk2koeADjbZgMHFDl6iHBaobU5nAw=
+github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.48.0-beta.1/go.mod h1:5Q39ZOIOwZMnFyRadp+5gH1bFdjmb+Pgxe+j5XOwaTg=
 github.com/DataDog/datadog-agent/pkg/trace v0.48.0-devel h1:sC2wq2fuI1r3U6FmUsn4clsrFOql5XBfs1EG15LPDEc=
 github.com/DataDog/datadog-agent/pkg/trace v0.48.0-devel/go.mod h1:CmdN7Zrj+S+2hOSGW5hFT2LC2FVIF/avJTyvhUjaueI=
-github.com/DataDog/datadog-agent/pkg/util/cgroups v0.47.0-rc.3 h1:thSia6kXdVcSozdPQOAzSDCpcPBF91ScafOzETLbJP8=
-github.com/DataDog/datadog-agent/pkg/util/cgroups v0.47.0-rc.3/go.mod h1:TmxM8Pe+1QBWfM1JisS3xjvX1/kk655XY/IjqA36g6s=
-github.com/DataDog/datadog-agent/pkg/util/log v0.47.0-rc.3 h1:Pb223YrOHuT2io1nDzToc+bSDoikxAITjTl9kZvjFSY=
-github.com/DataDog/datadog-agent/pkg/util/log v0.47.0-rc.3/go.mod h1:Ci+eWLEPbZsqy9/eNBMN1FNJUqiPx+HrLcGGpVmujJ8=
-github.com/DataDog/datadog-agent/pkg/util/pointer v0.47.0-rc.3 h1:CPrsO0OU+MkjvNQhW153DQF4zuHvGkdkxGOp2M2/y34=
-github.com/DataDog/datadog-agent/pkg/util/pointer v0.47.0-rc.3/go.mod h1:HMpYpkuxDFYuYLjDTKoG0NjtPoAwIymvBEhlA3pJbJk=
-github.com/DataDog/datadog-agent/pkg/util/scrubber v0.47.0-rc.3 h1:gAL3HM+Tg5S0MLBjv5K8+elDJS6COf+9Io9dVh7EwYc=
-github.com/DataDog/datadog-agent/pkg/util/scrubber v0.47.0-rc.3/go.mod h1:Whfh1SJOwtp2YvDUNzqw/jmSbGOOso+HJHOEJULh1+M=
+github.com/DataDog/datadog-agent/pkg/util/cgroups v0.48.0-beta.1 h1:9iyw6jSwJwsFe8TooU8mqMhMfFiW6N/05OnNMg91kBY=
+github.com/DataDog/datadog-agent/pkg/util/cgroups v0.48.0-beta.1/go.mod h1:TmxM8Pe+1QBWfM1JisS3xjvX1/kk655XY/IjqA36g6s=
+github.com/DataDog/datadog-agent/pkg/util/log v0.48.0-beta.1 h1:k4tcg077NsPJRxtuGdYEm9kge+zq5QO5x6Yv3R5BwpE=
+github.com/DataDog/datadog-agent/pkg/util/log v0.48.0-beta.1/go.mod h1:Ci+eWLEPbZsqy9/eNBMN1FNJUqiPx+HrLcGGpVmujJ8=
+github.com/DataDog/datadog-agent/pkg/util/pointer v0.48.0-beta.1 h1:detMhMfwchco20v12RjjRisxP3V0mtLEjcgJZGk2cmg=
+github.com/DataDog/datadog-agent/pkg/util/pointer v0.48.0-beta.1/go.mod h1:HMpYpkuxDFYuYLjDTKoG0NjtPoAwIymvBEhlA3pJbJk=
+github.com/DataDog/datadog-agent/pkg/util/scrubber v0.48.0-beta.1 h1:EOrKgyyubncuS4LpF8aCj/12i1+GmPV+PCfj8mDaF2c=
+github.com/DataDog/datadog-agent/pkg/util/scrubber v0.48.0-beta.1/go.mod h1:Whfh1SJOwtp2YvDUNzqw/jmSbGOOso+HJHOEJULh1+M=
 github.com/DataDog/datadog-api-client-go/v2 v2.14.0 h1:cLkqg/D63I6BAxIIg6g8xMWjrAMXcvb5vbD8ixOVVyo=
 github.com/DataDog/datadog-api-client-go/v2 v2.14.0/go.mod h1:kntOqXEh1SmjwSDzW/eJkr9kS7EqttvEkelglWtJRbg=
 github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
 github.com/DataDog/datadog-go/v5 v5.1.1 h1:JLZ6s2K1pG2h9GkvEvMdEGqMDyVLEAccdX5TltWcLMU=
 github.com/DataDog/datadog-go/v5 v5.1.1/go.mod h1:KhiYb2Badlv9/rofz+OznKoEF5XKTonWyhx5K83AP8E=
-github.com/DataDog/go-tuf v0.3.0--fix-localmeta-fork h1:yBq5PrAtrM4yVeSzQ+bn050+Ysp++RKF1QmtkL4VqvU=
-github.com/DataDog/go-tuf v0.3.0--fix-localmeta-fork/go.mod h1:yA5JwkZsHTLuqq3zaRgUQf35DfDkpOZqgtBqHKpwrBs=
+github.com/DataDog/go-tuf v1.0.1-0.5.2 h1:gld/e3MXfFVB/O8hc3mloP1ayFk75Mmdkmll/9lyd9I=
+github.com/DataDog/go-tuf v1.0.1-0.5.2/go.mod h1:zBcq6f654iVqmkk8n2Cx81E1JnNTMOAx1UEO/wZR+P0=
 github.com/DataDog/gohai v0.0.0-20220718130825-1776f9beb9cc h1:gtlKB6B50/UEuFm1LeMn0R5a+tubx69OecPqxfXJDmU=
 github.com/DataDog/gohai v0.0.0-20220718130825-1776f9beb9cc/go.mod h1:oyPC4jWHHjVVNjslDAKp8EqfQBaSmODjHt4HCX+C+9Q=
-github.com/DataDog/opentelemetry-mapping-go/pkg/inframetadata v0.5.2 h1:nwZgSRQb8edVTVcFj5tkl3u3BaP6XrFxSw+tEv9A0hY=
-github.com/DataDog/opentelemetry-mapping-go/pkg/inframetadata v0.5.2/go.mod h1:u+DVO6wIQjBFuz2YzDhxOhHB5vf9CTKxB+9cJYs8SRk=
-github.com/DataDog/opentelemetry-mapping-go/pkg/internal/sketchtest v0.5.2 h1:W47xIROVye+D6WxkZcy8ETomfZlTNWoVZODwAh4LdeE=
-github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.5.2 h1:JRVQga0KlFCMyuKF/ghrZtRpmYL3XWRGXpSB5Qdk5Ko=
-github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.5.2/go.mod h1:6x6OujLzkt7Wwlu/6kYO5+8FNRBi1HEw8Qm6/qvTOQA=
-github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/logs v0.5.2 h1:xY5LVtbmcm3zZ8Ccxc8+mzkEmlOdeNQnXPDdZiXiXq4=
-github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/logs v0.5.2/go.mod h1:Ge92/UCQeo8i0RQgSnowR9uto3VhyxM6YS3W6xJD8rc=
-github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics v0.5.2 h1:FbQSZ6uXhuHzgwC73MUxqvHwV0uxKiGAeAAZIMrfUAc=
-github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics v0.5.2/go.mod h1:oPpGMNpwga8zTGUJfLy3Z/u4l6bvEYuRatJkgSUazr4=
-github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.5.2 h1:C0uzQwHCKubfmbvaZF/Qi6ernigbcoWt9A+U+s0iQGg=
-github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.5.2/go.mod h1:RT78x34OmVb0wuZLtmzzRRy43+7pCCA6ZEOGQ9mA5w0=
+github.com/DataDog/opentelemetry-mapping-go/pkg/inframetadata v0.7.0 h1:l21vDOju9zcCx+RYrNrsNs9qpWaLA8SKTHTDiHUhgEA=
+github.com/DataDog/opentelemetry-mapping-go/pkg/inframetadata v0.7.0/go.mod h1:0n4yKpsgezj7KqhkLM5weDi2kmtNlRCdlAmHN7WfMhQ=
+github.com/DataDog/opentelemetry-mapping-go/pkg/internal/sketchtest v0.7.0 h1:mVnISj3nNq9fQM7C7zi5iuEHWe7tAHS/VNPBs3qc/ug=
+github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.7.0 h1:8STZKmgRY3OvrUkaNglRiLgEvAMcTt2l+naAlW+p36k=
+github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.7.0/go.mod h1:mpbmVkOkmJq5KmHxi+zlvYXQD0o/x1MMS16CNWO8p9U=
+github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/logs v0.7.0 h1:j2wXBnS0KwLzB7tG63vI+fi6hHRbvprRHmv8XsgLfbs=
+github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/logs v0.7.0/go.mod h1:CUx9KlayjXNeJeL5ZCjbXKJ/JFYrrCOFSKZ37LlXH/w=
+github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics v0.7.0 h1:433zmJS94Pids2V+l5fQGOSfZPxnibHXAd3iqB7P4HY=
+github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics v0.7.0/go.mod h1:uVTWlYOzK82Cf88d57GvcQ+zgPW/kyOBn4xp6tCqi5Y=
+github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.7.0 h1:8sRT2Yb9eW7GhRAkqMBrcFDb6WW9D/KslM8D+6EcsYk=
+github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.7.0/go.mod h1:m/Vn+wxCD5ND4e0RwIweiBfpihD3NHuVCRDjSvhHYps=
 github.com/DataDog/sketches-go v1.4.2 h1:gppNudE9d19cQ98RYABOetxIhpTCl4m7CnbRZjvVA/o=
 github.com/DataDog/sketches-go v1.4.2/go.mod h1:xJIXldczJyyjnbDop7ZZcLxJdV3+7Kra7H1KMgpgkLk=
 github.com/DataDog/zstd v1.3.6-0.20190409195224-796139022798/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo=
@@ -145,6 +147,8 @@ github.com/HdrHistogram/hdrhistogram-go v1.0.1/go.mod h1:BWJ+nMSHY3L41Zj7CA3uXnl
 github.com/HdrHistogram/hdrhistogram-go v1.1.0/go.mod h1:yDgFjdqOqDEKOvasDdhWNXYg9BVp4O+o5f6V/ehm6Oo=
 github.com/HdrHistogram/hdrhistogram-go v1.1.2 h1:5IcZpTvzydCQeHzK4Ef/D5rrSqwxob0t8PQPMybUNFM=
 github.com/HdrHistogram/hdrhistogram-go v1.1.2/go.mod h1:yDgFjdqOqDEKOvasDdhWNXYg9BVp4O+o5f6V/ehm6Oo=
+github.com/IBM/sarama v1.40.1 h1:lL01NNg/iBeigUbT+wpPysuTYW6roHo6kc1QrffRf0k=
+github.com/IBM/sarama v1.40.1/go.mod h1:+5OFwA5Du9I6QrznhaMHsuwWdWZNMjaBSIxEWEgKOYE=
 github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0=
 github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y=
 github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA=
@@ -162,8 +166,6 @@ github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdko
 github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
 github.com/Shopify/sarama v1.22.2-0.20190604114437-cd910a683f9f/go.mod h1:XLH1GYJnLVE0XCr6KdJGVJRTwY30moWNJ4sERjXX6fs=
 github.com/Shopify/sarama v1.32.0/go.mod h1:+EmJJKZWVT/faR9RcOxJerP+LId4iWdQPBGLy1Y1Njs=
-github.com/Shopify/sarama v1.38.1 h1:lqqPUPQZ7zPqYlWpTh+LQ9bhYNu2xJL6k1SJN4WVe2A=
-github.com/Shopify/sarama v1.38.1/go.mod h1:iwv9a67Ha8VNa+TifujYoWGxWnu2kNVAQdSdZ4X2o5g=
 github.com/Shopify/toxiproxy v2.1.4+incompatible h1:TKdv8HiTLgE5wdJuEML90aBgNWsokNbMijUGhmcoBJc=
 github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
 github.com/Shopify/toxiproxy/v2 v2.3.0/go.mod h1:KvQTtB6RjCJY4zqNJn7C7JDFgsG5uoHYDirfUfpIm0c=
@@ -193,8 +195,8 @@ github.com/andybalholm/brotli v1.0.0/go.mod h1:loMXtMfwqflxFJPmdbJO0a3KNoPuLBgiu
 github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQYkqmusNa815XgQio277wI47sdRh1dUOLdyC6Q=
 github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
 github.com/antonmedv/expr v1.9.0/go.mod h1:5qsM3oLGDND7sDmQGDXHkYfkjYMUX14qsgqmHhwGEk8=
-github.com/antonmedv/expr v1.12.5 h1:Fq4okale9swwL3OeLLs9WD9H6GbgBLJyN/NUHRv+n0E=
-github.com/antonmedv/expr v1.12.5/go.mod h1:FPC8iWArxls7axbVLsW+kpg1mz29A1b2M6jt+hZfDkU=
+github.com/antonmedv/expr v1.13.0 h1:8YrTtlCzlOtXw+hpeCLDLL2uo0C0k6jmYpYTGws5c5w=
+github.com/antonmedv/expr v1.13.0/go.mod h1:FPC8iWArxls7axbVLsW+kpg1mz29A1b2M6jt+hZfDkU=
 github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
 github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
 github.com/apache/thrift v0.16.0/go.mod h1:PHK3hniurgQaNMZYaCLEqXKsYK8upmhPbmdP2FXSqgU=
@@ -223,8 +225,8 @@ github.com/aws/aws-sdk-go v1.35.24/go.mod h1:tlPOdRjfxPBpNIwqDj61rmsnA85v9jc0Ps9
 github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
 github.com/aws/aws-sdk-go v1.38.68/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
 github.com/aws/aws-sdk-go v1.40.45/go.mod h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm+LY1U59Q=
-github.com/aws/aws-sdk-go v1.44.320 h1:o2cno15HVUYj+IAgZHJ5No6ifAxwa2HcluzahMEPfOw=
-github.com/aws/aws-sdk-go v1.44.320/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
+github.com/aws/aws-sdk-go v1.44.323 h1:97/dn93DWrN1VfhAWQ2tV+xuE6oO/LO9rSsEsuC4PLU=
+github.com/aws/aws-sdk-go v1.44.323/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
 github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g=
 github.com/aws/aws-sdk-go-v2 v1.7.0/go.mod h1:tb9wi5s61kTDA5qCkcDbt3KRVV74GGslQkl/DRdX/P4=
 github.com/aws/aws-sdk-go-v2 v1.9.1/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVjy53i/mX/4=
@@ -307,7 +309,6 @@ github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqy
 github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
 github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
 github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
-github.com/census-instrumentation/opencensus-proto v0.4.1 h1:iKLQ0xPNFxR/2hzXZMrBo8f1j86j5WHzznCCQxV/b8g=
 github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
 github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
 github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
@@ -342,7 +343,6 @@ github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 h1:/inchEIKaYC1Akx+H+g
 github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
 github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
 github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI=
-github.com/codahale/rfc6979 v0.0.0-20141003034818-6a90f24967eb/go.mod h1:ZjrT6AXHbDs86ZSdt/osfBi5qfexBrKUdONk989Wnk4=
 github.com/containerd/cgroups v1.0.4 h1:jN/mbWBEaz+T1pi5OFtnkQ+8qnmEbAr1Oo1FRm5B0dA=
 github.com/containerd/cgroups v1.0.4/go.mod h1:nLNQtsF7Sl2HxNebu77i1R0oDlhiTG+kO4JTrUzo6IA=
 github.com/containerd/console v1.0.3 h1:lIr7SlA5PxZyMV30bDW0MGbiOPXwc63yRuCP0ARubLw=
@@ -394,8 +394,8 @@ github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUn
 github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 h1:fAjc9m62+UWV/WAFKLNi6ZS0675eEUC9y3AlwSbQu1Y=
 github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
 github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
-github.com/digitalocean/godo v1.97.0 h1:p9w1yCcWMZcxFSLPToNGXA96WfUVLXqoHti6GzVomL4=
-github.com/digitalocean/godo v1.97.0/go.mod h1:NRpFznZFvhHjBoqZAaOD3khVzsJ3EibzKqFL4R60dmA=
+github.com/digitalocean/godo v1.98.0 h1:potyC1eD0N9n5/P4/WmJuKgg+OGYZOBWEW+/aKTX6QQ=
+github.com/digitalocean/godo v1.98.0/go.mod h1:NRpFznZFvhHjBoqZAaOD3khVzsJ3EibzKqFL4R60dmA=
 github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI=
 github.com/docker/distribution v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
 github.com/docker/distribution v2.8.2+incompatible h1:T3de5rq0dB1j30rp0sA2rER+m322EBzniBPB6ZIzuh8=
@@ -473,7 +473,6 @@ github.com/felixge/httpsnoop v1.0.2/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSw
 github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk=
 github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
 github.com/flowstack/go-jsonschema v0.1.1/go.mod h1:yL7fNggx1o8rm9RlgXv7hTBWxdBM0rVwpMwimd3F3N0=
-github.com/flynn/go-docopt v0.0.0-20140912013429-f6dd2ebbb31e/go.mod h1:HyVoz1Mz5Co8TFO8EupIdlcpwShBmY98dkT2xeHkvEI=
 github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k=
 github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
 github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw=
@@ -787,11 +786,10 @@ github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLe
 github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
 github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
 github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
 github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
 github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
 github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/pprof v0.0.0-20230228050547-1710fef4ab10 h1:CqYfpuYIjnlNxM3msdyPRKabhXZWbKjf3Q8BWROFBso=
+github.com/google/pprof v0.0.0-20230406165453-00490a63f317 h1:hFhpt7CTmR3DX+b4R19ydQFtofxT0Sv3QsKNMVQYTMQ=
 github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
 github.com/google/s2a-go v0.1.4 h1:1kZ/sQM3srePvKs3tXAvQzo66XfcReoqFpIpIccE7Oc=
 github.com/google/s2a-go v0.1.4/go.mod h1:Ej+mSEMGRnqRzjc7VtF+jdBwYG5fuJfiZ8ELkjEwM0A=
@@ -814,8 +812,8 @@ github.com/googleapis/gax-go/v2 v2.12.0/go.mod h1:y+aIqrI5eb1YGMVJfuV3185Ts/D7qK
 github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg=
 github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g=
 github.com/gookit/color v1.3.6/go.mod h1:R3ogXq2B9rTbXoSHJ1HyUVAZ3poOJHpd9nQmyGZsfvQ=
-github.com/gophercloud/gophercloud v1.2.0 h1:1oXyj4g54KBg/kFtCdMM6jtxSzeIyg8wv4z1HoGPp1E=
-github.com/gophercloud/gophercloud v1.2.0/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM=
+github.com/gophercloud/gophercloud v1.3.0 h1:RUKyCMiZoQR3VlVR5E3K7PK1AC3/qppsWYo6dtBiqs8=
+github.com/gophercloud/gophercloud v1.3.0/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM=
 github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8=
 github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
 github.com/gordonklaus/ineffassign v0.0.0-20210225214923-2e10b2664254/go.mod h1:M9mZEtGIsR1oDaZagNPNG9iq9n2HrhZ17dsXk73V3Lw=
@@ -856,8 +854,8 @@ github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t
 github.com/grpc-ecosystem/grpc-gateway v1.14.5/go.mod h1:UJ0EZAp832vCd54Wev9N1BMKEyvcZ5+IM0AwDrnlkEc=
 github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
 github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 h1:YBftPWNWd4WwGqtY2yeZL2ef8rHAxPBD8KFhJpmcqms=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.2 h1:dygLcbEBA+t/P7ck6a8AkXv6juQ4cK0RHBoh32jxhHM=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.2/go.mod h1:Ap9RLCIJVtgQg1/BBgVEfypOAySvvlcpcVQkSzJCH4Y=
 github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645/go.mod h1:6iZfnjpejD4L/4DwD7NryNaJyCQdzwWwH2MWhCA90Kw=
 github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4=
 github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q=
@@ -866,13 +864,13 @@ github.com/hashicorp/consul/api v1.8.1/go.mod h1:sDjTOq0yUyv5G4h+BqSea7Fn6BU+Xbo
 github.com/hashicorp/consul/api v1.10.1/go.mod h1:XjsvQN+RJGWI2TWy1/kqaE16HrR2J/FWgkYjdZQsX9M=
 github.com/hashicorp/consul/api v1.12.0/go.mod h1:6pVBMo0ebnYdt2S3H87XhekM/HHrUoTD2XXb/VrZVy0=
 github.com/hashicorp/consul/api v1.13.0/go.mod h1:ZlVrynguJKcYr54zGaDbaL3fOvKC9m72FhPvA8T35KQ=
-github.com/hashicorp/consul/api v1.23.0 h1:L6e4v1AfoumqAHq/Rrsmuulev+nd7vltM3k8H329tyI=
-github.com/hashicorp/consul/api v1.23.0/go.mod h1:SfvUIT74b0EplDuNgAJQ/FVqSO6KyK2ia80UI39/Ye8=
+github.com/hashicorp/consul/api v1.24.0 h1:u2XyStA2j0jnCiVUU7Qyrt8idjRn4ORhK6DlvZ3bWhA=
+github.com/hashicorp/consul/api v1.24.0/go.mod h1:NZJGRFYruc/80wYowkPFCp1LbGmJC9L8izrwfyVx/Wg=
 github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
 github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
 github.com/hashicorp/consul/sdk v0.7.0/go.mod h1:fY08Y9z5SvJqevyZNy6WWPXiG3KwBPAvlcdx16zZ0fM=
 github.com/hashicorp/consul/sdk v0.8.0/go.mod h1:GBvyrGALthsZObzUGsfgHZQDXjg4lOjagTIwIR1vPms=
-github.com/hashicorp/consul/sdk v0.14.0 h1:Hly+BMNMssVzoWddbBnBFi3W+Fzytvm0haSkihhj3GU=
+github.com/hashicorp/consul/sdk v0.14.1 h1:ZiwE2bKb+zro68sWzZ1SgHF3kRMBZ94TwOCFRF4ylPs=
 github.com/hashicorp/cronexpr v1.1.1 h1:NJZDd87hGXjoZBdvyCF9mX4DCq5Wy7+A/w+A7q0wn6c=
 github.com/hashicorp/cronexpr v1.1.1/go.mod h1:P4wA0KBl9C5q2hABiMO7cp6jcIg96CDh1Efb3g1PWA4=
 github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
@@ -931,8 +929,8 @@ github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA
 github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
 github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
 github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
-github.com/hashicorp/golang-lru v0.6.0 h1:uL2shRDx7RTrOrTCUZEGP/wJUFiUI8QT6E7z5o8jga4=
-github.com/hashicorp/golang-lru v0.6.0/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
+github.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iPY6p1c=
+github.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
 github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
 github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
 github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
@@ -944,8 +942,8 @@ github.com/hashicorp/memberlist v0.2.2/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOn
 github.com/hashicorp/memberlist v0.3.0/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE=
 github.com/hashicorp/memberlist v0.5.0 h1:EtYPN8DpAURiapus508I4n9CzHs2W+8NZGbmmR/prTM=
 github.com/hashicorp/memberlist v0.5.0/go.mod h1:yvyXLpo0QaGE59Y7hDTsTzDD25JYBZ4mHgHUZ8lrOI0=
-github.com/hashicorp/nomad/api v0.0.0-20230308192510-48e7d70fcd4b h1:EkuSTU8c/63q4LMayj8ilgg/4I5PXDFVcnqKfs9qcwI=
-github.com/hashicorp/nomad/api v0.0.0-20230308192510-48e7d70fcd4b/go.mod h1:bKUb1ytds5KwUioHdvdq9jmrDqCThv95si0Ub7iNeBg=
+github.com/hashicorp/nomad/api v0.0.0-20230418003350-3067191c5197 h1:I5xhKLePXpXgM6pZ4xZNTiurLLS3sGuZrZFFzAbM67A=
+github.com/hashicorp/nomad/api v0.0.0-20230418003350-3067191c5197/go.mod h1:2TCrNvonL09r7EiQ6M2rNt+Cmjbn1QbzchFoTWJFpj4=
 github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
 github.com/hashicorp/serf v0.9.5/go.mod h1:UWDWwZeL5cuWDJdl0C6wrvrUwEqtQ4ZKBKKENpqIUyk=
 github.com/hashicorp/serf v0.9.6/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpTwn9UV4=
@@ -957,8 +955,8 @@ github.com/hashicorp/vault/sdk v0.1.13/go.mod h1:B+hVj7TpuQY1Y/GPbCpffmgd+tSEwvh
 github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM=
 github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM=
 github.com/hashicorp/yamux v0.0.0-20190923154419-df201c70410d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM=
-github.com/hetznercloud/hcloud-go v1.41.0 h1:KJGFRRc68QiVu4PrEP5BmCQVveCP2CM26UGQUKGpIUs=
-github.com/hetznercloud/hcloud-go v1.41.0/go.mod h1:NaHg47L6C77mngZhwBG652dTAztYrsZ2/iITJKhQkHA=
+github.com/hetznercloud/hcloud-go v1.42.0 h1:Es/CDOForQN3nOOP5Vxh1N/YHjpCg386iYEX5zCgi+A=
+github.com/hetznercloud/hcloud-go v1.42.0/go.mod h1:YADL8AbmQYH0Eo+1lkuyoc8LutT0UeMvaKP47nNUb+Y=
 github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM=
 github.com/hjson/hjson-go/v4 v4.0.0/go.mod h1:KaYt3bTw3zhBjYqnXkYywcYctk0A2nxeEFTse3rH13E=
 github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
@@ -976,8 +974,8 @@ github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2
 github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
 github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo=
 github.com/influxdata/influxdb1-client v0.0.0-20200827194710-b269163b24ab/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo=
-github.com/ionos-cloud/sdk-go/v6 v6.1.4 h1:BJHhFA8Q1SZC7VOXqKKr2BV2ysQ2/4hlk1e4hZte7GY=
-github.com/ionos-cloud/sdk-go/v6 v6.1.4/go.mod h1:Ox3W0iiEz0GHnfY9e5LmAxwklsxguuNFEUSu0gVRTME=
+github.com/ionos-cloud/sdk-go/v6 v6.1.6 h1:0n4irdqNska+1s3YMCRhrAqKbibEgQ7SwwhAlHzYT5A=
+github.com/ionos-cloud/sdk-go/v6 v6.1.6/go.mod h1:EzEgRIDxBELvfoa/uBN0kOQaqovLjUWEB7iW4/Q+t4k=
 github.com/jaegertracing/jaeger v1.22.0/go.mod h1:WnwW68MjJEViSLRQhe0nkIsBDaF3CzfFd8wJcpJv24k=
 github.com/jaegertracing/jaeger v1.34.1/go.mod h1:md+YcRcDgMCAgB9qyXl0PdstYiq8fjA8KG5cNuyV2kA=
 github.com/jaegertracing/jaeger v1.35.2/go.mod h1:e7FBVZ14ptsRjwiHEnLyxvOa4bSnZA0BDFE1OcvNiHs=
@@ -985,6 +983,7 @@ github.com/jaegertracing/jaeger v1.36.0/go.mod h1:67uyR2zQgEk7EfguOR3eZOGvGDRzY5
 github.com/jaegertracing/jaeger v1.38.0/go.mod h1:4MBTMxfCp3d4buDLxRlHnESQvTFCkN16OUIeE9BEdl4=
 github.com/jaegertracing/jaeger v1.41.0 h1:vVNky8dP46M2RjGaZ7qRENqylW+tBFay3h57N16Ip7M=
 github.com/jaegertracing/jaeger v1.41.0/go.mod h1:SIkAT75iVmA9U+mESGYuMH6UQv6V9Qy4qxo0lwfCQAc=
+github.com/jarcoal/httpmock v1.3.0 h1:2RJ8GP0IIaWwcC9Fp2BmVi8Kog3v2Hn7VXM3fTd+nuc=
 github.com/jcmturner/aescts/v2 v2.0.0 h1:9YKLH6ey7H4eDBXW8khjYslgyqG2xZikXP0EQFKrle8=
 github.com/jcmturner/aescts/v2 v2.0.0/go.mod h1:AiaICIRyfYg35RUkr8yESTqvSy7csK90qZ5xfvvsoNs=
 github.com/jcmturner/dnsutils/v2 v2.0.0 h1:lltnkeZGL0wILNvrNiVCR6Ro5PGU/SeBvVO/8c/iPbo=
@@ -1094,8 +1093,8 @@ github.com/lightstep/go-expohisto v1.0.0 h1:UPtTS1rGdtehbbAF7o/dhkWLTDI73UifG8Lb
 github.com/lightstep/go-expohisto v1.0.0/go.mod h1:xDXD0++Mu2FOaItXtdDfksfgxfV0z1TMPa+e/EUd0cs=
 github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM=
 github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4=
-github.com/linode/linodego v1.14.1 h1:uGxQyy0BidoEpLGdvfi4cPgEW+0YUFsEGrLEhcTfjNc=
-github.com/linode/linodego v1.14.1/go.mod h1:NJlzvlNtdMRRkXb0oN6UWzUkj6t+IBsyveHgZ5Ppjyk=
+github.com/linode/linodego v1.16.1 h1:5otq57M4PdHycPERRfSFZ0s1yz1ETVWGjCp3hh7+F9w=
+github.com/linode/linodego v1.16.1/go.mod h1:aESRAbpLY9R6IA1WGAWHikRI9DU9Lhesapv1MhKmPHM=
 github.com/logrusorgru/aurora v0.0.0-20181002194514-a7b3b318ed4e/go.mod h1:7rIyQOR62GCctdiQpZ/zOJlFyk6y+94wXzv6RNZgaR4=
 github.com/lucasb-eyer/go-colorful v1.0.2/go.mod h1:0MS4r+7BZKSJ5mw4/S5MPN+qHFF1fYclkSPilDOKW0s=
 github.com/lucasb-eyer/go-colorful v1.0.3/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0=
@@ -1149,6 +1148,7 @@ github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5
 github.com/matttproud/golang_protobuf_extensions v1.0.2/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
 github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
 github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
+github.com/maxatome/go-testdeep v1.12.0 h1:Ql7Go8Tg0C1D/uMMX59LAoYK7LffeJQ6X2T04nTH68g=
 github.com/mbilski/exhaustivestruct v1.2.0/go.mod h1:OeTBVxQWoEmB2J2JCHmXWPJ0aksxSUOUy+nvtVEfzXc=
 github.com/mgechev/dots v0.0.0-20190921121421-c36f7dcfbb81/go.mod h1:KQ7+USdGKfpPjXk4Ga+5XxQM4Lm4e3gAogrreFAYpOg=
 github.com/mgechev/revive v1.0.3/go.mod h1:POGGZagSo/0frdr7VeAifzS5Uka0d0GPiM35MsTO8nE=
@@ -1156,8 +1156,8 @@ github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3N
 github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso=
 github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI=
 github.com/miekg/dns v1.1.43/go.mod h1:+evo5L0630/F6ca/Z9+GAqzhjGyn8/c+TBaOyfEl0V4=
-github.com/miekg/dns v1.1.51 h1:0+Xg7vObnhrz/4ZCZcZh7zPXlmU0aveS2HDBd0m0qSo=
-github.com/miekg/dns v1.1.51/go.mod h1:2Z9d3CP1LQWihRZUf29mQ19yDThaI4DAYzte2CaQW5c=
+github.com/miekg/dns v1.1.53 h1:ZBkuHr5dxHtB1caEOlZTLPo7D3L3TWckgUUs/RHfDxw=
+github.com/miekg/dns v1.1.53/go.mod h1:uInx36IzPl7FYnDcMeVWxj9byh7DutNykX4G9Sj60FY=
 github.com/minio/highwayhash v1.0.1/go.mod h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLTk+kldvVxY=
 github.com/minio/highwayhash v1.0.2/go.mod h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLTk+kldvVxY=
 github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible h1:aKW/4cBs+yK6gpqU3K/oIwk9Q/XICqd3zOX/UFuvqmk=
@@ -1268,13 +1268,11 @@ github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+W
 github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
 github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg=
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= -github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= github.com/onsi/ginkgo v1.14.2/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= github.com/onsi/ginkgo v1.16.2/go.mod h1:CObGmKUOKaSC0RjmoAK7tKyn4Azo5P2IWuoMnvwxz1E= +github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc= github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= -github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= -github.com/onsi/ginkgo/v2 v2.0.0/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= -github.com/onsi/ginkgo/v2 v2.9.5 h1:+6Hr4uxzP4XIUyAkg61dWBw8lb/gc4/X5luuxN/EC+Q= +github.com/onsi/ginkgo/v2 v2.11.0 h1:WgqUCUt/lT6yXoQ8Wef0fsNn5cAuMK7+KT9UFRz2tcU= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= @@ -1284,167 +1282,165 @@ github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoT github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.10.4/go.mod h1:g/HbgYopi++010VEqkFgJHKC09uJiW9UkXvMUuKHUCQ= github.com/onsi/gomega v1.13.0/go.mod h1:lRk9szgn8TxENtWd0Tp4c3wjlRfMTMH27I+3Je41yGY= -github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= -github.com/onsi/gomega v1.18.1/go.mod h1:0q+aL8jAiMXy9hbwj2mr5GziHiwhAIQpFmmtT5hitRs= -github.com/onsi/gomega v1.27.7 h1:fVih9JD6ogIiHUN6ePK7HJidyEDpWGVB5mzM7cWNXoU= +github.com/onsi/gomega v1.27.10 h1:naR28SdDFlqrG6kScpT8VWpu1xWY5nJRCF3XaYyBjhI= github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= -github.com/open-telemetry/opentelemetry-collector-contrib/confmap/provider/s3provider v0.82.0 h1:Ncaq8p3A+Pp9BA/xKrlzW0a1JbeSA0TBUG165Gl9pvc= -github.com/open-telemetry/opentelemetry-collector-contrib/confmap/provider/s3provider v0.82.0/go.mod h1:RlyFkcDE3CsZ2oJqLGyMwP+9XHzj2HvJOEWs1zkRcD4= -github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsemfexporter v0.82.0 h1:wiri43hEKmq8pXfbRnrHEvKgN8nsZEeqxmTDV5GsHdg= -github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsemfexporter v0.82.0/go.mod h1:LkTbjb66U2BXWWX2Jufayiq6+zbA81n/e2WXBDShLSY= -github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsxrayexporter v0.82.0 h1:+KaCvd07PhWlI63T+Lsygo1fodbmRM35AgEDtlzw96A= -github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsxrayexporter v0.82.0/go.mod h1:p6dJC7m2ET6cA3kCYBY6RqtHlZB0lM2YOeEteqEtNLE= -github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter v0.82.0 h1:qTwIwx2gAnxBK4yrSJr3dayohzbjeZdSsAH90awqMO8= -github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter v0.82.0/go.mod h1:PEiq66gxQahvL/fuH76TmqtIvXTzDELXLunxefNgNsA= -github.com/open-telemetry/opentelemetry-collector-contrib/exporter/dynatraceexporter v0.82.0 h1:2fLuZoX0ZqTDMwV1fgbu4rMlogO086dYrGop40brLVY= -github.com/open-telemetry/opentelemetry-collector-contrib/exporter/dynatraceexporter v0.82.0/go.mod h1:vGIBvYpVqmf6qOBFQ7oQpcmrkTYb0ivombpMEKhvA0c= -github.com/open-telemetry/opentelemetry-collector-contrib/exporter/fileexporter v0.82.0 h1:1PLJXk12CueltS9JA3X2BNzMKbYD3B/VEKTJBUR5Iy0= 
-github.com/open-telemetry/opentelemetry-collector-contrib/exporter/fileexporter v0.82.0/go.mod h1:5m7wwTnnssL2m9zzWwaK40t6VZLUlC7Tbx3hsSyrWQM= -github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter v0.82.0 h1:6Zj2gzJl5sefJFQlCvL46kLyoozBstUQiB6wsPkPU2E= -github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter v0.82.0/go.mod h1:1G20L9KOMqKQdqtNI0X4zfkMQ151bk4z3YUbx1Mcjh4= -github.com/open-telemetry/opentelemetry-collector-contrib/exporter/loadbalancingexporter v0.82.0 h1:knTNY7nKG8KEJYijNFAJglRHu3j2z/I2wh5HyWuaatY= -github.com/open-telemetry/opentelemetry-collector-contrib/exporter/loadbalancingexporter v0.82.0/go.mod h1:x8Dzlake6r6h1MAD24tL97bSt6SFLxpsMfKSRD0gduA= -github.com/open-telemetry/opentelemetry-collector-contrib/exporter/logzioexporter v0.82.0 h1:eFjoIXoEBjo3EXNQPJQcJE48M/K4p+v28Q6Qcarrrsw= -github.com/open-telemetry/opentelemetry-collector-contrib/exporter/logzioexporter v0.82.0/go.mod h1:0oAPVfaW0coeTHM7MNzhG2cNewtpcmbNpxbJ3is5ezw= -github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusexporter v0.82.0 h1:tEhzqPgXasHxP3EwkIYmI1UcjwLP6Ni8sME5hdfyTa8= -github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusexporter v0.82.0/go.mod h1:YLJ4+uqiVfA9AtUPNzZOMoUl3FFN5AiMkjXzqgbUq4M= -github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter v0.82.0 h1:Pbdhyt4aVha6QgscPYRwD70/QlHJJpaGBJkHbH0D57s= -github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter v0.82.0/go.mod h1:xF7jvMckjRKbE+92Q0BXqYCvPrOOGrwVKeoQsZ37dXA= -github.com/open-telemetry/opentelemetry-collector-contrib/exporter/sapmexporter v0.82.0 h1:L+A8ZQzzsV+Rg02ZJS1ugdzEuiNtJr0uONVD0NvgrSs= -github.com/open-telemetry/opentelemetry-collector-contrib/exporter/sapmexporter v0.82.0/go.mod h1:tua7yWaFweDM9mzRDFP1DNhsNVMYbRz/Rt3k52JBexc= -github.com/open-telemetry/opentelemetry-collector-contrib/exporter/signalfxexporter v0.82.0 h1:i4LnK+f/XbJzGlCrYnLRg/EFbVYYXcmSF88pVN5loGI= -github.com/open-telemetry/opentelemetry-collector-contrib/exporter/signalfxexporter v0.82.0/go.mod h1:4xBWenKUqvfXOwlBLX5nbr0YIMSsZne4FGdn+lTyajk= -github.com/open-telemetry/opentelemetry-collector-contrib/extension/awsproxy v0.82.0 h1:t3pahsAi1gdqpJoVk392UZfw3ZbDyIzXFy89yPjT1MA= -github.com/open-telemetry/opentelemetry-collector-contrib/extension/awsproxy v0.82.0/go.mod h1:/D1YL6TN6QY531RWxgimwbhwNh8Sjt+/zlhn9sRRbt4= -github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension v0.82.0 h1:xzz0A7cxxtvraU22ZleikEc32ghZ3GMCDiCevs+37Zw= -github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension v0.82.0/go.mod h1:eaA8YuyzBP1HikJUGYlS+3cbxndrCIAEYBU/1yMQNQo= -github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/ecsobserver v0.82.0 h1:Qixnpq/0PQw9rKzGj7Eci4F3ucZRG46mLOL54a3u2Uo= -github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/ecsobserver v0.82.0/go.mod h1:9ne2U15XVRwTRlUn5fvSDBDeLU6RH9YVeHOdFrj6kyw= -github.com/open-telemetry/opentelemetry-collector-contrib/extension/pprofextension v0.82.0 h1:RhqTW7tIcDSc6bWt1TTjlMsHfAYgIiXrGHbk+CYOcSg= -github.com/open-telemetry/opentelemetry-collector-contrib/extension/pprofextension v0.82.0/go.mod h1:JvWloYbaiC45XUV9odYlMo/y19mN1nil+OwJetJu870= -github.com/open-telemetry/opentelemetry-collector-contrib/extension/sigv4authextension v0.82.0 h1:d+pQzGHday6ZwW0FOzm/vzhHl/KKvZLWoYEXMeubnvQ= 
-github.com/open-telemetry/opentelemetry-collector-contrib/extension/sigv4authextension v0.82.0/go.mod h1:q+Dw/LtMZyiOhper76RZxVwIB19kZigsBADyXdyu+30= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/awsutil v0.82.0 h1:BxLqcuI8tf2R2OxARpCXF6cfZ0cVO76cohWclajF5jY= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/awsutil v0.82.0/go.mod h1:gaoep2XPjGkrGY1XH41ivdc3JaqBYLvZWXK6KffEsck= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/containerinsight v0.82.0 h1:F3CFU/uB6lov7AZ259rbbjKq9OYXr7KeOyCCzPdIEbA= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/containerinsight v0.82.0/go.mod h1:70uB2fD1x/ldzsXvuhHG1bFZeYhPouYpkrDWyO0sQS8= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/cwlogs v0.82.0 h1:/fP4YLK4SNJRmz5jOtT57LKbdNbEuvBL3FC56uxASsw= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/cwlogs v0.82.0/go.mod h1:tvp98P9ZEF9K0wUzyZSVLhxlaJHzBbzI0C5exIoD844= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/ecsutil v0.82.0 h1:9wJBkAfq0xgT++d0t6p79ZJKvGjjuy0ZlcXcl8lDEXU= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/ecsutil v0.82.0/go.mod h1:YvDlJ+zVkFiiPMEY6QP3E4ILXQrcZbk23KMW4GPGrVg= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/k8s v0.82.0 h1:djhO7YMcHKPLWUS5zEenBsbDwDA9XHW5OjStniJVXBA= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/k8s v0.82.0/go.mod h1:N8HzE0N2g9HlKn6KYD1ficMw2so5jLvy21uAmtf8NLI= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/metrics v0.82.0 h1:kcoYbBAQAt4InjM6BzjgCxHq/YUX5rXb0fMY00lbfNE= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/metrics v0.82.0/go.mod h1:MilkKO7o8LmdxM8Lgi7vBVU8qxrhZFuWPy2kPlL5Ho0= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/proxy v0.82.0 h1:W0UXQrlJhK5Gn2Ee1Ebidx+MCbTG39kH/hRfMAKUmM0= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/proxy v0.82.0/go.mod h1:E34bYW2k07QZ+uk3TBhctjQ9kltfRkPjcQkIo9ST4jg= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/xray v0.82.0 h1:z2cI8P5guS7wB5Zmvsq+4kNiAwi9k7wS2ZG/bWK28Ak= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/xray v0.82.0/go.mod h1:tT09nnraDrujjx9eGDjeuG0Lme3dwj3zSmAJERQgcEk= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.82.0 h1:UQkYg2ksIEbUibN97SPMITF/1wSRynAn+aD7tC3mQKQ= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.82.0/go.mod h1:5VGtCES+3CySFeCO8hFzrxVgeMI8L6cINAlqr0vrv/A= +github.com/open-telemetry/opentelemetry-collector-contrib/confmap/provider/s3provider v0.83.0 h1:9AN4dtHA4C8abrNBowBq7CwcrfzAOqkHVzgIe+mi3EM= +github.com/open-telemetry/opentelemetry-collector-contrib/confmap/provider/s3provider v0.83.0/go.mod h1:XQUP9bBRZu4XetB4Pok8qjaWh+hdBnmxriITOuwfUPM= +github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsemfexporter v0.83.0 h1:xuJ7AgUJqHqBWaFNeph0fjID+4+KgBgv16RJr0WgfMA= +github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsemfexporter v0.83.0/go.mod h1:3aJHSLrF4WO2LecdoPBcbKeCKhYVONPersDFOMLy1Vs= +github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsxrayexporter v0.83.0 h1:gnc6xAyFcjhP8C3ebKXSyJHPinM+n2aiT507qfFxGWU= +github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsxrayexporter v0.83.0/go.mod h1:n3ry12plQOS4XbIYpyeqHJKVl1y7eVZSLKt2M7v00Dk= 
+github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter v0.83.0 h1:RahPWJc7FILuSw6Nr5LjjFqhzhRoVp4iiPMQppUnA+0= +github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter v0.83.0/go.mod h1:OxipJxncCOdyxT1rPl+iVHRAov5JepMgJ9NkuAPmJgM= +github.com/open-telemetry/opentelemetry-collector-contrib/exporter/dynatraceexporter v0.83.0 h1:ZSrG3KAWvc4fZA0LPj+uv1qwpovVuWDXaj18ERayib4= +github.com/open-telemetry/opentelemetry-collector-contrib/exporter/dynatraceexporter v0.83.0/go.mod h1:eeaXESKotV8YV/2Rv4vJ+VAJ3bkcq4WKGWEz9AkZMMk= +github.com/open-telemetry/opentelemetry-collector-contrib/exporter/fileexporter v0.83.0 h1:GoOSCUUy3pNrVfu1sCzXQCr1/dKDFtgYBza6gCaMsfI= +github.com/open-telemetry/opentelemetry-collector-contrib/exporter/fileexporter v0.83.0/go.mod h1:vw5V9r2TOdcnL1kAIVARKtEa67rE90qEm0AH/eS7s/Y= +github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter v0.83.0 h1:UY9GbD8VBY//agLPuCejj/Fqjf1uAWqT5LCpv11UcDY= +github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter v0.83.0/go.mod h1:33K8WHr2YXDG1/vK+01BiMefL2Jjwrt4c4zli3Vqp8o= +github.com/open-telemetry/opentelemetry-collector-contrib/exporter/loadbalancingexporter v0.83.0 h1:LzwGcOtWKNEoLsyZ8B7fdb73x7zEbyz5ghVdkpJqANg= +github.com/open-telemetry/opentelemetry-collector-contrib/exporter/loadbalancingexporter v0.83.0/go.mod h1:fLGts4FyYaBEIxp+7Ag35Gq0bEks5L0tu2qhRFi/3KY= +github.com/open-telemetry/opentelemetry-collector-contrib/exporter/logzioexporter v0.83.0 h1:zuOyT4t0uB0Sqw4PnsOezXUyOn+kWp+HslxAiGmB5HE= +github.com/open-telemetry/opentelemetry-collector-contrib/exporter/logzioexporter v0.83.0/go.mod h1:09NFg+NzQA8p1CqVSuW52HZHNvdOIFVrSV17UANM9KI= +github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusexporter v0.83.0 h1:GUovWPxEil/VpfkfPp9KBmidnZHX9SdD5vsfqzcssCM= +github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusexporter v0.83.0/go.mod h1:zHOElC2vEg1cUv2P2Pwgd/YhYJTQEbOpJohpoZGWRfk= +github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter v0.83.0 h1:980Ae/Y7mZndgWhngpEi8u5KIVFYQIF4NFpkLEqCZEI= +github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter v0.83.0/go.mod h1:u2DDzR8+AdbeeMf7o47qqVQcPfX8bzm42/VAq7I1E2s= +github.com/open-telemetry/opentelemetry-collector-contrib/exporter/sapmexporter v0.83.0 h1:8qQgO1UgutKov4JFFIEf0WNS1moXK6c+z2vznQNcyf0= +github.com/open-telemetry/opentelemetry-collector-contrib/exporter/sapmexporter v0.83.0/go.mod h1:KkzODSIS07pR9yIYXyq1+CvEDILRIv5R7RmemG6w36c= +github.com/open-telemetry/opentelemetry-collector-contrib/exporter/signalfxexporter v0.83.0 h1:s2lJhFQl2QTKFNnab9Wk+9hIPOUV2dLV3TQ8GoT+yKI= +github.com/open-telemetry/opentelemetry-collector-contrib/exporter/signalfxexporter v0.83.0/go.mod h1:M8tIfUA1ULvRZp4wt5yTWI3dfJ9KMNEQmdyHlCxjaX4= +github.com/open-telemetry/opentelemetry-collector-contrib/extension/awsproxy v0.83.0 h1:lOlIrn4npBaOTnJpjvH+MzqOjusgwmcEmtKGcTWmCzk= +github.com/open-telemetry/opentelemetry-collector-contrib/extension/awsproxy v0.83.0/go.mod h1:YXAQndCP4LY42rn22ELjjnTmqbsxS2yrVerAzsCisQg= +github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension v0.83.0 h1:Kjh+MVsvlV/vmgwnN667ln57kQiz6ZfYxyk+IONS5lA= +github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension v0.83.0/go.mod h1:CMhTUJ14v2P/roC9GrV7fdVS7MCtXwOjw+JprJrfFKM= 
+github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/ecsobserver v0.83.0 h1:0mq2UIQlFGuaZANvXaYyMISx44Ft0OEaMR1ByOjhdDk= +github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/ecsobserver v0.83.0/go.mod h1:ds5yO3Gq5QF5x/lT3zYbQoQCrXdBfTVhYpAV+xINUG8= +github.com/open-telemetry/opentelemetry-collector-contrib/extension/pprofextension v0.83.0 h1:zrwghw5g2Ma5u+mGo/PyodymkAQiAcPLs4Quq4MAZoU= +github.com/open-telemetry/opentelemetry-collector-contrib/extension/pprofextension v0.83.0/go.mod h1:WQ5L7YwarCjW6beHMZy+2g/+HfD9WQOooTiwoQcQR/g= +github.com/open-telemetry/opentelemetry-collector-contrib/extension/sigv4authextension v0.83.0 h1:jM9FjwI22dpD+p/0In2FA8ZVlH9EWiH5gJw8dUMQ3Ik= +github.com/open-telemetry/opentelemetry-collector-contrib/extension/sigv4authextension v0.83.0/go.mod h1:rNSAmQoUAr5c0KpbxBulgo52csvKoM+qKueLFC9ayvA= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/awsutil v0.83.0 h1:K2rND8mgBGWDqFsBwjKdphBTBjjbBkcw84lkDVvv/ls= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/awsutil v0.83.0/go.mod h1:0pvuvcSdAJ7RopqLmtg1/DPpZH+xazbMACKMVHalCoo= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/containerinsight v0.83.0 h1:d9Wvwut7FoNSxfXF5SJABNppu9xGxQnf+I1IuBnGIjM= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/containerinsight v0.83.0/go.mod h1:jxeFqzSwMrDmEGPyCGhCyHY+6J+fA/jXRoa4CEzZ/W8= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/cwlogs v0.83.0 h1:RDQ862swYY3oAqtQE5UEf3gVo2K6STG76CgBurJIY08= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/cwlogs v0.83.0/go.mod h1:u2XpS95ZgRXO8yrTjw6eX9IesTd5aNANP/x3jVIrqc8= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/ecsutil v0.83.0 h1:pBdqndULnylTtLYo9Zg3C29FNZNMniZXIj31jYfjcTI= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/ecsutil v0.83.0/go.mod h1:BsWcWAne29O8gfMOvkPR4+OTshTvqpg+3ERZS2hnFNQ= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/k8s v0.83.0 h1:WrSJTAoVnqL74klHYl+ylGcWzw5B/iuHsICDG4Wg+pg= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/k8s v0.83.0/go.mod h1:gGNH996LguY9CzDK7xyPqjCPJkfRKg8Y3WZA0jgnw00= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/metrics v0.83.0 h1:1QgMSYj3rKm3BftrCSDVIf+zXYxdv3NNCkI+/zePTQM= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/metrics v0.83.0/go.mod h1:WAScm+oitM87OWSy+pPAC6eCzg3xhYz3VBSef2+zV60= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/proxy v0.83.0 h1:ovGXkuMEqzGjDAU+CC7BeAsTL/qwmmOJhkWrv1AmA1U= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/proxy v0.83.0/go.mod h1:zhysB7B8qSgvKI+TvNqAELwdCQ7I26IzuFNAWC+tILg= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/xray v0.83.0 h1:PLzi6IkVCbnNc3dyrmBSBqEng87LWCNca2np21XI2hQ= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/xray v0.83.0/go.mod h1:WWKANqgoMjOyn8yKjF1oEhpss9bmfiyWH1+mkE/y9m4= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.83.0 h1:pzq9yVMgLzeo8ZsgtjDzsR+B/dt+Fc3FROAu9SV+Qu4= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.83.0/go.mod h1:6XJ+dV/QjwFJf55/2nnVfVqA+qwa8Y/iYR6taPJN7n0= github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.52.0/go.mod h1:CIX1MWy+2JYdeYhqjK89vrRpCGbz6LTLinp+SM8kZyo= 
github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.54.0/go.mod h1:CSe1wsnLhSAEgAEXLfi8wTyl0VwPVkq7925aV8xm0oM=
github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.57.2/go.mod h1:xPchY5YNOL9jr6phVkJEvkEakMYr8HMD4uGYEoKXIek=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.82.0 h1:0b6glbENAwPdasKKVOgpR/EaZG1sJhsUfXCRiwZ0drU=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.82.0/go.mod h1:MKnM9GFqPz4HY4NQDDao+dIjZz4BvThAijuJuPC8NOI=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/docker v0.82.0 h1:sw413Qe/67o0L35OeJEeySjzSAvVbY0jwhSkgNdTWmE=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter v0.82.0 h1:czYBWuiriQyD/4UI61U/eAogi7qnhk9AGreZez20t0Q=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter v0.82.0/go.mod h1:tiYWtXrv4+T9L+mo5hdzMiKN25rg7sB2tRIHUqyhF5U=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8sconfig v0.82.0 h1:5aL7mnIupfnfyO6izVxwdOArKbJaAzNqH/W//2ux0kE=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8sconfig v0.82.0/go.mod h1:F9orevxYo4hFXEYEbhn85znnIUWv1y++MKkXHabZZ58=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8stest v0.82.0 h1:ENaPzCM8oHWzwzA1Fj6dl/1zGOh1UC9wb2f17jh45aA=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/kubelet v0.82.0 h1:ms9AyP7vpW4CtsPKibY2kp/+kWr4mtBeF3TsgC66vVk=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/kubelet v0.82.0/go.mod h1:IEmBxonukahtUZUZLsRuUeFNQCsqIrtyWEFWU/CKyU8=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/metadataproviders v0.82.0 h1:EFEWFZNTCTM2UVItheh1f/rAoZcVVNhTLk4xevSSbUg=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/metadataproviders v0.82.0/go.mod h1:xbuPPTg7fSGwlMaM9iEjPdob6MH7Nd6KzJ//Qd0xMFs=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/sharedcomponent v0.82.0 h1:0Hdh8jj6biwzWzdF21c6XYqVhx0YMKF2OVnWhusAoFs=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/sharedcomponent v0.82.0/go.mod h1:UEmb9zd7Jf9KKc6Tmrh7FXlh0IO9FzSumUWzmEW+ANs=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/splunk v0.82.0 h1:0AqiqVGbSnwz6n8CYy+/r0dJz95rqif/ctFlUr44FLw=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/splunk v0.82.0/go.mod h1:lFuju3wV7f/AZRVAyMYqA9XCtmQ66VuBR1XPC4mxVRg=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/batchperresourceattr v0.82.0 h1:a0WFk4cpk7HuYi3CcKo44z/gY88XN3EMJTq7gW6Nj4A=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/batchperresourceattr v0.82.0/go.mod h1:aZdFtT+ay8aNiySaQw41KQvtRteKj2Eym63Cc0PWElQ=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/batchpersignal v0.82.0 h1:0ayceOkG4qqLiwgttI7eJYCxXUy2/LGC3IxADCRYjS0=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/batchpersignal v0.82.0/go.mod h1:Y2To/bsz97PpnjTRSIj1IfDECx0hBpEnfH2DJXejimE=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/experimentalmetricmetadata v0.82.0 h1:noxLHxoQqYt3WO3Z2HpUExyYG7l4fuqC0FyqRPYb+BY=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/experimentalmetricmetadata v0.82.0/go.mod h1:umq1KOdkQa2+djdxtxHmLigyFtLVqM7QXGeP3/s3cHA=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl v0.82.0 h1:2efL2SE/dndrTLPQcpFzrsIJpYw0i3bkFG0n40xnsQI=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl v0.82.0/go.mod h1:tqP4R7pPk5M0v0j8nP5h2o1fUqofC2kSrirzkwQW7p0=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.82.0 h1:wBX6PvwO5mopN+uuVU1pyfl54OdrrRT+VPRCyl22O1A=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.82.0 h1:zSQ0EolsXY3F18kFwEpqAkLc5C2/DE0vbFS3QfMpsDc=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.82.0/go.mod h1:wbgo9BklRN8M4Mi+76mo9bMVQY2C5gL/rPKwePQL3l0=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourcetotelemetry v0.82.0 h1:3S5WRpygfai9pfgt66oE/ppOEziBlL4NhuIlhb2qXkk=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourcetotelemetry v0.82.0/go.mod h1:BJji1qFT5xqu4osJjgwWYg2wO+FKjE1R4zeSxTcY5W0=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza v0.82.0 h1:NbSJ/XsjeyiKSjn/f3eNp3HF3eKeXLfJLZvjbpV+P+s=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.83.0 h1:rwaJhPhZf/4A0exUs4cjALn6jnFU2QAL5cSJ/FwzcsE=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.83.0/go.mod h1:DaoCKBiconWYmXeexLiQeVOWLWbo4N5vJdVs+9X4V+c=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/docker v0.83.0 h1:dFsDXpyWukqXZRLw1GwG6npvLaTXuOQbVJ/57IP9LrM=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter v0.83.0 h1:s7sUZ3/LCX3Tr0C0FfTwE3X9GwTeTBskoMaLlqPv8Kw=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter v0.83.0/go.mod h1:3bOPivfVZIaylsOk9yhWGNeobT0Pn25UZs4OnuC+azA=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8sconfig v0.83.0 h1:KllNzprs78NOFj4pLDu3hVImiJNiG3v9Hti+mvMpOBA=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8sconfig v0.83.0/go.mod h1:oOJY5unMZP3pPkcZl2VdBOiJzS+8xh4o9oq5NEglCXc=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8stest v0.83.0 h1:lzm0ei+Q+jToNok1HM1YPC5sH+2LuWTwf5NhCztCgdk=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/kubelet v0.83.0 h1:wuArO/CYSrTcpO4h/3vfLC0Li4vuVSeERxz4OOWTPh4=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/kubelet v0.83.0/go.mod h1:rWbscJhWZAr/o+1RAZAKuXl2sGowDcs9jbR/DYQvysg=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/metadataproviders v0.83.0 h1:tNUrUu6UFAycjz6NtL8/Zz0LF/2UpYvW4PL5xIYtw0I=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/metadataproviders v0.83.0/go.mod h1:3UHvuM+X9vt8DXR3t/lt8JdpYhkVleQqc2SzvXLcfy0=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/sharedcomponent v0.83.0 h1:5TbtbiKZMwDVf1sEpN14Y4wqwANRpzmGWR0yLB4fNKU=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/sharedcomponent v0.83.0/go.mod h1:tGB4+26zqjQRyHhxMWy/YZFOzWQwqm9OS1Q+jKHVabw=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/splunk v0.83.0 h1:s/mJMnap31p/fM/DGj3GWkubQRmaHMjo0RwDz0iAjl4=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/splunk v0.83.0/go.mod h1:J3PBJi93/0NuvJGCSugg0Dt38G/LqTrgL5t9cFl4Q/A=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/batchperresourceattr v0.83.0 h1:xvFXYVkdzUsIuoi8asZU2Hm59msjdFbnp9gxydplIQo=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/batchperresourceattr v0.83.0/go.mod h1:/BdkcVWo8+/yO//dRT30o/eNgKfu6EGtAby00vl0VPw=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/batchpersignal v0.83.0 h1:/qrROLCRzwIVrDVnEo9BOAGLzRIN8eCUXCO76Wi00Bs=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/batchpersignal v0.83.0/go.mod h1:RBkF+g8DRJs+k+JhIVAxnUW+a7vlCajpWNZQ2yD61sU= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/experimentalmetricmetadata v0.83.0 h1:qhLG2aVClSG2RMO2ERDfoUFn0YhHv0VhRoYJGyNXyEM= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/experimentalmetricmetadata v0.83.0/go.mod h1:IdqHQZWFAyMN5/yHz8oFts8AVj2osf2b6DTvc44IMLI= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl v0.83.0 h1:6DE+t1L3OjJMouOo+Ss5w1W/IJkPNFFJDb8f534eFnc= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl v0.83.0/go.mod h1:uL+EJyqlx798NEB4FuY+05IZY9aXSbw5f+2YFNvOA0A= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.83.0 h1:/6X4L5BrCyy5Uz/jnLwkiv9E6VfuJl9EFJztWtkWvps= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.83.0 h1:yEuD/gKHNVMwjT4mRZAeUVCe/VO2tqeb5+aFim5jQJs= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.83.0/go.mod h1:WoSzL9NAVwvEy1lMwM2utn68g0Khoxm591WliToPnDQ= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourcetotelemetry v0.83.0 h1:e2j+kbNwZ1AAfh2RexJz8a07E/36IUmGW9SID7JtK+g= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourcetotelemetry v0.83.0/go.mod h1:fxZoTFjoKeAwXk7fiFTLbB6ptMk8vgj97e3nVyE/fJw= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza v0.83.0 h1:Ru7vas+V32Py0ld/jaKAAeCfCL1PqOuMmhG4gX/1GR0= github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.52.0/go.mod h1:tU7s/ki/QePSIZ7ExYGWLMb9erCAt66brQHBEHxr8HU= github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.54.0/go.mod h1:uWFgO68sLEqWrQxL8rws1CkB8EUNQaedP3FWI1gMJOU= github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.57.2/go.mod h1:4rFuWKMbzM9H39RbDvPtJfAp/fxsHydhJhKns6skmK0= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.82.0 h1:fKTXkXX+iMAAiTu4r1j1DbzKYvbd6CvFoWNWLhTOJjk= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.82.0/go.mod h1:1SM5fbDUmJHQUNO0T/lDzMVmGpn+z9UJHyjfGg6IQ0Q= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/opencensus v0.82.0 h1:XW0HJBOWJLpzDsMAOoLxFL4qMmD/qI4qTpGfAA9afgU= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus v0.82.0 h1:kx5UQGy8/TiZRst78xblTHvIf3HBJLNKoXvx1GrUHPc= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus v0.82.0/go.mod h1:A9Z3SrF0Ngir1kd5t7UzjpUFoy4mhZjpt3B+3d78/qo= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheusremotewrite v0.82.0 h1:8HOH8p6iFidN1VsZewH+ePdZM+w/89dCS6dW24Enhng= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheusremotewrite v0.82.0/go.mod h1:2FVfNpoH3v34K0Lx4s37n54U11ir9Wz8rAab8DaL6ho= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/signalfx v0.82.0 h1:0uuo5x+/RFtMhPqo7+CU/lYCmvpJ77fPLdpPxA92Kes= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/signalfx v0.82.0/go.mod h1:rfHBidHrLvlRi0E5NvQuljP3r4Uv7qtNTPWIFtt79z0= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin v0.82.0 h1:NC8LNVLj2UXfPZoW4vpAf9+NWqw1vzwuSjdA2xRPIvA= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin v0.82.0/go.mod h1:t2x45aFpeo5tc6oM2nNyKPLy5gBhT/R/uJNdPp7TBbc= 
-github.com/open-telemetry/opentelemetry-collector-contrib/processor/attributesprocessor v0.82.0 h1:R+UjA60N26I1gGGzlcxp1IeEYxg9kCktiqle3vEFRBU= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/attributesprocessor v0.82.0/go.mod h1:VoKLInODf1ZwhLHiYWJe/2Gte8BeSlvRdlrM8gO7VTQ= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/cumulativetodeltaprocessor v0.82.0 h1:2j71XxgoPi+/WxQM9eYmo8rhrV9qqX8CP1ivNNaDrGE= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/cumulativetodeltaprocessor v0.82.0/go.mod h1:YoTWA0QuEunCmoGPhYibhD/AHrk6jaPSv0upcOgv7W4= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatorateprocessor v0.82.0 h1:iA9kPemK86SmIHlghMFHlrwDxX3oAmouAgZ03p5sLSo= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatorateprocessor v0.82.0/go.mod h1:Yvf9bKaGcHyKSnxsVVsIS1aeGotnlYRzTZuExzTNnSY= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor v0.82.0 h1:h9FsCZ9ppDRL4cC3QeFSL8uh7auqFsa9BMLLeVx0qFs= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor v0.82.0/go.mod h1:2e36/jV+bd1rPXUJmS1Jk5g3Mrs3EN1Hve/eZrwYEYo= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/groupbytraceprocessor v0.82.0 h1:UZlfIF0j35lrIdLt3I2blKi2gknhS4B7FsygXB/PJIw= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/groupbytraceprocessor v0.82.0/go.mod h1:hpAZugKUIeHZYS1QamT7kfxhy2qmh8j9F4krEmwlv1Q= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sattributesprocessor v0.82.0 h1:UQVbkb/xvXhL5TU3w63cIfrV6Lhtk/shtvQDZDVx75I= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sattributesprocessor v0.82.0/go.mod h1:efjxEAMpzeK27fnU9AxWFGDX7oJji6AYz/CqtvlIKHM= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/metricsgenerationprocessor v0.82.0 h1:QnVw/sc1XkHjIkpPzq8gGw8A7/0tS3KiBeacNZ6R27s= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/metricsgenerationprocessor v0.82.0/go.mod h1:aki1yK0PqmA4UvtbRRN45DQ2iuAoBZlPFXfkXr83avY= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/metricstransformprocessor v0.82.0 h1:Ev7liq1TQyhDgcsfizXC/lO1lKWQX6sv7OAKVe679pI= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/metricstransformprocessor v0.82.0/go.mod h1:WksfpqoJ0O5IKYn57Qs3uoQRjKEoidlG1Axy/VFDc2U= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor v0.82.0 h1:FMfmm1qOrd/UTFydSaLdDq27dz5/Z6vk9lf9WhLB0YM= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor v0.82.0/go.mod h1:UTtjfRbvrkSypo7gHWkOO6l3LjmWaf3JxjtCbnCuAAE= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor v0.82.0 h1:ToEAh/x/66CfvlNdMzAa9hh0zzZOa2hneCdnDBj+U4U= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor v0.82.0/go.mod h1:SvJsVQdfVYVJ0/uG7jzesLDj97ej/8Pkq88MOLD4Ko8= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourceprocessor v0.82.0 h1:iIzRlJaR5YPuRLjtbeSALwn0IxdgdbOwlO8DEUPkDz4= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourceprocessor v0.82.0/go.mod h1:0fxeFp/yAbCRAbcaPztO1j9JH3HYwItPonmEz6OXlJg= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/spanprocessor v0.82.0 h1:PTjUqRSExzDY8AS1RA+fDNRIyP6ilh/4kJRLi1Gwz6g= 
-github.com/open-telemetry/opentelemetry-collector-contrib/processor/spanprocessor v0.82.0/go.mod h1:agArAIGzj1fcKeS2J4voljJ1a2WetcV8IIjK7X+i48U= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor v0.82.0 h1:1HuCZhKP4QrB1U0NRx9HaG5qCHXaYAubFMVPlhSg6Kk= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor v0.82.0/go.mod h1:GvmND1rGYv6k6fNHWyKbRi6yq8XlOTe1ymfER+BcDc0= -github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver v0.82.0 h1:9/IOzBxXZpeUkoMFl9eXiLhNBbhumhjqdgnDwl0nO1E= -github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver v0.82.0/go.mod h1:zXztXG/c1eWh6o5+HAvibDBl6B/FPPcKZHjoVvgvRdQ= -github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awsecscontainermetricsreceiver v0.82.0 h1:Pbq7+HacFVFbUKnLshTcHrzn7olhEKG3PVeEuzsufKk= -github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awsecscontainermetricsreceiver v0.82.0/go.mod h1:GyhYenUIr/aotZ5w3N7nOLiA3uF1+blq2pOQAbtrZws= -github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awsxrayreceiver v0.82.0 h1:aZ7P5unH4YlZTEt+eh0ojkhtOj3xKqo8Ywat5+dHAzM= -github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awsxrayreceiver v0.82.0/go.mod h1:ouGAw/O/qJWPen9L4BPeHXTUANRwafYyWU/yvk1GEdY= -github.com/open-telemetry/opentelemetry-collector-contrib/receiver/dockerstatsreceiver v0.82.0 h1:t7E/nglM5F9p7w2rfKFSFm0bkmjND2kq5oxIN+XhT1o= -github.com/open-telemetry/opentelemetry-collector-contrib/receiver/filelogreceiver v0.82.0 h1:zYD9pYKlF7nLpwyrZ9Nvgj4K8TtdnMtX0PS2qSJvo8Y= -github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver v0.82.0 h1:J8jwjrVQU5l6EnsaDBY8qOsW32nrylhwlCuV5WbvZ2M= -github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver v0.82.0 h1:JMJF6BiLMovfCW/+s+rTnndp7IY9qrVyZ7xZnZEtCtA= -github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver v0.82.0/go.mod h1:UMwCyO7+q2f3S01fU40NYYXUNSWZy5X1RmnTtJR92fk= -github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver v0.82.0 h1:ucTSzZJfzy4xav3bjGFZWAlf/rdxoIN620ZW+wW5u2Y= -github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver v0.82.0/go.mod h1:jep8pXp8tTHpr4Fe3w6PH9vHGHo/m+g1MukPZx9mzHI= -github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.82.0 h1:oJ9H90C/qwP+3dSxEL+fAYXpyN/8GqNp6j0CODeI2yo= -github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.82.0/go.mod h1:LdHYaAdHsKYhVNOwGJS2Egu+Rrb4xWEerC2JNpC4wrs= -github.com/open-telemetry/opentelemetry-collector-contrib/receiver/statsdreceiver v0.82.0 h1:iZtAbhXHFR9ID2ZttCbR9wvS2J3Ivi/9nJQj0/7YuCI= -github.com/open-telemetry/opentelemetry-collector-contrib/receiver/statsdreceiver v0.82.0/go.mod h1:8DsPwUYp7YpJJALNimxVe0IXwtaRC3Aj79BWlmeG8N8= -github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver v0.82.0 h1:DAuk2nc0eCgZIVdrZ8OPIJ2w343zLN3e98vS9D+Osk4= -github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver v0.82.0/go.mod h1:8aOC0UNBTUuRt5Bmw77bxcFJYd1HXffvU8wlVo/1DXc= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.83.0 h1:fGK6FQaUCOFGywGRvsiDtjlfGLZGcgkmgZlIomdsr3Q= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.83.0/go.mod h1:wvJRWQDZGEeTvcfzNHII+niIHWMlO/fg6vSS3x7XYzE= 
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus v0.83.0 h1:qEO5xxh8yRYiWhXiQhEcXxFV4RcO7lUkejfxBjXLPNo=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus v0.83.0/go.mod h1:a8Jm3NY18nTt9mM8XJeWvwmiPxDKJQvbpYIlzA7H3Yc=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheusremotewrite v0.83.0 h1:q5QvnGvi9JNh4vPmK3BuX/C7C8YWZetCrli3BxCAZKU=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheusremotewrite v0.83.0/go.mod h1:dZGmQr+gcI6fUEs6sPxlUQyV/4j9KcL31YcsHL9OJ9k=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/signalfx v0.83.0 h1:asapsoRz2d/HRjT0tWfJsCqBe0qwFSw44P5VKYx2OGw=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/signalfx v0.83.0/go.mod h1:SQ+BuwNerr2p1jmezJDF4ruJ1WojRgqQinkyey6NaMg=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin v0.83.0 h1:1FnX4XXS9IOC9h5nA8gQsPv21n0rbECjBqW/VfJ4c8o=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin v0.83.0/go.mod h1:BK0l1fWX36sfssQie9lH46tzVzV/UTOcUPRKytLbb/I=
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/attributesprocessor v0.83.0 h1:zre+RUseqdEblLwaE9FidObbtmx3Mm7tiwMC3Fejoy8=
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/attributesprocessor v0.83.0/go.mod h1:QEy9JCi68LCoxfwPn2v1byOf3KxSe3bJ/p3jgNzpsOY=
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/cumulativetodeltaprocessor v0.83.0 h1:cnmSn6LVIIkBRfaSuBQNNphLXonFr5qD0+bCHgvAFws=
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/cumulativetodeltaprocessor v0.83.0/go.mod h1:bHhc2kT0wki+U0boNf5E0bmDbPDaI6AyzdXh/zvtFrE=
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatorateprocessor v0.83.0 h1:de91BCQwWkwZ+Tp7OsCpkrrc7vBRdzPQhDE5ENcxPMI=
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatorateprocessor v0.83.0/go.mod h1:7NGof68ffjxgOoN7NE0TSMNAmGXxpxVmw6p9KGvxerU=
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor v0.83.0 h1:Ul48EqO0xeSn57H8jYK2GNQzJfF7W9l1nQemlfUGYEs=
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor v0.83.0/go.mod h1:ZIMmKTtWzTrpj4z1+yBu7tbzSHi4PbtyLNAWq+AwmlU=
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/groupbytraceprocessor v0.83.0 h1:g2YnCEJ4SUQG0m7yqDpSPOYsfPe74M1ivy2JYqWUCWY=
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/groupbytraceprocessor v0.83.0/go.mod h1:vtc7IGVX6m4tYkNs0KLdEA7HpuRgxRhHqJiFBLyWdyc=
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sattributesprocessor v0.83.0 h1:hv92fwXj9f834zUGLoyxtF07zpEM34yDL76OAwVTdqY=
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sattributesprocessor v0.83.0/go.mod h1:3wxQn8Bp5c19UgSvGx+32/xVLsOZMjNaTU3g3n6t6Cc=
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/metricsgenerationprocessor v0.83.0 h1:88XaF5XSbe7b9J77yvr/joO+pT7ZVpmm5GoHjI5/Wes=
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/metricsgenerationprocessor v0.83.0/go.mod h1:14tnFp/J3PFriWAuvi3Jf5hOkc/p4zhfZORbNDzoHH0=
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/metricstransformprocessor v0.83.0 h1:rfTAZuRV54Qb/Aah8sdnpF5Z9qmtotyR1L8Z79R/QYU=
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/metricstransformprocessor v0.83.0/go.mod h1:C3P6u4x0CwtLgTVdeAvSR9m1CPjHeJRjklfaTvQ7JRU=
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor v0.83.0 h1:3S7vb8bttl9eC3qQRi5Rm4q01taM+ue8loomADB0qSA=
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor v0.83.0/go.mod h1:Y9TezTRfqJ3cOTsjWxe7rgDKZU71uU28mJ10GKYBK5g=
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor v0.83.0 h1:TNoRtrLHLk5Xr8JZwxiHxnpy0+UYnDHl7SPtZIoTC0E=
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor v0.83.0/go.mod h1:KDZS6tI2Q8aZ7TCJzZuqEbrwDJPBAaSQIbeXsO6Nvvs=
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourceprocessor v0.83.0 h1:Jpa2cCa1CxWgc6kUP6fTfYvqRZzpNcMDFyzjzoXzWPA=
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourceprocessor v0.83.0/go.mod h1:a7EPC0J3PIU+x3BZSDKxhB7cxeJjf6mgy6nmdhGGQ8M=
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/spanprocessor v0.83.0 h1:mchNw/c+HM4jW22fFhHRjYX7pyk4t7eiG/QFhoUv020=
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/spanprocessor v0.83.0/go.mod h1:yJJ/GpM8NeloEgznZyGTH8neThCL4KzQd8gsI+eJDxM=
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor v0.83.0 h1:4D72ZwITvSbNchy/LAISnFlCjGDZYblPmaqn3LKFKiE=
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor v0.83.0/go.mod h1:HP+yAae+1agolvpTo2GRT3BXgPDt1NaYfm5PWtPrI7I=
+github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver v0.83.0 h1:pJTvexBqzytA/I5Djq8MXsKlqyT+wZ3ZmZLqkBX0GTI=
+github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver v0.83.0/go.mod h1:Z5rds2ALV45/OFitkS3ayHhe1u2Lxo3+nRQ9mFwMaxA=
+github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awsecscontainermetricsreceiver v0.83.0 h1:wANd57Lpv/ueBpmqVzusAMWyXy+y5ko09QwZxGZw5+Y=
+github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awsecscontainermetricsreceiver v0.83.0/go.mod h1:RT4UNOaHyOXGMEq1Azp6Bnm5qUHpx6BT0Ih8o0n9GP8=
+github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awsxrayreceiver v0.83.0 h1:NKNHzx3qvSl5G5drW90XeiDLNZL0Qmgf3j5EdPv36UE=
+github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awsxrayreceiver v0.83.0/go.mod h1:yDRLZz1bwaTXmyTo5b317TN7a5oHkHMuio19nyJdK90=
+github.com/open-telemetry/opentelemetry-collector-contrib/receiver/dockerstatsreceiver v0.83.0 h1:3mtM33u9B2CqZmG5ryP349HvaDYQs8useSJIA441Vgs=
+github.com/open-telemetry/opentelemetry-collector-contrib/receiver/filelogreceiver v0.83.0 h1:TNBKlerQbOS5cTMNxn/pfxxE/PpYLajPaJYQ8wiHrIw=
+github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver v0.83.0 h1:6y1yhypoixL19d4PpmNlPbDt9zhs3btg54RQiC6D7lg=
+github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver v0.83.0 h1:wu0ZdOTDqjl0Ug6J7rAO43GIe7nW/hvPXZ/Kcs7Hmis=
+github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver v0.83.0/go.mod h1:qhWK3ADYGvYaS54XbaH6yK8/iPGQLtimdtIcXevTLkw=
+github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver v0.83.0 h1:rVuP6uUEXa/z8+D6N/Z9ni4czrFEjs7hsWe+DozKpqE=
+github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver v0.83.0/go.mod h1:7sh5S7LJEtuknmYyRzQoWlqbYerr5OvViF/AdkGUsOQ=
+github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.83.0 h1:WaisWqr64HIHo0Gj+FJO2YzjRAwXpq29HRaMbstnXeg=
+github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.83.0/go.mod h1:3Q0ttKtPcn9wY7VvT3P0E6Y6FMDA+yOu64xbr4bqHrM=
+github.com/open-telemetry/opentelemetry-collector-contrib/receiver/statsdreceiver v0.83.0 h1:7MpD62lDNplm6ONFQTQB+Gkb+slo2MzCkpxurldcJAk=
+github.com/open-telemetry/opentelemetry-collector-contrib/receiver/statsdreceiver v0.83.0/go.mod h1:MHvgdYigNKOZmiO6N7Fqa1ZerEyIlLL7cCydn3n9R+c=
+github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver v0.83.0 h1:IJdOOFBDG2Z16utgTc9DnLBc3Q0eiRUgudBjlh85x7o=
+github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver v0.83.0/go.mod h1:1z7uIIoZgcqjYfDefFnjfO6frg3t2UorVaxB1DzcHlA=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
-github.com/opencontainers/image-spec v1.1.0-rc2 h1:2zx/Stx4Wc5pIPDvIxHXvXtQFW/7XWJGmnM7r3wg034=
-github.com/opencontainers/image-spec v1.1.0-rc2/go.mod h1:3OVijpioIKYWTqjiG0zfF6wvoJ4fAXGbjdZuI2NgsRQ=
+github.com/opencontainers/image-spec v1.1.0-rc4 h1:oOxKUJWnFC4YGHCCMNql1x4YaDfYBTS5Y4x/Cgeo1E0=
+github.com/opencontainers/image-spec v1.1.0-rc4/go.mod h1:X4pATf0uXsnn3g5aiGIsVnJBR4mxhKzfwmvK/B2NTm8=
github.com/opencontainers/runc v1.1.4/go.mod h1:1J5XiS+vdZ3wCyZybsuxXZWGrgSr8fFJHLXuG2PsnNg=
github.com/opencontainers/runc v1.1.8 h1:zICRlc+C1XzivLc3nzE+cbJV4LIi8tib6YG0MqC6OqA=
github.com/opencontainers/runc v1.1.8/go.mod h1:CbUumNnWCuTGFukNXahoo/RFBZvDAgRh/smNYNOhA50=
github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
-github.com/opencontainers/runtime-spec v1.0.3-0.20220909204839-494a5a6aca78 h1:R5M2qXZiK/mWPMT4VldCOiSL9HIAMuxQZWdG0CSM5+4=
github.com/opencontainers/runtime-spec v1.0.3-0.20220909204839-494a5a6aca78/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opencontainers/runtime-spec v1.1.0-rc.3 h1:l04uafi6kxByhbxev7OWiuUv0LZxEsYUfDWZ6bztAuU=
+github.com/opencontainers/runtime-spec v1.1.0-rc.3/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/selinux v1.10.0 h1:rAiKF8hTcgLI3w0DHm6i0ylVVcOrlgR1kK99DRLDhyU=
github.com/opencontainers/selinux v1.10.0/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI=
github.com/openshift/api v0.0.0-20180801171038-322a19404e37 h1:05irGU4HK4IauGGDbsk+ZHrm1wOzMLYjMlfaiqMrBYc=
@@ -1467,14 +1463,14 @@ github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJ
github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4=
github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4=
github.com/openzipkin/zipkin-go v0.2.5/go.mod h1:KpXfKdgRDnnhsxw4pNIH9Md5lyFqKUa4YDFlwRYAMyE=
-github.com/openzipkin/zipkin-go v0.4.1 h1:kNd/ST2yLLWhaWrkgchya40TJabe8Hioj9udfPcEO5A=
-github.com/openzipkin/zipkin-go v0.4.1/go.mod h1:qY0VqDSN1pOBN94dBc6w2GJlWLiovAyg7Qt6/I9HecM=
+github.com/openzipkin/zipkin-go v0.4.2 h1:zjqfqHjUpPmB3c1GlCvvgsM1G4LkvqQbBDueDOCg/jA=
+github.com/openzipkin/zipkin-go v0.4.2/go.mod h1:ZeVkFjuuBiSy13y8vpSDCjMi9GoI3hPpCJSBx/EYFhY=
github.com/ory/go-acc v0.2.6/go.mod h1:4Kb/UnPcT8qRAk3IAxta+hvVapdxTLWtrr7bFLlEgpw=
github.com/ory/viper v1.7.5/go.mod h1:ypOuyJmEUb3oENywQZRgeAMwqgOyDqwboO1tj3DjTaM=
github.com/outcaste-io/ristretto v0.2.1 h1:KCItuNIGJZcursqHr3ghO7fc5ddZLEHspL9UR0cQM64=
github.com/outcaste-io/ristretto v0.2.1/go.mod h1:W8HywhmtlopSB1jeMg3JtdIhf+DYkLAr0VN/s4+MHac=
-github.com/ovh/go-ovh v1.3.0 h1:mvZaddk4E4kLcXhzb+cxBsMPYp2pHqiQpWYkInsuZPQ=
-github.com/ovh/go-ovh v1.3.0/go.mod h1:AxitLZ5HBRPyUd+Zl60Ajaag+rNTdVXWIkzfrVuTXWA=
+github.com/ovh/go-ovh v1.4.1 h1:VBGa5wMyQtTP7Zb+w97zRCh9sLtM/2YKRyy+MEJmWaM=
+github.com/ovh/go-ovh v1.4.1/go.mod h1:6bL6pPyUT7tBfI0pqOegJgRjgjuO+mOo+MyXd1EEC0M=
github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM=
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY=
@@ -1582,8 +1578,8 @@ github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1
github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4=
github.com/prometheus/procfs v0.10.1 h1:kYK1Va/YMlutzCGazswoHKo//tZVlFpKYh+PymziUAg=
github.com/prometheus/procfs v0.10.1/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM=
-github.com/prometheus/prometheus v0.43.1 h1:Z/Z0S0CoPUVtUnHGokFksWMssSw2Y1Ir9NnWS1pPWU0=
-github.com/prometheus/prometheus v0.43.1/go.mod h1:2BA14LgBeqlPuzObSEbh+Y+JwLH2GcqDlJKbF2sA6FM=
+github.com/prometheus/prometheus v0.44.0 h1:sgn8Fdx+uE5tHQn0/622swlk2XnIj6udoZCnbVjHIgc=
+github.com/prometheus/prometheus v0.44.0/go.mod h1:aPsmIK3py5XammeTguyqTmuqzX/jeCdyOWWobLHNKQg=
github.com/prometheus/statsd_exporter v0.21.0/go.mod h1:rbT83sZq2V+p73lHhPZfMc3MLCHmSHelCh9hSGYNLTQ=
github.com/prometheus/statsd_exporter v0.22.7 h1:7Pji/i2GuhK6Lu7DHrtTkFmNBCudCPT1pX2CziuyQR0=
github.com/prometheus/statsd_exporter v0.22.7/go.mod h1:N/TevpjkIh9ccs6nuzY3jQn9dFqnUakOjnEuMPJJJnI=
@@ -1626,16 +1622,15 @@ github.com/sagikazarmark/crypt v0.6.0/go.mod h1:U8+INwJo3nBv1m6A/8OBXAq7Jnpspk5A
github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E=
github.com/sanity-io/litter v1.2.0/go.mod h1:JF6pZUFgu2Q0sBZ+HSV35P8TVPI1TTzEwyu9FXAw2W4=
github.com/sanposhiho/wastedassign v0.1.3/go.mod h1:LGpq5Hsv74QaqM47WtIsRSF/ik9kqk07kchgv66tLVE=
-github.com/scaleway/scaleway-sdk-go v1.0.0-beta.14 h1:yFl3jyaSVLNYXlnNYM5z2pagEk1dYQhfr1p20T1NyKY=
-github.com/scaleway/scaleway-sdk-go v1.0.0-beta.14/go.mod h1:fCa7OJZ/9DRTnOKmxvT6pn+LPWUptQAmHF/SBJUGEcg=
+github.com/scaleway/scaleway-sdk-go v1.0.0-beta.15 h1:Y7xOFbD+3jaPw+VN7lkakNJ/pa+ZSQVFp1ONtJaBxns=
+github.com/scaleway/scaleway-sdk-go v1.0.0-beta.15/go.mod h1:fCa7OJZ/9DRTnOKmxvT6pn+LPWUptQAmHF/SBJUGEcg=
github.com/schollz/progressbar/v2 v2.13.2/go.mod h1:6YZjqdthH6SCZKv2rqGryrxPtfmRB/DWZxSMfCXPyD8=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
github.com/seccomp/libseccomp-golang v0.9.2-0.20220502022130-f33da4d89646 h1:RpforrEYXWkmGwJHIGnLZ3tTWStkjVVstwzNGqxX2Ds=
github.com/seccomp/libseccomp-golang v0.9.2-0.20220502022130-f33da4d89646/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg=
-github.com/secure-systems-lab/go-securesystemslib v0.3.1/go.mod h1:o8hhjkbNl2gOamKUA/eNW3xUrntHT9L4W89W1nfj43U=
-github.com/secure-systems-lab/go-securesystemslib v0.5.0 h1:oTiNu0QnulMQgN/hLK124wJD/r2f9ZhIUuKIeBsCBT8=
-github.com/secure-systems-lab/go-securesystemslib v0.5.0/go.mod h1:uoCqUC0Ap7jrBSEanxT+SdACYJTVplRXWLkGMuDjXqk=
+github.com/secure-systems-lab/go-securesystemslib v0.7.0 h1:OwvJ5jQf9LnIAS83waAjPbcMsODrTQUpJ02eNLUoxBg=
+github.com/secure-systems-lab/go-securesystemslib v0.7.0/go.mod h1:/2gYnlnHVQ6xeGtfIqFy7Do03K4cdCY0A/GlJLDKLHI=
github.com/securego/gosec v0.0.0-20200203094520-d13bb6d2420c/go.mod h1:gp0gaHj0WlmPh9BdsTmo1aq6C27yIPWdxCKGFGdVKBE=
github.com/securego/gosec/v2 v2.6.1/go.mod h1:I76p3NTHBXsGhybUW+cEQ692q2Vp+A0Z6ZLzDIZy+Ao=
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
@@ -1645,8 +1640,8 @@ github.com/shirou/gopsutil/v3 v3.22.2/go.mod h1:WapW1AOOPlHyXr+yOyw3uYx36enocrtS
github.com/shirou/gopsutil/v3 v3.22.4/go.mod h1:D01hZJ4pVHPpCTZ3m3T2+wDF2YAGfd+H4ifUguaQzHM=
github.com/shirou/gopsutil/v3 v3.22.5/go.mod h1:so9G9VzeHt/hsd0YwqprnjHnfARAUktauykSbr+y2gA=
github.com/shirou/gopsutil/v3 v3.22.6/go.mod h1:EdIubSnZhbAvBS1yJ7Xi+AShB/hxwLHOMz4MCYz7yMs=
-github.com/shirou/gopsutil/v3 v3.23.6 h1:5y46WPI9QBKBbK7EEccUPNXpJpNrvPuTD0O2zHEHT08=
-github.com/shirou/gopsutil/v3 v3.23.6/go.mod h1:j7QX50DrXYggrpN30W0Mo+I4/8U2UUIQrnrhqUeWrAU=
+github.com/shirou/gopsutil/v3 v3.23.7 h1:C+fHO8hfIppoJ1WdsVm1RoI0RwXoNdfTK7yWXV0wVj4=
+github.com/shirou/gopsutil/v3 v3.23.7/go.mod h1:c4gnmoRC0hQuaLqvxnx1//VXQ0Ms/X9UnJF8pddY5z4=
github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFtM=
github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ=
github.com/shoenig/test v0.6.4 h1:kVTaSd7WLz5WZ2IaoM0RSzRsUD+m8wRR+5qvntpn4LU=
@@ -1749,6 +1744,7 @@ github.com/stretchr/testify v1.7.4/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO
github.com/stretchr/testify v1.7.5/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
+github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/stvp/go-udp-testing v0.0.0-20201019212854-469649b16807/go.mod h1:7jxmlfBCDBXRzr0eAQJ48XC1hBu1np4CS5+cHEYfwpc=
@@ -1757,7 +1753,6 @@ github.com/subosito/gotenv v1.3.0/go.mod h1:YzJjq/33h7nrwdY+iHMhEOEEbW0ovIz0tB6t
github.com/subosito/gotenv v1.4.1 h1:jyEFiXpy21Wm81FBN71l9VoMMV8H8jG+qIK3GCpY6Qs=
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 h1:kdXcSzyDtseVEc4yCz2qF8ZrQvIDBJLl4S1c3GCXmoI=
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
-github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc=
github.com/tdakkota/asciicheck v0.0.0-20200416200610-e657995f937b/go.mod h1:yHp0ai0Z9gUljN3o0xMhYJnH/IcvkdTBOX2fmJ93JEM=
github.com/tetafro/godot v1.4.4/go.mod h1:FVDd4JuKliW3UgjswZfJfHq4vAx0bD/Jd5brJjGeaz4=
github.com/tidwall/gjson v1.10.2/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk=
@@ -1888,50 +1883,50 @@ go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
go.opentelemetry.io/collector v0.52.0/go.mod h1:a9GvaOhyc0nVOUzqvdv5mxyWghCSso/WRO2GgRl4I1g=
go.opentelemetry.io/collector v0.54.0/go.mod h1:FgNzyfb4sAGb5cqusB5znETJ8Pz4OQUBGbOeGIZ2rlQ=
go.opentelemetry.io/collector v0.57.2/go.mod h1:9TwWyMRhbFNzaaGLtm/6poWNDJw+etvQMS6Fy+8/8Xs=
-go.opentelemetry.io/collector v0.82.0 h1:MaKqWT0R4GCdkZDhYWOQkLfoJj9V7GsMbk1gsAuogaw=
-go.opentelemetry.io/collector v0.82.0/go.mod h1:PMmDJkZzC1xpcViHlwMMEVeAnRRl3HYy3nXgD8KJwG0=
-go.opentelemetry.io/collector/component v0.82.0 h1:ID9nOGKBf5G0avhuYQlTzmwAyIMvh9B+tlckLE/4qw4=
-go.opentelemetry.io/collector/component v0.82.0/go.mod h1:jSdGG4L1Ger6ob6lWpr8jmKC2qqC+XZ/gOgu7GUA5xs=
-go.opentelemetry.io/collector/config/configauth v0.82.0 h1:H5xrWyPMotSqajiiH/bay8bpVsT4aq6Vih4OuArXv4Q=
-go.opentelemetry.io/collector/config/configauth v0.82.0/go.mod h1:P0ukmBIUk+HP0O7yfUOKRmPmffneAQgmEL9/iTOo1CU=
-go.opentelemetry.io/collector/config/configcompression v0.82.0 h1:M6a7eiHdBUB8mIioDhWugJfNm7Sw85cvv/OXyTDhtY0=
-go.opentelemetry.io/collector/config/configcompression v0.82.0/go.mod h1:xhHm1sEH7BTECAJo1xn64NMxeIvZGKdVGdSKUUc+YuM=
-go.opentelemetry.io/collector/config/configgrpc v0.82.0 h1:taZWDbtVBm0OOcgnfpVA1X43pmU2oNhj39B2uV3COQk=
-go.opentelemetry.io/collector/config/configgrpc v0.82.0/go.mod h1:NHXHRI40Q7TT/d38DKT30B7DOrVUkj7anEFOD59R9o8=
-go.opentelemetry.io/collector/config/confighttp v0.82.0 h1:2LhyqVTd+Bsr8SgsCq6+q731F81uddK9GwvGhwD/Co0=
-go.opentelemetry.io/collector/config/confighttp v0.82.0/go.mod h1:OHGx/aJqGJ9z2jaBXvaylwkAuiUwikg1/n+RRDpsfOo=
-go.opentelemetry.io/collector/config/confignet v0.82.0 h1:zN9JaFTn7Dth3u5ot6KZJcBZACTEzGqFWYyO5qAlYfo=
-go.opentelemetry.io/collector/config/confignet v0.82.0/go.mod h1:unOg7BZvpt6T5xsf+LyeOQvUhD8ld/2AbfOsmUZ/bPM=
-go.opentelemetry.io/collector/config/configopaque v0.82.0 h1:0Ma63QTr4AkODzEABZHtgiU5Dig8SItpHOuB28UnVSw=
-go.opentelemetry.io/collector/config/configopaque v0.82.0/go.mod h1:pM1oy6gasukw3H6jAvc9Q9OtFaaY2IbfeuwCPAjOgXc=
-go.opentelemetry.io/collector/config/configtelemetry v0.82.0 h1:Zln2K4S5gBDcOpBNIzM0cZS5P6cohEYstHngVvIbGBY=
-go.opentelemetry.io/collector/config/configtelemetry v0.82.0/go.mod h1:KEYQRiYJdx38iZkvcLKBZWH9fK4NeafxBwGRrRKMgyA=
-go.opentelemetry.io/collector/config/configtls v0.82.0 h1:eE/8muTszLlviOGLy5N08BaXLCcYqDW3mKIoKyDDa8o=
-go.opentelemetry.io/collector/config/configtls v0.82.0/go.mod h1:unBTmL1bdpkp9mYEDz7N+Ln4yEwh7Ug74I1HgZMplCk=
-go.opentelemetry.io/collector/config/internal v0.82.0 h1:JnnDARkXrC3OJDsMfQkBgfI0Np4s+18zvoDqZ4OH0+I=
-go.opentelemetry.io/collector/config/internal v0.82.0/go.mod h1:RKcLV1gQxhgwx+6rlPYsvGMq1RZNne3UeOUZkHxJnIg=
-go.opentelemetry.io/collector/confmap v0.82.0 h1:s1Rd8jz21DGlLJfED0Py9VaEq2qPWmWwWy5MriDCX+4=
-go.opentelemetry.io/collector/confmap v0.82.0/go.mod h1:IS/PoUYHETtxV6+fJammTkCxxa4LEwK2u4Cx/bVCH/s=
-go.opentelemetry.io/collector/connector v0.82.0 h1:sCzfcROg0IbmmwoAeLzVfcAs1ZpwlA+UzLzc3xRjOr4=
-go.opentelemetry.io/collector/connector v0.82.0/go.mod h1:yXr1degja36+aAdY3qOv66jCXHs5QjiIeoerygLYC44=
-go.opentelemetry.io/collector/consumer v0.82.0 h1:vZecylW6bpaphetSTjCLgwXLxSYQ6oe/kzwkx4iF5oE=
-go.opentelemetry.io/collector/consumer v0.82.0/go.mod h1:qrhd0i0Gp0RkihcEXb+7Rb584Kal2NmGH1eA4Zg6puA=
-go.opentelemetry.io/collector/exporter v0.82.0 h1:BWsx4rWfVwlV+qNuevSMm+2Cv6uGZYYZ9CEFqq0q+F4=
-go.opentelemetry.io/collector/exporter v0.82.0/go.mod h1:e3VPpLYVNRaF+G2HuKw6A5hTBMYZ4tgRYYzMusfwFJE=
-go.opentelemetry.io/collector/exporter/loggingexporter v0.82.0 h1:HlgFz6qqpjqk9ZmGbaLSdUJxOo6Q3jo3PiJHcuugpaA=
-go.opentelemetry.io/collector/exporter/loggingexporter v0.82.0/go.mod h1:jMMN2fKXx+RKDI3tpqIym5HK6uZnJ3X22hyFgK24cK4=
-go.opentelemetry.io/collector/exporter/otlpexporter v0.82.0 h1:tYCEUQpfyuS/NgrWg9Ulps6f0ffPSCBRTBdK6sXnSaw=
-go.opentelemetry.io/collector/exporter/otlpexporter v0.82.0/go.mod h1:CGeXJuRYxrzTtJUHlpLPHirzcmGq5qbcPff0ec+If14=
-go.opentelemetry.io/collector/exporter/otlphttpexporter v0.82.0 h1:GdnfmEgOY3/GHFereYRcfr8RcDTR0vlK9a3Qtyr0jCg=
-go.opentelemetry.io/collector/exporter/otlphttpexporter v0.82.0/go.mod h1:1a6is4De7GYERjFOa1K9dPbhRwsip5Zj7jt96taViY8=
-go.opentelemetry.io/collector/extension v0.82.0 h1:DH4tqrTOz0HmGDJ6FT/jRD2woQf3ugqC6QqSiQdH3wg=
-go.opentelemetry.io/collector/extension v0.82.0/go.mod h1:n7d0XTh7fdyorZWTc+gLpJh78FS7GjRqIjUiW1xdhe0=
-go.opentelemetry.io/collector/extension/auth v0.82.0 h1:iaxwFslRj6mfzs1wVzbnj+gDU2G98IeXW4tcrq78p5s=
-go.opentelemetry.io/collector/extension/auth v0.82.0/go.mod h1:O1xBcb06pKD8g3FadLDvMa1xKZwPGdHQp4CI8vW3RCM=
-go.opentelemetry.io/collector/extension/ballastextension v0.82.0 h1:GiNzI6Z3iX9DQwJ/fI44o3yWDtecfgAgxs5C8kptP0Q=
-go.opentelemetry.io/collector/extension/ballastextension v0.82.0/go.mod h1:s15/A21hPRjlXH7EelcHlvW2g7A8tEVfReO2T6Wz+C4=
-go.opentelemetry.io/collector/extension/zpagesextension v0.82.0 h1:rZN8OxNy+YBjaDXYGnFoGRPBDruET1lxjVL8hzzgH5k=
-go.opentelemetry.io/collector/extension/zpagesextension v0.82.0/go.mod h1:mUJk+sX47AdkdASvXu26cK/NXOh+5j+TtEdxJA6K+W4=
+go.opentelemetry.io/collector v0.83.0 h1:rKFch1CANepajPwBTvzYj/hKz7RsMyUkPPPNjRCpJ/I=
+go.opentelemetry.io/collector v0.83.0/go.mod h1:MNN79VDXXaRP2ZqcDVOfWH0Jl8BbcMttJ3SY/pU6vxo=
+go.opentelemetry.io/collector/component v0.83.0 h1:7bMbOHQezVx9RhSLu9KQRBhjXmO+CbOVhBk5uySb0fY=
+go.opentelemetry.io/collector/component v0.83.0/go.mod h1:Qy2mIP32UKN1x8rsjJbkgB9obAVu4hRusc1wKNFeV+o=
+go.opentelemetry.io/collector/config/configauth v0.83.0 h1:caIkUszP+kTRVx9HW6z7x05CMnzlaBoP2BKyWDIr2ag=
+go.opentelemetry.io/collector/config/configauth v0.83.0/go.mod h1:PqSIaQryPWiPVMuxlNPEvTpuvhdRq8ySN9nKlk3YbH4=
+go.opentelemetry.io/collector/config/configcompression v0.83.0 h1:WwGfHyGey8JSUsBGUmRHaOzwllrLmsjjo5SZCYfSP14=
+go.opentelemetry.io/collector/config/configcompression v0.83.0/go.mod h1:Mi1/3D+qNlRohrVMbBOj6XSHo7YKAKbgWYisNW2Qobc=
+go.opentelemetry.io/collector/config/configgrpc v0.83.0 h1:bmX6M/L0+gtBSqAvPGh2cV8c4htNFfxa/9ZT8FreOHE=
+go.opentelemetry.io/collector/config/configgrpc v0.83.0/go.mod h1:VpRhSIukmgVjx0HISN5r+y6EYQNGDYLU8j8hVUlcMjc=
+go.opentelemetry.io/collector/config/confighttp v0.83.0 h1:yBra00XanzqXL0kLs3Aaas7RLoL50bM/Za8223vwJik=
+go.opentelemetry.io/collector/config/confighttp v0.83.0/go.mod h1:Eu2WVZa8cy3F8mlxXIFPgzeAeLnaVc5UZzcEtufrOWs=
+go.opentelemetry.io/collector/config/confignet v0.83.0 h1:xaQkMXvId8y7o6ke2qVRZZDqNc315CGkIcZ6LSVxDE0=
+go.opentelemetry.io/collector/config/confignet v0.83.0/go.mod h1:I0iJQDhns1GgXBIumB64WHLPMmJpNdDaEDHQnmaaqsU=
+go.opentelemetry.io/collector/config/configopaque v0.83.0 h1:nhYguW1zVFnQlaZWhwbXJS4/+WEPdQSEL8kTF/j/zeI=
+go.opentelemetry.io/collector/config/configopaque v0.83.0/go.mod h1:Ga1x7xLQXWmwxfW1pPqjI4qT+eNxf9wu2/Mx7O2u01U=
+go.opentelemetry.io/collector/config/configtelemetry v0.83.0 h1:Dx+POy68CFsec9JDYd7cxQPULLfSOAG8ma5Jl3ZZ3+Y=
+go.opentelemetry.io/collector/config/configtelemetry v0.83.0/go.mod h1:8wZuTKLdcWwdB82Jd07TOHsHKuv8l47T+MUGEsPe4z4=
+go.opentelemetry.io/collector/config/configtls v0.83.0 h1:qeAqwvw7qs3fY8wVZzN54E+SNMES7YdATY0ASEbJlUw=
+go.opentelemetry.io/collector/config/configtls v0.83.0/go.mod h1:YMf+YSUhPB/LD5pZSyb3wRi7x6vbiMbONXOWFQnJnZ4=
+go.opentelemetry.io/collector/config/internal v0.83.0 h1:yQZegCOPl4dWUVkr/fscVFU/AjANT5+Tu5XpKztTTSA=
+go.opentelemetry.io/collector/config/internal v0.83.0/go.mod h1:BQs+X52s4BCIshht8qgbT4dqCM5YM2h6RQWln6zWhRA=
+go.opentelemetry.io/collector/confmap v0.83.0 h1:eUaiFdhTLkFdNpMi5FLSHSQ6X2FcEHe0KfEUt9ZtVlI= +go.opentelemetry.io/collector/confmap v0.83.0/go.mod h1:ZsmLyJ+4VeO+qz5o1RKadRoY4Db+d8PYwiLCJ3Z5Et8= +go.opentelemetry.io/collector/connector v0.83.0 h1:wUxpqBTjJ9WSgZnosU26ALGYtwPVzEQxQk7w+mKSOs4= +go.opentelemetry.io/collector/connector v0.83.0/go.mod h1:o9xnA7C+2JzYQMSyWIJz/28tc3lGs3JkKLpckpAdzB8= +go.opentelemetry.io/collector/consumer v0.83.0 h1:8wg0UfFxxaGYsTkQGWuf1pE7C/dTvPkkYmBtR6N5BKc= +go.opentelemetry.io/collector/consumer v0.83.0/go.mod h1:YLbmTqvgIOYUlEeWun8wQ4RZ0HaYjsABWKw7nwU9F3c= +go.opentelemetry.io/collector/exporter v0.83.0 h1:1MPrMaCFvEvl291pAE0hTgPb7YybjSak9O5akzXqnXs= +go.opentelemetry.io/collector/exporter v0.83.0/go.mod h1:5XIrrkfRI7Ndt5FnH0CC6It0VxTHRviGv/I350EWGBs= +go.opentelemetry.io/collector/exporter/loggingexporter v0.83.0 h1:1k0zCEqUfNhWYw8X9zuQ4LNU4o5qwG6f1J3+P8lpe+E= +go.opentelemetry.io/collector/exporter/loggingexporter v0.83.0/go.mod h1:/Sa1r32rwJpBRHSzWclacQlyr6BG/uRuaXqi/CmPvb0= +go.opentelemetry.io/collector/exporter/otlpexporter v0.83.0 h1:k5bJVlXJCJGraslJtOcQPELbRE3gB5MCzzvYurp5aF4= +go.opentelemetry.io/collector/exporter/otlpexporter v0.83.0/go.mod h1:MIGlrd6rhbfsRUgFqGfu7xWfBlG72ZFNGUj2ZR53LGE= +go.opentelemetry.io/collector/exporter/otlphttpexporter v0.83.0 h1:5JeQ6JKiZiRlrcjw4LkzpTkdb3wOflvzYj1kbmr1h+I= +go.opentelemetry.io/collector/exporter/otlphttpexporter v0.83.0/go.mod h1:twNJ2isyvMaDZ7K3OeBtwOHW95uYQ5ylpgMbgyJqhks= +go.opentelemetry.io/collector/extension v0.83.0 h1:O47qpJTeav6jATvnIUvUrO5KBMqa6ySMA5i+7XXW7GY= +go.opentelemetry.io/collector/extension v0.83.0/go.mod h1:gPfwNimQiscUpaUGC/pUniTn4b5O+8IxHVKHDUkGqSI= +go.opentelemetry.io/collector/extension/auth v0.83.0 h1:H0orp7a7/NZae4/ymnC5JpuvO6GNcGLNz+nEDAw9ciU= +go.opentelemetry.io/collector/extension/auth v0.83.0/go.mod h1:Br0OyLU0p+2xS0UvQRvgWmH0Kv/4kPkNVr9AMzee5GM= +go.opentelemetry.io/collector/extension/ballastextension v0.83.0 h1:t0ITNPF7JAXa3+PA4INN6sORIYYgleP84ufPV+yceyU= +go.opentelemetry.io/collector/extension/ballastextension v0.83.0/go.mod h1:ZcsZT3S2EcM8DXz1R5tSVNL9AZmoxpbB65itsrWblhU= +go.opentelemetry.io/collector/extension/zpagesextension v0.83.0 h1:a2Avt+yjaJbHPbiST3I/4GgfxrB3iEpTtgIEGermItw= +go.opentelemetry.io/collector/extension/zpagesextension v0.83.0/go.mod h1:oJBN3N0uusy36qMo7FeWCUv2F9S6JCYw1omvtMtUQ4o= go.opentelemetry.io/collector/featuregate v1.0.0-rcv0014 h1:C9o0mbP0MyygqFnKueVQK/v9jef6zvuttmTGlKaqhgw= go.opentelemetry.io/collector/featuregate v1.0.0-rcv0014/go.mod h1:0mE3mDLmUrOXVoNsuvj+7dV14h/9HFl/Fy9YTLoLObo= go.opentelemetry.io/collector/model v0.49.0/go.mod h1:nOYQv9KoFPs6ihJwOi24qB209EOhS9HkwhGj54YiEAw= @@ -1942,22 +1937,22 @@ go.opentelemetry.io/collector/pdata v0.56.0/go.mod h1:mYcCREWiIJyHss0dbU+GSiz2tm go.opentelemetry.io/collector/pdata v0.57.2/go.mod h1:RU9I8lwBUxucwOsSYzHEcHi15M9QaX78hgQ2PRdSxV0= go.opentelemetry.io/collector/pdata v1.0.0-rcv0014 h1:iT5qH0NLmkGeIdDtnBogYDx7L58t6CaWGL378DEo2QY= go.opentelemetry.io/collector/pdata v1.0.0-rcv0014/go.mod h1:BRvDrx43kiSoUx3mr7SoA7h9B8+OY99mUK+CZSQFWW4= -go.opentelemetry.io/collector/processor v0.82.0 h1:DoqVrrnGYThu/h1sOr6E0hR1Fj5nQT4VT0ptFZcltRk= -go.opentelemetry.io/collector/processor v0.82.0/go.mod h1:B0MtfLWCYNBJ+PXf9k77M2Yn08MKItNB2vuvwhqrtt0= -go.opentelemetry.io/collector/processor/batchprocessor v0.82.0 h1:cUS+9wkzgp5+kgYB7ppSW1HRT+L5fzo3Wmjcm0W6Fho= -go.opentelemetry.io/collector/processor/batchprocessor v0.82.0/go.mod h1:q/+ywtFMrB3yTSSfxw/rpEq07CcgpQeQoROJdi9JOm8= 
-go.opentelemetry.io/collector/processor/memorylimiterprocessor v0.82.0 h1:ACdNV8fO2LM1yw1gBIXN5ybydxZHqAHomkEf1WljPyc= -go.opentelemetry.io/collector/processor/memorylimiterprocessor v0.82.0/go.mod h1:LbeXquV0D0yi+qIohuxSAvp4LBaJbIer9ZCP9+bGBtU= -go.opentelemetry.io/collector/receiver v0.82.0 h1:bc6jc8jmSgc0/C9zqTqqWOGJFVx0AJ53jiToSmQs2SE= -go.opentelemetry.io/collector/receiver v0.82.0/go.mod h1:Uh6BgcTmmrA1Bm/GpKGRY6WwQyPio4yEDsYkUo0A5Gk= -go.opentelemetry.io/collector/receiver/otlpreceiver v0.82.0 h1:LzcmQ9d7NauTVEWfPNwRwqNd/NBQDi+JU0OHWearcEA= -go.opentelemetry.io/collector/receiver/otlpreceiver v0.82.0/go.mod h1:Qt9Ha/yWaU6ni0XwFslNCBX5zZBQHcnxma/sU1s7LH4= +go.opentelemetry.io/collector/processor v0.83.0 h1:oWMpPzHLkzlPXRIa27UsfsaDSbXaF/0qeiCn3BaesGo= +go.opentelemetry.io/collector/processor v0.83.0/go.mod h1:sLxTTqkIhmNtekO0HebXgVclPpm/xoQ4+g8CbzgYBCM= +go.opentelemetry.io/collector/processor/batchprocessor v0.83.0 h1:Zj4VKcO+NPXEONd0pr6y94nbJdJr/I2VLNxCYcfH0Go= +go.opentelemetry.io/collector/processor/batchprocessor v0.83.0/go.mod h1:ZA8h5ZJYFzcRqp33+I/M81RZjnnLWrtQ9Q/I5lVBlLs= +go.opentelemetry.io/collector/processor/memorylimiterprocessor v0.83.0 h1:OZPN7wOunbPnMeoCDPkoseUamtuG8CjiY+hhmD+yU7w= +go.opentelemetry.io/collector/processor/memorylimiterprocessor v0.83.0/go.mod h1:8DM+L0qvJudvIxSwd7wiVMZpiipYJgX5GnS9Zq7hZzQ= +go.opentelemetry.io/collector/receiver v0.83.0 h1:T2LI6BGNGMGBN8DLWUy7KyFXVaQR8ah+7ssCwb8OqNs= +go.opentelemetry.io/collector/receiver v0.83.0/go.mod h1:yEo8Mv57a53Psd2BvUbP/he5ZtdrwHezeLUCTUtf6PA= +go.opentelemetry.io/collector/receiver/otlpreceiver v0.83.0 h1:A0xNr1N/d5jkO+42G9CQ7C69UZhcTsnAibo1FzRA/PA= +go.opentelemetry.io/collector/receiver/otlpreceiver v0.83.0/go.mod h1:h+WL9WneDgX4uHVBdt/yVINSB/NUzVTOqukoBF1F7uc= go.opentelemetry.io/collector/semconv v0.52.0/go.mod h1:SxK0rUnUP7YeDakexzbE/vhimTOHwE6m/4aKKd9e27Q= go.opentelemetry.io/collector/semconv v0.54.0/go.mod h1:HAGkPKNMhc4kEHevEqVIEtUuvsRQMIbUWBb8yBrqEwk= go.opentelemetry.io/collector/semconv v0.56.0/go.mod h1:EH1wbDvTyqKpKBBpoMIe0KQk2plCcFS66Mo17WtR7CQ= go.opentelemetry.io/collector/semconv v0.57.2/go.mod h1:84YnUjmm+nhGu4YTDLnHCbxnL74ooWpismPG79tFD7w= -go.opentelemetry.io/collector/semconv v0.82.0 h1:WUeT2a+uZjI6kLvwcBaJnGvo7KSQ/9dIFRcxOQdXucc= -go.opentelemetry.io/collector/semconv v0.82.0/go.mod h1:TlYPtzvsXyHOgr5eATi43qEMqwSmIziivJB2uctKswo= +go.opentelemetry.io/collector/semconv v0.83.0 h1:zfBJaGiC7XI8dLD/8QIyKre98RHcq3DaG1g1B+U/Dow= +go.opentelemetry.io/collector/semconv v0.83.0/go.mod h1:j/8THcqVxFna1FpvA2zYIsUperEtOaRaqoLYIN4doWw= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.32.0/go.mod h1:J0dBVrt7dPS/lKJyQoW0xzQiUr4r2Ik1VwPjAUWnofI= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.33.0/go.mod h1:y/SlJpJQPd2UzfBCj0E9Flk9FDCtTyqUmaCB41qFrWI= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.42.1-0.20230612162650-64be7e574a17 h1:mdcNStUIXngF/mH3xxAo4nbR4g65IXqLL1SvYMjz7JQ= @@ -2077,13 +2072,13 @@ golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.0.0-20210915214749-c084706c2272/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20211117183948-ae814b36b871/go.mod 
h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220214200702-86341886e292/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220314234659-1baeb1ce4c0b/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= golang.org/x/crypto v0.12.0 h1:tFM/ta59kqch6LlvYnPa0yx5a83cL2nHflFhYKvv9Yk= golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -2177,7 +2172,6 @@ golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/ golang.org/x/net v0.0.0-20200602114024-627f9648deb9/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= @@ -2209,9 +2203,9 @@ golang.org/x/net v0.0.0-20220520000938-2e3eb7b945c2/go.mod h1:CfG3xpIq0wQ8r1q4Su golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.0.0-20220725212005-46097bf591d3/go.mod h1:AaygXjzTFtRAg2ttMY5RMuhpJ3cNnI0XpyFJD1iQRSM= golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= -golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= golang.org/x/net v0.3.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.14.0 h1:BONx9s002vGdD9umnlX1Po8vOZmrgH34qlHcD1MfK14= golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -2234,8 +2228,8 @@ golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= -golang.org/x/oauth2 v0.10.0 h1:zHCpF2Khkwy4mMB4bv0U37YtJdTGW8jI0glAApi0Kh8= -golang.org/x/oauth2 v0.10.0/go.mod h1:kTpgurOux7LqtuxjuyZa4Gj2gdezIt/jQtGnNFfypQI= +golang.org/x/oauth2 v0.11.0 
h1:vPL4xzxBM4niKCW6g9whtaWVXTJf1U5e4aZxxFx/gbU= +golang.org/x/oauth2 v0.11.0/go.mod h1:LdF7O/8bLR/qWK9DrpXmbHLTouvRHK0SgJl0GmDBchk= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -2319,7 +2313,6 @@ golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201024232916-9f70ab9862d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -2381,7 +2374,8 @@ golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= @@ -2389,8 +2383,8 @@ golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9sn golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.11.0 h1:F9tnn/DA/Im8nCwm+fX+1/eBwi4qFjRT++MhtVC4ZX0= golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -2406,6 +2400,7 @@ golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.12.0 h1:k+n5B8goJNdU7hSvEtMUz3d1Q6D/XW4COJSJR6fN0mc= 
golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -2526,7 +2521,6 @@ golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k= golang.org/x/tools v0.4.0/go.mod h1:UE5sM2OK9E/d67R0ANs2xJizIymRP5gJU295PvKXxjQ= golang.org/x/tools v0.12.0 h1:YW6HUoUmYBpwSgyaGaZq1fHjrBjX1rlpZ54T6mu2kss= golang.org/x/tools v0.12.0/go.mod h1:Sc0INKfu04TlqNoRA1hgpFZbhYXHPr4V5DzpSBTPqQM= @@ -2582,8 +2576,8 @@ google.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRR google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw= google.golang.org/api v0.81.0/go.mod h1:FA6Mb/bZxj706H2j+j2d6mHEEaHBmbbWnkfvmorOCko= -google.golang.org/api v0.134.0 h1:ktL4Goua+UBgoP1eL1/60LwZJqa1sIzkLmvoR3hR6Gw= -google.golang.org/api v0.134.0/go.mod h1:sjRL3UnjTx5UqNQS9EWr9N8p7xbHpy1k0XGRLCf3Spk= +google.golang.org/api v0.136.0 h1:e/6enzUE1s4tGPa6Q3ZYShKTtvRc+1Jq0rrafhppmOs= +google.golang.org/api v0.136.0/go.mod h1:XtJfF+V2zgUxelOn5Zs3kECtluMxneJG8ZxUTlLNTPA= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -2679,12 +2673,12 @@ google.golang.org/genproto v0.0.0-20220429170224-98d788798c3e/go.mod h1:8w6bsBMX google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= google.golang.org/genproto v0.0.0-20220519153652-3a47de7e79bd/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= -google.golang.org/genproto v0.0.0-20230706204954-ccb25ca9f130 h1:Au6te5hbKUV8pIYWHqOUZ1pva5qK/rwbIhoXEUB9Lu8= -google.golang.org/genproto v0.0.0-20230706204954-ccb25ca9f130/go.mod h1:O9kGHb51iE/nOGvQaDUuadVYqovW56s5emA88lQnj6Y= -google.golang.org/genproto/googleapis/api v0.0.0-20230706204954-ccb25ca9f130 h1:XVeBY8d/FaK4848myy41HBqnDwvxeV3zMZhwN1TvAMU= -google.golang.org/genproto/googleapis/api v0.0.0-20230706204954-ccb25ca9f130/go.mod h1:mPBs5jNgx2GuQGvFwUvVKqtn6HsUw9nP64BedgvqEsQ= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230720185612-659f7aaaa771 h1:Z8qdAF9GFsmcUuWQ5KVYIpP3PCKydn/YKORnghIalu4= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230720185612-659f7aaaa771/go.mod h1:TUfxEVdsvPg18p6AslUXFoLdpED4oBnGwyqk3dV1XzM= +google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5 h1:L6iMMGrtzgHsWofoFcihmDEMYeDR9KN/ThbPWGrh++g= +google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5/go.mod h1:oH/ZOT02u4kWEp7oYBGYFFkCdKS/uYR9Z7+0/xuuFp8= +google.golang.org/genproto/googleapis/api v0.0.0-20230803162519-f966b187b2e5 h1:nIgk/EEq3/YlnmVVXVnm14rC2oxgs1o0ong4sD/rd44= +google.golang.org/genproto/googleapis/api v0.0.0-20230803162519-f966b187b2e5/go.mod h1:5DZzOUPCLYL3mNkQ0ms0F3EuUNZ7py1Bqeq6sxzI7/Q= +google.golang.org/genproto/googleapis/rpc 
v0.0.0-20230807174057-1744710a1577 h1:wukfNtZmZUurLN/atp2hiIeTKn7QJWIQdHzqmsOnAOk= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230807174057-1744710a1577/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M= google.golang.org/grpc v1.8.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= @@ -2846,8 +2840,8 @@ rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8 rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/controller-runtime v0.15.0 h1:ML+5Adt3qZnMSYxZ7gAverBLNPSMQEibtzAgp0UPojU= -sigs.k8s.io/controller-runtime v0.15.0/go.mod h1:7ngYvp1MLT+9GeZ+6lH3LOlcHkp/+tzA/fmHa4iq9kk= +sigs.k8s.io/controller-runtime v0.15.1 h1:9UvgKD4ZJGcj24vefUFgZFP3xej/3igL9BsOUTb/+4c= +sigs.k8s.io/controller-runtime v0.15.1/go.mod h1:7ngYvp1MLT+9GeZ+6lH3LOlcHkp/+tzA/fmHa4iq9kk= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= diff --git a/testbed/go.mod b/testbed/go.mod index aa3f544dd..fe318bf04 100644 --- a/testbed/go.mod +++ b/testbed/go.mod @@ -4,54 +4,55 @@ go 1.20 require ( github.com/aws-observability/aws-otel-collector v0.31.0 - github.com/open-telemetry/opentelemetry-collector-contrib/testbed v0.82.0 + github.com/open-telemetry/opentelemetry-collector-contrib/testbed v0.83.0 github.com/stretchr/testify v1.8.4 go.opentelemetry.io/collector/pdata v1.0.0-rcv0014 ) require ( - cloud.google.com/go/compute v1.20.1 // indirect + cloud.google.com/go/compute v1.23.0 // indirect cloud.google.com/go/compute/metadata v0.2.4-0.20230617002413-005d2dfb6b68 // indirect contrib.go.opencensus.io/exporter/prometheus v0.4.2 // indirect github.com/Azure/azure-sdk-for-go v65.0.0+incompatible // indirect github.com/Azure/go-autorest v14.2.0+incompatible // indirect github.com/Azure/go-autorest/autorest v0.11.28 // indirect - github.com/Azure/go-autorest/autorest/adal v0.9.22 // indirect + github.com/Azure/go-autorest/autorest/adal v0.9.23 // indirect github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect github.com/Azure/go-autorest/autorest/to v0.4.0 // indirect github.com/Azure/go-autorest/autorest/validation v0.3.1 // indirect github.com/Azure/go-autorest/logger v0.2.1 // indirect github.com/Azure/go-autorest/tracing v0.6.0 // indirect github.com/DataDog/agent-payload/v5 v5.0.89 // indirect - github.com/DataDog/datadog-agent/pkg/obfuscate v0.47.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.47.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/obfuscate v0.48.0-beta.1 // indirect + github.com/DataDog/datadog-agent/pkg/proto v0.48.0-beta.1 // indirect + github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.48.0-beta.1 // indirect github.com/DataDog/datadog-agent/pkg/trace v0.48.0-devel // indirect github.com/DataDog/datadog-agent/pkg/trace/exportable v0.0.0-20201016145401-4646cf596b02 // indirect - github.com/DataDog/datadog-agent/pkg/util/cgroups v0.47.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/log 
v0.47.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/pointer v0.47.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/scrubber v0.47.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/util/cgroups v0.48.0-beta.1 // indirect + github.com/DataDog/datadog-agent/pkg/util/log v0.48.0-beta.1 // indirect + github.com/DataDog/datadog-agent/pkg/util/pointer v0.48.0-beta.1 // indirect + github.com/DataDog/datadog-agent/pkg/util/scrubber v0.48.0-beta.1 // indirect github.com/DataDog/datadog-api-client-go/v2 v2.14.0 // indirect github.com/DataDog/datadog-go/v5 v5.1.1 // indirect - github.com/DataDog/go-tuf v0.3.0--fix-localmeta-fork // indirect + github.com/DataDog/go-tuf v1.0.1-0.5.2 // indirect github.com/DataDog/gohai v0.0.0-20220718130825-1776f9beb9cc // indirect - github.com/DataDog/opentelemetry-mapping-go/pkg/inframetadata v0.5.2 // indirect - github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.5.2 // indirect - github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/logs v0.5.2 // indirect - github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics v0.5.2 // indirect - github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.5.2 // indirect + github.com/DataDog/opentelemetry-mapping-go/pkg/inframetadata v0.7.0 // indirect + github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.7.0 // indirect + github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/logs v0.7.0 // indirect + github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics v0.7.0 // indirect + github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.7.0 // indirect github.com/DataDog/sketches-go v1.4.2 // indirect github.com/DataDog/zstd v1.5.2 // indirect github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.18.0 // indirect + github.com/IBM/sarama v1.40.1 // indirect github.com/Microsoft/go-winio v0.6.1 // indirect - github.com/Shopify/sarama v1.38.1 // indirect github.com/Showmax/go-fqdn v1.0.0 // indirect github.com/alecthomas/participle/v2 v2.0.0 // indirect github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 // indirect - github.com/antonmedv/expr v1.12.5 // indirect + github.com/antonmedv/expr v1.13.0 // indirect github.com/apache/thrift v0.18.1 // indirect github.com/armon/go-metrics v0.4.1 // indirect - github.com/aws/aws-sdk-go v1.44.320 // indirect + github.com/aws/aws-sdk-go v1.44.323 // indirect github.com/aws/aws-sdk-go-v2 v1.20.1 // indirect github.com/aws/aws-sdk-go-v2/config v1.18.33 // indirect github.com/aws/aws-sdk-go-v2/credentials v1.13.32 // indirect @@ -81,7 +82,7 @@ require ( github.com/cyphar/filepath-securejoin v0.2.3 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/dennwc/varint v1.0.0 // indirect - github.com/digitalocean/godo v1.97.0 // indirect + github.com/digitalocean/godo v1.98.0 // indirect github.com/docker/distribution v2.8.2+incompatible // indirect github.com/docker/docker v24.0.5+incompatible // indirect github.com/docker/go-connections v0.4.1-0.20210727194412-58542c764a11 // indirect @@ -128,12 +129,12 @@ require ( github.com/google/uuid v1.3.0 // indirect github.com/googleapis/enterprise-certificate-proxy v0.2.5 // indirect github.com/googleapis/gax-go/v2 v2.12.0 // indirect - github.com/gophercloud/gophercloud v1.2.0 // indirect + github.com/gophercloud/gophercloud v1.3.0 // indirect github.com/gorilla/mux v1.8.0 // indirect github.com/gorilla/websocket v1.5.0 // indirect github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd // indirect 
- github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect - github.com/hashicorp/consul/api v1.23.0 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.2 // indirect + github.com/hashicorp/consul/api v1.24.0 // indirect github.com/hashicorp/cronexpr v1.1.1 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect @@ -143,14 +144,14 @@ require ( github.com/hashicorp/go-retryablehttp v0.7.2 // indirect github.com/hashicorp/go-rootcerts v1.0.2 // indirect github.com/hashicorp/go-uuid v1.0.3 // indirect - github.com/hashicorp/golang-lru v0.6.0 // indirect - github.com/hashicorp/nomad/api v0.0.0-20230308192510-48e7d70fcd4b // indirect + github.com/hashicorp/golang-lru v1.0.2 // indirect + github.com/hashicorp/nomad/api v0.0.0-20230418003350-3067191c5197 // indirect github.com/hashicorp/serf v0.10.1 // indirect - github.com/hetznercloud/hcloud-go v1.41.0 // indirect + github.com/hetznercloud/hcloud-go v1.42.0 // indirect github.com/iancoleman/strcase v0.3.0 // indirect github.com/imdario/mergo v0.3.15 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect - github.com/ionos-cloud/sdk-go/v6 v6.1.4 // indirect + github.com/ionos-cloud/sdk-go/v6 v6.1.6 // indirect github.com/jaegertracing/jaeger v1.41.0 // indirect github.com/jcmturner/aescts/v2 v2.0.0 // indirect github.com/jcmturner/dnsutils/v2 v2.0.0 // indirect @@ -167,13 +168,13 @@ require ( github.com/knadh/koanf/v2 v2.0.1 // indirect github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b // indirect github.com/lightstep/go-expohisto v1.0.0 // indirect - github.com/linode/linodego v1.14.1 // indirect + github.com/linode/linodego v1.16.1 // indirect github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c // indirect github.com/mailru/easyjson v0.7.7 // indirect github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-isatty v0.0.17 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect - github.com/miekg/dns v1.1.51 // indirect + github.com/miekg/dns v1.1.53 // indirect github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/go-homedir v1.1.0 // indirect @@ -187,95 +188,95 @@ require ( github.com/mrunalp/fileutils v0.5.0 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsemfexporter v0.82.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsxrayexporter v0.82.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/exporter/carbonexporter v0.82.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter v0.82.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/exporter/dynatraceexporter v0.82.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/exporter/fileexporter v0.82.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/exporter/jaegerexporter v0.82.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter v0.82.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/exporter/loadbalancingexporter v0.82.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/exporter/logzioexporter v0.82.0 // 
indirect - github.com/open-telemetry/opentelemetry-collector-contrib/exporter/opencensusexporter v0.82.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusexporter v0.82.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter v0.82.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/exporter/sapmexporter v0.82.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/exporter/signalfxexporter v0.82.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/exporter/zipkinexporter v0.82.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/extension/awsproxy v0.82.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension v0.82.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/ecsobserver v0.82.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/extension/pprofextension v0.82.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/extension/sigv4authextension v0.82.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/awsutil v0.82.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/containerinsight v0.82.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/cwlogs v0.82.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/ecsutil v0.82.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/k8s v0.82.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/metrics v0.82.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/proxy v0.82.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/xray v0.82.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.82.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.82.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter v0.82.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8sconfig v0.82.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/internal/kubelet v0.82.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/internal/metadataproviders v0.82.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/internal/sharedcomponent v0.82.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/internal/splunk v0.82.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/pkg/batchperresourceattr v0.82.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/pkg/batchpersignal v0.82.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/pkg/experimentalmetricmetadata v0.82.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl v0.82.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.82.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourcetotelemetry v0.82.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.82.0 // indirect - 
github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/opencensus v0.82.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus v0.82.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheusremotewrite v0.82.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/signalfx v0.82.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin v0.82.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/processor/attributesprocessor v0.82.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/processor/cumulativetodeltaprocessor v0.82.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatorateprocessor v0.82.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor v0.82.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/processor/groupbytraceprocessor v0.82.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sattributesprocessor v0.82.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/processor/metricsgenerationprocessor v0.82.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/processor/metricstransformprocessor v0.82.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor v0.82.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor v0.82.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourceprocessor v0.82.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/processor/spanprocessor v0.82.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor v0.82.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver v0.82.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awsecscontainermetricsreceiver v0.82.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awsxrayreceiver v0.82.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/receiver/carbonreceiver v0.82.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/receiver/datadogreceiver v0.82.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver v0.82.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver v0.82.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/receiver/opencensusreceiver v0.82.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.82.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/receiver/sapmreceiver v0.82.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/receiver/signalfxreceiver v0.82.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/receiver/splunkhecreceiver v0.82.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/receiver/statsdreceiver v0.82.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver v0.82.0 // indirect - 
github.com/open-telemetry/opentelemetry-collector-contrib/testbed/mockdatareceivers/mockawsxrayreceiver v0.82.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/testbed/mockdatasenders/mockdatadogagentexporter v0.82.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsemfexporter v0.83.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsxrayexporter v0.83.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/exporter/carbonexporter v0.83.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter v0.83.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/exporter/dynatraceexporter v0.83.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/exporter/fileexporter v0.83.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/exporter/jaegerexporter v0.83.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter v0.83.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/exporter/loadbalancingexporter v0.83.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/exporter/logzioexporter v0.83.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/exporter/opencensusexporter v0.83.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusexporter v0.83.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter v0.83.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/exporter/sapmexporter v0.83.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/exporter/signalfxexporter v0.83.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/exporter/zipkinexporter v0.83.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/extension/awsproxy v0.83.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension v0.83.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/ecsobserver v0.83.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/extension/pprofextension v0.83.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/extension/sigv4authextension v0.83.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/awsutil v0.83.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/containerinsight v0.83.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/cwlogs v0.83.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/ecsutil v0.83.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/k8s v0.83.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/metrics v0.83.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/proxy v0.83.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/xray v0.83.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.83.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.83.0 // indirect + 
github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter v0.83.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8sconfig v0.83.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/internal/kubelet v0.83.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/internal/metadataproviders v0.83.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/internal/sharedcomponent v0.83.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/internal/splunk v0.83.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/pkg/batchperresourceattr v0.83.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/pkg/batchpersignal v0.83.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/pkg/experimentalmetricmetadata v0.83.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl v0.83.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.83.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourcetotelemetry v0.83.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.83.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/opencensus v0.83.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus v0.83.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheusremotewrite v0.83.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/signalfx v0.83.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin v0.83.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/processor/attributesprocessor v0.83.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/processor/cumulativetodeltaprocessor v0.83.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatorateprocessor v0.83.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor v0.83.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/processor/groupbytraceprocessor v0.83.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sattributesprocessor v0.83.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/processor/metricsgenerationprocessor v0.83.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/processor/metricstransformprocessor v0.83.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor v0.83.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor v0.83.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourceprocessor v0.83.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/processor/spanprocessor v0.83.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor v0.83.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver v0.83.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awsecscontainermetricsreceiver v0.83.0 // indirect + 
github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awsxrayreceiver v0.83.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/receiver/carbonreceiver v0.83.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/receiver/datadogreceiver v0.83.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver v0.83.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver v0.83.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/receiver/opencensusreceiver v0.83.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.83.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/receiver/sapmreceiver v0.83.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/receiver/signalfxreceiver v0.83.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/receiver/splunkhecreceiver v0.83.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/receiver/statsdreceiver v0.83.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver v0.83.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/testbed/mockdatareceivers/mockawsxrayreceiver v0.83.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/testbed/mockdatasenders/mockdatadogagentexporter v0.83.0 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect - github.com/opencontainers/image-spec v1.1.0-rc2 // indirect + github.com/opencontainers/image-spec v1.1.0-rc4 // indirect github.com/opencontainers/runc v1.1.8 // indirect - github.com/opencontainers/runtime-spec v1.0.3-0.20220909204839-494a5a6aca78 // indirect + github.com/opencontainers/runtime-spec v1.1.0-rc.3 // indirect github.com/opencontainers/selinux v1.10.0 // indirect github.com/openshift/api v3.9.0+incompatible // indirect github.com/openshift/client-go v0.0.0-20210521082421-73d9475a9142 // indirect github.com/opentracing/opentracing-go v1.2.0 // indirect - github.com/openzipkin/zipkin-go v0.4.1 // indirect + github.com/openzipkin/zipkin-go v0.4.2 // indirect github.com/outcaste-io/ristretto v0.2.1 // indirect - github.com/ovh/go-ovh v1.3.0 // indirect + github.com/ovh/go-ovh v1.4.1 // indirect github.com/patrickmn/go-cache v2.1.0+incompatible // indirect github.com/philhofer/fwd v1.1.2 // indirect github.com/pierrec/lz4/v4 v4.1.17 // indirect @@ -287,14 +288,14 @@ require ( github.com/prometheus/common v0.44.0 // indirect github.com/prometheus/common/sigv4 v0.1.0 // indirect github.com/prometheus/procfs v0.10.1 // indirect - github.com/prometheus/prometheus v0.43.1 // indirect + github.com/prometheus/prometheus v0.44.0 // indirect github.com/prometheus/statsd_exporter v0.22.7 // indirect github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect github.com/rs/cors v1.9.0 // indirect - github.com/scaleway/scaleway-sdk-go v1.0.0-beta.14 // indirect + github.com/scaleway/scaleway-sdk-go v1.0.0-beta.15 // indirect github.com/seccomp/libseccomp-golang v0.9.2-0.20220502022130-f33da4d89646 // indirect - github.com/secure-systems-lab/go-securesystemslib v0.5.0 // indirect - github.com/shirou/gopsutil/v3 v3.23.6 // indirect + github.com/secure-systems-lab/go-securesystemslib v0.7.0 // indirect + github.com/shirou/gopsutil/v3 v3.23.7 // indirect github.com/shoenig/go-m1cpu v0.1.6 // indirect 
github.com/signalfx/com_signalfx_metrics_protobuf v0.0.3 // indirect github.com/signalfx/gohistogram v0.0.0-20160107210732-1ccfd2ff5083 // indirect @@ -325,35 +326,35 @@ require ( github.com/xdg-go/stringprep v1.0.4 // indirect github.com/yusufpapurcu/wmi v1.2.3 // indirect go.opencensus.io v0.24.0 // indirect - go.opentelemetry.io/collector v0.82.0 // indirect - go.opentelemetry.io/collector/component v0.82.0 // indirect - go.opentelemetry.io/collector/config/configauth v0.82.0 // indirect - go.opentelemetry.io/collector/config/configcompression v0.82.0 // indirect - go.opentelemetry.io/collector/config/configgrpc v0.82.0 // indirect - go.opentelemetry.io/collector/config/confighttp v0.82.0 // indirect - go.opentelemetry.io/collector/config/confignet v0.82.0 // indirect - go.opentelemetry.io/collector/config/configopaque v0.82.0 // indirect - go.opentelemetry.io/collector/config/configtelemetry v0.82.0 // indirect - go.opentelemetry.io/collector/config/configtls v0.82.0 // indirect - go.opentelemetry.io/collector/config/internal v0.82.0 // indirect - go.opentelemetry.io/collector/confmap v0.82.0 // indirect - go.opentelemetry.io/collector/connector v0.82.0 // indirect - go.opentelemetry.io/collector/consumer v0.82.0 // indirect - go.opentelemetry.io/collector/exporter v0.82.0 // indirect - go.opentelemetry.io/collector/exporter/loggingexporter v0.82.0 // indirect - go.opentelemetry.io/collector/exporter/otlpexporter v0.82.0 // indirect - go.opentelemetry.io/collector/exporter/otlphttpexporter v0.82.0 // indirect - go.opentelemetry.io/collector/extension v0.82.0 // indirect - go.opentelemetry.io/collector/extension/auth v0.82.0 // indirect - go.opentelemetry.io/collector/extension/ballastextension v0.82.0 // indirect - go.opentelemetry.io/collector/extension/zpagesextension v0.82.0 // indirect + go.opentelemetry.io/collector v0.83.0 // indirect + go.opentelemetry.io/collector/component v0.83.0 // indirect + go.opentelemetry.io/collector/config/configauth v0.83.0 // indirect + go.opentelemetry.io/collector/config/configcompression v0.83.0 // indirect + go.opentelemetry.io/collector/config/configgrpc v0.83.0 // indirect + go.opentelemetry.io/collector/config/confighttp v0.83.0 // indirect + go.opentelemetry.io/collector/config/confignet v0.83.0 // indirect + go.opentelemetry.io/collector/config/configopaque v0.83.0 // indirect + go.opentelemetry.io/collector/config/configtelemetry v0.83.0 // indirect + go.opentelemetry.io/collector/config/configtls v0.83.0 // indirect + go.opentelemetry.io/collector/config/internal v0.83.0 // indirect + go.opentelemetry.io/collector/confmap v0.83.0 // indirect + go.opentelemetry.io/collector/connector v0.83.0 // indirect + go.opentelemetry.io/collector/consumer v0.83.0 // indirect + go.opentelemetry.io/collector/exporter v0.83.0 // indirect + go.opentelemetry.io/collector/exporter/loggingexporter v0.83.0 // indirect + go.opentelemetry.io/collector/exporter/otlpexporter v0.83.0 // indirect + go.opentelemetry.io/collector/exporter/otlphttpexporter v0.83.0 // indirect + go.opentelemetry.io/collector/extension v0.83.0 // indirect + go.opentelemetry.io/collector/extension/auth v0.83.0 // indirect + go.opentelemetry.io/collector/extension/ballastextension v0.83.0 // indirect + go.opentelemetry.io/collector/extension/zpagesextension v0.83.0 // indirect go.opentelemetry.io/collector/featuregate v1.0.0-rcv0014 // indirect - go.opentelemetry.io/collector/processor v0.82.0 // indirect - go.opentelemetry.io/collector/processor/batchprocessor v0.82.0 // indirect - 
go.opentelemetry.io/collector/processor/memorylimiterprocessor v0.82.0 // indirect - go.opentelemetry.io/collector/receiver v0.82.0 // indirect - go.opentelemetry.io/collector/receiver/otlpreceiver v0.82.0 // indirect - go.opentelemetry.io/collector/semconv v0.82.0 // indirect + go.opentelemetry.io/collector/processor v0.83.0 // indirect + go.opentelemetry.io/collector/processor/batchprocessor v0.83.0 // indirect + go.opentelemetry.io/collector/processor/memorylimiterprocessor v0.83.0 // indirect + go.opentelemetry.io/collector/receiver v0.83.0 // indirect + go.opentelemetry.io/collector/receiver/otlpreceiver v0.83.0 // indirect + go.opentelemetry.io/collector/semconv v0.83.0 // indirect go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.42.1-0.20230612162650-64be7e574a17 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.42.0 // indirect go.opentelemetry.io/contrib/propagators/b3 v1.17.0 // indirect @@ -380,18 +381,18 @@ require ( golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1 // indirect golang.org/x/mod v0.12.0 // indirect golang.org/x/net v0.14.0 // indirect - golang.org/x/oauth2 v0.10.0 // indirect + golang.org/x/oauth2 v0.11.0 // indirect golang.org/x/sys v0.11.0 // indirect golang.org/x/term v0.11.0 // indirect golang.org/x/text v0.12.0 // indirect golang.org/x/time v0.3.0 // indirect golang.org/x/tools v0.12.0 // indirect gonum.org/v1/gonum v0.13.0 // indirect - google.golang.org/api v0.134.0 // indirect + google.golang.org/api v0.136.0 // indirect google.golang.org/appengine v1.6.7 // indirect - google.golang.org/genproto v0.0.0-20230706204954-ccb25ca9f130 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20230706204954-ccb25ca9f130 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20230720185612-659f7aaaa771 // indirect + google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20230803162519-f966b187b2e5 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20230807174057-1744710a1577 // indirect google.golang.org/grpc v1.57.0 // indirect google.golang.org/protobuf v1.31.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect @@ -407,7 +408,7 @@ require ( k8s.io/klog/v2 v2.90.1 // indirect k8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f // indirect k8s.io/utils v0.0.0-20230308161112-d77c459e9343 // indirect - sigs.k8s.io/controller-runtime v0.15.0 // indirect + sigs.k8s.io/controller-runtime v0.15.1 // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect sigs.k8s.io/yaml v1.3.0 // indirect diff --git a/testbed/go.sum b/testbed/go.sum index b3c3e979a..14d0e9f82 100644 --- a/testbed/go.sum +++ b/testbed/go.sum @@ -41,8 +41,8 @@ cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJW cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M= cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz/FMzPu0s= cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU= -cloud.google.com/go/compute v1.20.1 h1:6aKEtlUiwEpJzM001l0yFkpXmUVXaN8W+fbkb2AZNbg= -cloud.google.com/go/compute v1.20.1/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= +cloud.google.com/go/compute v1.23.0 h1:tP41Zoavr8ptEqaW6j+LQOnyBBhO7OkOMAGrgLopTwY= +cloud.google.com/go/compute v1.23.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= 
 cloud.google.com/go/compute/metadata v0.2.4-0.20230617002413-005d2dfb6b68 h1:aRVqY1p2IJaBGStWMsQMpkAa83cPkCDLl80eOj0Rbz4=
 cloud.google.com/go/compute/metadata v0.2.4-0.20230617002413-005d2dfb6b68/go.mod h1:1a3eRNYX12fs5UABBIXS8HXVvQbX9hRB/RkEBPORpe8=
 cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
@@ -76,8 +76,8 @@ github.com/Azure/go-autorest/autorest v0.11.28 h1:ndAExarwr5Y+GaHE6VCaY1kyS/HwwG
 github.com/Azure/go-autorest/autorest v0.11.28/go.mod h1:MrkzG3Y3AH668QyF9KRk5neJnGgmhQ6krbhR8Q5eMvA=
 github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A=
 github.com/Azure/go-autorest/autorest/adal v0.9.18/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ=
-github.com/Azure/go-autorest/autorest/adal v0.9.22 h1:/GblQdIudfEM3AWWZ0mrYJQSd7JS4S/Mbzh6F0ov0Xc=
-github.com/Azure/go-autorest/autorest/adal v0.9.22/go.mod h1:XuAbAEUv2Tta//+voMI038TrJBqjKam0me7qR+L8Cmk=
+github.com/Azure/go-autorest/autorest/adal v0.9.23 h1:Yepx8CvFxwNKpH6ja7RZ+sKX+DWYNldbLiALMC3BTz8=
+github.com/Azure/go-autorest/autorest/adal v0.9.23/go.mod h1:5pcMqFkdPhviJdlEy3kC/v1ZLnQl0MH6XA5YCcMhy4c=
 github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw=
 github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74=
 github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
@@ -97,24 +97,26 @@ github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym
 github.com/DATA-DOG/go-sqlmock v1.3.3/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM=
 github.com/DataDog/agent-payload/v5 v5.0.89 h1:uuLWf/exyNYBnheG9OH2dOWZpCJvaEHX3W9CAd8KarU=
 github.com/DataDog/agent-payload/v5 v5.0.89/go.mod h1:oQZi1VZp1e3QvlSUX4iphZCpJaFepUxWq0hNXxihKBM=
-github.com/DataDog/datadog-agent/pkg/obfuscate v0.47.0-rc.3 h1:imU7+gtggz9YPG/wJdrtLmL+bvafq+08oivQ8VRO4g8=
-github.com/DataDog/datadog-agent/pkg/obfuscate v0.47.0-rc.3/go.mod h1:e933RWa4kAWuHi5jpzEuOiULlv21HcCFEVIYegmaB5c=
-github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.47.0-rc.3 h1:LFckPfptq8yevWp5TSAbHxGv4LqaEWyRyEQAj+ioKl0=
-github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.47.0-rc.3/go.mod h1:7uPrckBTIabtHAuoJnQes2XuDmopCImBPhM+o66DvA0=
+github.com/DataDog/datadog-agent/pkg/obfuscate v0.48.0-beta.1 h1:g7kb8NGjApkncwuXjkEpYHjYj08hqklvjqB3Gs2uPpQ=
+github.com/DataDog/datadog-agent/pkg/obfuscate v0.48.0-beta.1/go.mod h1:e933RWa4kAWuHi5jpzEuOiULlv21HcCFEVIYegmaB5c=
+github.com/DataDog/datadog-agent/pkg/proto v0.48.0-beta.1 h1:Htxj/RE55AeDZ+OE6+x+kJQz3toGWzR40Baq0Dknv8U=
+github.com/DataDog/datadog-agent/pkg/proto v0.48.0-beta.1/go.mod h1:O3WwGRPZxs4BpB2ccUvIIPprhscWBRpudJT6mC+7sr8=
+github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.48.0-beta.1 h1:We9Y6+kwCnSOQilk2koeADjbZgMHFDl6iHBaobU5nAw=
+github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.48.0-beta.1/go.mod h1:5Q39ZOIOwZMnFyRadp+5gH1bFdjmb+Pgxe+j5XOwaTg=
 github.com/DataDog/datadog-agent/pkg/trace v0.48.0-devel h1:sC2wq2fuI1r3U6FmUsn4clsrFOql5XBfs1EG15LPDEc=
 github.com/DataDog/datadog-agent/pkg/trace v0.48.0-devel/go.mod h1:CmdN7Zrj+S+2hOSGW5hFT2LC2FVIF/avJTyvhUjaueI=
 github.com/DataDog/datadog-agent/pkg/trace/exportable v0.0.0-20201016145401-4646cf596b02 h1:N2BRKjJ/c+ipDwt5b+ijqEc2EsmK3zXq2lNeIPnSwMI=
 github.com/DataDog/datadog-agent/pkg/trace/exportable v0.0.0-20201016145401-4646cf596b02/go.mod h1:EalMiS87Guu6PkLdxz7gmWqi+dRs9sjYLTOyTrM/aVU=
-github.com/DataDog/datadog-agent/pkg/util/cgroups v0.47.0-rc.3 h1:thSia6kXdVcSozdPQOAzSDCpcPBF91ScafOzETLbJP8=
-github.com/DataDog/datadog-agent/pkg/util/cgroups v0.47.0-rc.3/go.mod h1:TmxM8Pe+1QBWfM1JisS3xjvX1/kk655XY/IjqA36g6s=
+github.com/DataDog/datadog-agent/pkg/util/cgroups v0.48.0-beta.1 h1:9iyw6jSwJwsFe8TooU8mqMhMfFiW6N/05OnNMg91kBY=
+github.com/DataDog/datadog-agent/pkg/util/cgroups v0.48.0-beta.1/go.mod h1:TmxM8Pe+1QBWfM1JisS3xjvX1/kk655XY/IjqA36g6s=
 github.com/DataDog/datadog-agent/pkg/util/log v0.0.0-20201009091607-ce4e57cdf8f4/go.mod h1:cRy7lwapA3jcjnX74kU6NFkXaRGQyB0l/QZA0IwYGEQ=
 github.com/DataDog/datadog-agent/pkg/util/log v0.0.0-20201009092105-58e18918b2db/go.mod h1:cRy7lwapA3jcjnX74kU6NFkXaRGQyB0l/QZA0IwYGEQ=
-github.com/DataDog/datadog-agent/pkg/util/log v0.47.0-rc.3 h1:Pb223YrOHuT2io1nDzToc+bSDoikxAITjTl9kZvjFSY=
-github.com/DataDog/datadog-agent/pkg/util/log v0.47.0-rc.3/go.mod h1:Ci+eWLEPbZsqy9/eNBMN1FNJUqiPx+HrLcGGpVmujJ8=
-github.com/DataDog/datadog-agent/pkg/util/pointer v0.47.0-rc.3 h1:CPrsO0OU+MkjvNQhW153DQF4zuHvGkdkxGOp2M2/y34=
-github.com/DataDog/datadog-agent/pkg/util/pointer v0.47.0-rc.3/go.mod h1:HMpYpkuxDFYuYLjDTKoG0NjtPoAwIymvBEhlA3pJbJk=
-github.com/DataDog/datadog-agent/pkg/util/scrubber v0.47.0-rc.3 h1:gAL3HM+Tg5S0MLBjv5K8+elDJS6COf+9Io9dVh7EwYc=
-github.com/DataDog/datadog-agent/pkg/util/scrubber v0.47.0-rc.3/go.mod h1:Whfh1SJOwtp2YvDUNzqw/jmSbGOOso+HJHOEJULh1+M=
+github.com/DataDog/datadog-agent/pkg/util/log v0.48.0-beta.1 h1:k4tcg077NsPJRxtuGdYEm9kge+zq5QO5x6Yv3R5BwpE=
+github.com/DataDog/datadog-agent/pkg/util/log v0.48.0-beta.1/go.mod h1:Ci+eWLEPbZsqy9/eNBMN1FNJUqiPx+HrLcGGpVmujJ8=
+github.com/DataDog/datadog-agent/pkg/util/pointer v0.48.0-beta.1 h1:detMhMfwchco20v12RjjRisxP3V0mtLEjcgJZGk2cmg=
+github.com/DataDog/datadog-agent/pkg/util/pointer v0.48.0-beta.1/go.mod h1:HMpYpkuxDFYuYLjDTKoG0NjtPoAwIymvBEhlA3pJbJk=
+github.com/DataDog/datadog-agent/pkg/util/scrubber v0.48.0-beta.1 h1:EOrKgyyubncuS4LpF8aCj/12i1+GmPV+PCfj8mDaF2c=
+github.com/DataDog/datadog-agent/pkg/util/scrubber v0.48.0-beta.1/go.mod h1:Whfh1SJOwtp2YvDUNzqw/jmSbGOOso+HJHOEJULh1+M=
 github.com/DataDog/datadog-agent/pkg/util/winutil v0.0.0-20201009092105-58e18918b2db/go.mod h1:EtS4X73GXAyrpVddkLQ4SewSQX+zv284e8iIkVBXgtk=
 github.com/DataDog/datadog-api-client-go/v2 v2.14.0 h1:cLkqg/D63I6BAxIIg6g8xMWjrAMXcvb5vbD8ixOVVyo=
 github.com/DataDog/datadog-api-client-go/v2 v2.14.0/go.mod h1:kntOqXEh1SmjwSDzW/eJkr9kS7EqttvEkelglWtJRbg=
@@ -122,21 +124,21 @@ github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3
 github.com/DataDog/datadog-go v3.5.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
 github.com/DataDog/datadog-go/v5 v5.1.1 h1:JLZ6s2K1pG2h9GkvEvMdEGqMDyVLEAccdX5TltWcLMU=
 github.com/DataDog/datadog-go/v5 v5.1.1/go.mod h1:KhiYb2Badlv9/rofz+OznKoEF5XKTonWyhx5K83AP8E=
-github.com/DataDog/go-tuf v0.3.0--fix-localmeta-fork h1:yBq5PrAtrM4yVeSzQ+bn050+Ysp++RKF1QmtkL4VqvU=
-github.com/DataDog/go-tuf v0.3.0--fix-localmeta-fork/go.mod h1:yA5JwkZsHTLuqq3zaRgUQf35DfDkpOZqgtBqHKpwrBs=
+github.com/DataDog/go-tuf v1.0.1-0.5.2 h1:gld/e3MXfFVB/O8hc3mloP1ayFk75Mmdkmll/9lyd9I=
+github.com/DataDog/go-tuf v1.0.1-0.5.2/go.mod h1:zBcq6f654iVqmkk8n2Cx81E1JnNTMOAx1UEO/wZR+P0=
 github.com/DataDog/gohai v0.0.0-20220718130825-1776f9beb9cc h1:gtlKB6B50/UEuFm1LeMn0R5a+tubx69OecPqxfXJDmU=
 github.com/DataDog/gohai v0.0.0-20220718130825-1776f9beb9cc/go.mod h1:oyPC4jWHHjVVNjslDAKp8EqfQBaSmODjHt4HCX+C+9Q=
-github.com/DataDog/opentelemetry-mapping-go/pkg/inframetadata v0.5.2 h1:nwZgSRQb8edVTVcFj5tkl3u3BaP6XrFxSw+tEv9A0hY=
-github.com/DataDog/opentelemetry-mapping-go/pkg/inframetadata v0.5.2/go.mod h1:u+DVO6wIQjBFuz2YzDhxOhHB5vf9CTKxB+9cJYs8SRk=
-github.com/DataDog/opentelemetry-mapping-go/pkg/internal/sketchtest v0.5.2 h1:W47xIROVye+D6WxkZcy8ETomfZlTNWoVZODwAh4LdeE=
-github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.5.2 h1:JRVQga0KlFCMyuKF/ghrZtRpmYL3XWRGXpSB5Qdk5Ko=
-github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.5.2/go.mod h1:6x6OujLzkt7Wwlu/6kYO5+8FNRBi1HEw8Qm6/qvTOQA=
-github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/logs v0.5.2 h1:xY5LVtbmcm3zZ8Ccxc8+mzkEmlOdeNQnXPDdZiXiXq4=
-github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/logs v0.5.2/go.mod h1:Ge92/UCQeo8i0RQgSnowR9uto3VhyxM6YS3W6xJD8rc=
-github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics v0.5.2 h1:FbQSZ6uXhuHzgwC73MUxqvHwV0uxKiGAeAAZIMrfUAc=
-github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics v0.5.2/go.mod h1:oPpGMNpwga8zTGUJfLy3Z/u4l6bvEYuRatJkgSUazr4=
-github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.5.2 h1:C0uzQwHCKubfmbvaZF/Qi6ernigbcoWt9A+U+s0iQGg=
-github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.5.2/go.mod h1:RT78x34OmVb0wuZLtmzzRRy43+7pCCA6ZEOGQ9mA5w0=
+github.com/DataDog/opentelemetry-mapping-go/pkg/inframetadata v0.7.0 h1:l21vDOju9zcCx+RYrNrsNs9qpWaLA8SKTHTDiHUhgEA=
+github.com/DataDog/opentelemetry-mapping-go/pkg/inframetadata v0.7.0/go.mod h1:0n4yKpsgezj7KqhkLM5weDi2kmtNlRCdlAmHN7WfMhQ=
+github.com/DataDog/opentelemetry-mapping-go/pkg/internal/sketchtest v0.7.0 h1:mVnISj3nNq9fQM7C7zi5iuEHWe7tAHS/VNPBs3qc/ug=
+github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.7.0 h1:8STZKmgRY3OvrUkaNglRiLgEvAMcTt2l+naAlW+p36k=
+github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.7.0/go.mod h1:mpbmVkOkmJq5KmHxi+zlvYXQD0o/x1MMS16CNWO8p9U=
+github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/logs v0.7.0 h1:j2wXBnS0KwLzB7tG63vI+fi6hHRbvprRHmv8XsgLfbs=
+github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/logs v0.7.0/go.mod h1:CUx9KlayjXNeJeL5ZCjbXKJ/JFYrrCOFSKZ37LlXH/w=
+github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics v0.7.0 h1:433zmJS94Pids2V+l5fQGOSfZPxnibHXAd3iqB7P4HY=
+github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics v0.7.0/go.mod h1:uVTWlYOzK82Cf88d57GvcQ+zgPW/kyOBn4xp6tCqi5Y=
+github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.7.0 h1:8sRT2Yb9eW7GhRAkqMBrcFDb6WW9D/KslM8D+6EcsYk=
+github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.7.0/go.mod h1:m/Vn+wxCD5ND4e0RwIweiBfpihD3NHuVCRDjSvhHYps=
 github.com/DataDog/sketches-go v1.4.2 h1:gppNudE9d19cQ98RYABOetxIhpTCl4m7CnbRZjvVA/o=
 github.com/DataDog/sketches-go v1.4.2/go.mod h1:xJIXldczJyyjnbDop7ZZcLxJdV3+7Kra7H1KMgpgkLk=
 github.com/DataDog/zstd v1.3.6-0.20190409195224-796139022798/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo=
@@ -151,6 +153,8 @@ github.com/HdrHistogram/hdrhistogram-go v1.0.1/go.mod h1:BWJ+nMSHY3L41Zj7CA3uXnl
 github.com/HdrHistogram/hdrhistogram-go v1.1.0/go.mod h1:yDgFjdqOqDEKOvasDdhWNXYg9BVp4O+o5f6V/ehm6Oo=
 github.com/HdrHistogram/hdrhistogram-go v1.1.2 h1:5IcZpTvzydCQeHzK4Ef/D5rrSqwxob0t8PQPMybUNFM=
 github.com/HdrHistogram/hdrhistogram-go v1.1.2/go.mod h1:yDgFjdqOqDEKOvasDdhWNXYg9BVp4O+o5f6V/ehm6Oo=
+github.com/IBM/sarama v1.40.1 h1:lL01NNg/iBeigUbT+wpPysuTYW6roHo6kc1QrffRf0k=
+github.com/IBM/sarama v1.40.1/go.mod h1:+5OFwA5Du9I6QrznhaMHsuwWdWZNMjaBSIxEWEgKOYE=
 github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0=
 github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y=
 github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA=
@@ -168,8 +172,6 @@ github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdko
 github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
 github.com/Shopify/sarama v1.22.2-0.20190604114437-cd910a683f9f/go.mod h1:XLH1GYJnLVE0XCr6KdJGVJRTwY30moWNJ4sERjXX6fs=
 github.com/Shopify/sarama v1.32.0/go.mod h1:+EmJJKZWVT/faR9RcOxJerP+LId4iWdQPBGLy1Y1Njs=
-github.com/Shopify/sarama v1.38.1 h1:lqqPUPQZ7zPqYlWpTh+LQ9bhYNu2xJL6k1SJN4WVe2A=
-github.com/Shopify/sarama v1.38.1/go.mod h1:iwv9a67Ha8VNa+TifujYoWGxWnu2kNVAQdSdZ4X2o5g=
 github.com/Shopify/toxiproxy v2.1.4+incompatible h1:TKdv8HiTLgE5wdJuEML90aBgNWsokNbMijUGhmcoBJc=
 github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
 github.com/Shopify/toxiproxy/v2 v2.3.0/go.mod h1:KvQTtB6RjCJY4zqNJn7C7JDFgsG5uoHYDirfUfpIm0c=
@@ -199,8 +201,8 @@ github.com/andybalholm/brotli v1.0.0/go.mod h1:loMXtMfwqflxFJPmdbJO0a3KNoPuLBgiu
 github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQYkqmusNa815XgQio277wI47sdRh1dUOLdyC6Q=
 github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
 github.com/antonmedv/expr v1.9.0/go.mod h1:5qsM3oLGDND7sDmQGDXHkYfkjYMUX14qsgqmHhwGEk8=
-github.com/antonmedv/expr v1.12.5 h1:Fq4okale9swwL3OeLLs9WD9H6GbgBLJyN/NUHRv+n0E=
-github.com/antonmedv/expr v1.12.5/go.mod h1:FPC8iWArxls7axbVLsW+kpg1mz29A1b2M6jt+hZfDkU=
+github.com/antonmedv/expr v1.13.0 h1:8YrTtlCzlOtXw+hpeCLDLL2uo0C0k6jmYpYTGws5c5w=
+github.com/antonmedv/expr v1.13.0/go.mod h1:FPC8iWArxls7axbVLsW+kpg1mz29A1b2M6jt+hZfDkU=
 github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
 github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
 github.com/apache/thrift v0.16.0/go.mod h1:PHK3hniurgQaNMZYaCLEqXKsYK8upmhPbmdP2FXSqgU=
@@ -229,8 +231,8 @@ github.com/aws/aws-sdk-go v1.35.24/go.mod h1:tlPOdRjfxPBpNIwqDj61rmsnA85v9jc0Ps9
 github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
 github.com/aws/aws-sdk-go v1.38.68/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
 github.com/aws/aws-sdk-go v1.40.45/go.mod h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm+LY1U59Q=
-github.com/aws/aws-sdk-go v1.44.320 h1:o2cno15HVUYj+IAgZHJ5No6ifAxwa2HcluzahMEPfOw=
-github.com/aws/aws-sdk-go v1.44.320/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
+github.com/aws/aws-sdk-go v1.44.323 h1:97/dn93DWrN1VfhAWQ2tV+xuE6oO/LO9rSsEsuC4PLU=
+github.com/aws/aws-sdk-go v1.44.323/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
 github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g=
 github.com/aws/aws-sdk-go-v2 v1.7.0/go.mod h1:tb9wi5s61kTDA5qCkcDbt3KRVV74GGslQkl/DRdX/P4=
 github.com/aws/aws-sdk-go-v2 v1.9.1/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVjy53i/mX/4=
@@ -337,7 +339,6 @@ github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 h1:/inchEIKaYC1Akx+H+g
 github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
 github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
 github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI=
-github.com/codahale/rfc6979 v0.0.0-20141003034818-6a90f24967eb/go.mod h1:ZjrT6AXHbDs86ZSdt/osfBi5qfexBrKUdONk989Wnk4=
 github.com/containerd/cgroups v1.0.4 h1:jN/mbWBEaz+T1pi5OFtnkQ+8qnmEbAr1Oo1FRm5B0dA=
 github.com/containerd/cgroups v1.0.4/go.mod h1:nLNQtsF7Sl2HxNebu77i1R0oDlhiTG+kO4JTrUzo6IA=
 github.com/containerd/console v1.0.3 h1:lIr7SlA5PxZyMV30bDW0MGbiOPXwc63yRuCP0ARubLw=
@@ -390,8 +391,8 @@ github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUn
 github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 h1:fAjc9m62+UWV/WAFKLNi6ZS0675eEUC9y3AlwSbQu1Y=
 github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
 github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
-github.com/digitalocean/godo v1.97.0 h1:p9w1yCcWMZcxFSLPToNGXA96WfUVLXqoHti6GzVomL4=
-github.com/digitalocean/godo v1.97.0/go.mod h1:NRpFznZFvhHjBoqZAaOD3khVzsJ3EibzKqFL4R60dmA=
+github.com/digitalocean/godo v1.98.0 h1:potyC1eD0N9n5/P4/WmJuKgg+OGYZOBWEW+/aKTX6QQ=
+github.com/digitalocean/godo v1.98.0/go.mod h1:NRpFznZFvhHjBoqZAaOD3khVzsJ3EibzKqFL4R60dmA=
 github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI=
 github.com/docker/distribution v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
 github.com/docker/distribution v2.8.2+incompatible h1:T3de5rq0dB1j30rp0sA2rER+m322EBzniBPB6ZIzuh8=
@@ -471,7 +472,6 @@ github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSw
 github.com/flowstack/go-jsonschema v0.1.1/go.mod h1:yL7fNggx1o8rm9RlgXv7hTBWxdBM0rVwpMwimd3F3N0=
 github.com/fluent/fluent-logger-golang v1.9.0 h1:zUdY44CHX2oIUc7VTNZc+4m+ORuO/mldQDA7czhWXEg=
 github.com/fluent/fluent-logger-golang v1.9.0/go.mod h1:2/HCT/jTy78yGyeNGQLGQsjF3zzzAuy6Xlk6FCMV5eU=
-github.com/flynn/go-docopt v0.0.0-20140912013429-f6dd2ebbb31e/go.mod h1:HyVoz1Mz5Co8TFO8EupIdlcpwShBmY98dkT2xeHkvEI=
 github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k=
 github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
 github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw=
@@ -787,11 +787,10 @@ github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLe
 github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
 github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
 github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
 github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
 github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
 github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/pprof v0.0.0-20230228050547-1710fef4ab10 h1:CqYfpuYIjnlNxM3msdyPRKabhXZWbKjf3Q8BWROFBso=
+github.com/google/pprof v0.0.0-20230406165453-00490a63f317 h1:hFhpt7CTmR3DX+b4R19ydQFtofxT0Sv3QsKNMVQYTMQ=
 github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
 github.com/google/s2a-go v0.1.4 h1:1kZ/sQM3srePvKs3tXAvQzo66XfcReoqFpIpIccE7Oc=
 github.com/google/s2a-go v0.1.4/go.mod h1:Ej+mSEMGRnqRzjc7VtF+jdBwYG5fuJfiZ8ELkjEwM0A=
@@ -814,8 +813,8 @@ github.com/googleapis/gax-go/v2 v2.12.0/go.mod h1:y+aIqrI5eb1YGMVJfuV3185Ts/D7qK
 github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg=
 github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g=
 github.com/gookit/color v1.3.6/go.mod h1:R3ogXq2B9rTbXoSHJ1HyUVAZ3poOJHpd9nQmyGZsfvQ=
-github.com/gophercloud/gophercloud v1.2.0 h1:1oXyj4g54KBg/kFtCdMM6jtxSzeIyg8wv4z1HoGPp1E=
-github.com/gophercloud/gophercloud v1.2.0/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM=
+github.com/gophercloud/gophercloud v1.3.0 h1:RUKyCMiZoQR3VlVR5E3K7PK1AC3/qppsWYo6dtBiqs8=
+github.com/gophercloud/gophercloud v1.3.0/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM=
 github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8=
 github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
 github.com/gordonklaus/ineffassign v0.0.0-20210225214923-2e10b2664254/go.mod h1:M9mZEtGIsR1oDaZagNPNG9iq9n2HrhZ17dsXk73V3Lw=
@@ -856,8 +855,8 @@ github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t
 github.com/grpc-ecosystem/grpc-gateway v1.14.5/go.mod h1:UJ0EZAp832vCd54Wev9N1BMKEyvcZ5+IM0AwDrnlkEc=
 github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
 github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 h1:YBftPWNWd4WwGqtY2yeZL2ef8rHAxPBD8KFhJpmcqms=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.2 h1:dygLcbEBA+t/P7ck6a8AkXv6juQ4cK0RHBoh32jxhHM=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.2/go.mod h1:Ap9RLCIJVtgQg1/BBgVEfypOAySvvlcpcVQkSzJCH4Y=
 github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645/go.mod h1:6iZfnjpejD4L/4DwD7NryNaJyCQdzwWwH2MWhCA90Kw=
 github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4=
 github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q=
@@ -866,13 +865,13 @@ github.com/hashicorp/consul/api v1.8.1/go.mod h1:sDjTOq0yUyv5G4h+BqSea7Fn6BU+Xbo
 github.com/hashicorp/consul/api v1.10.1/go.mod h1:XjsvQN+RJGWI2TWy1/kqaE16HrR2J/FWgkYjdZQsX9M=
 github.com/hashicorp/consul/api v1.12.0/go.mod h1:6pVBMo0ebnYdt2S3H87XhekM/HHrUoTD2XXb/VrZVy0=
 github.com/hashicorp/consul/api v1.13.0/go.mod h1:ZlVrynguJKcYr54zGaDbaL3fOvKC9m72FhPvA8T35KQ=
-github.com/hashicorp/consul/api v1.23.0 h1:L6e4v1AfoumqAHq/Rrsmuulev+nd7vltM3k8H329tyI=
-github.com/hashicorp/consul/api v1.23.0/go.mod h1:SfvUIT74b0EplDuNgAJQ/FVqSO6KyK2ia80UI39/Ye8=
+github.com/hashicorp/consul/api v1.24.0 h1:u2XyStA2j0jnCiVUU7Qyrt8idjRn4ORhK6DlvZ3bWhA=
+github.com/hashicorp/consul/api v1.24.0/go.mod h1:NZJGRFYruc/80wYowkPFCp1LbGmJC9L8izrwfyVx/Wg=
 github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
 github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
 github.com/hashicorp/consul/sdk v0.7.0/go.mod h1:fY08Y9z5SvJqevyZNy6WWPXiG3KwBPAvlcdx16zZ0fM=
 github.com/hashicorp/consul/sdk v0.8.0/go.mod h1:GBvyrGALthsZObzUGsfgHZQDXjg4lOjagTIwIR1vPms=
-github.com/hashicorp/consul/sdk v0.14.0 h1:Hly+BMNMssVzoWddbBnBFi3W+Fzytvm0haSkihhj3GU=
+github.com/hashicorp/consul/sdk v0.14.1 h1:ZiwE2bKb+zro68sWzZ1SgHF3kRMBZ94TwOCFRF4ylPs=
 github.com/hashicorp/cronexpr v1.1.1 h1:NJZDd87hGXjoZBdvyCF9mX4DCq5Wy7+A/w+A7q0wn6c=
 github.com/hashicorp/cronexpr v1.1.1/go.mod h1:P4wA0KBl9C5q2hABiMO7cp6jcIg96CDh1Efb3g1PWA4=
 github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
@@ -931,8 +930,8 @@ github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA
 github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
 github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
 github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
-github.com/hashicorp/golang-lru v0.6.0 h1:uL2shRDx7RTrOrTCUZEGP/wJUFiUI8QT6E7z5o8jga4=
-github.com/hashicorp/golang-lru v0.6.0/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
+github.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iPY6p1c=
+github.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
 github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
 github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
 github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
@@ -944,8 +943,8 @@ github.com/hashicorp/memberlist v0.2.2/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOn
 github.com/hashicorp/memberlist v0.3.0/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE=
 github.com/hashicorp/memberlist v0.5.0 h1:EtYPN8DpAURiapus508I4n9CzHs2W+8NZGbmmR/prTM=
 github.com/hashicorp/memberlist v0.5.0/go.mod h1:yvyXLpo0QaGE59Y7hDTsTzDD25JYBZ4mHgHUZ8lrOI0=
-github.com/hashicorp/nomad/api v0.0.0-20230308192510-48e7d70fcd4b h1:EkuSTU8c/63q4LMayj8ilgg/4I5PXDFVcnqKfs9qcwI=
-github.com/hashicorp/nomad/api v0.0.0-20230308192510-48e7d70fcd4b/go.mod h1:bKUb1ytds5KwUioHdvdq9jmrDqCThv95si0Ub7iNeBg=
+github.com/hashicorp/nomad/api v0.0.0-20230418003350-3067191c5197 h1:I5xhKLePXpXgM6pZ4xZNTiurLLS3sGuZrZFFzAbM67A=
+github.com/hashicorp/nomad/api v0.0.0-20230418003350-3067191c5197/go.mod h1:2TCrNvonL09r7EiQ6M2rNt+Cmjbn1QbzchFoTWJFpj4=
 github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
 github.com/hashicorp/serf v0.9.5/go.mod h1:UWDWwZeL5cuWDJdl0C6wrvrUwEqtQ4ZKBKKENpqIUyk=
 github.com/hashicorp/serf v0.9.6/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpTwn9UV4=
@@ -957,8 +956,8 @@ github.com/hashicorp/vault/sdk v0.1.13/go.mod h1:B+hVj7TpuQY1Y/GPbCpffmgd+tSEwvh
 github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM=
 github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM=
 github.com/hashicorp/yamux v0.0.0-20190923154419-df201c70410d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM=
-github.com/hetznercloud/hcloud-go v1.41.0 h1:KJGFRRc68QiVu4PrEP5BmCQVveCP2CM26UGQUKGpIUs=
-github.com/hetznercloud/hcloud-go v1.41.0/go.mod h1:NaHg47L6C77mngZhwBG652dTAztYrsZ2/iITJKhQkHA=
+github.com/hetznercloud/hcloud-go v1.42.0 h1:Es/CDOForQN3nOOP5Vxh1N/YHjpCg386iYEX5zCgi+A=
+github.com/hetznercloud/hcloud-go v1.42.0/go.mod h1:YADL8AbmQYH0Eo+1lkuyoc8LutT0UeMvaKP47nNUb+Y=
 github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM=
 github.com/hjson/hjson-go/v4 v4.0.0/go.mod h1:KaYt3bTw3zhBjYqnXkYywcYctk0A2nxeEFTse3rH13E=
 github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
@@ -976,8 +975,8 @@ github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2
 github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
 github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo=
 github.com/influxdata/influxdb1-client v0.0.0-20200827194710-b269163b24ab/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo=
-github.com/ionos-cloud/sdk-go/v6 v6.1.4 h1:BJHhFA8Q1SZC7VOXqKKr2BV2ysQ2/4hlk1e4hZte7GY=
-github.com/ionos-cloud/sdk-go/v6 v6.1.4/go.mod h1:Ox3W0iiEz0GHnfY9e5LmAxwklsxguuNFEUSu0gVRTME=
+github.com/ionos-cloud/sdk-go/v6 v6.1.6 h1:0n4irdqNska+1s3YMCRhrAqKbibEgQ7SwwhAlHzYT5A=
+github.com/ionos-cloud/sdk-go/v6 v6.1.6/go.mod h1:EzEgRIDxBELvfoa/uBN0kOQaqovLjUWEB7iW4/Q+t4k=
 github.com/jaegertracing/jaeger v1.22.0/go.mod h1:WnwW68MjJEViSLRQhe0nkIsBDaF3CzfFd8wJcpJv24k=
 github.com/jaegertracing/jaeger v1.34.1/go.mod h1:md+YcRcDgMCAgB9qyXl0PdstYiq8fjA8KG5cNuyV2kA=
 github.com/jaegertracing/jaeger v1.35.2/go.mod h1:e7FBVZ14ptsRjwiHEnLyxvOa4bSnZA0BDFE1OcvNiHs=
@@ -985,6 +984,7 @@ github.com/jaegertracing/jaeger v1.36.0/go.mod h1:67uyR2zQgEk7EfguOR3eZOGvGDRzY5
 github.com/jaegertracing/jaeger v1.38.0/go.mod h1:4MBTMxfCp3d4buDLxRlHnESQvTFCkN16OUIeE9BEdl4=
 github.com/jaegertracing/jaeger v1.41.0 h1:vVNky8dP46M2RjGaZ7qRENqylW+tBFay3h57N16Ip7M=
 github.com/jaegertracing/jaeger v1.41.0/go.mod h1:SIkAT75iVmA9U+mESGYuMH6UQv6V9Qy4qxo0lwfCQAc=
+github.com/jarcoal/httpmock v1.3.0 h1:2RJ8GP0IIaWwcC9Fp2BmVi8Kog3v2Hn7VXM3fTd+nuc=
 github.com/jcmturner/aescts/v2 v2.0.0 h1:9YKLH6ey7H4eDBXW8khjYslgyqG2xZikXP0EQFKrle8=
 github.com/jcmturner/aescts/v2 v2.0.0/go.mod h1:AiaICIRyfYg35RUkr8yESTqvSy7csK90qZ5xfvvsoNs=
 github.com/jcmturner/dnsutils/v2 v2.0.0 h1:lltnkeZGL0wILNvrNiVCR6Ro5PGU/SeBvVO/8c/iPbo=
@@ -1094,8 +1094,8 @@ github.com/lightstep/go-expohisto v1.0.0 h1:UPtTS1rGdtehbbAF7o/dhkWLTDI73UifG8Lb
 github.com/lightstep/go-expohisto v1.0.0/go.mod h1:xDXD0++Mu2FOaItXtdDfksfgxfV0z1TMPa+e/EUd0cs=
 github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM=
 github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4=
-github.com/linode/linodego v1.14.1 h1:uGxQyy0BidoEpLGdvfi4cPgEW+0YUFsEGrLEhcTfjNc=
-github.com/linode/linodego v1.14.1/go.mod h1:NJlzvlNtdMRRkXb0oN6UWzUkj6t+IBsyveHgZ5Ppjyk=
+github.com/linode/linodego v1.16.1 h1:5otq57M4PdHycPERRfSFZ0s1yz1ETVWGjCp3hh7+F9w=
+github.com/linode/linodego v1.16.1/go.mod h1:aESRAbpLY9R6IA1WGAWHikRI9DU9Lhesapv1MhKmPHM=
 github.com/logrusorgru/aurora v0.0.0-20181002194514-a7b3b318ed4e/go.mod h1:7rIyQOR62GCctdiQpZ/zOJlFyk6y+94wXzv6RNZgaR4=
 github.com/lucasb-eyer/go-colorful v1.0.2/go.mod h1:0MS4r+7BZKSJ5mw4/S5MPN+qHFF1fYclkSPilDOKW0s=
 github.com/lucasb-eyer/go-colorful v1.0.3/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0=
@@ -1149,6 +1149,7 @@ github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5
 github.com/matttproud/golang_protobuf_extensions v1.0.2/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
 github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
 github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
+github.com/maxatome/go-testdeep v1.12.0 h1:Ql7Go8Tg0C1D/uMMX59LAoYK7LffeJQ6X2T04nTH68g=
 github.com/mbilski/exhaustivestruct v1.2.0/go.mod h1:OeTBVxQWoEmB2J2JCHmXWPJ0aksxSUOUy+nvtVEfzXc=
 github.com/mgechev/dots v0.0.0-20190921121421-c36f7dcfbb81/go.mod h1:KQ7+USdGKfpPjXk4Ga+5XxQM4Lm4e3gAogrreFAYpOg=
 github.com/mgechev/revive v1.0.3/go.mod h1:POGGZagSo/0frdr7VeAifzS5Uka0d0GPiM35MsTO8nE=
@@ -1156,8 +1157,8 @@ github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3N
 github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso=
 github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI=
 github.com/miekg/dns v1.1.43/go.mod h1:+evo5L0630/F6ca/Z9+GAqzhjGyn8/c+TBaOyfEl0V4=
-github.com/miekg/dns v1.1.51 h1:0+Xg7vObnhrz/4ZCZcZh7zPXlmU0aveS2HDBd0m0qSo=
-github.com/miekg/dns v1.1.51/go.mod h1:2Z9d3CP1LQWihRZUf29mQ19yDThaI4DAYzte2CaQW5c=
+github.com/miekg/dns v1.1.53 h1:ZBkuHr5dxHtB1caEOlZTLPo7D3L3TWckgUUs/RHfDxw=
+github.com/miekg/dns v1.1.53/go.mod h1:uInx36IzPl7FYnDcMeVWxj9byh7DutNykX4G9Sj60FY=
 github.com/minio/highwayhash v1.0.1/go.mod h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLTk+kldvVxY=
 github.com/minio/highwayhash v1.0.2/go.mod h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLTk+kldvVxY=
 github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible h1:aKW/4cBs+yK6gpqU3K/oIwk9Q/XICqd3zOX/UFuvqmk=
@@ -1268,13 +1269,11 @@ github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+W
 github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
 github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg=
 github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
-github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
 github.com/onsi/ginkgo v1.14.2/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
 github.com/onsi/ginkgo v1.16.2/go.mod h1:CObGmKUOKaSC0RjmoAK7tKyn4Azo5P2IWuoMnvwxz1E=
+github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc=
 github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
-github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
-github.com/onsi/ginkgo/v2 v2.0.0/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c=
-github.com/onsi/ginkgo/v2 v2.9.5 h1:+6Hr4uxzP4XIUyAkg61dWBw8lb/gc4/X5luuxN/EC+Q=
+github.com/onsi/ginkgo/v2 v2.11.0 h1:WgqUCUt/lT6yXoQ8Wef0fsNn5cAuMK7+KT9UFRz2tcU=
 github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
 github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
 github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
@@ -1284,193 +1283,192 @@ github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoT
 github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
 github.com/onsi/gomega v1.10.4/go.mod h1:g/HbgYopi++010VEqkFgJHKC09uJiW9UkXvMUuKHUCQ=
 github.com/onsi/gomega v1.13.0/go.mod h1:lRk9szgn8TxENtWd0Tp4c3wjlRfMTMH27I+3Je41yGY=
-github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY=
-github.com/onsi/gomega v1.18.1/go.mod h1:0q+aL8jAiMXy9hbwj2mr5GziHiwhAIQpFmmtT5hitRs=
-github.com/onsi/gomega v1.27.7 h1:fVih9JD6ogIiHUN6ePK7HJidyEDpWGVB5mzM7cWNXoU=
+github.com/onsi/gomega v1.27.10 h1:naR28SdDFlqrG6kScpT8VWpu1xWY5nJRCF3XaYyBjhI=
 github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk=
-github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsemfexporter v0.82.0 h1:wiri43hEKmq8pXfbRnrHEvKgN8nsZEeqxmTDV5GsHdg=
-github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsemfexporter v0.82.0/go.mod h1:LkTbjb66U2BXWWX2Jufayiq6+zbA81n/e2WXBDShLSY=
-github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsxrayexporter v0.82.0 h1:+KaCvd07PhWlI63T+Lsygo1fodbmRM35AgEDtlzw96A=
-github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsxrayexporter v0.82.0/go.mod h1:p6dJC7m2ET6cA3kCYBY6RqtHlZB0lM2YOeEteqEtNLE=
-github.com/open-telemetry/opentelemetry-collector-contrib/exporter/carbonexporter v0.82.0 h1:roc3T9BN5Pe37o9ULVVaWXc1FHeiT5VHt4D+aDHmD2c=
-github.com/open-telemetry/opentelemetry-collector-contrib/exporter/carbonexporter v0.82.0/go.mod h1:TlxzEM1Ka755DfpbfXbaI1f0hI92BtWTivDQ4hjET38=
-github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter v0.82.0 h1:qTwIwx2gAnxBK4yrSJr3dayohzbjeZdSsAH90awqMO8=
-github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter v0.82.0/go.mod h1:PEiq66gxQahvL/fuH76TmqtIvXTzDELXLunxefNgNsA=
-github.com/open-telemetry/opentelemetry-collector-contrib/exporter/dynatraceexporter v0.82.0 h1:2fLuZoX0ZqTDMwV1fgbu4rMlogO086dYrGop40brLVY=
-github.com/open-telemetry/opentelemetry-collector-contrib/exporter/dynatraceexporter v0.82.0/go.mod h1:vGIBvYpVqmf6qOBFQ7oQpcmrkTYb0ivombpMEKhvA0c=
-github.com/open-telemetry/opentelemetry-collector-contrib/exporter/fileexporter v0.82.0 h1:1PLJXk12CueltS9JA3X2BNzMKbYD3B/VEKTJBUR5Iy0=
-github.com/open-telemetry/opentelemetry-collector-contrib/exporter/fileexporter v0.82.0/go.mod h1:5m7wwTnnssL2m9zzWwaK40t6VZLUlC7Tbx3hsSyrWQM=
-github.com/open-telemetry/opentelemetry-collector-contrib/exporter/jaegerexporter v0.82.0 h1:vMqXTnkiMGja1pfM7hM1R4kteuo/RclSeHm8uykVWUg=
-github.com/open-telemetry/opentelemetry-collector-contrib/exporter/jaegerexporter v0.82.0/go.mod h1:RZqqs3F8a+uAdNba7oD3dfuSxHOYvXelzQ0RvhsFbP0=
-github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter v0.82.0 h1:6Zj2gzJl5sefJFQlCvL46kLyoozBstUQiB6wsPkPU2E=
-github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter v0.82.0/go.mod h1:1G20L9KOMqKQdqtNI0X4zfkMQ151bk4z3YUbx1Mcjh4=
-github.com/open-telemetry/opentelemetry-collector-contrib/exporter/loadbalancingexporter v0.82.0 h1:knTNY7nKG8KEJYijNFAJglRHu3j2z/I2wh5HyWuaatY=
-github.com/open-telemetry/opentelemetry-collector-contrib/exporter/loadbalancingexporter v0.82.0/go.mod h1:x8Dzlake6r6h1MAD24tL97bSt6SFLxpsMfKSRD0gduA=
-github.com/open-telemetry/opentelemetry-collector-contrib/exporter/logzioexporter v0.82.0 h1:eFjoIXoEBjo3EXNQPJQcJE48M/K4p+v28Q6Qcarrrsw=
-github.com/open-telemetry/opentelemetry-collector-contrib/exporter/logzioexporter v0.82.0/go.mod h1:0oAPVfaW0coeTHM7MNzhG2cNewtpcmbNpxbJ3is5ezw=
-github.com/open-telemetry/opentelemetry-collector-contrib/exporter/opencensusexporter v0.82.0 h1:6X3kCjPZ+EB7Yi1z4jM4FMjcY4twr5msK2DM91IWcOk=
-github.com/open-telemetry/opentelemetry-collector-contrib/exporter/opencensusexporter v0.82.0/go.mod h1:tyzL9I69xSJPlxm1OeeJd5sNXg6m5XGcJCuIYQSXvQY=
-github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusexporter v0.82.0 h1:tEhzqPgXasHxP3EwkIYmI1UcjwLP6Ni8sME5hdfyTa8=
-github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusexporter v0.82.0/go.mod h1:YLJ4+uqiVfA9AtUPNzZOMoUl3FFN5AiMkjXzqgbUq4M=
-github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter v0.82.0 h1:Pbdhyt4aVha6QgscPYRwD70/QlHJJpaGBJkHbH0D57s=
-github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter v0.82.0/go.mod h1:xF7jvMckjRKbE+92Q0BXqYCvPrOOGrwVKeoQsZ37dXA=
-github.com/open-telemetry/opentelemetry-collector-contrib/exporter/sapmexporter v0.82.0 h1:L+A8ZQzzsV+Rg02ZJS1ugdzEuiNtJr0uONVD0NvgrSs=
-github.com/open-telemetry/opentelemetry-collector-contrib/exporter/sapmexporter v0.82.0/go.mod h1:tua7yWaFweDM9mzRDFP1DNhsNVMYbRz/Rt3k52JBexc=
-github.com/open-telemetry/opentelemetry-collector-contrib/exporter/signalfxexporter v0.82.0 h1:i4LnK+f/XbJzGlCrYnLRg/EFbVYYXcmSF88pVN5loGI=
-github.com/open-telemetry/opentelemetry-collector-contrib/exporter/signalfxexporter v0.82.0/go.mod h1:4xBWenKUqvfXOwlBLX5nbr0YIMSsZne4FGdn+lTyajk=
-github.com/open-telemetry/opentelemetry-collector-contrib/exporter/splunkhecexporter v0.82.0 h1:5eJb+8ibFf/3CZpSnob9P+Yc+ru7UY3ypAbYJ879jpI=
-github.com/open-telemetry/opentelemetry-collector-contrib/exporter/zipkinexporter v0.82.0 h1:zk5wY94//Qhqb7Y/G92nmyyMEJXGl+ksz8FRMuJ3VhI=
-github.com/open-telemetry/opentelemetry-collector-contrib/exporter/zipkinexporter v0.82.0/go.mod h1:aVs122Dh9bfoCTicNu2wXbsbv1gamxAtkwyMdnDwPsg=
-github.com/open-telemetry/opentelemetry-collector-contrib/extension/awsproxy v0.82.0 h1:t3pahsAi1gdqpJoVk392UZfw3ZbDyIzXFy89yPjT1MA=
-github.com/open-telemetry/opentelemetry-collector-contrib/extension/awsproxy v0.82.0/go.mod h1:/D1YL6TN6QY531RWxgimwbhwNh8Sjt+/zlhn9sRRbt4=
-github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension v0.82.0 h1:xzz0A7cxxtvraU22ZleikEc32ghZ3GMCDiCevs+37Zw=
-github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension v0.82.0/go.mod h1:eaA8YuyzBP1HikJUGYlS+3cbxndrCIAEYBU/1yMQNQo=
-github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/ecsobserver v0.82.0 h1:Qixnpq/0PQw9rKzGj7Eci4F3ucZRG46mLOL54a3u2Uo=
-github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/ecsobserver v0.82.0/go.mod h1:9ne2U15XVRwTRlUn5fvSDBDeLU6RH9YVeHOdFrj6kyw=
-github.com/open-telemetry/opentelemetry-collector-contrib/extension/pprofextension v0.82.0 h1:RhqTW7tIcDSc6bWt1TTjlMsHfAYgIiXrGHbk+CYOcSg=
-github.com/open-telemetry/opentelemetry-collector-contrib/extension/pprofextension v0.82.0/go.mod h1:JvWloYbaiC45XUV9odYlMo/y19mN1nil+OwJetJu870=
-github.com/open-telemetry/opentelemetry-collector-contrib/extension/sigv4authextension v0.82.0 h1:d+pQzGHday6ZwW0FOzm/vzhHl/KKvZLWoYEXMeubnvQ=
-github.com/open-telemetry/opentelemetry-collector-contrib/extension/sigv4authextension v0.82.0/go.mod h1:q+Dw/LtMZyiOhper76RZxVwIB19kZigsBADyXdyu+30=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/awsutil v0.82.0 h1:BxLqcuI8tf2R2OxARpCXF6cfZ0cVO76cohWclajF5jY=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/awsutil v0.82.0/go.mod h1:gaoep2XPjGkrGY1XH41ivdc3JaqBYLvZWXK6KffEsck=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/containerinsight v0.82.0 h1:F3CFU/uB6lov7AZ259rbbjKq9OYXr7KeOyCCzPdIEbA=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/containerinsight v0.82.0/go.mod h1:70uB2fD1x/ldzsXvuhHG1bFZeYhPouYpkrDWyO0sQS8=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/cwlogs v0.82.0 h1:/fP4YLK4SNJRmz5jOtT57LKbdNbEuvBL3FC56uxASsw=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/cwlogs v0.82.0/go.mod h1:tvp98P9ZEF9K0wUzyZSVLhxlaJHzBbzI0C5exIoD844=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/ecsutil v0.82.0 h1:9wJBkAfq0xgT++d0t6p79ZJKvGjjuy0ZlcXcl8lDEXU=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/ecsutil v0.82.0/go.mod h1:YvDlJ+zVkFiiPMEY6QP3E4ILXQrcZbk23KMW4GPGrVg=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/k8s v0.82.0 h1:djhO7YMcHKPLWUS5zEenBsbDwDA9XHW5OjStniJVXBA=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/k8s v0.82.0/go.mod h1:N8HzE0N2g9HlKn6KYD1ficMw2so5jLvy21uAmtf8NLI=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/metrics v0.82.0 h1:kcoYbBAQAt4InjM6BzjgCxHq/YUX5rXb0fMY00lbfNE=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/metrics v0.82.0/go.mod h1:MilkKO7o8LmdxM8Lgi7vBVU8qxrhZFuWPy2kPlL5Ho0=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/proxy v0.82.0 h1:W0UXQrlJhK5Gn2Ee1Ebidx+MCbTG39kH/hRfMAKUmM0=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/proxy v0.82.0/go.mod h1:E34bYW2k07QZ+uk3TBhctjQ9kltfRkPjcQkIo9ST4jg=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/xray v0.82.0 h1:z2cI8P5guS7wB5Zmvsq+4kNiAwi9k7wS2ZG/bWK28Ak=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/xray v0.82.0/go.mod h1:tT09nnraDrujjx9eGDjeuG0Lme3dwj3zSmAJERQgcEk=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.82.0 h1:UQkYg2ksIEbUibN97SPMITF/1wSRynAn+aD7tC3mQKQ=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.82.0/go.mod h1:5VGtCES+3CySFeCO8hFzrxVgeMI8L6cINAlqr0vrv/A=
+github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsemfexporter v0.83.0 h1:xuJ7AgUJqHqBWaFNeph0fjID+4+KgBgv16RJr0WgfMA=
+github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsemfexporter v0.83.0/go.mod h1:3aJHSLrF4WO2LecdoPBcbKeCKhYVONPersDFOMLy1Vs=
+github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsxrayexporter v0.83.0 h1:gnc6xAyFcjhP8C3ebKXSyJHPinM+n2aiT507qfFxGWU=
+github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsxrayexporter v0.83.0/go.mod h1:n3ry12plQOS4XbIYpyeqHJKVl1y7eVZSLKt2M7v00Dk=
+github.com/open-telemetry/opentelemetry-collector-contrib/exporter/carbonexporter v0.83.0 h1:gDA4OLeR+1los/r0HOk3QHYSyEKteSCWvy2K4coAByw=
+github.com/open-telemetry/opentelemetry-collector-contrib/exporter/carbonexporter v0.83.0/go.mod h1:gDdbegXiz2J1jRqt40cZiRng/cY3PRe+oLAwHKNGPSs=
+github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter v0.83.0 h1:RahPWJc7FILuSw6Nr5LjjFqhzhRoVp4iiPMQppUnA+0=
+github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter v0.83.0/go.mod h1:OxipJxncCOdyxT1rPl+iVHRAov5JepMgJ9NkuAPmJgM=
+github.com/open-telemetry/opentelemetry-collector-contrib/exporter/dynatraceexporter v0.83.0 h1:ZSrG3KAWvc4fZA0LPj+uv1qwpovVuWDXaj18ERayib4=
+github.com/open-telemetry/opentelemetry-collector-contrib/exporter/dynatraceexporter v0.83.0/go.mod h1:eeaXESKotV8YV/2Rv4vJ+VAJ3bkcq4WKGWEz9AkZMMk=
+github.com/open-telemetry/opentelemetry-collector-contrib/exporter/fileexporter v0.83.0 h1:GoOSCUUy3pNrVfu1sCzXQCr1/dKDFtgYBza6gCaMsfI=
+github.com/open-telemetry/opentelemetry-collector-contrib/exporter/fileexporter v0.83.0/go.mod h1:vw5V9r2TOdcnL1kAIVARKtEa67rE90qEm0AH/eS7s/Y=
+github.com/open-telemetry/opentelemetry-collector-contrib/exporter/jaegerexporter v0.83.0 h1:NVPfnIP870JbKs99pvYs6lvu3a0YG9zbiawX4hbWy3Y=
+github.com/open-telemetry/opentelemetry-collector-contrib/exporter/jaegerexporter v0.83.0/go.mod h1:MZZc7q2ISS95bCH2Oo13aLAynr2n+rWOeUollKxnQm4=
+github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter v0.83.0 h1:UY9GbD8VBY//agLPuCejj/Fqjf1uAWqT5LCpv11UcDY=
+github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter v0.83.0/go.mod h1:33K8WHr2YXDG1/vK+01BiMefL2Jjwrt4c4zli3Vqp8o=
+github.com/open-telemetry/opentelemetry-collector-contrib/exporter/loadbalancingexporter v0.83.0 h1:LzwGcOtWKNEoLsyZ8B7fdb73x7zEbyz5ghVdkpJqANg=
+github.com/open-telemetry/opentelemetry-collector-contrib/exporter/loadbalancingexporter v0.83.0/go.mod h1:fLGts4FyYaBEIxp+7Ag35Gq0bEks5L0tu2qhRFi/3KY=
+github.com/open-telemetry/opentelemetry-collector-contrib/exporter/logzioexporter v0.83.0 h1:zuOyT4t0uB0Sqw4PnsOezXUyOn+kWp+HslxAiGmB5HE=
+github.com/open-telemetry/opentelemetry-collector-contrib/exporter/logzioexporter v0.83.0/go.mod h1:09NFg+NzQA8p1CqVSuW52HZHNvdOIFVrSV17UANM9KI=
+github.com/open-telemetry/opentelemetry-collector-contrib/exporter/opencensusexporter v0.83.0 h1:EXm69xbvQxXhjdUwmwJFCCFRYRUpivGRpONm+DyoBD8=
+github.com/open-telemetry/opentelemetry-collector-contrib/exporter/opencensusexporter v0.83.0/go.mod h1:orWUFaocRVBHqzn316qgLGwnwx6enXVHFZ0AlT3ZujQ=
+github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusexporter v0.83.0 h1:GUovWPxEil/VpfkfPp9KBmidnZHX9SdD5vsfqzcssCM=
+github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusexporter v0.83.0/go.mod h1:zHOElC2vEg1cUv2P2Pwgd/YhYJTQEbOpJohpoZGWRfk=
+github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter v0.83.0 h1:980Ae/Y7mZndgWhngpEi8u5KIVFYQIF4NFpkLEqCZEI=
+github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter v0.83.0/go.mod h1:u2DDzR8+AdbeeMf7o47qqVQcPfX8bzm42/VAq7I1E2s=
+github.com/open-telemetry/opentelemetry-collector-contrib/exporter/sapmexporter v0.83.0 h1:8qQgO1UgutKov4JFFIEf0WNS1moXK6c+z2vznQNcyf0=
+github.com/open-telemetry/opentelemetry-collector-contrib/exporter/sapmexporter v0.83.0/go.mod h1:KkzODSIS07pR9yIYXyq1+CvEDILRIv5R7RmemG6w36c=
+github.com/open-telemetry/opentelemetry-collector-contrib/exporter/signalfxexporter v0.83.0 h1:s2lJhFQl2QTKFNnab9Wk+9hIPOUV2dLV3TQ8GoT+yKI=
+github.com/open-telemetry/opentelemetry-collector-contrib/exporter/signalfxexporter v0.83.0/go.mod h1:M8tIfUA1ULvRZp4wt5yTWI3dfJ9KMNEQmdyHlCxjaX4=
+github.com/open-telemetry/opentelemetry-collector-contrib/exporter/splunkhecexporter v0.83.0 h1:sobY+f0KV1l5lrzzMx8Pssoa3UC1s0B1lfISxR951uU=
+github.com/open-telemetry/opentelemetry-collector-contrib/exporter/zipkinexporter v0.83.0 h1:fseOiXFQtcyP6DFz8emPJyazIm8dX7Hg2qqlpJeD7BE=
+github.com/open-telemetry/opentelemetry-collector-contrib/exporter/zipkinexporter v0.83.0/go.mod h1:FtZpDmvyPusD+MYHyiPS8RTK4aiRpeM9sw0ELEoorRk=
+github.com/open-telemetry/opentelemetry-collector-contrib/extension/awsproxy v0.83.0 h1:lOlIrn4npBaOTnJpjvH+MzqOjusgwmcEmtKGcTWmCzk=
+github.com/open-telemetry/opentelemetry-collector-contrib/extension/awsproxy v0.83.0/go.mod h1:YXAQndCP4LY42rn22ELjjnTmqbsxS2yrVerAzsCisQg=
+github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension v0.83.0 h1:Kjh+MVsvlV/vmgwnN667ln57kQiz6ZfYxyk+IONS5lA=
+github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension v0.83.0/go.mod h1:CMhTUJ14v2P/roC9GrV7fdVS7MCtXwOjw+JprJrfFKM=
+github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/ecsobserver v0.83.0 h1:0mq2UIQlFGuaZANvXaYyMISx44Ft0OEaMR1ByOjhdDk=
+github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/ecsobserver v0.83.0/go.mod h1:ds5yO3Gq5QF5x/lT3zYbQoQCrXdBfTVhYpAV+xINUG8=
+github.com/open-telemetry/opentelemetry-collector-contrib/extension/pprofextension v0.83.0 h1:zrwghw5g2Ma5u+mGo/PyodymkAQiAcPLs4Quq4MAZoU=
+github.com/open-telemetry/opentelemetry-collector-contrib/extension/pprofextension v0.83.0/go.mod h1:WQ5L7YwarCjW6beHMZy+2g/+HfD9WQOooTiwoQcQR/g=
+github.com/open-telemetry/opentelemetry-collector-contrib/extension/sigv4authextension v0.83.0 h1:jM9FjwI22dpD+p/0In2FA8ZVlH9EWiH5gJw8dUMQ3Ik=
+github.com/open-telemetry/opentelemetry-collector-contrib/extension/sigv4authextension v0.83.0/go.mod h1:rNSAmQoUAr5c0KpbxBulgo52csvKoM+qKueLFC9ayvA=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/awsutil v0.83.0 h1:K2rND8mgBGWDqFsBwjKdphBTBjjbBkcw84lkDVvv/ls=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/awsutil v0.83.0/go.mod h1:0pvuvcSdAJ7RopqLmtg1/DPpZH+xazbMACKMVHalCoo=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/containerinsight v0.83.0 h1:d9Wvwut7FoNSxfXF5SJABNppu9xGxQnf+I1IuBnGIjM=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/containerinsight v0.83.0/go.mod h1:jxeFqzSwMrDmEGPyCGhCyHY+6J+fA/jXRoa4CEzZ/W8=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/cwlogs v0.83.0 h1:RDQ862swYY3oAqtQE5UEf3gVo2K6STG76CgBurJIY08=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/cwlogs v0.83.0/go.mod h1:u2XpS95ZgRXO8yrTjw6eX9IesTd5aNANP/x3jVIrqc8=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/ecsutil v0.83.0 h1:pBdqndULnylTtLYo9Zg3C29FNZNMniZXIj31jYfjcTI=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/ecsutil v0.83.0/go.mod h1:BsWcWAne29O8gfMOvkPR4+OTshTvqpg+3ERZS2hnFNQ=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/k8s v0.83.0 h1:WrSJTAoVnqL74klHYl+ylGcWzw5B/iuHsICDG4Wg+pg=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/k8s v0.83.0/go.mod h1:gGNH996LguY9CzDK7xyPqjCPJkfRKg8Y3WZA0jgnw00=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/metrics v0.83.0 h1:1QgMSYj3rKm3BftrCSDVIf+zXYxdv3NNCkI+/zePTQM=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/metrics v0.83.0/go.mod h1:WAScm+oitM87OWSy+pPAC6eCzg3xhYz3VBSef2+zV60=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/proxy v0.83.0 h1:ovGXkuMEqzGjDAU+CC7BeAsTL/qwmmOJhkWrv1AmA1U=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/proxy v0.83.0/go.mod h1:zhysB7B8qSgvKI+TvNqAELwdCQ7I26IzuFNAWC+tILg=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/xray v0.83.0 h1:PLzi6IkVCbnNc3dyrmBSBqEng87LWCNca2np21XI2hQ=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/xray v0.83.0/go.mod h1:WWKANqgoMjOyn8yKjF1oEhpss9bmfiyWH1+mkE/y9m4=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.83.0 h1:pzq9yVMgLzeo8ZsgtjDzsR+B/dt+Fc3FROAu9SV+Qu4=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.83.0/go.mod h1:6XJ+dV/QjwFJf55/2nnVfVqA+qwa8Y/iYR6taPJN7n0=
 github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.52.0/go.mod h1:CIX1MWy+2JYdeYhqjK89vrRpCGbz6LTLinp+SM8kZyo=
 github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.54.0/go.mod h1:CSe1wsnLhSAEgAEXLfi8wTyl0VwPVkq7925aV8xm0oM=
 github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.57.2/go.mod h1:xPchY5YNOL9jr6phVkJEvkEakMYr8HMD4uGYEoKXIek=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.82.0 h1:0b6glbENAwPdasKKVOgpR/EaZG1sJhsUfXCRiwZ0drU=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.82.0/go.mod h1:MKnM9GFqPz4HY4NQDDao+dIjZz4BvThAijuJuPC8NOI=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/docker v0.82.0 h1:sw413Qe/67o0L35OeJEeySjzSAvVbY0jwhSkgNdTWmE=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter v0.82.0 h1:czYBWuiriQyD/4UI61U/eAogi7qnhk9AGreZez20t0Q=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter v0.82.0/go.mod h1:tiYWtXrv4+T9L+mo5hdzMiKN25rg7sB2tRIHUqyhF5U=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8sconfig v0.82.0 h1:5aL7mnIupfnfyO6izVxwdOArKbJaAzNqH/W//2ux0kE=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8sconfig v0.82.0/go.mod h1:F9orevxYo4hFXEYEbhn85znnIUWv1y++MKkXHabZZ58=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8stest v0.82.0 h1:ENaPzCM8oHWzwzA1Fj6dl/1zGOh1UC9wb2f17jh45aA=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/kubelet v0.82.0 h1:ms9AyP7vpW4CtsPKibY2kp/+kWr4mtBeF3TsgC66vVk=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/kubelet v0.82.0/go.mod h1:IEmBxonukahtUZUZLsRuUeFNQCsqIrtyWEFWU/CKyU8=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/metadataproviders v0.82.0 h1:EFEWFZNTCTM2UVItheh1f/rAoZcVVNhTLk4xevSSbUg=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/metadataproviders v0.82.0/go.mod h1:xbuPPTg7fSGwlMaM9iEjPdob6MH7Nd6KzJ//Qd0xMFs=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/sharedcomponent v0.82.0 h1:0Hdh8jj6biwzWzdF21c6XYqVhx0YMKF2OVnWhusAoFs=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/sharedcomponent v0.82.0/go.mod h1:UEmb9zd7Jf9KKc6Tmrh7FXlh0IO9FzSumUWzmEW+ANs=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/splunk v0.82.0 h1:0AqiqVGbSnwz6n8CYy+/r0dJz95rqif/ctFlUr44FLw=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/splunk v0.82.0/go.mod h1:lFuju3wV7f/AZRVAyMYqA9XCtmQ66VuBR1XPC4mxVRg=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/batchperresourceattr v0.82.0 h1:a0WFk4cpk7HuYi3CcKo44z/gY88XN3EMJTq7gW6Nj4A=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/batchperresourceattr v0.82.0/go.mod h1:aZdFtT+ay8aNiySaQw41KQvtRteKj2Eym63Cc0PWElQ=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/batchpersignal v0.82.0 h1:0ayceOkG4qqLiwgttI7eJYCxXUy2/LGC3IxADCRYjS0=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/batchpersignal v0.82.0/go.mod h1:Y2To/bsz97PpnjTRSIj1IfDECx0hBpEnfH2DJXejimE=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/experimentalmetricmetadata v0.82.0 h1:noxLHxoQqYt3WO3Z2HpUExyYG7l4fuqC0FyqRPYb+BY=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/experimentalmetricmetadata v0.82.0/go.mod h1:umq1KOdkQa2+djdxtxHmLigyFtLVqM7QXGeP3/s3cHA=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl v0.82.0 h1:2efL2SE/dndrTLPQcpFzrsIJpYw0i3bkFG0n40xnsQI=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl v0.82.0/go.mod h1:tqP4R7pPk5M0v0j8nP5h2o1fUqofC2kSrirzkwQW7p0=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.82.0 h1:wBX6PvwO5mopN+uuVU1pyfl54OdrrRT+VPRCyl22O1A=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.82.0 h1:zSQ0EolsXY3F18kFwEpqAkLc5C2/DE0vbFS3QfMpsDc=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.82.0/go.mod h1:wbgo9BklRN8M4Mi+76mo9bMVQY2C5gL/rPKwePQL3l0=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourcetotelemetry v0.82.0 h1:3S5WRpygfai9pfgt66oE/ppOEziBlL4NhuIlhb2qXkk=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourcetotelemetry v0.82.0/go.mod h1:BJji1qFT5xqu4osJjgwWYg2wO+FKjE1R4zeSxTcY5W0=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza v0.82.0 h1:NbSJ/XsjeyiKSjn/f3eNp3HF3eKeXLfJLZvjbpV+P+s=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.83.0 h1:rwaJhPhZf/4A0exUs4cjALn6jnFU2QAL5cSJ/FwzcsE=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.83.0/go.mod h1:DaoCKBiconWYmXeexLiQeVOWLWbo4N5vJdVs+9X4V+c=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/docker v0.83.0 h1:dFsDXpyWukqXZRLw1GwG6npvLaTXuOQbVJ/57IP9LrM=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter v0.83.0 h1:s7sUZ3/LCX3Tr0C0FfTwE3X9GwTeTBskoMaLlqPv8Kw=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter v0.83.0/go.mod h1:3bOPivfVZIaylsOk9yhWGNeobT0Pn25UZs4OnuC+azA=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8sconfig v0.83.0 h1:KllNzprs78NOFj4pLDu3hVImiJNiG3v9Hti+mvMpOBA=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8sconfig v0.83.0/go.mod h1:oOJY5unMZP3pPkcZl2VdBOiJzS+8xh4o9oq5NEglCXc=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8stest v0.83.0 h1:lzm0ei+Q+jToNok1HM1YPC5sH+2LuWTwf5NhCztCgdk=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/kubelet v0.83.0 h1:wuArO/CYSrTcpO4h/3vfLC0Li4vuVSeERxz4OOWTPh4=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/kubelet v0.83.0/go.mod h1:rWbscJhWZAr/o+1RAZAKuXl2sGowDcs9jbR/DYQvysg=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/metadataproviders v0.83.0 h1:tNUrUu6UFAycjz6NtL8/Zz0LF/2UpYvW4PL5xIYtw0I=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/metadataproviders v0.83.0/go.mod h1:3UHvuM+X9vt8DXR3t/lt8JdpYhkVleQqc2SzvXLcfy0=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/sharedcomponent v0.83.0 h1:5TbtbiKZMwDVf1sEpN14Y4wqwANRpzmGWR0yLB4fNKU=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/sharedcomponent v0.83.0/go.mod h1:tGB4+26zqjQRyHhxMWy/YZFOzWQwqm9OS1Q+jKHVabw=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/splunk v0.83.0 h1:s/mJMnap31p/fM/DGj3GWkubQRmaHMjo0RwDz0iAjl4=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/splunk v0.83.0/go.mod h1:J3PBJi93/0NuvJGCSugg0Dt38G/LqTrgL5t9cFl4Q/A=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/batchperresourceattr v0.83.0 h1:xvFXYVkdzUsIuoi8asZU2Hm59msjdFbnp9gxydplIQo=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/batchperresourceattr v0.83.0/go.mod h1:/BdkcVWo8+/yO//dRT30o/eNgKfu6EGtAby00vl0VPw=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/batchpersignal v0.83.0 h1:/qrROLCRzwIVrDVnEo9BOAGLzRIN8eCUXCO76Wi00Bs=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/batchpersignal v0.83.0/go.mod h1:RBkF+g8DRJs+k+JhIVAxnUW+a7vlCajpWNZQ2yD61sU=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/experimentalmetricmetadata v0.83.0 h1:qhLG2aVClSG2RMO2ERDfoUFn0YhHv0VhRoYJGyNXyEM=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/experimentalmetricmetadata v0.83.0/go.mod h1:IdqHQZWFAyMN5/yHz8oFts8AVj2osf2b6DTvc44IMLI=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl v0.83.0 h1:6DE+t1L3OjJMouOo+Ss5w1W/IJkPNFFJDb8f534eFnc=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl v0.83.0/go.mod h1:uL+EJyqlx798NEB4FuY+05IZY9aXSbw5f+2YFNvOA0A=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.83.0 h1:/6X4L5BrCyy5Uz/jnLwkiv9E6VfuJl9EFJztWtkWvps=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.83.0 h1:yEuD/gKHNVMwjT4mRZAeUVCe/VO2tqeb5+aFim5jQJs=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.83.0/go.mod h1:WoSzL9NAVwvEy1lMwM2utn68g0Khoxm591WliToPnDQ=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourcetotelemetry v0.83.0 h1:e2j+kbNwZ1AAfh2RexJz8a07E/36IUmGW9SID7JtK+g=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourcetotelemetry v0.83.0/go.mod h1:fxZoTFjoKeAwXk7fiFTLbB6ptMk8vgj97e3nVyE/fJw=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza v0.83.0 h1:Ru7vas+V32Py0ld/jaKAAeCfCL1PqOuMmhG4gX/1GR0=
 github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.52.0/go.mod h1:tU7s/ki/QePSIZ7ExYGWLMb9erCAt66brQHBEHxr8HU=
 github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.54.0/go.mod h1:uWFgO68sLEqWrQxL8rws1CkB8EUNQaedP3FWI1gMJOU=
 github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.57.2/go.mod h1:4rFuWKMbzM9H39RbDvPtJfAp/fxsHydhJhKns6skmK0=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.82.0 h1:fKTXkXX+iMAAiTu4r1j1DbzKYvbd6CvFoWNWLhTOJjk=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.82.0/go.mod h1:1SM5fbDUmJHQUNO0T/lDzMVmGpn+z9UJHyjfGg6IQ0Q=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/opencensus v0.82.0 h1:XW0HJBOWJLpzDsMAOoLxFL4qMmD/qI4qTpGfAA9afgU=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/opencensus v0.82.0/go.mod h1:vX/Qm9YG+C4N3hZeLr/M1ndTqbOK37pPV7MZwhWY/S0=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus v0.82.0 h1:kx5UQGy8/TiZRst78xblTHvIf3HBJLNKoXvx1GrUHPc=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus v0.82.0/go.mod h1:A9Z3SrF0Ngir1kd5t7UzjpUFoy4mhZjpt3B+3d78/qo=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheusremotewrite v0.82.0 h1:8HOH8p6iFidN1VsZewH+ePdZM+w/89dCS6dW24Enhng=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheusremotewrite v0.82.0/go.mod h1:2FVfNpoH3v34K0Lx4s37n54U11ir9Wz8rAab8DaL6ho=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/signalfx v0.82.0 h1:0uuo5x+/RFtMhPqo7+CU/lYCmvpJ77fPLdpPxA92Kes=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/signalfx v0.82.0/go.mod h1:rfHBidHrLvlRi0E5NvQuljP3r4Uv7qtNTPWIFtt79z0=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin v0.82.0 h1:NC8LNVLj2UXfPZoW4vpAf9+NWqw1vzwuSjdA2xRPIvA=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin v0.82.0/go.mod h1:t2x45aFpeo5tc6oM2nNyKPLy5gBhT/R/uJNdPp7TBbc=
-github.com/open-telemetry/opentelemetry-collector-contrib/processor/attributesprocessor v0.82.0 h1:R+UjA60N26I1gGGzlcxp1IeEYxg9kCktiqle3vEFRBU=
-github.com/open-telemetry/opentelemetry-collector-contrib/processor/attributesprocessor v0.82.0/go.mod h1:VoKLInODf1ZwhLHiYWJe/2Gte8BeSlvRdlrM8gO7VTQ=
-github.com/open-telemetry/opentelemetry-collector-contrib/processor/cumulativetodeltaprocessor v0.82.0 h1:2j71XxgoPi+/WxQM9eYmo8rhrV9qqX8CP1ivNNaDrGE=
-github.com/open-telemetry/opentelemetry-collector-contrib/processor/cumulativetodeltaprocessor v0.82.0/go.mod h1:YoTWA0QuEunCmoGPhYibhD/AHrk6jaPSv0upcOgv7W4=
-github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatorateprocessor v0.82.0 h1:iA9kPemK86SmIHlghMFHlrwDxX3oAmouAgZ03p5sLSo=
-github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatorateprocessor v0.82.0/go.mod h1:Yvf9bKaGcHyKSnxsVVsIS1aeGotnlYRzTZuExzTNnSY=
-github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor v0.82.0 h1:h9FsCZ9ppDRL4cC3QeFSL8uh7auqFsa9BMLLeVx0qFs=
-github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor v0.82.0/go.mod h1:2e36/jV+bd1rPXUJmS1Jk5g3Mrs3EN1Hve/eZrwYEYo=
-github.com/open-telemetry/opentelemetry-collector-contrib/processor/groupbytraceprocessor v0.82.0 h1:UZlfIF0j35lrIdLt3I2blKi2gknhS4B7FsygXB/PJIw=
-github.com/open-telemetry/opentelemetry-collector-contrib/processor/groupbytraceprocessor v0.82.0/go.mod h1:hpAZugKUIeHZYS1QamT7kfxhy2qmh8j9F4krEmwlv1Q=
-github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sattributesprocessor v0.82.0 h1:UQVbkb/xvXhL5TU3w63cIfrV6Lhtk/shtvQDZDVx75I=
-github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sattributesprocessor v0.82.0/go.mod h1:efjxEAMpzeK27fnU9AxWFGDX7oJji6AYz/CqtvlIKHM=
-github.com/open-telemetry/opentelemetry-collector-contrib/processor/metricsgenerationprocessor v0.82.0 h1:QnVw/sc1XkHjIkpPzq8gGw8A7/0tS3KiBeacNZ6R27s=
-github.com/open-telemetry/opentelemetry-collector-contrib/processor/metricsgenerationprocessor v0.82.0/go.mod h1:aki1yK0PqmA4UvtbRRN45DQ2iuAoBZlPFXfkXr83avY=
-github.com/open-telemetry/opentelemetry-collector-contrib/processor/metricstransformprocessor v0.82.0 h1:Ev7liq1TQyhDgcsfizXC/lO1lKWQX6sv7OAKVe679pI=
-github.com/open-telemetry/opentelemetry-collector-contrib/processor/metricstransformprocessor v0.82.0/go.mod h1:WksfpqoJ0O5IKYn57Qs3uoQRjKEoidlG1Axy/VFDc2U=
-github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor v0.82.0 h1:FMfmm1qOrd/UTFydSaLdDq27dz5/Z6vk9lf9WhLB0YM=
-github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor v0.82.0/go.mod h1:UTtjfRbvrkSypo7gHWkOO6l3LjmWaf3JxjtCbnCuAAE=
-github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor v0.82.0 h1:ToEAh/x/66CfvlNdMzAa9hh0zzZOa2hneCdnDBj+U4U=
-github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor v0.82.0/go.mod h1:SvJsVQdfVYVJ0/uG7jzesLDj97ej/8Pkq88MOLD4Ko8=
-github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourceprocessor v0.82.0 h1:iIzRlJaR5YPuRLjtbeSALwn0IxdgdbOwlO8DEUPkDz4=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.82.0 h1:fKTXkXX+iMAAiTu4r1j1DbzKYvbd6CvFoWNWLhTOJjk=
+-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.82.0/go.mod h1:1SM5fbDUmJHQUNO0T/lDzMVmGpn+z9UJHyjfGg6IQ0Q=
+-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/opencensus v0.82.0 h1:XW0HJBOWJLpzDsMAOoLxFL4qMmD/qI4qTpGfAA9afgU=
+-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/opencensus v0.82.0/go.mod h1:vX/Qm9YG+C4N3hZeLr/M1ndTqbOK37pPV7MZwhWY/S0=
+-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus v0.82.0 h1:kx5UQGy8/TiZRst78xblTHvIf3HBJLNKoXvx1GrUHPc=
+-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus v0.82.0/go.mod h1:A9Z3SrF0Ngir1kd5t7UzjpUFoy4mhZjpt3B+3d78/qo=
+-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheusremotewrite v0.82.0 h1:8HOH8p6iFidN1VsZewH+ePdZM+w/89dCS6dW24Enhng=
+-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheusremotewrite v0.82.0/go.mod h1:2FVfNpoH3v34K0Lx4s37n54U11ir9Wz8rAab8DaL6ho=
+-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/signalfx v0.82.0 h1:0uuo5x+/RFtMhPqo7+CU/lYCmvpJ77fPLdpPxA92Kes=
+-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/signalfx v0.82.0/go.mod h1:rfHBidHrLvlRi0E5NvQuljP3r4Uv7qtNTPWIFtt79z0=
+-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin v0.82.0 h1:NC8LNVLj2UXfPZoW4vpAf9+NWqw1vzwuSjdA2xRPIvA=
+-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin v0.82.0/go.mod h1:t2x45aFpeo5tc6oM2nNyKPLy5gBhT/R/uJNdPp7TBbc=
+-github.com/open-telemetry/opentelemetry-collector-contrib/processor/attributesprocessor v0.82.0 h1:R+UjA60N26I1gGGzlcxp1IeEYxg9kCktiqle3vEFRBU=
+-github.com/open-telemetry/opentelemetry-collector-contrib/processor/attributesprocessor v0.82.0/go.mod h1:VoKLInODf1ZwhLHiYWJe/2Gte8BeSlvRdlrM8gO7VTQ=
+-github.com/open-telemetry/opentelemetry-collector-contrib/processor/cumulativetodeltaprocessor v0.82.0 h1:2j71XxgoPi+/WxQM9eYmo8rhrV9qqX8CP1ivNNaDrGE=
+-github.com/open-telemetry/opentelemetry-collector-contrib/processor/cumulativetodeltaprocessor v0.82.0/go.mod h1:YoTWA0QuEunCmoGPhYibhD/AHrk6jaPSv0upcOgv7W4=
+-github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatorateprocessor v0.82.0 h1:iA9kPemK86SmIHlghMFHlrwDxX3oAmouAgZ03p5sLSo=
+-github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatorateprocessor v0.82.0/go.mod h1:Yvf9bKaGcHyKSnxsVVsIS1aeGotnlYRzTZuExzTNnSY=
+-github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor v0.82.0 h1:h9FsCZ9ppDRL4cC3QeFSL8uh7auqFsa9BMLLeVx0qFs=
+-github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor v0.82.0/go.mod h1:2e36/jV+bd1rPXUJmS1Jk5g3Mrs3EN1Hve/eZrwYEYo=
+-github.com/open-telemetry/opentelemetry-collector-contrib/processor/groupbytraceprocessor v0.82.0 h1:UZlfIF0j35lrIdLt3I2blKi2gknhS4B7FsygXB/PJIw=
+-github.com/open-telemetry/opentelemetry-collector-contrib/processor/groupbytraceprocessor v0.82.0/go.mod h1:hpAZugKUIeHZYS1QamT7kfxhy2qmh8j9F4krEmwlv1Q=
+-github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sattributesprocessor v0.82.0 h1:UQVbkb/xvXhL5TU3w63cIfrV6Lhtk/shtvQDZDVx75I=
+-github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sattributesprocessor v0.82.0/go.mod h1:efjxEAMpzeK27fnU9AxWFGDX7oJji6AYz/CqtvlIKHM=
+-github.com/open-telemetry/opentelemetry-collector-contrib/processor/metricsgenerationprocessor v0.82.0 h1:QnVw/sc1XkHjIkpPzq8gGw8A7/0tS3KiBeacNZ6R27s=
+-github.com/open-telemetry/opentelemetry-collector-contrib/processor/metricsgenerationprocessor v0.82.0/go.mod h1:aki1yK0PqmA4UvtbRRN45DQ2iuAoBZlPFXfkXr83avY=
+-github.com/open-telemetry/opentelemetry-collector-contrib/processor/metricstransformprocessor v0.82.0 h1:Ev7liq1TQyhDgcsfizXC/lO1lKWQX6sv7OAKVe679pI=
+-github.com/open-telemetry/opentelemetry-collector-contrib/processor/metricstransformprocessor v0.82.0/go.mod h1:WksfpqoJ0O5IKYn57Qs3uoQRjKEoidlG1Axy/VFDc2U=
+-github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor v0.82.0 h1:FMfmm1qOrd/UTFydSaLdDq27dz5/Z6vk9lf9WhLB0YM=
+-github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor v0.82.0/go.mod h1:UTtjfRbvrkSypo7gHWkOO6l3LjmWaf3JxjtCbnCuAAE=
+-github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor v0.82.0 h1:ToEAh/x/66CfvlNdMzAa9hh0zzZOa2hneCdnDBj+U4U=
+-github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor v0.82.0/go.mod h1:SvJsVQdfVYVJ0/uG7jzesLDj97ej/8Pkq88MOLD4Ko8=
+-github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourceprocessor v0.82.0 h1:iIzRlJaR5YPuRLjtbeSALwn0IxdgdbOwlO8DEUPkDz4=
-github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourceprocessor v0.82.0/go.mod h1:0fxeFp/yAbCRAbcaPztO1j9JH3HYwItPonmEz6OXlJg=
-github.com/open-telemetry/opentelemetry-collector-contrib/processor/spanprocessor v0.82.0 h1:PTjUqRSExzDY8AS1RA+fDNRIyP6ilh/4kJRLi1Gwz6g=
-github.com/open-telemetry/opentelemetry-collector-contrib/processor/spanprocessor v0.82.0/go.mod h1:agArAIGzj1fcKeS2J4voljJ1a2WetcV8IIjK7X+i48U=
-github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor v0.82.0 h1:1HuCZhKP4QrB1U0NRx9HaG5qCHXaYAubFMVPlhSg6Kk=
-github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor v0.82.0/go.mod h1:GvmND1rGYv6k6fNHWyKbRi6yq8XlOTe1ymfER+BcDc0=
-github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver v0.82.0 h1:9/IOzBxXZpeUkoMFl9eXiLhNBbhumhjqdgnDwl0nO1E=
-github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver v0.82.0/go.mod h1:zXztXG/c1eWh6o5+HAvibDBl6B/FPPcKZHjoVvgvRdQ=
-github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awsecscontainermetricsreceiver v0.82.0 h1:Pbq7+HacFVFbUKnLshTcHrzn7olhEKG3PVeEuzsufKk=
-github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awsecscontainermetricsreceiver v0.82.0/go.mod h1:GyhYenUIr/aotZ5w3N7nOLiA3uF1+blq2pOQAbtrZws=
-github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awsxrayreceiver v0.82.0 h1:aZ7P5unH4YlZTEt+eh0ojkhtOj3xKqo8Ywat5+dHAzM=
-github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awsxrayreceiver v0.82.0/go.mod h1:ouGAw/O/qJWPen9L4BPeHXTUANRwafYyWU/yvk1GEdY=
-github.com/open-telemetry/opentelemetry-collector-contrib/receiver/carbonreceiver v0.82.0 h1:zh2dxXaKJIbYzF1/5ggdMbGOdi2j8jCMFP14/zCX25M=
-github.com/open-telemetry/opentelemetry-collector-contrib/receiver/carbonreceiver v0.82.0/go.mod h1:Uk/9MwFO9wVpoULrevcSlu61bHP2K9JSpNk1lbmyufM=
-github.com/open-telemetry/opentelemetry-collector-contrib/receiver/datadogreceiver v0.82.0 h1:ytJRwUptGO/Ckb6JGBVHvLxYNR8NAbzZMVe3fY86FTs=
-github.com/open-telemetry/opentelemetry-collector-contrib/receiver/datadogreceiver v0.82.0/go.mod h1:gXgWns0geX0gGRh/nSxCU0R0P6DFv6EkUxVeiWF1TCc=
-github.com/open-telemetry/opentelemetry-collector-contrib/receiver/dockerstatsreceiver v0.82.0 h1:t7E/nglM5F9p7w2rfKFSFm0bkmjND2kq5oxIN+XhT1o=
-github.com/open-telemetry/opentelemetry-collector-contrib/receiver/filelogreceiver v0.82.0 h1:zYD9pYKlF7nLpwyrZ9Nvgj4K8TtdnMtX0PS2qSJvo8Y=
-github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver v0.82.0 h1:J8jwjrVQU5l6EnsaDBY8qOsW32nrylhwlCuV5WbvZ2M=
-github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver v0.82.0 h1:JMJF6BiLMovfCW/+s+rTnndp7IY9qrVyZ7xZnZEtCtA=
-github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver v0.82.0/go.mod h1:UMwCyO7+q2f3S01fU40NYYXUNSWZy5X1RmnTtJR92fk=
-github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver v0.82.0 h1:ucTSzZJfzy4xav3bjGFZWAlf/rdxoIN620ZW+wW5u2Y=
-github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver v0.82.0/go.mod h1:jep8pXp8tTHpr4Fe3w6PH9vHGHo/m+g1MukPZx9mzHI=
-github.com/open-telemetry/opentelemetry-collector-contrib/receiver/opencensusreceiver v0.82.0 h1:7HP4EowOkMf83QvDbxiR6SqdMuA205Abtx/AimZoo4M=
-github.com/open-telemetry/opentelemetry-collector-contrib/receiver/opencensusreceiver v0.82.0/go.mod h1:9mneURe6HzE6/w0DRk/3elPa9kxbZsZjONaXfDtPjns=
-github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.82.0 h1:oJ9H90C/qwP+3dSxEL+fAYXpyN/8GqNp6j0CODeI2yo=
-github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.82.0/go.mod h1:LdHYaAdHsKYhVNOwGJS2Egu+Rrb4xWEerC2JNpC4wrs=
-github.com/open-telemetry/opentelemetry-collector-contrib/receiver/sapmreceiver v0.82.0 h1:7iYtLgzW7l8emYmSUFMt5DlHh1qwmU5smyHS4S3kxNw=
-github.com/open-telemetry/opentelemetry-collector-contrib/receiver/sapmreceiver v0.82.0/go.mod h1:tE3o9dhGQIBa71Go8nqRGjSLhNkIL9wsx15BOnTNGu0=
-github.com/open-telemetry/opentelemetry-collector-contrib/receiver/signalfxreceiver v0.82.0 h1:NI6AUgoQlwxsWjCELaIqeINcQLZLpWPE3KaowtULoYw=
-github.com/open-telemetry/opentelemetry-collector-contrib/receiver/signalfxreceiver v0.82.0/go.mod h1:LiZGzVtx+dvcOzeX7R7n+zAV1fBldjal1HwQjewRSiQ=
-github.com/open-telemetry/opentelemetry-collector-contrib/receiver/splunkhecreceiver v0.82.0 h1:IgwGzS1ftJjafCug0a38JHv/c7rKeAo1OMIsqt/WvE4=
-github.com/open-telemetry/opentelemetry-collector-contrib/receiver/splunkhecreceiver v0.82.0/go.mod h1:EfG/z06CjGdh0WHKi41ZKi+vo51bJsvx3nSELzFxwMI=
-github.com/open-telemetry/opentelemetry-collector-contrib/receiver/statsdreceiver v0.82.0 h1:iZtAbhXHFR9ID2ZttCbR9wvS2J3Ivi/9nJQj0/7YuCI=
-github.com/open-telemetry/opentelemetry-collector-contrib/receiver/statsdreceiver v0.82.0/go.mod h1:8DsPwUYp7YpJJALNimxVe0IXwtaRC3Aj79BWlmeG8N8=
-github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver v0.82.0 h1:DAuk2nc0eCgZIVdrZ8OPIJ2w343zLN3e98vS9D+Osk4=
-github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver v0.82.0/go.mod h1:8aOC0UNBTUuRt5Bmw77bxcFJYd1HXffvU8wlVo/1DXc=
-github.com/open-telemetry/opentelemetry-collector-contrib/testbed v0.82.0 h1:0eyN0T0BtOwuV/5snelJnAfypwFBQ3SpfP1DwutcEEs=
-github.com/open-telemetry/opentelemetry-collector-contrib/testbed v0.82.0/go.mod h1:wfY0OYweooPIY7a5z6fo7MnA5iBBAnlbmiKoZfB001c=
-github.com/open-telemetry/opentelemetry-collector-contrib/testbed/mockdatareceivers/mockawsxrayreceiver v0.82.0 h1:igF7KmHVnawyrlJAXm8z0MocPSBoNAR+cTdVASCF678=
-github.com/open-telemetry/opentelemetry-collector-contrib/testbed/mockdatareceivers/mockawsxrayreceiver v0.82.0/go.mod h1:820X+LYdWs+/1qr0B6oo48t48Yh+OmslnOEX5QwVhTE=
-github.com/open-telemetry/opentelemetry-collector-contrib/testbed/mockdatasenders/mockdatadogagentexporter v0.82.0 h1:1HTD4z9YrYTXOz3Q4HaZ+oRwStXwXmHa4Kcjy3aw1Vw=
-github.com/open-telemetry/opentelemetry-collector-contrib/testbed/mockdatasenders/mockdatadogagentexporter v0.82.0/go.mod h1:SMYYyAZ6gWwe1IMp0rmlX/JcR34/BQrx2mDF5NEJgbw=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.83.0 h1:fGK6FQaUCOFGywGRvsiDtjlfGLZGcgkmgZlIomdsr3Q=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.83.0/go.mod h1:wvJRWQDZGEeTvcfzNHII+niIHWMlO/fg6vSS3x7XYzE=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/opencensus v0.83.0 h1:4beUB/Hg6/2JSPcOf4vMXyvoSEoP092smr4Uft/zxDU=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/opencensus v0.83.0/go.mod h1:18nzlp10gqpvewhRFxlzn5TteLrkHUYXGimf8fAC4k0=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus v0.83.0 h1:qEO5xxh8yRYiWhXiQhEcXxFV4RcO7lUkejfxBjXLPNo=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus v0.83.0/go.mod h1:a8Jm3NY18nTt9mM8XJeWvwmiPxDKJQvbpYIlzA7H3Yc=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheusremotewrite v0.83.0 h1:q5QvnGvi9JNh4vPmK3BuX/C7C8YWZetCrli3BxCAZKU=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheusremotewrite v0.83.0/go.mod h1:dZGmQr+gcI6fUEs6sPxlUQyV/4j9KcL31YcsHL9OJ9k=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/signalfx v0.83.0 h1:asapsoRz2d/HRjT0tWfJsCqBe0qwFSw44P5VKYx2OGw=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/signalfx v0.83.0/go.mod h1:SQ+BuwNerr2p1jmezJDF4ruJ1WojRgqQinkyey6NaMg=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin v0.83.0 h1:1FnX4XXS9IOC9h5nA8gQsPv21n0rbECjBqW/VfJ4c8o=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin v0.83.0/go.mod h1:BK0l1fWX36sfssQie9lH46tzVzV/UTOcUPRKytLbb/I=
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/attributesprocessor v0.83.0 h1:zre+RUseqdEblLwaE9FidObbtmx3Mm7tiwMC3Fejoy8=
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/attributesprocessor v0.83.0/go.mod h1:QEy9JCi68LCoxfwPn2v1byOf3KxSe3bJ/p3jgNzpsOY=
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/cumulativetodeltaprocessor v0.83.0 h1:cnmSn6LVIIkBRfaSuBQNNphLXonFr5qD0+bCHgvAFws=
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/cumulativetodeltaprocessor v0.83.0/go.mod h1:bHhc2kT0wki+U0boNf5E0bmDbPDaI6AyzdXh/zvtFrE=
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatorateprocessor v0.83.0 h1:de91BCQwWkwZ+Tp7OsCpkrrc7vBRdzPQhDE5ENcxPMI=
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatorateprocessor v0.83.0/go.mod h1:7NGof68ffjxgOoN7NE0TSMNAmGXxpxVmw6p9KGvxerU=
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor v0.83.0 h1:Ul48EqO0xeSn57H8jYK2GNQzJfF7W9l1nQemlfUGYEs=
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor v0.83.0/go.mod h1:ZIMmKTtWzTrpj4z1+yBu7tbzSHi4PbtyLNAWq+AwmlU=
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/groupbytraceprocessor v0.83.0 h1:g2YnCEJ4SUQG0m7yqDpSPOYsfPe74M1ivy2JYqWUCWY=
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/groupbytraceprocessor v0.83.0/go.mod h1:vtc7IGVX6m4tYkNs0KLdEA7HpuRgxRhHqJiFBLyWdyc=
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sattributesprocessor v0.83.0 h1:hv92fwXj9f834zUGLoyxtF07zpEM34yDL76OAwVTdqY=
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sattributesprocessor v0.83.0/go.mod h1:3wxQn8Bp5c19UgSvGx+32/xVLsOZMjNaTU3g3n6t6Cc=
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/metricsgenerationprocessor v0.83.0 h1:88XaF5XSbe7b9J77yvr/joO+pT7ZVpmm5GoHjI5/Wes=
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/metricsgenerationprocessor v0.83.0/go.mod h1:14tnFp/J3PFriWAuvi3Jf5hOkc/p4zhfZORbNDzoHH0=
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/metricstransformprocessor v0.83.0 h1:rfTAZuRV54Qb/Aah8sdnpF5Z9qmtotyR1L8Z79R/QYU=
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/metricstransformprocessor v0.83.0/go.mod h1:C3P6u4x0CwtLgTVdeAvSR9m1CPjHeJRjklfaTvQ7JRU=
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor v0.83.0 h1:3S7vb8bttl9eC3qQRi5Rm4q01taM+ue8loomADB0qSA=
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor
v0.83.0/go.mod h1:Y9TezTRfqJ3cOTsjWxe7rgDKZU71uU28mJ10GKYBK5g= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor v0.83.0 h1:TNoRtrLHLk5Xr8JZwxiHxnpy0+UYnDHl7SPtZIoTC0E= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor v0.83.0/go.mod h1:KDZS6tI2Q8aZ7TCJzZuqEbrwDJPBAaSQIbeXsO6Nvvs= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourceprocessor v0.83.0 h1:Jpa2cCa1CxWgc6kUP6fTfYvqRZzpNcMDFyzjzoXzWPA= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourceprocessor v0.83.0/go.mod h1:a7EPC0J3PIU+x3BZSDKxhB7cxeJjf6mgy6nmdhGGQ8M= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/spanprocessor v0.83.0 h1:mchNw/c+HM4jW22fFhHRjYX7pyk4t7eiG/QFhoUv020= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/spanprocessor v0.83.0/go.mod h1:yJJ/GpM8NeloEgznZyGTH8neThCL4KzQd8gsI+eJDxM= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor v0.83.0 h1:4D72ZwITvSbNchy/LAISnFlCjGDZYblPmaqn3LKFKiE= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor v0.83.0/go.mod h1:HP+yAae+1agolvpTo2GRT3BXgPDt1NaYfm5PWtPrI7I= +github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver v0.83.0 h1:pJTvexBqzytA/I5Djq8MXsKlqyT+wZ3ZmZLqkBX0GTI= +github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver v0.83.0/go.mod h1:Z5rds2ALV45/OFitkS3ayHhe1u2Lxo3+nRQ9mFwMaxA= +github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awsecscontainermetricsreceiver v0.83.0 h1:wANd57Lpv/ueBpmqVzusAMWyXy+y5ko09QwZxGZw5+Y= +github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awsecscontainermetricsreceiver v0.83.0/go.mod h1:RT4UNOaHyOXGMEq1Azp6Bnm5qUHpx6BT0Ih8o0n9GP8= +github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awsxrayreceiver v0.83.0 h1:NKNHzx3qvSl5G5drW90XeiDLNZL0Qmgf3j5EdPv36UE= +github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awsxrayreceiver v0.83.0/go.mod h1:yDRLZz1bwaTXmyTo5b317TN7a5oHkHMuio19nyJdK90= +github.com/open-telemetry/opentelemetry-collector-contrib/receiver/carbonreceiver v0.83.0 h1:6gICrLbGa3rkGMT7J56W8acDAWefodbQrcs53/tf8UQ= +github.com/open-telemetry/opentelemetry-collector-contrib/receiver/carbonreceiver v0.83.0/go.mod h1:tB9vMhTdZ5g/HXOoM1nUVzA/f+SHtA5XZxBKXzOR7SM= +github.com/open-telemetry/opentelemetry-collector-contrib/receiver/datadogreceiver v0.83.0 h1:vxkRZ0TsEv6EPd6sgx5NjP3zEezr9nVOumzNkoAjG88= +github.com/open-telemetry/opentelemetry-collector-contrib/receiver/datadogreceiver v0.83.0/go.mod h1:RPDOaKR5l+NTaYQN4gB6mdsH8r06qBBes1bmH3cQDmY= +github.com/open-telemetry/opentelemetry-collector-contrib/receiver/dockerstatsreceiver v0.83.0 h1:3mtM33u9B2CqZmG5ryP349HvaDYQs8useSJIA441Vgs= +github.com/open-telemetry/opentelemetry-collector-contrib/receiver/filelogreceiver v0.83.0 h1:TNBKlerQbOS5cTMNxn/pfxxE/PpYLajPaJYQ8wiHrIw= +github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver v0.83.0 h1:6y1yhypoixL19d4PpmNlPbDt9zhs3btg54RQiC6D7lg= +github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver v0.83.0 h1:wu0ZdOTDqjl0Ug6J7rAO43GIe7nW/hvPXZ/Kcs7Hmis= +github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver v0.83.0/go.mod h1:qhWK3ADYGvYaS54XbaH6yK8/iPGQLtimdtIcXevTLkw= +github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver v0.83.0 
h1:rVuP6uUEXa/z8+D6N/Z9ni4czrFEjs7hsWe+DozKpqE= +github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver v0.83.0/go.mod h1:7sh5S7LJEtuknmYyRzQoWlqbYerr5OvViF/AdkGUsOQ= +github.com/open-telemetry/opentelemetry-collector-contrib/receiver/opencensusreceiver v0.83.0 h1:o1+JwweAu7t1bUHFJeZouEZSMv+2BCVLKc0HJ4ZujSg= +github.com/open-telemetry/opentelemetry-collector-contrib/receiver/opencensusreceiver v0.83.0/go.mod h1:eMWB47gXK1J4odbvapm9CyWezoNbbSQkgGKwmPGyNhI= +github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.83.0 h1:WaisWqr64HIHo0Gj+FJO2YzjRAwXpq29HRaMbstnXeg= +github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.83.0/go.mod h1:3Q0ttKtPcn9wY7VvT3P0E6Y6FMDA+yOu64xbr4bqHrM= +github.com/open-telemetry/opentelemetry-collector-contrib/receiver/sapmreceiver v0.83.0 h1:v6T2SchOCJ3fCQp78PjHMLsLh72n4Z120dg4DrrgpFY= +github.com/open-telemetry/opentelemetry-collector-contrib/receiver/sapmreceiver v0.83.0/go.mod h1:98rhYX2mqdrhpVWq15I8QjQclddY6KNpGiSkfxcJZyo= +github.com/open-telemetry/opentelemetry-collector-contrib/receiver/signalfxreceiver v0.83.0 h1:RTzQ4V4M1MjA4SQVGhZLqcwwfUCw5c8iQ+J6pDnSFhs= +github.com/open-telemetry/opentelemetry-collector-contrib/receiver/signalfxreceiver v0.83.0/go.mod h1:oKch7fKCrlP+JEEzhuk4mAHHm1KWulPQl9DzkwcZjgo= +github.com/open-telemetry/opentelemetry-collector-contrib/receiver/splunkhecreceiver v0.83.0 h1:OqnxnU8Ny5lqjGhkmhJpx+P5GoFI41djKkQn39jNrCY= +github.com/open-telemetry/opentelemetry-collector-contrib/receiver/splunkhecreceiver v0.83.0/go.mod h1:T1jbxVNo0XY2nA/3aMwoFHChra1GocCBxUuB5BZMarw= +github.com/open-telemetry/opentelemetry-collector-contrib/receiver/statsdreceiver v0.83.0 h1:7MpD62lDNplm6ONFQTQB+Gkb+slo2MzCkpxurldcJAk= +github.com/open-telemetry/opentelemetry-collector-contrib/receiver/statsdreceiver v0.83.0/go.mod h1:MHvgdYigNKOZmiO6N7Fqa1ZerEyIlLL7cCydn3n9R+c= +github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver v0.83.0 h1:IJdOOFBDG2Z16utgTc9DnLBc3Q0eiRUgudBjlh85x7o= +github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver v0.83.0/go.mod h1:1z7uIIoZgcqjYfDefFnjfO6frg3t2UorVaxB1DzcHlA= +github.com/open-telemetry/opentelemetry-collector-contrib/testbed v0.83.0 h1:TWe53fopoUzC0MAMYMVDVjap0jiycAjy4eXex+//ceg= +github.com/open-telemetry/opentelemetry-collector-contrib/testbed v0.83.0/go.mod h1:xxEPO/WInd7ETjubvoOgOJweTBuXPPo+x6wBvupEFOY= +github.com/open-telemetry/opentelemetry-collector-contrib/testbed/mockdatareceivers/mockawsxrayreceiver v0.83.0 h1:H98Gr1SIpNp/8m9K3vQPrff868rHRivaeet4Cry5kDE= +github.com/open-telemetry/opentelemetry-collector-contrib/testbed/mockdatareceivers/mockawsxrayreceiver v0.83.0/go.mod h1:EWglPxyAOHEa0/pXNmM1Q6nj8UP7fSTbpmVIe4yiFkI= +github.com/open-telemetry/opentelemetry-collector-contrib/testbed/mockdatasenders/mockdatadogagentexporter v0.83.0 h1:ssskmcYlu4RR6Zs+RLQbFAC68YDGrnKwBBMIzDdRVJI= +github.com/open-telemetry/opentelemetry-collector-contrib/testbed/mockdatasenders/mockdatadogagentexporter v0.83.0/go.mod h1:eRkz1YWkpLT/rwchXgzKlkymV8xMbHwqoCiTQiMrZrk= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= -github.com/opencontainers/image-spec v1.1.0-rc2 h1:2zx/Stx4Wc5pIPDvIxHXvXtQFW/7XWJGmnM7r3wg034= -github.com/opencontainers/image-spec 
v1.1.0-rc2/go.mod h1:3OVijpioIKYWTqjiG0zfF6wvoJ4fAXGbjdZuI2NgsRQ= +github.com/opencontainers/image-spec v1.1.0-rc4 h1:oOxKUJWnFC4YGHCCMNql1x4YaDfYBTS5Y4x/Cgeo1E0= +github.com/opencontainers/image-spec v1.1.0-rc4/go.mod h1:X4pATf0uXsnn3g5aiGIsVnJBR4mxhKzfwmvK/B2NTm8= github.com/opencontainers/runc v1.1.4/go.mod h1:1J5XiS+vdZ3wCyZybsuxXZWGrgSr8fFJHLXuG2PsnNg= github.com/opencontainers/runc v1.1.8 h1:zICRlc+C1XzivLc3nzE+cbJV4LIi8tib6YG0MqC6OqA= github.com/opencontainers/runc v1.1.8/go.mod h1:CbUumNnWCuTGFukNXahoo/RFBZvDAgRh/smNYNOhA50= github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/opencontainers/runtime-spec v1.0.3-0.20220909204839-494a5a6aca78 h1:R5M2qXZiK/mWPMT4VldCOiSL9HIAMuxQZWdG0CSM5+4= github.com/opencontainers/runtime-spec v1.0.3-0.20220909204839-494a5a6aca78/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.1.0-rc.3 h1:l04uafi6kxByhbxev7OWiuUv0LZxEsYUfDWZ6bztAuU= +github.com/opencontainers/runtime-spec v1.1.0-rc.3/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/selinux v1.10.0 h1:rAiKF8hTcgLI3w0DHm6i0ylVVcOrlgR1kK99DRLDhyU= github.com/opencontainers/selinux v1.10.0/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI= github.com/openshift/api v0.0.0-20180801171038-322a19404e37 h1:05irGU4HK4IauGGDbsk+ZHrm1wOzMLYjMlfaiqMrBYc= @@ -1493,14 +1491,14 @@ github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJ github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= github.com/openzipkin/zipkin-go v0.2.5/go.mod h1:KpXfKdgRDnnhsxw4pNIH9Md5lyFqKUa4YDFlwRYAMyE= -github.com/openzipkin/zipkin-go v0.4.1 h1:kNd/ST2yLLWhaWrkgchya40TJabe8Hioj9udfPcEO5A= -github.com/openzipkin/zipkin-go v0.4.1/go.mod h1:qY0VqDSN1pOBN94dBc6w2GJlWLiovAyg7Qt6/I9HecM= +github.com/openzipkin/zipkin-go v0.4.2 h1:zjqfqHjUpPmB3c1GlCvvgsM1G4LkvqQbBDueDOCg/jA= +github.com/openzipkin/zipkin-go v0.4.2/go.mod h1:ZeVkFjuuBiSy13y8vpSDCjMi9GoI3hPpCJSBx/EYFhY= github.com/ory/go-acc v0.2.6/go.mod h1:4Kb/UnPcT8qRAk3IAxta+hvVapdxTLWtrr7bFLlEgpw= github.com/ory/viper v1.7.5/go.mod h1:ypOuyJmEUb3oENywQZRgeAMwqgOyDqwboO1tj3DjTaM= github.com/outcaste-io/ristretto v0.2.1 h1:KCItuNIGJZcursqHr3ghO7fc5ddZLEHspL9UR0cQM64= github.com/outcaste-io/ristretto v0.2.1/go.mod h1:W8HywhmtlopSB1jeMg3JtdIhf+DYkLAr0VN/s4+MHac= -github.com/ovh/go-ovh v1.3.0 h1:mvZaddk4E4kLcXhzb+cxBsMPYp2pHqiQpWYkInsuZPQ= -github.com/ovh/go-ovh v1.3.0/go.mod h1:AxitLZ5HBRPyUd+Zl60Ajaag+rNTdVXWIkzfrVuTXWA= +github.com/ovh/go-ovh v1.4.1 h1:VBGa5wMyQtTP7Zb+w97zRCh9sLtM/2YKRyy+MEJmWaM= +github.com/ovh/go-ovh v1.4.1/go.mod h1:6bL6pPyUT7tBfI0pqOegJgRjgjuO+mOo+MyXd1EEC0M= github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= @@ -1609,8 +1607,8 @@ github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1 github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4= github.com/prometheus/procfs v0.10.1 h1:kYK1Va/YMlutzCGazswoHKo//tZVlFpKYh+PymziUAg= github.com/prometheus/procfs v0.10.1/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM= 
-github.com/prometheus/prometheus v0.43.1 h1:Z/Z0S0CoPUVtUnHGokFksWMssSw2Y1Ir9NnWS1pPWU0= -github.com/prometheus/prometheus v0.43.1/go.mod h1:2BA14LgBeqlPuzObSEbh+Y+JwLH2GcqDlJKbF2sA6FM= +github.com/prometheus/prometheus v0.44.0 h1:sgn8Fdx+uE5tHQn0/622swlk2XnIj6udoZCnbVjHIgc= +github.com/prometheus/prometheus v0.44.0/go.mod h1:aPsmIK3py5XammeTguyqTmuqzX/jeCdyOWWobLHNKQg= github.com/prometheus/statsd_exporter v0.21.0/go.mod h1:rbT83sZq2V+p73lHhPZfMc3MLCHmSHelCh9hSGYNLTQ= github.com/prometheus/statsd_exporter v0.22.7 h1:7Pji/i2GuhK6Lu7DHrtTkFmNBCudCPT1pX2CziuyQR0= github.com/prometheus/statsd_exporter v0.22.7/go.mod h1:N/TevpjkIh9ccs6nuzY3jQn9dFqnUakOjnEuMPJJJnI= @@ -1653,16 +1651,15 @@ github.com/sagikazarmark/crypt v0.6.0/go.mod h1:U8+INwJo3nBv1m6A/8OBXAq7Jnpspk5A github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= github.com/sanity-io/litter v1.2.0/go.mod h1:JF6pZUFgu2Q0sBZ+HSV35P8TVPI1TTzEwyu9FXAw2W4= github.com/sanposhiho/wastedassign v0.1.3/go.mod h1:LGpq5Hsv74QaqM47WtIsRSF/ik9kqk07kchgv66tLVE= -github.com/scaleway/scaleway-sdk-go v1.0.0-beta.14 h1:yFl3jyaSVLNYXlnNYM5z2pagEk1dYQhfr1p20T1NyKY= -github.com/scaleway/scaleway-sdk-go v1.0.0-beta.14/go.mod h1:fCa7OJZ/9DRTnOKmxvT6pn+LPWUptQAmHF/SBJUGEcg= +github.com/scaleway/scaleway-sdk-go v1.0.0-beta.15 h1:Y7xOFbD+3jaPw+VN7lkakNJ/pa+ZSQVFp1ONtJaBxns= +github.com/scaleway/scaleway-sdk-go v1.0.0-beta.15/go.mod h1:fCa7OJZ/9DRTnOKmxvT6pn+LPWUptQAmHF/SBJUGEcg= github.com/schollz/progressbar/v2 v2.13.2/go.mod h1:6YZjqdthH6SCZKv2rqGryrxPtfmRB/DWZxSMfCXPyD8= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/seccomp/libseccomp-golang v0.9.2-0.20220502022130-f33da4d89646 h1:RpforrEYXWkmGwJHIGnLZ3tTWStkjVVstwzNGqxX2Ds= github.com/seccomp/libseccomp-golang v0.9.2-0.20220502022130-f33da4d89646/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg= -github.com/secure-systems-lab/go-securesystemslib v0.3.1/go.mod h1:o8hhjkbNl2gOamKUA/eNW3xUrntHT9L4W89W1nfj43U= -github.com/secure-systems-lab/go-securesystemslib v0.5.0 h1:oTiNu0QnulMQgN/hLK124wJD/r2f9ZhIUuKIeBsCBT8= -github.com/secure-systems-lab/go-securesystemslib v0.5.0/go.mod h1:uoCqUC0Ap7jrBSEanxT+SdACYJTVplRXWLkGMuDjXqk= +github.com/secure-systems-lab/go-securesystemslib v0.7.0 h1:OwvJ5jQf9LnIAS83waAjPbcMsODrTQUpJ02eNLUoxBg= +github.com/secure-systems-lab/go-securesystemslib v0.7.0/go.mod h1:/2gYnlnHVQ6xeGtfIqFy7Do03K4cdCY0A/GlJLDKLHI= github.com/securego/gosec v0.0.0-20200203094520-d13bb6d2420c/go.mod h1:gp0gaHj0WlmPh9BdsTmo1aq6C27yIPWdxCKGFGdVKBE= github.com/securego/gosec/v2 v2.6.1/go.mod h1:I76p3NTHBXsGhybUW+cEQ692q2Vp+A0Z6ZLzDIZy+Ao= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= @@ -1673,8 +1670,8 @@ github.com/shirou/gopsutil/v3 v3.22.2/go.mod h1:WapW1AOOPlHyXr+yOyw3uYx36enocrtS github.com/shirou/gopsutil/v3 v3.22.4/go.mod h1:D01hZJ4pVHPpCTZ3m3T2+wDF2YAGfd+H4ifUguaQzHM= github.com/shirou/gopsutil/v3 v3.22.5/go.mod h1:so9G9VzeHt/hsd0YwqprnjHnfARAUktauykSbr+y2gA= github.com/shirou/gopsutil/v3 v3.22.6/go.mod h1:EdIubSnZhbAvBS1yJ7Xi+AShB/hxwLHOMz4MCYz7yMs= -github.com/shirou/gopsutil/v3 v3.23.6 h1:5y46WPI9QBKBbK7EEccUPNXpJpNrvPuTD0O2zHEHT08= -github.com/shirou/gopsutil/v3 v3.23.6/go.mod h1:j7QX50DrXYggrpN30W0Mo+I4/8U2UUIQrnrhqUeWrAU= +github.com/shirou/gopsutil/v3 v3.23.7 
h1:C+fHO8hfIppoJ1WdsVm1RoI0RwXoNdfTK7yWXV0wVj4= +github.com/shirou/gopsutil/v3 v3.23.7/go.mod h1:c4gnmoRC0hQuaLqvxnx1//VXQ0Ms/X9UnJF8pddY5z4= github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFtM= github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ= github.com/shoenig/test v0.6.4 h1:kVTaSd7WLz5WZ2IaoM0RSzRsUD+m8wRR+5qvntpn4LU= @@ -1777,6 +1774,7 @@ github.com/stretchr/testify v1.7.4/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO github.com/stretchr/testify v1.7.5/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/stvp/go-udp-testing v0.0.0-20201019212854-469649b16807/go.mod h1:7jxmlfBCDBXRzr0eAQJ48XC1hBu1np4CS5+cHEYfwpc= @@ -1785,7 +1783,6 @@ github.com/subosito/gotenv v1.3.0/go.mod h1:YzJjq/33h7nrwdY+iHMhEOEEbW0ovIz0tB6t github.com/subosito/gotenv v1.4.1 h1:jyEFiXpy21Wm81FBN71l9VoMMV8H8jG+qIK3GCpY6Qs= github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 h1:kdXcSzyDtseVEc4yCz2qF8ZrQvIDBJLl4S1c3GCXmoI= github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= -github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc= github.com/tdakkota/asciicheck v0.0.0-20200416200610-e657995f937b/go.mod h1:yHp0ai0Z9gUljN3o0xMhYJnH/IcvkdTBOX2fmJ93JEM= github.com/tetafro/godot v1.4.4/go.mod h1:FVDd4JuKliW3UgjswZfJfHq4vAx0bD/Jd5brJjGeaz4= github.com/tidwall/gjson v1.10.2/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= @@ -1919,50 +1916,50 @@ go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= go.opentelemetry.io/collector v0.52.0/go.mod h1:a9GvaOhyc0nVOUzqvdv5mxyWghCSso/WRO2GgRl4I1g= go.opentelemetry.io/collector v0.54.0/go.mod h1:FgNzyfb4sAGb5cqusB5znETJ8Pz4OQUBGbOeGIZ2rlQ= go.opentelemetry.io/collector v0.57.2/go.mod h1:9TwWyMRhbFNzaaGLtm/6poWNDJw+etvQMS6Fy+8/8Xs= -go.opentelemetry.io/collector v0.82.0 h1:MaKqWT0R4GCdkZDhYWOQkLfoJj9V7GsMbk1gsAuogaw= -go.opentelemetry.io/collector v0.82.0/go.mod h1:PMmDJkZzC1xpcViHlwMMEVeAnRRl3HYy3nXgD8KJwG0= -go.opentelemetry.io/collector/component v0.82.0 h1:ID9nOGKBf5G0avhuYQlTzmwAyIMvh9B+tlckLE/4qw4= -go.opentelemetry.io/collector/component v0.82.0/go.mod h1:jSdGG4L1Ger6ob6lWpr8jmKC2qqC+XZ/gOgu7GUA5xs= -go.opentelemetry.io/collector/config/configauth v0.82.0 h1:H5xrWyPMotSqajiiH/bay8bpVsT4aq6Vih4OuArXv4Q= -go.opentelemetry.io/collector/config/configauth v0.82.0/go.mod h1:P0ukmBIUk+HP0O7yfUOKRmPmffneAQgmEL9/iTOo1CU= -go.opentelemetry.io/collector/config/configcompression v0.82.0 h1:M6a7eiHdBUB8mIioDhWugJfNm7Sw85cvv/OXyTDhtY0= -go.opentelemetry.io/collector/config/configcompression v0.82.0/go.mod h1:xhHm1sEH7BTECAJo1xn64NMxeIvZGKdVGdSKUUc+YuM= -go.opentelemetry.io/collector/config/configgrpc v0.82.0 h1:taZWDbtVBm0OOcgnfpVA1X43pmU2oNhj39B2uV3COQk= -go.opentelemetry.io/collector/config/configgrpc v0.82.0/go.mod h1:NHXHRI40Q7TT/d38DKT30B7DOrVUkj7anEFOD59R9o8= -go.opentelemetry.io/collector/config/confighttp v0.82.0 h1:2LhyqVTd+Bsr8SgsCq6+q731F81uddK9GwvGhwD/Co0= 
-go.opentelemetry.io/collector/config/confighttp v0.82.0/go.mod h1:OHGx/aJqGJ9z2jaBXvaylwkAuiUwikg1/n+RRDpsfOo= -go.opentelemetry.io/collector/config/confignet v0.82.0 h1:zN9JaFTn7Dth3u5ot6KZJcBZACTEzGqFWYyO5qAlYfo= -go.opentelemetry.io/collector/config/confignet v0.82.0/go.mod h1:unOg7BZvpt6T5xsf+LyeOQvUhD8ld/2AbfOsmUZ/bPM= -go.opentelemetry.io/collector/config/configopaque v0.82.0 h1:0Ma63QTr4AkODzEABZHtgiU5Dig8SItpHOuB28UnVSw= -go.opentelemetry.io/collector/config/configopaque v0.82.0/go.mod h1:pM1oy6gasukw3H6jAvc9Q9OtFaaY2IbfeuwCPAjOgXc= -go.opentelemetry.io/collector/config/configtelemetry v0.82.0 h1:Zln2K4S5gBDcOpBNIzM0cZS5P6cohEYstHngVvIbGBY= -go.opentelemetry.io/collector/config/configtelemetry v0.82.0/go.mod h1:KEYQRiYJdx38iZkvcLKBZWH9fK4NeafxBwGRrRKMgyA= -go.opentelemetry.io/collector/config/configtls v0.82.0 h1:eE/8muTszLlviOGLy5N08BaXLCcYqDW3mKIoKyDDa8o= -go.opentelemetry.io/collector/config/configtls v0.82.0/go.mod h1:unBTmL1bdpkp9mYEDz7N+Ln4yEwh7Ug74I1HgZMplCk= -go.opentelemetry.io/collector/config/internal v0.82.0 h1:JnnDARkXrC3OJDsMfQkBgfI0Np4s+18zvoDqZ4OH0+I= -go.opentelemetry.io/collector/config/internal v0.82.0/go.mod h1:RKcLV1gQxhgwx+6rlPYsvGMq1RZNne3UeOUZkHxJnIg= -go.opentelemetry.io/collector/confmap v0.82.0 h1:s1Rd8jz21DGlLJfED0Py9VaEq2qPWmWwWy5MriDCX+4= -go.opentelemetry.io/collector/confmap v0.82.0/go.mod h1:IS/PoUYHETtxV6+fJammTkCxxa4LEwK2u4Cx/bVCH/s= -go.opentelemetry.io/collector/connector v0.82.0 h1:sCzfcROg0IbmmwoAeLzVfcAs1ZpwlA+UzLzc3xRjOr4= -go.opentelemetry.io/collector/connector v0.82.0/go.mod h1:yXr1degja36+aAdY3qOv66jCXHs5QjiIeoerygLYC44= -go.opentelemetry.io/collector/consumer v0.82.0 h1:vZecylW6bpaphetSTjCLgwXLxSYQ6oe/kzwkx4iF5oE= -go.opentelemetry.io/collector/consumer v0.82.0/go.mod h1:qrhd0i0Gp0RkihcEXb+7Rb584Kal2NmGH1eA4Zg6puA= -go.opentelemetry.io/collector/exporter v0.82.0 h1:BWsx4rWfVwlV+qNuevSMm+2Cv6uGZYYZ9CEFqq0q+F4= -go.opentelemetry.io/collector/exporter v0.82.0/go.mod h1:e3VPpLYVNRaF+G2HuKw6A5hTBMYZ4tgRYYzMusfwFJE= -go.opentelemetry.io/collector/exporter/loggingexporter v0.82.0 h1:HlgFz6qqpjqk9ZmGbaLSdUJxOo6Q3jo3PiJHcuugpaA= -go.opentelemetry.io/collector/exporter/loggingexporter v0.82.0/go.mod h1:jMMN2fKXx+RKDI3tpqIym5HK6uZnJ3X22hyFgK24cK4= -go.opentelemetry.io/collector/exporter/otlpexporter v0.82.0 h1:tYCEUQpfyuS/NgrWg9Ulps6f0ffPSCBRTBdK6sXnSaw= -go.opentelemetry.io/collector/exporter/otlpexporter v0.82.0/go.mod h1:CGeXJuRYxrzTtJUHlpLPHirzcmGq5qbcPff0ec+If14= -go.opentelemetry.io/collector/exporter/otlphttpexporter v0.82.0 h1:GdnfmEgOY3/GHFereYRcfr8RcDTR0vlK9a3Qtyr0jCg= -go.opentelemetry.io/collector/exporter/otlphttpexporter v0.82.0/go.mod h1:1a6is4De7GYERjFOa1K9dPbhRwsip5Zj7jt96taViY8= -go.opentelemetry.io/collector/extension v0.82.0 h1:DH4tqrTOz0HmGDJ6FT/jRD2woQf3ugqC6QqSiQdH3wg= -go.opentelemetry.io/collector/extension v0.82.0/go.mod h1:n7d0XTh7fdyorZWTc+gLpJh78FS7GjRqIjUiW1xdhe0= -go.opentelemetry.io/collector/extension/auth v0.82.0 h1:iaxwFslRj6mfzs1wVzbnj+gDU2G98IeXW4tcrq78p5s= -go.opentelemetry.io/collector/extension/auth v0.82.0/go.mod h1:O1xBcb06pKD8g3FadLDvMa1xKZwPGdHQp4CI8vW3RCM= -go.opentelemetry.io/collector/extension/ballastextension v0.82.0 h1:GiNzI6Z3iX9DQwJ/fI44o3yWDtecfgAgxs5C8kptP0Q= -go.opentelemetry.io/collector/extension/ballastextension v0.82.0/go.mod h1:s15/A21hPRjlXH7EelcHlvW2g7A8tEVfReO2T6Wz+C4= -go.opentelemetry.io/collector/extension/zpagesextension v0.82.0 h1:rZN8OxNy+YBjaDXYGnFoGRPBDruET1lxjVL8hzzgH5k= -go.opentelemetry.io/collector/extension/zpagesextension v0.82.0/go.mod 
h1:mUJk+sX47AdkdASvXu26cK/NXOh+5j+TtEdxJA6K+W4= +go.opentelemetry.io/collector v0.83.0 h1:rKFch1CANepajPwBTvzYj/hKz7RsMyUkPPPNjRCpJ/I= +go.opentelemetry.io/collector v0.83.0/go.mod h1:MNN79VDXXaRP2ZqcDVOfWH0Jl8BbcMttJ3SY/pU6vxo= +go.opentelemetry.io/collector/component v0.83.0 h1:7bMbOHQezVx9RhSLu9KQRBhjXmO+CbOVhBk5uySb0fY= +go.opentelemetry.io/collector/component v0.83.0/go.mod h1:Qy2mIP32UKN1x8rsjJbkgB9obAVu4hRusc1wKNFeV+o= +go.opentelemetry.io/collector/config/configauth v0.83.0 h1:caIkUszP+kTRVx9HW6z7x05CMnzlaBoP2BKyWDIr2ag= +go.opentelemetry.io/collector/config/configauth v0.83.0/go.mod h1:PqSIaQryPWiPVMuxlNPEvTpuvhdRq8ySN9nKlk3YbH4= +go.opentelemetry.io/collector/config/configcompression v0.83.0 h1:WwGfHyGey8JSUsBGUmRHaOzwllrLmsjjo5SZCYfSP14= +go.opentelemetry.io/collector/config/configcompression v0.83.0/go.mod h1:Mi1/3D+qNlRohrVMbBOj6XSHo7YKAKbgWYisNW2Qobc= +go.opentelemetry.io/collector/config/configgrpc v0.83.0 h1:bmX6M/L0+gtBSqAvPGh2cV8c4htNFfxa/9ZT8FreOHE= +go.opentelemetry.io/collector/config/configgrpc v0.83.0/go.mod h1:VpRhSIukmgVjx0HISN5r+y6EYQNGDYLU8j8hVUlcMjc= +go.opentelemetry.io/collector/config/confighttp v0.83.0 h1:yBra00XanzqXL0kLs3Aaas7RLoL50bM/Za8223vwJik= +go.opentelemetry.io/collector/config/confighttp v0.83.0/go.mod h1:Eu2WVZa8cy3F8mlxXIFPgzeAeLnaVc5UZzcEtufrOWs= +go.opentelemetry.io/collector/config/confignet v0.83.0 h1:xaQkMXvId8y7o6ke2qVRZZDqNc315CGkIcZ6LSVxDE0= +go.opentelemetry.io/collector/config/confignet v0.83.0/go.mod h1:I0iJQDhns1GgXBIumB64WHLPMmJpNdDaEDHQnmaaqsU= +go.opentelemetry.io/collector/config/configopaque v0.83.0 h1:nhYguW1zVFnQlaZWhwbXJS4/+WEPdQSEL8kTF/j/zeI= +go.opentelemetry.io/collector/config/configopaque v0.83.0/go.mod h1:Ga1x7xLQXWmwxfW1pPqjI4qT+eNxf9wu2/Mx7O2u01U= +go.opentelemetry.io/collector/config/configtelemetry v0.83.0 h1:Dx+POy68CFsec9JDYd7cxQPULLfSOAG8ma5Jl3ZZ3+Y= +go.opentelemetry.io/collector/config/configtelemetry v0.83.0/go.mod h1:8wZuTKLdcWwdB82Jd07TOHsHKuv8l47T+MUGEsPe4z4= +go.opentelemetry.io/collector/config/configtls v0.83.0 h1:qeAqwvw7qs3fY8wVZzN54E+SNMES7YdATY0ASEbJlUw= +go.opentelemetry.io/collector/config/configtls v0.83.0/go.mod h1:YMf+YSUhPB/LD5pZSyb3wRi7x6vbiMbONXOWFQnJnZ4= +go.opentelemetry.io/collector/config/internal v0.83.0 h1:yQZegCOPl4dWUVkr/fscVFU/AjANT5+Tu5XpKztTTSA= +go.opentelemetry.io/collector/config/internal v0.83.0/go.mod h1:BQs+X52s4BCIshht8qgbT4dqCM5YM2h6RQWln6zWhRA= +go.opentelemetry.io/collector/confmap v0.83.0 h1:eUaiFdhTLkFdNpMi5FLSHSQ6X2FcEHe0KfEUt9ZtVlI= +go.opentelemetry.io/collector/confmap v0.83.0/go.mod h1:ZsmLyJ+4VeO+qz5o1RKadRoY4Db+d8PYwiLCJ3Z5Et8= +go.opentelemetry.io/collector/connector v0.83.0 h1:wUxpqBTjJ9WSgZnosU26ALGYtwPVzEQxQk7w+mKSOs4= +go.opentelemetry.io/collector/connector v0.83.0/go.mod h1:o9xnA7C+2JzYQMSyWIJz/28tc3lGs3JkKLpckpAdzB8= +go.opentelemetry.io/collector/consumer v0.83.0 h1:8wg0UfFxxaGYsTkQGWuf1pE7C/dTvPkkYmBtR6N5BKc= +go.opentelemetry.io/collector/consumer v0.83.0/go.mod h1:YLbmTqvgIOYUlEeWun8wQ4RZ0HaYjsABWKw7nwU9F3c= +go.opentelemetry.io/collector/exporter v0.83.0 h1:1MPrMaCFvEvl291pAE0hTgPb7YybjSak9O5akzXqnXs= +go.opentelemetry.io/collector/exporter v0.83.0/go.mod h1:5XIrrkfRI7Ndt5FnH0CC6It0VxTHRviGv/I350EWGBs= +go.opentelemetry.io/collector/exporter/loggingexporter v0.83.0 h1:1k0zCEqUfNhWYw8X9zuQ4LNU4o5qwG6f1J3+P8lpe+E= +go.opentelemetry.io/collector/exporter/loggingexporter v0.83.0/go.mod h1:/Sa1r32rwJpBRHSzWclacQlyr6BG/uRuaXqi/CmPvb0= +go.opentelemetry.io/collector/exporter/otlpexporter v0.83.0 h1:k5bJVlXJCJGraslJtOcQPELbRE3gB5MCzzvYurp5aF4= 
+go.opentelemetry.io/collector/exporter/otlpexporter v0.83.0/go.mod h1:MIGlrd6rhbfsRUgFqGfu7xWfBlG72ZFNGUj2ZR53LGE= +go.opentelemetry.io/collector/exporter/otlphttpexporter v0.83.0 h1:5JeQ6JKiZiRlrcjw4LkzpTkdb3wOflvzYj1kbmr1h+I= +go.opentelemetry.io/collector/exporter/otlphttpexporter v0.83.0/go.mod h1:twNJ2isyvMaDZ7K3OeBtwOHW95uYQ5ylpgMbgyJqhks= +go.opentelemetry.io/collector/extension v0.83.0 h1:O47qpJTeav6jATvnIUvUrO5KBMqa6ySMA5i+7XXW7GY= +go.opentelemetry.io/collector/extension v0.83.0/go.mod h1:gPfwNimQiscUpaUGC/pUniTn4b5O+8IxHVKHDUkGqSI= +go.opentelemetry.io/collector/extension/auth v0.83.0 h1:H0orp7a7/NZae4/ymnC5JpuvO6GNcGLNz+nEDAw9ciU= +go.opentelemetry.io/collector/extension/auth v0.83.0/go.mod h1:Br0OyLU0p+2xS0UvQRvgWmH0Kv/4kPkNVr9AMzee5GM= +go.opentelemetry.io/collector/extension/ballastextension v0.83.0 h1:t0ITNPF7JAXa3+PA4INN6sORIYYgleP84ufPV+yceyU= +go.opentelemetry.io/collector/extension/ballastextension v0.83.0/go.mod h1:ZcsZT3S2EcM8DXz1R5tSVNL9AZmoxpbB65itsrWblhU= +go.opentelemetry.io/collector/extension/zpagesextension v0.83.0 h1:a2Avt+yjaJbHPbiST3I/4GgfxrB3iEpTtgIEGermItw= +go.opentelemetry.io/collector/extension/zpagesextension v0.83.0/go.mod h1:oJBN3N0uusy36qMo7FeWCUv2F9S6JCYw1omvtMtUQ4o= go.opentelemetry.io/collector/featuregate v1.0.0-rcv0014 h1:C9o0mbP0MyygqFnKueVQK/v9jef6zvuttmTGlKaqhgw= go.opentelemetry.io/collector/featuregate v1.0.0-rcv0014/go.mod h1:0mE3mDLmUrOXVoNsuvj+7dV14h/9HFl/Fy9YTLoLObo= go.opentelemetry.io/collector/model v0.49.0/go.mod h1:nOYQv9KoFPs6ihJwOi24qB209EOhS9HkwhGj54YiEAw= @@ -1973,22 +1970,22 @@ go.opentelemetry.io/collector/pdata v0.56.0/go.mod h1:mYcCREWiIJyHss0dbU+GSiz2tm go.opentelemetry.io/collector/pdata v0.57.2/go.mod h1:RU9I8lwBUxucwOsSYzHEcHi15M9QaX78hgQ2PRdSxV0= go.opentelemetry.io/collector/pdata v1.0.0-rcv0014 h1:iT5qH0NLmkGeIdDtnBogYDx7L58t6CaWGL378DEo2QY= go.opentelemetry.io/collector/pdata v1.0.0-rcv0014/go.mod h1:BRvDrx43kiSoUx3mr7SoA7h9B8+OY99mUK+CZSQFWW4= -go.opentelemetry.io/collector/processor v0.82.0 h1:DoqVrrnGYThu/h1sOr6E0hR1Fj5nQT4VT0ptFZcltRk= -go.opentelemetry.io/collector/processor v0.82.0/go.mod h1:B0MtfLWCYNBJ+PXf9k77M2Yn08MKItNB2vuvwhqrtt0= -go.opentelemetry.io/collector/processor/batchprocessor v0.82.0 h1:cUS+9wkzgp5+kgYB7ppSW1HRT+L5fzo3Wmjcm0W6Fho= -go.opentelemetry.io/collector/processor/batchprocessor v0.82.0/go.mod h1:q/+ywtFMrB3yTSSfxw/rpEq07CcgpQeQoROJdi9JOm8= -go.opentelemetry.io/collector/processor/memorylimiterprocessor v0.82.0 h1:ACdNV8fO2LM1yw1gBIXN5ybydxZHqAHomkEf1WljPyc= -go.opentelemetry.io/collector/processor/memorylimiterprocessor v0.82.0/go.mod h1:LbeXquV0D0yi+qIohuxSAvp4LBaJbIer9ZCP9+bGBtU= -go.opentelemetry.io/collector/receiver v0.82.0 h1:bc6jc8jmSgc0/C9zqTqqWOGJFVx0AJ53jiToSmQs2SE= -go.opentelemetry.io/collector/receiver v0.82.0/go.mod h1:Uh6BgcTmmrA1Bm/GpKGRY6WwQyPio4yEDsYkUo0A5Gk= -go.opentelemetry.io/collector/receiver/otlpreceiver v0.82.0 h1:LzcmQ9d7NauTVEWfPNwRwqNd/NBQDi+JU0OHWearcEA= -go.opentelemetry.io/collector/receiver/otlpreceiver v0.82.0/go.mod h1:Qt9Ha/yWaU6ni0XwFslNCBX5zZBQHcnxma/sU1s7LH4= +go.opentelemetry.io/collector/processor v0.83.0 h1:oWMpPzHLkzlPXRIa27UsfsaDSbXaF/0qeiCn3BaesGo= +go.opentelemetry.io/collector/processor v0.83.0/go.mod h1:sLxTTqkIhmNtekO0HebXgVclPpm/xoQ4+g8CbzgYBCM= +go.opentelemetry.io/collector/processor/batchprocessor v0.83.0 h1:Zj4VKcO+NPXEONd0pr6y94nbJdJr/I2VLNxCYcfH0Go= +go.opentelemetry.io/collector/processor/batchprocessor v0.83.0/go.mod h1:ZA8h5ZJYFzcRqp33+I/M81RZjnnLWrtQ9Q/I5lVBlLs= +go.opentelemetry.io/collector/processor/memorylimiterprocessor 
v0.83.0 h1:OZPN7wOunbPnMeoCDPkoseUamtuG8CjiY+hhmD+yU7w= +go.opentelemetry.io/collector/processor/memorylimiterprocessor v0.83.0/go.mod h1:8DM+L0qvJudvIxSwd7wiVMZpiipYJgX5GnS9Zq7hZzQ= +go.opentelemetry.io/collector/receiver v0.83.0 h1:T2LI6BGNGMGBN8DLWUy7KyFXVaQR8ah+7ssCwb8OqNs= +go.opentelemetry.io/collector/receiver v0.83.0/go.mod h1:yEo8Mv57a53Psd2BvUbP/he5ZtdrwHezeLUCTUtf6PA= +go.opentelemetry.io/collector/receiver/otlpreceiver v0.83.0 h1:A0xNr1N/d5jkO+42G9CQ7C69UZhcTsnAibo1FzRA/PA= +go.opentelemetry.io/collector/receiver/otlpreceiver v0.83.0/go.mod h1:h+WL9WneDgX4uHVBdt/yVINSB/NUzVTOqukoBF1F7uc= go.opentelemetry.io/collector/semconv v0.52.0/go.mod h1:SxK0rUnUP7YeDakexzbE/vhimTOHwE6m/4aKKd9e27Q= go.opentelemetry.io/collector/semconv v0.54.0/go.mod h1:HAGkPKNMhc4kEHevEqVIEtUuvsRQMIbUWBb8yBrqEwk= go.opentelemetry.io/collector/semconv v0.56.0/go.mod h1:EH1wbDvTyqKpKBBpoMIe0KQk2plCcFS66Mo17WtR7CQ= go.opentelemetry.io/collector/semconv v0.57.2/go.mod h1:84YnUjmm+nhGu4YTDLnHCbxnL74ooWpismPG79tFD7w= -go.opentelemetry.io/collector/semconv v0.82.0 h1:WUeT2a+uZjI6kLvwcBaJnGvo7KSQ/9dIFRcxOQdXucc= -go.opentelemetry.io/collector/semconv v0.82.0/go.mod h1:TlYPtzvsXyHOgr5eATi43qEMqwSmIziivJB2uctKswo= +go.opentelemetry.io/collector/semconv v0.83.0 h1:zfBJaGiC7XI8dLD/8QIyKre98RHcq3DaG1g1B+U/Dow= +go.opentelemetry.io/collector/semconv v0.83.0/go.mod h1:j/8THcqVxFna1FpvA2zYIsUperEtOaRaqoLYIN4doWw= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.32.0/go.mod h1:J0dBVrt7dPS/lKJyQoW0xzQiUr4r2Ik1VwPjAUWnofI= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.33.0/go.mod h1:y/SlJpJQPd2UzfBCj0E9Flk9FDCtTyqUmaCB41qFrWI= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.42.1-0.20230612162650-64be7e574a17 h1:mdcNStUIXngF/mH3xxAo4nbR4g65IXqLL1SvYMjz7JQ= @@ -2108,13 +2105,13 @@ golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.0.0-20210915214749-c084706c2272/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20211117183948-ae814b36b871/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220214200702-86341886e292/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220314234659-1baeb1ce4c0b/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= golang.org/x/crypto v0.12.0 h1:tFM/ta59kqch6LlvYnPa0yx5a83cL2nHflFhYKvv9Yk= golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -2208,7 +2205,6 @@ golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/ golang.org/x/net 
v0.0.0-20200602114024-627f9648deb9/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= @@ -2240,9 +2236,9 @@ golang.org/x/net v0.0.0-20220520000938-2e3eb7b945c2/go.mod h1:CfG3xpIq0wQ8r1q4Su golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.0.0-20220725212005-46097bf591d3/go.mod h1:AaygXjzTFtRAg2ttMY5RMuhpJ3cNnI0XpyFJD1iQRSM= golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= -golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= golang.org/x/net v0.3.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.14.0 h1:BONx9s002vGdD9umnlX1Po8vOZmrgH34qlHcD1MfK14= golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -2265,8 +2261,8 @@ golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= -golang.org/x/oauth2 v0.10.0 h1:zHCpF2Khkwy4mMB4bv0U37YtJdTGW8jI0glAApi0Kh8= -golang.org/x/oauth2 v0.10.0/go.mod h1:kTpgurOux7LqtuxjuyZa4Gj2gdezIt/jQtGnNFfypQI= +golang.org/x/oauth2 v0.11.0 h1:vPL4xzxBM4niKCW6g9whtaWVXTJf1U5e4aZxxFx/gbU= +golang.org/x/oauth2 v0.11.0/go.mod h1:LdF7O/8bLR/qWK9DrpXmbHLTouvRHK0SgJl0GmDBchk= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -2350,7 +2346,6 @@ golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20201024232916-9f70ab9862d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -2412,7 +2407,8 @@ golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= @@ -2420,8 +2416,8 @@ golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9sn golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.11.0 h1:F9tnn/DA/Im8nCwm+fX+1/eBwi4qFjRT++MhtVC4ZX0= golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -2437,6 +2433,7 @@ golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.12.0 h1:k+n5B8goJNdU7hSvEtMUz3d1Q6D/XW4COJSJR6fN0mc= golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -2558,7 +2555,6 @@ golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k= golang.org/x/tools v0.4.0/go.mod h1:UE5sM2OK9E/d67R0ANs2xJizIymRP5gJU295PvKXxjQ= golang.org/x/tools v0.12.0 h1:YW6HUoUmYBpwSgyaGaZq1fHjrBjX1rlpZ54T6mu2kss= golang.org/x/tools v0.12.0/go.mod h1:Sc0INKfu04TlqNoRA1hgpFZbhYXHPr4V5DzpSBTPqQM= @@ -2614,8 +2610,8 @@ google.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRR google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw= google.golang.org/api v0.81.0/go.mod h1:FA6Mb/bZxj706H2j+j2d6mHEEaHBmbbWnkfvmorOCko= 
-google.golang.org/api v0.134.0 h1:ktL4Goua+UBgoP1eL1/60LwZJqa1sIzkLmvoR3hR6Gw= -google.golang.org/api v0.134.0/go.mod h1:sjRL3UnjTx5UqNQS9EWr9N8p7xbHpy1k0XGRLCf3Spk= +google.golang.org/api v0.136.0 h1:e/6enzUE1s4tGPa6Q3ZYShKTtvRc+1Jq0rrafhppmOs= +google.golang.org/api v0.136.0/go.mod h1:XtJfF+V2zgUxelOn5Zs3kECtluMxneJG8ZxUTlLNTPA= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -2711,12 +2707,12 @@ google.golang.org/genproto v0.0.0-20220429170224-98d788798c3e/go.mod h1:8w6bsBMX google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= google.golang.org/genproto v0.0.0-20220519153652-3a47de7e79bd/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= -google.golang.org/genproto v0.0.0-20230706204954-ccb25ca9f130 h1:Au6te5hbKUV8pIYWHqOUZ1pva5qK/rwbIhoXEUB9Lu8= -google.golang.org/genproto v0.0.0-20230706204954-ccb25ca9f130/go.mod h1:O9kGHb51iE/nOGvQaDUuadVYqovW56s5emA88lQnj6Y= -google.golang.org/genproto/googleapis/api v0.0.0-20230706204954-ccb25ca9f130 h1:XVeBY8d/FaK4848myy41HBqnDwvxeV3zMZhwN1TvAMU= -google.golang.org/genproto/googleapis/api v0.0.0-20230706204954-ccb25ca9f130/go.mod h1:mPBs5jNgx2GuQGvFwUvVKqtn6HsUw9nP64BedgvqEsQ= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230720185612-659f7aaaa771 h1:Z8qdAF9GFsmcUuWQ5KVYIpP3PCKydn/YKORnghIalu4= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230720185612-659f7aaaa771/go.mod h1:TUfxEVdsvPg18p6AslUXFoLdpED4oBnGwyqk3dV1XzM= +google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5 h1:L6iMMGrtzgHsWofoFcihmDEMYeDR9KN/ThbPWGrh++g= +google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5/go.mod h1:oH/ZOT02u4kWEp7oYBGYFFkCdKS/uYR9Z7+0/xuuFp8= +google.golang.org/genproto/googleapis/api v0.0.0-20230803162519-f966b187b2e5 h1:nIgk/EEq3/YlnmVVXVnm14rC2oxgs1o0ong4sD/rd44= +google.golang.org/genproto/googleapis/api v0.0.0-20230803162519-f966b187b2e5/go.mod h1:5DZzOUPCLYL3mNkQ0ms0F3EuUNZ7py1Bqeq6sxzI7/Q= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230807174057-1744710a1577 h1:wukfNtZmZUurLN/atp2hiIeTKn7QJWIQdHzqmsOnAOk= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230807174057-1744710a1577/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M= google.golang.org/grpc v1.8.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= @@ -2878,8 +2874,8 @@ rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8 rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/controller-runtime v0.15.0 h1:ML+5Adt3qZnMSYxZ7gAverBLNPSMQEibtzAgp0UPojU= -sigs.k8s.io/controller-runtime v0.15.0/go.mod h1:7ngYvp1MLT+9GeZ+6lH3LOlcHkp/+tzA/fmHa4iq9kk= +sigs.k8s.io/controller-runtime v0.15.1 h1:9UvgKD4ZJGcj24vefUFgZFP3xej/3igL9BsOUTb/+4c= +sigs.k8s.io/controller-runtime v0.15.1/go.mod h1:7ngYvp1MLT+9GeZ+6lH3LOlcHkp/+tzA/fmHa4iq9kk= sigs.k8s.io/json 
v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= From e0b1456f06445c0a9f1fab07e16bfe1c11b13625 Mon Sep 17 00:00:00 2001 From: Bryan Aguilar Date: Tue, 15 Aug 2023 08:52:57 -0700 Subject: [PATCH 2/8] Add script to update versions --- Makefile | 4 ++++ Makefile.Common | 5 +++++ 2 files changed, 9 insertions(+) diff --git a/Makefile b/Makefile index 5f7f0f4b6..b427fab01 100644 --- a/Makefile +++ b/Makefile @@ -199,6 +199,10 @@ golint: lint-static-check gomod-tidy: @$(MAKE) for-all-target TARGET="mod-tidy" +.PHONY: gomod-update-collector +gomod-update-collector: + @$(MAKE) for-all-target TARGET="update-collector-ver" + .PHONY: gomod-vendor gomod-vendor: diff --git a/Makefile.Common b/Makefile.Common index a8757f5f7..fd08f563c 100644 --- a/Makefile.Common +++ b/Makefile.Common @@ -32,3 +32,8 @@ lint: .PHONY: mod-tidy mod-tidy: go mod tidy + +.PHONY: update-collector-ver +update-collector-ver: + awk -v CORE="$(CORE_VER)" -v CONTRIB="$(CONTRIB_VER)" '/go.opentelemetry.io\/collector/ {sub("v[0-9]+\\.[0-9]+(\\.[0-9]+)?", CORE)} /github.com\/open-telemetry\/opentelemetry-collector-contrib/ {sub("v[0-9]+\\.[0-9]+(\\.[0-9]+)?", CONTRIB)} 1' \ + go.mod > go.mod.tmp && mv go.mod.tmp go.mod From 3c946b11c8f26eff0196fbb48a6ebf192d5224ed Mon Sep 17 00:00:00 2001 From: Bryan Aguilar Date: Tue, 15 Aug 2023 08:53:14 -0700 Subject: [PATCH 3/8] run vendor --- .../go/compute/internal/version.go | 2 +- .../Azure/go-autorest/autorest/adal/token.go | 42 +- .../DataDog/datadog-agent/pkg/proto/LICENSE | 200 ++ .../pkg/proto/pbgo/trace/agent_payload.pb.go | 240 +++ .../pkg/proto/pbgo/trace/agent_payload_gen.go | 200 ++ .../pbgo/trace/agent_payload_vtproto.pb.go | 523 +++++ .../pkg/proto/pbgo/trace/decoder_bytes.go | 274 +++ .../pkg/proto/pbgo/trace/decoder_v05.go | 220 ++ .../pkg/proto/pbgo/trace/hook.go | 33 + .../pkg/proto/pbgo/trace/span.pb.go | 307 +++ .../pkg/proto/pbgo/trace/span_gen.go | 361 ++++ .../pkg/proto/pbgo/trace/span_utils.go | 51 + .../pkg/proto/pbgo/trace/span_vtproto.pb.go | 994 +++++++++ .../pkg/proto/pbgo/trace/stats.pb.go | 677 ++++++ .../pkg/proto/pbgo/trace/stats_gen.go | 1591 +++++++++++++++ .../pkg/proto/pbgo/trace/stats_vtproto.pb.go | 1814 +++++++++++++++++ .../pkg/proto/pbgo/trace/trace.go | 52 + .../pkg/proto/pbgo/trace/trace_gen.go | 158 ++ .../pkg/proto/pbgo/trace/tracer_payload.pb.go | 391 ++++ .../proto/pbgo/trace/tracer_payload_gen.go | 384 ++++ .../proto/pbgo/trace/tracer_payload_utils.go | 35 + .../pbgo/trace/tracer_payload_vtproto.pb.go | 1066 ++++++++++ .../pkg/remoteconfig/state/products.go | 3 + .../pkg/remoteconfig/state/tuf.go | 2 +- .../DataDog/datadog-agent/pkg/util/log/log.go | 74 +- .../DataDog/go-tuf/client/client.go | 263 +-- .../DataDog/go-tuf/client/delegations.go | 53 +- .../DataDog/go-tuf/client/file_store.go | 90 + .../github.com/DataDog/go-tuf/data/types.go | 94 +- .../DataDog/go-tuf/internal/roles/roles.go | 7 + .../go-tuf/pkg/keys/deprecated_ecdsa.go | 101 + .../DataDog/go-tuf/pkg/keys/ecdsa.go | 160 +- .../DataDog/go-tuf/pkg/keys/ed25519.go | 88 +- .../DataDog/go-tuf/pkg/keys/keys.go | 3 + .../DataDog/go-tuf/pkg/keys/pkix.go | 56 + .../github.com/DataDog/go-tuf/pkg/keys/rsa.go | 118 +- .../DataDog/go-tuf/pkg/targets/delegation.go | 9 +- vendor/github.com/DataDog/go-tuf/util/util.go | 51 +- 
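A note on the update recipe added to Makefile.Common above, since the awk one-liner is dense: it matches each go.mod line against the core (go.opentelemetry.io/collector) and contrib (github.com/open-telemetry/opentelemetry-collector-contrib) module paths and substitutes the first version-shaped token on matching lines, writing through a temp file so go.mod is only replaced if awk succeeds. A minimal invocation might look like the following, with CORE_VER and CONTRIB_VER passed on the make command line (the variable names come from the recipe itself; the version values are illustrative):

    make gomod-update-collector CORE_VER=v0.83.0 CONTRIB_VER=v0.83.0
    make gomod-tidy
    make gomod-vendor

One caveat: awk's sub() replaces only the matched numeric core, so any pre-release suffix survives the substitution. If go.mod pins a module at a v1.x pre-release (go.sum above shows go.opentelemetry.io/collector/featuregate and pdata at v1.0.0-rcv0014), the values above would yield v0.83.0-rcv0014, which may need manual correction after the script runs. Re-running the gomod-tidy target then regenerates go.sum to match, and gomod-vendor refreshes the vendored sources; the vendor churn in [PATCH 3/8] is the mechanical result of that step.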
vendor/github.com/DataDog/go-tuf/verify/db.go | 21 +- .../DataDog/go-tuf/verify/errors.go | 8 +- .../DataDog/go-tuf/verify/verify.go | 61 +- .../pkg/otlp/logs/logs_translator.go | 17 +- .../pkg/otlp/metrics/consumer.go | 4 +- .../pkg/otlp/metrics/metrics_translator.go | 169 +- .../otlp/metrics/runtime_metric_mappings.go | 2 +- .../pkg/otlp/metrics/statspayload.go | 28 +- .../pkg/quantile/agent.go | 2 +- .../{Shopify => IBM}/sarama/.gitignore | 0 .../{Shopify => IBM}/sarama/.golangci.yml | 15 +- vendor/github.com/IBM/sarama/CHANGELOG.md | 1511 ++++++++++++++ vendor/github.com/IBM/sarama/CONTRIBUTING.md | 46 + .../{Shopify => IBM}/sarama/Dockerfile.kafka | 5 +- .../sarama/LICENSE => IBM/sarama/LICENSE.md} | 3 + .../{Shopify => IBM}/sarama/Makefile | 0 .../{Shopify => IBM}/sarama/README.md | 13 +- .../{Shopify => IBM}/sarama/Vagrantfile | 0 .../{Shopify => IBM}/sarama/acl_bindings.go | 0 .../sarama/acl_create_request.go | 0 .../sarama/acl_create_response.go | 0 .../sarama/acl_delete_request.go | 0 .../sarama/acl_delete_response.go | 0 .../sarama/acl_describe_request.go | 2 +- .../sarama/acl_describe_response.go | 0 .../{Shopify => IBM}/sarama/acl_filter.go | 0 .../{Shopify => IBM}/sarama/acl_types.go | 8 +- .../sarama/add_offsets_to_txn_request.go | 0 .../sarama/add_offsets_to_txn_response.go | 0 .../sarama/add_partitions_to_txn_request.go | 2 +- .../sarama/add_partitions_to_txn_response.go | 0 .../{Shopify => IBM}/sarama/admin.go | 72 +- .../sarama/alter_client_quotas_request.go | 0 .../sarama/alter_client_quotas_response.go | 0 .../sarama/alter_configs_request.go | 0 .../sarama/alter_configs_response.go | 0 .../alter_partition_reassignments_request.go | 0 .../alter_partition_reassignments_response.go | 0 .../alter_user_scram_credentials_request.go | 0 .../alter_user_scram_credentials_response.go | 0 .../sarama/api_versions_request.go | 0 .../sarama/api_versions_response.go | 0 .../{Shopify => IBM}/sarama/async_producer.go | 2 +- .../sarama/balance_strategy.go | 85 +- .../{Shopify => IBM}/sarama/broker.go | 6 +- .../{Shopify => IBM}/sarama/client.go | 23 +- .../{Shopify => IBM}/sarama/compress.go | 0 .../{Shopify => IBM}/sarama/config.go | 23 +- .../sarama/config_resource_type.go | 0 .../{Shopify => IBM}/sarama/consumer.go | 4 +- .../{Shopify => IBM}/sarama/consumer_group.go | 7 +- .../sarama/consumer_group_members.go | 0 .../sarama/consumer_metadata_request.go | 0 .../sarama/consumer_metadata_response.go | 0 .../{Shopify => IBM}/sarama/control_record.go | 0 .../{Shopify => IBM}/sarama/crc32_field.go | 0 .../sarama/create_partitions_request.go | 0 .../sarama/create_partitions_response.go | 0 .../sarama/create_topics_request.go | 0 .../sarama/create_topics_response.go | 0 vendor/github.com/IBM/sarama/decompress.go | 98 + .../sarama/delete_groups_request.go | 0 .../sarama/delete_groups_response.go | 0 .../sarama/delete_offsets_request.go | 0 .../sarama/delete_offsets_response.go | 0 .../sarama/delete_records_request.go | 0 .../sarama/delete_records_response.go | 0 .../sarama/delete_topics_request.go | 0 .../sarama/delete_topics_response.go | 0 .../sarama/describe_client_quotas_request.go | 0 .../sarama/describe_client_quotas_response.go | 0 .../sarama/describe_configs_request.go | 0 .../sarama/describe_configs_response.go | 0 .../sarama/describe_groups_request.go | 8 +- .../sarama/describe_groups_response.go | 8 +- .../sarama/describe_log_dirs_request.go | 0 .../sarama/describe_log_dirs_response.go | 0 ...describe_user_scram_credentials_request.go | 0 
...escribe_user_scram_credentials_response.go | 0 .../{Shopify => IBM}/sarama/dev.yml | 0 .../sarama/docker-compose.yml | 10 +- .../sarama/encoder_decoder.go | 0 .../sarama/end_txn_request.go | 0 .../sarama/end_txn_response.go | 0 .../{Shopify => IBM}/sarama/entrypoint.sh | 2 +- .../{Shopify => IBM}/sarama/errors.go | 4 +- .../{Shopify => IBM}/sarama/fetch_request.go | 0 .../{Shopify => IBM}/sarama/fetch_response.go | 0 .../sarama/find_coordinator_request.go | 0 .../sarama/find_coordinator_response.go | 0 .../sarama/gssapi_kerberos.go | 2 + .../sarama/heartbeat_request.go | 0 .../sarama/heartbeat_response.go | 0 .../incremental_alter_configs_request.go | 0 .../incremental_alter_configs_response.go | 0 .../sarama/init_producer_id_request.go | 0 .../sarama/init_producer_id_response.go | 0 .../{Shopify => IBM}/sarama/interceptors.go | 0 .../sarama/join_group_request.go | 0 .../sarama/join_group_response.go | 0 .../sarama/kerberos_client.go | 15 +- .../sarama/leave_group_request.go | 0 .../sarama/leave_group_response.go | 0 .../{Shopify => IBM}/sarama/length_field.go | 0 .../sarama/list_groups_request.go | 0 .../sarama/list_groups_response.go | 0 .../list_partition_reassignments_request.go | 0 .../list_partition_reassignments_response.go | 0 .../{Shopify => IBM}/sarama/message.go | 0 .../{Shopify => IBM}/sarama/message_set.go | 0 .../sarama/metadata_request.go | 0 .../sarama/metadata_response.go | 0 .../{Shopify => IBM}/sarama/metrics.go | 0 .../{Shopify => IBM}/sarama/mockbroker.go | 0 .../{Shopify => IBM}/sarama/mockkerberos.go | 0 .../{Shopify => IBM}/sarama/mockresponses.go | 0 .../sarama/offset_commit_request.go | 6 +- .../sarama/offset_commit_response.go | 0 .../sarama/offset_fetch_request.go | 0 .../sarama/offset_fetch_response.go | 0 .../{Shopify => IBM}/sarama/offset_manager.go | 6 +- .../{Shopify => IBM}/sarama/offset_request.go | 0 .../sarama/offset_response.go | 0 .../{Shopify => IBM}/sarama/packet_decoder.go | 2 +- .../{Shopify => IBM}/sarama/packet_encoder.go | 0 .../{Shopify => IBM}/sarama/partitioner.go | 0 .../{Shopify => IBM}/sarama/prep_encoder.go | 0 .../sarama/produce_request.go | 0 .../sarama/produce_response.go | 0 .../{Shopify => IBM}/sarama/produce_set.go | 0 .../{Shopify => IBM}/sarama/quota_types.go | 0 .../{Shopify => IBM}/sarama/real_decoder.go | 0 .../{Shopify => IBM}/sarama/real_encoder.go | 0 .../{Shopify => IBM}/sarama/record.go | 0 .../{Shopify => IBM}/sarama/record_batch.go | 0 .../{Shopify => IBM}/sarama/records.go | 0 .../{Shopify => IBM}/sarama/request.go | 0 .../sarama/response_header.go | 0 .../{Shopify => IBM}/sarama/sarama.go | 2 +- .../sarama/sasl_authenticate_request.go | 0 .../sarama/sasl_authenticate_response.go | 0 .../sarama/sasl_handshake_request.go | 0 .../sarama/sasl_handshake_response.go | 0 .../sarama/scram_formatter.go | 0 .../sarama/sticky_assignor_user_data.go | 0 .../sarama/sync_group_request.go | 0 .../sarama/sync_group_response.go | 0 .../{Shopify => IBM}/sarama/sync_producer.go | 2 +- .../{Shopify => IBM}/sarama/timestamp.go | 0 .../sarama/transaction_manager.go | 10 +- .../sarama/txn_offset_commit_request.go | 0 .../sarama/txn_offset_commit_response.go | 0 .../{Shopify => IBM}/sarama/utils.go | 6 +- .../{Shopify => IBM}/sarama/version.go | 0 .../{Shopify => IBM}/sarama/zstd.go | 0 vendor/github.com/Shopify/sarama/CHANGELOG.md | 1187 ----------- .../github.com/Shopify/sarama/decompress.go | 61 - vendor/github.com/antonmedv/expr/ast/node.go | 4 +- .../antonmedv/expr/checker/checker.go | 38 +- .../antonmedv/expr/checker/types.go | 12 +- 
.../antonmedv/expr/compiler/compiler.go | 76 +- .../antonmedv/expr/conf/types_table.go | 14 +- vendor/github.com/antonmedv/expr/expr.go | 13 +- .../github.com/antonmedv/expr/file/error.go | 4 +- .../github.com/antonmedv/expr/vm/opcodes.go | 4 +- .../github.com/antonmedv/expr/vm/program.go | 8 +- .../antonmedv/expr/vm/runtime/runtime.go | 4 +- vendor/github.com/antonmedv/expr/vm/vm.go | 6 + .../aws/aws-sdk-go/aws/endpoints/defaults.go | 97 + .../github.com/aws/aws-sdk-go/aws/version.go | 2 +- .../aws/aws-sdk-go/service/ec2/api.go | 60 + .../github.com/digitalocean/godo/CHANGELOG.md | 5 + .../github.com/digitalocean/godo/apps.gen.go | 65 +- .../digitalocean/godo/apps_accessors.go | 176 ++ vendor/github.com/digitalocean/godo/godo.go | 4 +- .../gophercloud/gophercloud/CHANGELOG.md | 31 + .../gophercloud/gophercloud/README.md | 119 +- .../gophercloud/gophercloud/RELEASE.md | 79 + .../gophercloud/openstack/auth_env.go | 9 + .../gophercloud/openstack/client.go | 2 +- .../v2/extensions/floatingips/results.go | 4 + .../v2/extensions/hypervisors/results.go | 4 + .../openstack/compute/v2/servers/doc.go | 20 + .../openstack/compute/v2/servers/requests.go | 17 +- .../openstack/compute/v2/servers/results.go | 12 + .../openstack/identity/v2/tenants/results.go | 4 + .../identity/v3/extensions/oauth1/results.go | 12 + .../gophercloud/pagination/pager.go | 3 + .../gophercloud/provider_client.go | 2 +- .../grpc-gateway/v2/{LICENSE.txt => LICENSE} | 0 .../grpc-gateway/v2/runtime/BUILD.bazel | 8 +- .../v2/runtime/marshal_httpbodyproto.go | 2 +- .../github.com/hashicorp/consul/api/agent.go | 6 + vendor/github.com/hashicorp/consul/api/api.go | 6 +- vendor/github.com/hashicorp/nomad/api/acl.go | 194 +- .../github.com/hashicorp/nomad/api/agent.go | 3 + .../hashicorp/nomad/api/allocations.go | 9 +- .../hashicorp/nomad/api/allocations_exec.go | 3 + vendor/github.com/hashicorp/nomad/api/api.go | 33 +- .../hashicorp/nomad/api/constraint.go | 3 + .../github.com/hashicorp/nomad/api/consul.go | 16 +- .../hashicorp/nomad/api/contexts/contexts.go | 3 + vendor/github.com/hashicorp/nomad/api/csi.go | 3 + .../hashicorp/nomad/api/deployments.go | 3 + .../hashicorp/nomad/api/evaluations.go | 3 + .../hashicorp/nomad/api/event_stream.go | 3 + vendor/github.com/hashicorp/nomad/api/fs.go | 11 + .../github.com/hashicorp/nomad/api/ioutil.go | 3 + vendor/github.com/hashicorp/nomad/api/jobs.go | 91 +- .../github.com/hashicorp/nomad/api/keyring.go | 3 + .../hashicorp/nomad/api/namespace.go | 3 + .../hashicorp/nomad/api/node_meta.go | 7 +- .../github.com/hashicorp/nomad/api/nodes.go | 3 + .../hashicorp/nomad/api/operator.go | 3 + .../hashicorp/nomad/api/operator_autopilot.go | 3 + .../hashicorp/nomad/api/operator_metrics.go | 3 + .../github.com/hashicorp/nomad/api/quota.go | 3 + vendor/github.com/hashicorp/nomad/api/raw.go | 3 + .../hashicorp/nomad/api/recommendations.go | 3 + .../github.com/hashicorp/nomad/api/regions.go | 3 + .../hashicorp/nomad/api/resources.go | 3 + .../github.com/hashicorp/nomad/api/scaling.go | 3 + .../github.com/hashicorp/nomad/api/search.go | 3 + .../hashicorp/nomad/api/sentinel.go | 3 + .../hashicorp/nomad/api/services.go | 3 + .../github.com/hashicorp/nomad/api/status.go | 3 + .../github.com/hashicorp/nomad/api/system.go | 3 + .../github.com/hashicorp/nomad/api/tasks.go | 9 + .../github.com/hashicorp/nomad/api/utils.go | 3 + .../hashicorp/nomad/api/variables.go | 3 + .../hcloud-go/hcloud/architecture.go | 12 + .../hetznercloud/hcloud-go/hcloud/hcloud.go | 2 +- .../hetznercloud/hcloud-go/hcloud/image.go | 42 +- 
.../hetznercloud/hcloud-go/hcloud/iso.go | 23 +- .../hetznercloud/hcloud-go/hcloud/schema.go | 40 +- .../hcloud-go/hcloud/schema/image.go | 35 +- .../hcloud-go/hcloud/schema/iso.go | 11 +- .../hcloud-go/hcloud/schema/server_type.go | 19 +- .../hcloud-go/hcloud/server_type.go | 19 +- .../ionos-cloud/sdk-go/v6/README.md | 203 +- .../github.com/ionos-cloud/sdk-go/v6/api_.go | 4 +- .../v6/api_application_load_balancers.go | 80 +- .../sdk-go/v6/api_contract_resources.go | 4 +- .../ionos-cloud/sdk-go/v6/api_data_centers.go | 12 +- .../sdk-go/v6/api_firewall_rules.go | 8 +- .../ionos-cloud/sdk-go/v6/api_flow_logs.go | 4 +- .../ionos-cloud/sdk-go/v6/api_images.go | 4 +- .../ionos-cloud/sdk-go/v6/api_ip_blocks.go | 8 +- .../ionos-cloud/sdk-go/v6/api_kubernetes.go | 72 +- .../ionos-cloud/sdk-go/v6/api_labels.go | 36 +- .../ionos-cloud/sdk-go/v6/api_lans.go | 175 +- .../sdk-go/v6/api_load_balancers.go | 10 +- .../ionos-cloud/sdk-go/v6/api_locations.go | 26 +- .../ionos-cloud/sdk-go/v6/api_nat_gateways.go | 18 +- .../sdk-go/v6/api_network_interfaces.go | 4 +- .../sdk-go/v6/api_network_load_balancers.go | 12 +- .../sdk-go/v6/api_private_cross_connects.go | 4 +- .../ionos-cloud/sdk-go/v6/api_servers.go | 158 +- .../ionos-cloud/sdk-go/v6/api_snapshots.go | 4 +- .../sdk-go/v6/api_target_groups.go | 32 +- .../ionos-cloud/sdk-go/v6/api_templates.go | 26 +- .../sdk-go/v6/api_user_management.go | 4 +- .../ionos-cloud/sdk-go/v6/api_user_s3_keys.go | 4 +- .../ionos-cloud/sdk-go/v6/api_volumes.go | 8 +- .../ionos-cloud/sdk-go/v6/client.go | 2 +- .../ionos-cloud/sdk-go/v6/configuration.go | 2 +- .../v6/model_application_load_balancer.go | 2 +- ...plication_load_balancer_forwarding_rule.go | 2 +- ...oad_balancer_forwarding_rule_properties.go | 8 +- ...ation_load_balancer_forwarding_rule_put.go | 2 +- ...lication_load_balancer_forwarding_rules.go | 2 +- ...del_application_load_balancer_http_rule.go | 16 +- ...ation_load_balancer_http_rule_condition.go | 10 +- ...el_application_load_balancer_properties.go | 8 +- .../v6/model_application_load_balancer_put.go | 2 +- .../v6/model_application_load_balancers.go | 2 +- .../sdk-go/v6/model_attached_volumes.go | 2 +- .../sdk-go/v6/model_contract_properties.go | 4 +- .../ionos-cloud/sdk-go/v6/model_contracts.go | 2 +- .../v6/model_datacenter_element_metadata.go | 2 +- .../v6/model_firewallrule_properties.go | 51 +- .../ionos-cloud/sdk-go/v6/model_flow_log.go | 2 +- .../sdk-go/v6/model_flow_log_properties.go | 4 +- .../ionos-cloud/sdk-go/v6/model_flow_logs.go | 2 +- .../sdk-go/v6/model_group_properties.go | 131 +- .../sdk-go/v6/model_image_properties.go | 10 +- .../ionos-cloud/sdk-go/v6/model_images.go | 2 +- .../ionos-cloud/sdk-go/v6/model_info.go | 6 +- .../v6/model_kubernetes_auto_scaling.go | 4 +- .../sdk-go/v6/model_kubernetes_cluster.go | 6 +- .../v6/model_kubernetes_cluster_for_post.go | 6 +- ..._kubernetes_cluster_properties_for_post.go | 6 +- ...l_kubernetes_cluster_properties_for_put.go | 6 +- .../sdk-go/v6/model_kubernetes_clusters.go | 8 +- .../v6/model_kubernetes_maintenance_window.go | 4 +- .../sdk-go/v6/model_kubernetes_node.go | 4 +- .../v6/model_kubernetes_node_metadata.go | 10 +- .../sdk-go/v6/model_kubernetes_node_pool.go | 4 +- .../v6/model_kubernetes_node_pool_for_post.go | 4 +- .../v6/model_kubernetes_node_pool_for_put.go | 4 +- .../v6/model_kubernetes_node_pool_lan.go | 49 +- .../model_kubernetes_node_pool_properties.go | 26 +- ...ubernetes_node_pool_properties_for_post.go | 24 +- ...kubernetes_node_pool_properties_for_put.go | 12 +- 
.../sdk-go/v6/model_kubernetes_node_pools.go | 4 +- .../v6/model_kubernetes_node_properties.go | 8 +- .../sdk-go/v6/model_kubernetes_nodes.go | 4 +- .../ionos-cloud/sdk-go/v6/model_lan_post.go | 335 --- .../sdk-go/v6/model_lan_properties_post.go | 250 --- .../sdk-go/v6/model_location_properties.go | 8 +- ...oad_balancer_forwarding_rule_properties.go | 2 +- .../sdk-go/v6/model_nic_properties.go | 47 +- .../sdk-go/v6/model_resource_limits.go | 44 +- .../ionos-cloud/sdk-go/v6/model_s3_bucket.go | 2 +- .../sdk-go/v6/model_server_properties.go | 60 +- .../sdk-go/v6/model_target_group.go | 2 +- .../v6/model_target_group_health_check.go | 6 +- .../model_target_group_http_health_check.go | 12 +- .../v6/model_target_group_properties.go | 6 +- .../sdk-go/v6/model_target_group_put.go | 2 +- .../sdk-go/v6/model_target_group_target.go | 6 +- .../sdk-go/v6/model_target_groups.go | 2 +- .../ionos-cloud/sdk-go/v6/model_template.go | 2 +- .../sdk-go/v6/model_template_properties.go | 2 +- .../ionos-cloud/sdk-go/v6/model_templates.go | 2 +- .../ionos-cloud/sdk-go/v6/model_volume.go | 2 +- .../sdk-go/v6/model_volume_properties.go | 8 +- .../github.com/ionos-cloud/sdk-go/v6/utils.go | 38 + vendor/github.com/linode/linodego/README.md | 3 +- vendor/github.com/linode/linodego/account.go | 1 + .../linode/linodego/account_logins.go | 1 + .../linode/linodego/account_payments.go | 4 +- vendor/github.com/linode/linodego/client.go | 4 +- vendor/github.com/linode/linodego/config.go | 6 +- vendor/github.com/linode/linodego/go.work.sum | 14 + .../linode/linodego/lke_clusters.go | 29 + .../linode/linodego/object_storage_buckets.go | 25 +- .../linode/linodego/object_storage_object.go | 10 +- .../github.com/linode/linodego/pagination.go | 2 +- .../linode/linodego/profile_logins.go | 83 + vendor/github.com/linode/linodego/tags.go | 3 + vendor/github.com/linode/linodego/vlans.go | 4 +- vendor/github.com/linode/linodego/waitfor.go | 46 +- vendor/github.com/miekg/dns/README.md | 2 +- vendor/github.com/miekg/dns/client.go | 15 +- vendor/github.com/miekg/dns/clientconfig.go | 2 +- vendor/github.com/miekg/dns/defaults.go | 2 +- vendor/github.com/miekg/dns/dnssec.go | 14 +- vendor/github.com/miekg/dns/edns.go | 45 +- .../miekg/dns/listen_no_reuseport.go | 4 +- .../github.com/miekg/dns/listen_reuseport.go | 3 +- vendor/github.com/miekg/dns/msg.go | 56 +- vendor/github.com/miekg/dns/msg_helpers.go | 8 +- vendor/github.com/miekg/dns/scan_rr.go | 2 +- vendor/github.com/miekg/dns/server.go | 2 +- vendor/github.com/miekg/dns/svcb.go | 47 +- vendor/github.com/miekg/dns/types.go | 38 +- vendor/github.com/miekg/dns/udp_windows.go | 7 +- vendor/github.com/miekg/dns/version.go | 2 +- vendor/github.com/miekg/dns/ztypes.go | 415 +++- .../exporter/awsemfexporter/README.md | 8 +- .../exporter/awsemfexporter/datapoint.go | 23 +- .../exporter/awsemfexporter/emf_exporter.go | 6 +- .../exporter/awsemfexporter/factory.go | 14 +- .../exporter/awsemfexporter/grouped_metric.go | 4 +- .../awsemfexporter/metric_translator.go | 28 +- .../internal/translator/cause.go | 36 +- .../exporter/datadogexporter/README.md | 2 +- .../exporter/datadogexporter/factory.go | 2 + .../hostmetadata/provider/provider.go | 34 +- .../internal/metrics/consumer.go | 8 +- .../internal/metrics/consumer_deprecated.go | 8 +- .../exporter/datadogexporter/logs_exporter.go | 16 +- .../exporter/datadogexporter/metadata.yaml | 2 +- .../datadogexporter/metrics_exporter.go | 4 +- .../internal/serialization/serialization.go | 2 +- .../exporter/kafkaexporter/README.md | 4 +- 
.../exporter/kafkaexporter/authentication.go | 2 +- .../exporter/kafkaexporter/config.go | 10 +- .../exporter/kafkaexporter/factory.go | 2 +- .../internal/awsmsk/iam_scram_client.go | 2 +- .../kafkaexporter/jaeger_marshaler.go | 2 +- .../exporter/kafkaexporter/kafka_exporter.go | 2 +- .../exporter/kafkaexporter/marshaler.go | 2 +- .../exporter/kafkaexporter/pdata_marshaler.go | 2 +- .../exporter/kafkaexporter/raw_marshaler.go | 2 +- .../exporter/kafkaexporter/scram_client.go | 2 +- .../loadbalancingexporter/resolver_dns.go | 6 +- .../exporter/logzioexporter/logziospan.go | 4 +- .../exporter/prometheusexporter/collector.go | 2 +- .../internal/dimensions/dimclient.go | 3 +- .../internal/translation/constants.go | 8 + .../translation/dpfilters/filterset.go | 6 +- .../extension/observer/ecsobserver/README.md | 2 +- .../observer/ecsobserver/metadata.yaml | 2 +- .../internal/aws/metrics/metric_calculator.go | 54 +- .../metadataproviders/system/metadata.go | 28 +- .../pkg/ottl/README.md | 11 +- .../pkg/ottl/expression.go | 53 +- .../pkg/ottl/functions.go | 57 +- .../pkg/ottl/grammar.go | 3 +- .../pkg/ottl/math.go | 57 +- .../pkg/ottl/parser.go | 2 +- .../jaeger/jaegerproto_to_traces.go | 2 +- .../translator/prometheus/normalize_name.go | 6 - .../pkg/translator/signalfx/from_metrics.go | 6 +- .../zipkin/zipkinv2/to_translator.go | 5 + .../attributesprocessor/attributes_metric.go | 1 + .../processor/filterprocessor/README.md | 31 + .../processor/filterprocessor/config.go | 6 +- .../processor/filterprocessor/metrics.go | 1 + .../groupbytraceprocessor/storage_memory.go | 6 +- .../k8sattributesprocessor/config.go | 1 + .../internal/kube/client.go | 42 +- .../internal/kube/informer.go | 23 + .../internal/kube/kube.go | 2 + .../internal/metadata/generated_config.go | 4 + .../internal/metadata/generated_resource.go | 7 + .../k8sattributesprocessor/metadata.yaml | 4 + .../k8sattributesprocessor/options.go | 7 + .../metricstransformprocessor/config.go | 156 +- .../metricstransformprocessor/factory.go | 50 +- .../metrics_transform_processor.go | 10 +- .../metrics_transform_processor_otlp.go | 14 +- .../operation_aggregate_labels.go | 22 +- .../resourcedetectionprocessor/README.md | 18 +- .../internal/gcp/gcp.go | 2 + .../gcp/internal/metadata/generated_config.go | 8 + .../internal/metadata/generated_resource.go | 14 + .../internal/gcp/metadata.yaml | 10 +- .../internal/gcp/types.go | 2 + .../internal/heroku/heroku.go | 25 +- .../internal/metadata/generated_config.go | 12 +- .../internal/metadata/generated_resource.go | 7 + .../internal/system/metadata.yaml | 4 + .../internal/system/system.go | 35 +- .../tailsamplingprocessor/and_helper.go | 4 +- .../tailsamplingprocessor/composite_helper.go | 4 +- .../tailsamplingprocessor/processor.go | 4 +- .../internal/cadvisor/cadvisor_linux.go | 13 + .../internal/cadvisor/cadvisor_nolinux.go | 5 + .../cadvisor/extractors/cpu_extractor.go | 4 + .../cadvisor/extractors/diskio_extractor.go | 4 + .../internal/cadvisor/extractors/extractor.go | 5 +- .../cadvisor/extractors/fs_extractor.go | 8 +- .../cadvisor/extractors/mem_extractor.go | 4 + .../cadvisor/extractors/net_extractor.go | 16 +- .../internal/k8sapiserver/k8sapiserver.go | 3 +- .../internal/stores/podstore.go | 27 +- .../internal/stores/store.go | 8 + .../awscontainerinsightreceiver/receiver.go | 14 +- .../receiver/awsxrayreceiver/receiver.go | 4 +- .../receiver/kafkareceiver/kafka_receiver.go | 8 +- .../receiver/prometheusreceiver/README.md | 2 + .../receiver/prometheusreceiver/config.go | 3 + 
.../internal/metricfamily.go | 34 +- .../prometheusreceiver/metrics_receiver.go | 2 + .../receiver/statsdreceiver/config.go | 2 +- .../receiver/statsdreceiver/factory.go | 2 +- .../protocol/metric_translator.go | 2 +- .../{ => internal}/protocol/parser.go | 2 +- .../{ => internal}/protocol/statsd_parser.go | 2 +- .../{ => internal}/transport/mock_reporter.go | 2 +- .../{ => internal}/transport/server.go | 4 +- .../{ => internal}/transport/udp_server.go | 4 +- .../receiver/statsdreceiver/receiver.go | 4 +- .../receiver/statsdreceiver/reporter.go | 2 +- .../image-spec/specs-go/v1/annotations.go | 9 - .../image-spec/specs-go/v1/artifact.go | 34 - .../image-spec/specs-go/v1/config.go | 29 +- .../image-spec/specs-go/v1/descriptor.go | 10 +- .../image-spec/specs-go/v1/index.go | 6 + .../image-spec/specs-go/v1/manifest.go | 3 + .../image-spec/specs-go/v1/mediatype.go | 19 +- .../image-spec/specs-go/version.go | 2 +- .../runtime-spec/specs-go/config.go | 114 +- .../runtime-spec/specs-go/version.go | 6 +- vendor/github.com/ovh/go-ovh/LICENSE | 42 +- .../ovh/go-ovh/ovh/configuration.go | 88 +- .../github.com/ovh/go-ovh/ovh/consumer_key.go | 1 - vendor/github.com/ovh/go-ovh/ovh/error.go | 36 +- vendor/github.com/ovh/go-ovh/ovh/ovh.go | 35 +- .../prometheus/prometheus/config/config.go | 18 +- .../prometheus/discovery/aws/ec2.go | 2 +- .../prometheus/discovery/dns/dns.go | 7 +- .../prometheus/discovery/hetzner/hcloud.go | 2 +- .../prometheus/discovery/hetzner/robot.go | 4 +- .../prometheus/discovery/ionos/server.go | 2 +- .../discovery/kubernetes/client_metrics.go | 6 +- .../discovery/kubernetes/endpoints.go | 1 + .../discovery/kubernetes/endpointslice.go | 4 +- .../discovery/kubernetes/ingress.go | 2 +- .../discovery/kubernetes/kubernetes.go | 7 +- .../prometheus/discovery/kubernetes/node.go | 2 +- .../prometheus/discovery/kubernetes/pod.go | 2 +- .../discovery/kubernetes/service.go | 2 +- .../prometheus/discovery/linode/linode.go | 8 +- .../prometheus/discovery/marathon/marathon.go | 12 +- .../prometheus/discovery/nomad/nomad.go | 2 +- .../discovery/ovhcloud/dedicated_server.go | 2 +- .../prometheus/discovery/ovhcloud/vps.go | 2 +- .../prometheus/discovery/registry.go | 2 +- .../prometheus/discovery/vultr/vultr.go | 4 +- .../discovery/zookeeper/zookeeper.go | 2 +- .../model/histogram/float_histogram.go | 54 +- .../prometheus/model/labels/labels.go | 42 +- .../prometheus/model/labels/labels_string.go | 25 +- .../prometheus/model/relabel/relabel.go | 2 +- .../prometheus/model/textparse/promparse.go | 7 +- .../prometheus/prometheus/prompb/custom.go | 5 + .../prometheus/prometheus/prompb/types.pb.go | 152 +- .../prometheus/prometheus/prompb/types.proto | 7 +- .../prometheus/prometheus/scrape/manager.go | 5 +- .../prometheus/prometheus/scrape/scrape.go | 55 +- .../prometheus/prometheus/scrape/target.go | 17 +- .../prometheus/prometheus/storage/buffer.go | 600 +++++- .../prometheus/prometheus/storage/fanout.go | 5 +- .../prometheus/storage/interface.go | 6 +- .../prometheus/prometheus/storage/merge.go | 18 +- .../prometheus/prometheus/storage/series.go | 23 +- .../prometheus/tsdb/chunkenc/bstream.go | 6 +- .../prometheus/tsdb/chunkenc/chunk.go | 21 +- .../tsdb/chunkenc/float_histogram.go | 4 +- .../prometheus/tsdb/chunkenc/histogram.go | 8 +- .../prometheus/tsdb/chunkenc/varbit.go | 2 +- .../prometheus/tsdb/chunkenc/xor.go | 28 +- .../tsdb/chunks/chunk_write_queue.go | 5 +- .../prometheus/tsdb/chunks/head_chunks.go | 54 +- .../prometheus/tsdb/errors/errors.go | 14 + .../prometheus/tsdb/tsdbutil/chunks.go | 
14 +- .../prometheus/util/testutil/context.go | 2 +- .../prometheus/util/testutil/roundtrip.go | 2 +- .../prometheus/util/treecache/treecache.go | 18 +- .../api/baremetal/v1/baremetal_sdk.go | 246 ++- .../api/instance/v1/instance_sdk.go | 200 +- .../api/marketplace/v1/marketplace_sdk.go | 6 +- .../scaleway/scaleway-sdk-go/scw/client.go | 4 +- .../scaleway/scaleway-sdk-go/scw/config.go | 4 + .../gopsutil/v3/process/process_darwin.go | 2 - .../gopsutil/v3/process/process_freebsd.go | 2 - .../gopsutil/v3/process/process_linux.go | 2 - .../gopsutil/v3/process/process_openbsd.go | 2 - .../collector/extension/extension.go | 8 + .../collector/otelcol/collector.go | 14 + .../collector/otelcol/configprovider.go | 25 + .../service/extensions/extensions.go | 12 + .../collector/service/service.go | 12 + .../x/oauth2/google/appengine_gen1.go | 1 - .../x/oauth2/google/appengine_gen2_flex.go | 1 - .../x/oauth2/internal/client_appengine.go | 1 - .../api/compute/v1/compute-api.json | 272 ++- .../api/compute/v1/compute-gen.go | 563 ++++- vendor/google.golang.org/api/internal/cba.go | 40 +- .../google.golang.org/api/internal/version.go | 2 +- vendor/modules.txt | 485 ++--- 593 files changed, 19093 insertions(+), 4883 deletions(-) create mode 100644 vendor/github.com/DataDog/datadog-agent/pkg/proto/LICENSE create mode 100644 vendor/github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace/agent_payload.pb.go create mode 100644 vendor/github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace/agent_payload_gen.go create mode 100644 vendor/github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace/agent_payload_vtproto.pb.go create mode 100644 vendor/github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace/decoder_bytes.go create mode 100644 vendor/github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace/decoder_v05.go create mode 100644 vendor/github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace/hook.go create mode 100644 vendor/github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace/span.pb.go create mode 100644 vendor/github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace/span_gen.go create mode 100644 vendor/github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace/span_utils.go create mode 100644 vendor/github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace/span_vtproto.pb.go create mode 100644 vendor/github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace/stats.pb.go create mode 100644 vendor/github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace/stats_gen.go create mode 100644 vendor/github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace/stats_vtproto.pb.go create mode 100644 vendor/github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace/trace.go create mode 100644 vendor/github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace/trace_gen.go create mode 100644 vendor/github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace/tracer_payload.pb.go create mode 100644 vendor/github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace/tracer_payload_gen.go create mode 100644 vendor/github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace/tracer_payload_utils.go create mode 100644 vendor/github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace/tracer_payload_vtproto.pb.go create mode 100644 vendor/github.com/DataDog/go-tuf/client/file_store.go create mode 100644 vendor/github.com/DataDog/go-tuf/pkg/keys/deprecated_ecdsa.go create mode 100644 vendor/github.com/DataDog/go-tuf/pkg/keys/pkix.go rename vendor/github.com/{Shopify => IBM}/sarama/.gitignore (100%) rename vendor/github.com/{Shopify => IBM}/sarama/.golangci.yml (86%) create mode 100644 
vendor/github.com/IBM/sarama/CHANGELOG.md create mode 100644 vendor/github.com/IBM/sarama/CONTRIBUTING.md rename vendor/github.com/{Shopify => IBM}/sarama/Dockerfile.kafka (82%) rename vendor/github.com/{Shopify/sarama/LICENSE => IBM/sarama/LICENSE.md} (95%) rename vendor/github.com/{Shopify => IBM}/sarama/Makefile (100%) rename vendor/github.com/{Shopify => IBM}/sarama/README.md (72%) rename vendor/github.com/{Shopify => IBM}/sarama/Vagrantfile (100%) rename vendor/github.com/{Shopify => IBM}/sarama/acl_bindings.go (100%) rename vendor/github.com/{Shopify => IBM}/sarama/acl_create_request.go (100%) rename vendor/github.com/{Shopify => IBM}/sarama/acl_create_response.go (100%) rename vendor/github.com/{Shopify => IBM}/sarama/acl_delete_request.go (100%) rename vendor/github.com/{Shopify => IBM}/sarama/acl_delete_response.go (100%) rename vendor/github.com/{Shopify => IBM}/sarama/acl_describe_request.go (93%) rename vendor/github.com/{Shopify => IBM}/sarama/acl_describe_response.go (100%) rename vendor/github.com/{Shopify => IBM}/sarama/acl_filter.go (100%) rename vendor/github.com/{Shopify => IBM}/sarama/acl_types.go (93%) rename vendor/github.com/{Shopify => IBM}/sarama/add_offsets_to_txn_request.go (100%) rename vendor/github.com/{Shopify => IBM}/sarama/add_offsets_to_txn_response.go (100%) rename vendor/github.com/{Shopify => IBM}/sarama/add_partitions_to_txn_request.go (96%) rename vendor/github.com/{Shopify => IBM}/sarama/add_partitions_to_txn_response.go (100%) rename vendor/github.com/{Shopify => IBM}/sarama/admin.go (96%) rename vendor/github.com/{Shopify => IBM}/sarama/alter_client_quotas_request.go (100%) rename vendor/github.com/{Shopify => IBM}/sarama/alter_client_quotas_response.go (100%) rename vendor/github.com/{Shopify => IBM}/sarama/alter_configs_request.go (100%) rename vendor/github.com/{Shopify => IBM}/sarama/alter_configs_response.go (100%) rename vendor/github.com/{Shopify => IBM}/sarama/alter_partition_reassignments_request.go (100%) rename vendor/github.com/{Shopify => IBM}/sarama/alter_partition_reassignments_response.go (100%) rename vendor/github.com/{Shopify => IBM}/sarama/alter_user_scram_credentials_request.go (100%) rename vendor/github.com/{Shopify => IBM}/sarama/alter_user_scram_credentials_response.go (100%) rename vendor/github.com/{Shopify => IBM}/sarama/api_versions_request.go (100%) rename vendor/github.com/{Shopify => IBM}/sarama/api_versions_response.go (100%) rename vendor/github.com/{Shopify => IBM}/sarama/async_producer.go (99%) rename vendor/github.com/{Shopify => IBM}/sarama/balance_strategy.go (95%) rename vendor/github.com/{Shopify => IBM}/sarama/broker.go (99%) rename vendor/github.com/{Shopify => IBM}/sarama/client.go (98%) rename vendor/github.com/{Shopify => IBM}/sarama/compress.go (100%) rename vendor/github.com/{Shopify => IBM}/sarama/config.go (98%) rename vendor/github.com/{Shopify => IBM}/sarama/config_resource_type.go (100%) rename vendor/github.com/{Shopify => IBM}/sarama/consumer.go (99%) rename vendor/github.com/{Shopify => IBM}/sarama/consumer_group.go (99%) rename vendor/github.com/{Shopify => IBM}/sarama/consumer_group_members.go (100%) rename vendor/github.com/{Shopify => IBM}/sarama/consumer_metadata_request.go (100%) rename vendor/github.com/{Shopify => IBM}/sarama/consumer_metadata_response.go (100%) rename vendor/github.com/{Shopify => IBM}/sarama/control_record.go (100%) rename vendor/github.com/{Shopify => IBM}/sarama/crc32_field.go (100%) rename vendor/github.com/{Shopify => IBM}/sarama/create_partitions_request.go 
(100%) rename vendor/github.com/{Shopify => IBM}/sarama/create_partitions_response.go (100%) rename vendor/github.com/{Shopify => IBM}/sarama/create_topics_request.go (100%) rename vendor/github.com/{Shopify => IBM}/sarama/create_topics_response.go (100%) create mode 100644 vendor/github.com/IBM/sarama/decompress.go rename vendor/github.com/{Shopify => IBM}/sarama/delete_groups_request.go (100%) rename vendor/github.com/{Shopify => IBM}/sarama/delete_groups_response.go (100%) rename vendor/github.com/{Shopify => IBM}/sarama/delete_offsets_request.go (100%) rename vendor/github.com/{Shopify => IBM}/sarama/delete_offsets_response.go (100%) rename vendor/github.com/{Shopify => IBM}/sarama/delete_records_request.go (100%) rename vendor/github.com/{Shopify => IBM}/sarama/delete_records_response.go (100%) rename vendor/github.com/{Shopify => IBM}/sarama/delete_topics_request.go (100%) rename vendor/github.com/{Shopify => IBM}/sarama/delete_topics_response.go (100%) rename vendor/github.com/{Shopify => IBM}/sarama/describe_client_quotas_request.go (100%) rename vendor/github.com/{Shopify => IBM}/sarama/describe_client_quotas_response.go (100%) rename vendor/github.com/{Shopify => IBM}/sarama/describe_configs_request.go (100%) rename vendor/github.com/{Shopify => IBM}/sarama/describe_configs_response.go (100%) rename vendor/github.com/{Shopify => IBM}/sarama/describe_groups_request.go (92%) rename vendor/github.com/{Shopify => IBM}/sarama/describe_groups_response.go (98%) rename vendor/github.com/{Shopify => IBM}/sarama/describe_log_dirs_request.go (100%) rename vendor/github.com/{Shopify => IBM}/sarama/describe_log_dirs_response.go (100%) rename vendor/github.com/{Shopify => IBM}/sarama/describe_user_scram_credentials_request.go (100%) rename vendor/github.com/{Shopify => IBM}/sarama/describe_user_scram_credentials_response.go (100%) rename vendor/github.com/{Shopify => IBM}/sarama/dev.yml (100%) rename vendor/github.com/{Shopify => IBM}/sarama/docker-compose.yml (96%) rename vendor/github.com/{Shopify => IBM}/sarama/encoder_decoder.go (100%) rename vendor/github.com/{Shopify => IBM}/sarama/end_txn_request.go (100%) rename vendor/github.com/{Shopify => IBM}/sarama/end_txn_response.go (100%) rename vendor/github.com/{Shopify => IBM}/sarama/entrypoint.sh (94%) rename vendor/github.com/{Shopify => IBM}/sarama/errors.go (99%) rename vendor/github.com/{Shopify => IBM}/sarama/fetch_request.go (100%) rename vendor/github.com/{Shopify => IBM}/sarama/fetch_response.go (100%) rename vendor/github.com/{Shopify => IBM}/sarama/find_coordinator_request.go (100%) rename vendor/github.com/{Shopify => IBM}/sarama/find_coordinator_response.go (100%) rename vendor/github.com/{Shopify => IBM}/sarama/gssapi_kerberos.go (99%) rename vendor/github.com/{Shopify => IBM}/sarama/heartbeat_request.go (100%) rename vendor/github.com/{Shopify => IBM}/sarama/heartbeat_response.go (100%) rename vendor/github.com/{Shopify => IBM}/sarama/incremental_alter_configs_request.go (100%) rename vendor/github.com/{Shopify => IBM}/sarama/incremental_alter_configs_response.go (100%) rename vendor/github.com/{Shopify => IBM}/sarama/init_producer_id_request.go (100%) rename vendor/github.com/{Shopify => IBM}/sarama/init_producer_id_response.go (100%) rename vendor/github.com/{Shopify => IBM}/sarama/interceptors.go (100%) rename vendor/github.com/{Shopify => IBM}/sarama/join_group_request.go (100%) rename vendor/github.com/{Shopify => IBM}/sarama/join_group_response.go (100%) rename vendor/github.com/{Shopify => IBM}/sarama/kerberos_client.go 
(79%) rename vendor/github.com/{Shopify => IBM}/sarama/leave_group_request.go (100%) rename vendor/github.com/{Shopify => IBM}/sarama/leave_group_response.go (100%) rename vendor/github.com/{Shopify => IBM}/sarama/length_field.go (100%) rename vendor/github.com/{Shopify => IBM}/sarama/list_groups_request.go (100%) rename vendor/github.com/{Shopify => IBM}/sarama/list_groups_response.go (100%) rename vendor/github.com/{Shopify => IBM}/sarama/list_partition_reassignments_request.go (100%) rename vendor/github.com/{Shopify => IBM}/sarama/list_partition_reassignments_response.go (100%) rename vendor/github.com/{Shopify => IBM}/sarama/message.go (100%) rename vendor/github.com/{Shopify => IBM}/sarama/message_set.go (100%) rename vendor/github.com/{Shopify => IBM}/sarama/metadata_request.go (100%) rename vendor/github.com/{Shopify => IBM}/sarama/metadata_response.go (100%) rename vendor/github.com/{Shopify => IBM}/sarama/metrics.go (100%) rename vendor/github.com/{Shopify => IBM}/sarama/mockbroker.go (100%) rename vendor/github.com/{Shopify => IBM}/sarama/mockkerberos.go (100%) rename vendor/github.com/{Shopify => IBM}/sarama/mockresponses.go (100%) rename vendor/github.com/{Shopify => IBM}/sarama/offset_commit_request.go (95%) rename vendor/github.com/{Shopify => IBM}/sarama/offset_commit_response.go (100%) rename vendor/github.com/{Shopify => IBM}/sarama/offset_fetch_request.go (100%) rename vendor/github.com/{Shopify => IBM}/sarama/offset_fetch_response.go (100%) rename vendor/github.com/{Shopify => IBM}/sarama/offset_manager.go (98%) rename vendor/github.com/{Shopify => IBM}/sarama/offset_request.go (100%) rename vendor/github.com/{Shopify => IBM}/sarama/offset_response.go (100%) rename vendor/github.com/{Shopify => IBM}/sarama/packet_decoder.go (98%) rename vendor/github.com/{Shopify => IBM}/sarama/packet_encoder.go (100%) rename vendor/github.com/{Shopify => IBM}/sarama/partitioner.go (100%) rename vendor/github.com/{Shopify => IBM}/sarama/prep_encoder.go (100%) rename vendor/github.com/{Shopify => IBM}/sarama/produce_request.go (100%) rename vendor/github.com/{Shopify => IBM}/sarama/produce_response.go (100%) rename vendor/github.com/{Shopify => IBM}/sarama/produce_set.go (100%) rename vendor/github.com/{Shopify => IBM}/sarama/quota_types.go (100%) rename vendor/github.com/{Shopify => IBM}/sarama/real_decoder.go (100%) rename vendor/github.com/{Shopify => IBM}/sarama/real_encoder.go (100%) rename vendor/github.com/{Shopify => IBM}/sarama/record.go (100%) rename vendor/github.com/{Shopify => IBM}/sarama/record_batch.go (100%) rename vendor/github.com/{Shopify => IBM}/sarama/records.go (100%) rename vendor/github.com/{Shopify => IBM}/sarama/request.go (100%) rename vendor/github.com/{Shopify => IBM}/sarama/response_header.go (100%) rename vendor/github.com/{Shopify => IBM}/sarama/sarama.go (99%) rename vendor/github.com/{Shopify => IBM}/sarama/sasl_authenticate_request.go (100%) rename vendor/github.com/{Shopify => IBM}/sarama/sasl_authenticate_response.go (100%) rename vendor/github.com/{Shopify => IBM}/sarama/sasl_handshake_request.go (100%) rename vendor/github.com/{Shopify => IBM}/sarama/sasl_handshake_response.go (100%) rename vendor/github.com/{Shopify => IBM}/sarama/scram_formatter.go (100%) rename vendor/github.com/{Shopify => IBM}/sarama/sticky_assignor_user_data.go (100%) rename vendor/github.com/{Shopify => IBM}/sarama/sync_group_request.go (100%) rename vendor/github.com/{Shopify => IBM}/sarama/sync_group_response.go (100%) rename vendor/github.com/{Shopify => 
IBM}/sarama/sync_producer.go (98%)
 rename vendor/github.com/{Shopify => IBM}/sarama/timestamp.go (100%)
 rename vendor/github.com/{Shopify => IBM}/sarama/transaction_manager.go (99%)
 rename vendor/github.com/{Shopify => IBM}/sarama/txn_offset_commit_request.go (100%)
 rename vendor/github.com/{Shopify => IBM}/sarama/txn_offset_commit_response.go (100%)
 rename vendor/github.com/{Shopify => IBM}/sarama/utils.go (98%)
 rename vendor/github.com/{Shopify => IBM}/sarama/version.go (100%)
 rename vendor/github.com/{Shopify => IBM}/sarama/zstd.go (100%)
 delete mode 100644 vendor/github.com/Shopify/sarama/CHANGELOG.md
 delete mode 100644 vendor/github.com/Shopify/sarama/decompress.go
 create mode 100644 vendor/github.com/gophercloud/gophercloud/RELEASE.md
 rename vendor/github.com/grpc-ecosystem/grpc-gateway/v2/{LICENSE.txt => LICENSE} (100%)
 create mode 100644 vendor/github.com/hetznercloud/hcloud-go/hcloud/architecture.go
 delete mode 100644 vendor/github.com/ionos-cloud/sdk-go/v6/model_lan_post.go
 delete mode 100644 vendor/github.com/ionos-cloud/sdk-go/v6/model_lan_properties_post.go
 create mode 100644 vendor/github.com/linode/linodego/profile_logins.go
 rename vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/statsdreceiver/{ => internal}/protocol/metric_translator.go (98%)
 rename vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/statsdreceiver/{ => internal}/protocol/parser.go (90%)
 rename vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/statsdreceiver/{ => internal}/protocol/statsd_parser.go (99%)
 rename vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/statsdreceiver/{ => internal}/transport/mock_reporter.go (94%)
 rename vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/statsdreceiver/{ => internal}/transport/server.go (95%)
 rename vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/statsdreceiver/{ => internal}/transport/udp_server.go (94%)
 delete mode 100644 vendor/github.com/opencontainers/image-spec/specs-go/v1/artifact.go

diff --git a/vendor/cloud.google.com/go/compute/internal/version.go b/vendor/cloud.google.com/go/compute/internal/version.go
index e939b9f5e..639553700 100644
--- a/vendor/cloud.google.com/go/compute/internal/version.go
+++ b/vendor/cloud.google.com/go/compute/internal/version.go
@@ -15,4 +15,4 @@ package internal
 
 // Version is the current tagged release of the library.
-const Version = "1.20.1"
+const Version = "1.23.0"
diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/token.go b/vendor/github.com/Azure/go-autorest/autorest/adal/token.go
index c90209a94..2a24ab80c 100644
--- a/vendor/github.com/Azure/go-autorest/autorest/adal/token.go
+++ b/vendor/github.com/Azure/go-autorest/autorest/adal/token.go
@@ -127,6 +127,9 @@ type TokenRefreshCallback func(Token) error
 // TokenRefresh is a type representing a custom callback to refresh a token
 type TokenRefresh func(ctx context.Context, resource string) (*Token, error)
 
+// JWTCallback is the type representing callback that will be called to get the federated OIDC JWT
+type JWTCallback func() (string, error)
+
 // Token encapsulates the access token used to authorize Azure requests.
 // https://docs.microsoft.com/en-us/azure/active-directory/develop/v1-oauth2-client-creds-grant-flow#service-to-service-access-token-response
 type Token struct {
@@ -367,14 +370,18 @@ func (secret ServicePrincipalAuthorizationCodeSecret) MarshalJSON() ([]byte, err
 
 // ServicePrincipalFederatedSecret implements ServicePrincipalSecret for Federated JWTs.
 type ServicePrincipalFederatedSecret struct {
-	jwt string
+	jwtCallback JWTCallback
 }
 
 // SetAuthenticationValues is a method of the interface ServicePrincipalSecret.
 // It will populate the form submitted during OAuth Token Acquisition using a JWT signed by an OIDC issuer.
-func (secret *ServicePrincipalFederatedSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error {
+func (secret *ServicePrincipalFederatedSecret) SetAuthenticationValues(_ *ServicePrincipalToken, v *url.Values) error {
+	jwt, err := secret.jwtCallback()
+	if err != nil {
+		return err
+	}
 
-	v.Set("client_assertion", secret.jwt)
+	v.Set("client_assertion", jwt)
 	v.Set("client_assertion_type", "urn:ietf:params:oauth:client-assertion-type:jwt-bearer")
 	return nil
 }
@@ -687,6 +694,8 @@ func NewServicePrincipalTokenFromAuthorizationCode(oauthConfig OAuthConfig, clie
 }
 
 // NewServicePrincipalTokenFromFederatedToken creates a ServicePrincipalToken from the supplied federated OIDC JWT.
+//
+// Deprecated: Use NewServicePrincipalTokenFromFederatedTokenWithCallback to refresh jwt dynamically.
 func NewServicePrincipalTokenFromFederatedToken(oauthConfig OAuthConfig, clientID string, jwt string, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
 	if err := validateOAuthConfig(oauthConfig); err != nil {
 		return nil, err
@@ -700,12 +709,37 @@ func NewServicePrincipalTokenFromFederatedToken(oauthConfig OAuthConfig, clientI
 	if jwt == "" {
 		return nil, fmt.Errorf("parameter 'jwt' cannot be empty")
 	}
+	return NewServicePrincipalTokenFromFederatedTokenCallback(
+		oauthConfig,
+		clientID,
+		func() (string, error) {
+			return jwt, nil
+		},
+		resource,
+		callbacks...,
+	)
+}
+
+// NewServicePrincipalTokenFromFederatedTokenCallback creates a ServicePrincipalToken from the supplied federated OIDC JWTCallback.
+func NewServicePrincipalTokenFromFederatedTokenCallback(oauthConfig OAuthConfig, clientID string, jwtCallback JWTCallback, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
+	if err := validateOAuthConfig(oauthConfig); err != nil {
+		return nil, err
+	}
+	if err := validateStringParam(clientID, "clientID"); err != nil {
+		return nil, err
+	}
+	if err := validateStringParam(resource, "resource"); err != nil {
+		return nil, err
+	}
+	if jwtCallback == nil {
+		return nil, fmt.Errorf("parameter 'jwtCallback' cannot be empty")
+	}
 	return NewServicePrincipalTokenWithSecret(
 		oauthConfig,
 		clientID,
 		resource,
 		&ServicePrincipalFederatedSecret{
-			jwt: jwt,
+			jwtCallback: jwtCallback,
 		},
 		callbacks...,
 	)
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/proto/LICENSE b/vendor/github.com/DataDog/datadog-agent/pkg/proto/LICENSE
new file mode 100644
index 000000000..b370545be
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/proto/LICENSE
@@ -0,0 +1,200 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "{}"
+      replaced with your own identifying information. (Don't include
+      the brackets!) The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright 2016-present Datadog, Inc.
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
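The go-autorest adal change above replaces the fixed jwt string in ServicePrincipalFederatedSecret with a JWTCallback that is invoked on every token acquisition, so a short-lived federated JWT (for example, a projected Kubernetes service-account token) is re-read each time instead of being captured once at construction. The old constructor remains but is deprecated; note that its Deprecated comment names NewServicePrincipalTokenFromFederatedTokenWithCallback while the function actually added is NewServicePrincipalTokenFromFederatedTokenCallback. A minimal usage sketch of the new constructor follows; the tenant, client ID, resource, and token file path are placeholders, not part of this patch:

    package main

    import (
        "fmt"
        "os"

        "github.com/Azure/go-autorest/autorest/adal"
    )

    func main() {
        // NewOAuthConfig is an existing adal helper; endpoint and tenant are placeholders.
        oauthConfig, err := adal.NewOAuthConfig("https://login.microsoftonline.com/", "<tenant-id>")
        if err != nil {
            panic(err)
        }

        // The callback runs on each acquisition, so a rotated token file is
        // always re-read. The path below is hypothetical.
        jwtCallback := func() (string, error) {
            b, err := os.ReadFile("/var/run/secrets/tokens/oidc-token")
            return string(b), err
        }

        spt, err := adal.NewServicePrincipalTokenFromFederatedTokenCallback(
            *oauthConfig, "<client-id>", jwtCallback, "https://management.azure.com/")
        if err != nil {
            panic(err)
        }

        // Refresh forces an immediate token acquisition through the callback.
        if err := spt.Refresh(); err != nil {
            panic(err)
        }
        fmt.Println("token expires:", spt.Token().Expires())
    }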
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace/agent_payload.pb.go b/vendor/github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace/agent_payload.pb.go new file mode 100644 index 000000000..49352db01 --- /dev/null +++ b/vendor/github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace/agent_payload.pb.go @@ -0,0 +1,240 @@ +// protoc -I. -I$GOPATH/src --gogofaster_out=. span.proto tracer_payload.proto agent_payload.proto + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.1 +// protoc v3.21.5 +// source: datadog/trace/agent_payload.proto + +package trace + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// AgentPayload represents payload the agent sends to the intake. +type AgentPayload struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // hostName specifies hostname of where the agent is running. + HostName string `protobuf:"bytes,1,opt,name=hostName,proto3" json:"hostName,omitempty"` + // env specifies `env` set in agent configuration. + Env string `protobuf:"bytes,2,opt,name=env,proto3" json:"env,omitempty"` + // tracerPayloads specifies list of the payloads received from tracers. + TracerPayloads []*TracerPayload `protobuf:"bytes,5,rep,name=tracerPayloads,proto3" json:"tracerPayloads,omitempty"` + // tags specifies tags common in all `tracerPayloads`. + Tags map[string]string `protobuf:"bytes,6,rep,name=tags,proto3" json:"tags,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // agentVersion specifies version of the agent. + AgentVersion string `protobuf:"bytes,7,opt,name=agentVersion,proto3" json:"agentVersion,omitempty"` + // targetTPS holds `TargetTPS` value in AgentConfig. + TargetTPS float64 `protobuf:"fixed64,8,opt,name=targetTPS,proto3" json:"targetTPS,omitempty"` + // errorTPS holds `ErrorTPS` value in AgentConfig. + ErrorTPS float64 `protobuf:"fixed64,9,opt,name=errorTPS,proto3" json:"errorTPS,omitempty"` + // rareSamplerEnabled holds `RareSamplerEnabled` value in AgentConfig + RareSamplerEnabled bool `protobuf:"varint,10,opt,name=rareSamplerEnabled,proto3" json:"rareSamplerEnabled,omitempty"` +} + +func (x *AgentPayload) Reset() { + *x = AgentPayload{} + if protoimpl.UnsafeEnabled { + mi := &file_datadog_trace_agent_payload_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AgentPayload) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AgentPayload) ProtoMessage() {} + +func (x *AgentPayload) ProtoReflect() protoreflect.Message { + mi := &file_datadog_trace_agent_payload_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AgentPayload.ProtoReflect.Descriptor instead. 
+func (*AgentPayload) Descriptor() ([]byte, []int) { + return file_datadog_trace_agent_payload_proto_rawDescGZIP(), []int{0} +} + +func (x *AgentPayload) GetHostName() string { + if x != nil { + return x.HostName + } + return "" +} + +func (x *AgentPayload) GetEnv() string { + if x != nil { + return x.Env + } + return "" +} + +func (x *AgentPayload) GetTracerPayloads() []*TracerPayload { + if x != nil { + return x.TracerPayloads + } + return nil +} + +func (x *AgentPayload) GetTags() map[string]string { + if x != nil { + return x.Tags + } + return nil +} + +func (x *AgentPayload) GetAgentVersion() string { + if x != nil { + return x.AgentVersion + } + return "" +} + +func (x *AgentPayload) GetTargetTPS() float64 { + if x != nil { + return x.TargetTPS + } + return 0 +} + +func (x *AgentPayload) GetErrorTPS() float64 { + if x != nil { + return x.ErrorTPS + } + return 0 +} + +func (x *AgentPayload) GetRareSamplerEnabled() bool { + if x != nil { + return x.RareSamplerEnabled + } + return false +} + +var File_datadog_trace_agent_payload_proto protoreflect.FileDescriptor + +var file_datadog_trace_agent_payload_proto_rawDesc = []byte{ + 0x0a, 0x21, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2f, + 0x61, 0x67, 0x65, 0x6e, 0x74, 0x5f, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x12, 0x0d, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x74, 0x72, 0x61, + 0x63, 0x65, 0x1a, 0x22, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2f, 0x74, 0x72, 0x61, 0x63, + 0x65, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x72, 0x5f, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x84, 0x03, 0x0a, 0x0c, 0x41, 0x67, 0x65, 0x6e, 0x74, + 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x4e, + 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x4e, + 0x61, 0x6d, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x76, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x03, 0x65, 0x6e, 0x76, 0x12, 0x44, 0x0a, 0x0e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x72, 0x50, + 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, + 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x54, 0x72, + 0x61, 0x63, 0x65, 0x72, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x0e, 0x74, 0x72, 0x61, + 0x63, 0x65, 0x72, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x73, 0x12, 0x39, 0x0a, 0x04, 0x74, + 0x61, 0x67, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x64, 0x61, 0x74, 0x61, + 0x64, 0x6f, 0x67, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x50, + 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x54, 0x61, 0x67, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x52, 0x04, 0x74, 0x61, 0x67, 0x73, 0x12, 0x22, 0x0a, 0x0c, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x56, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x61, 0x67, + 0x65, 0x6e, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x61, + 0x72, 0x67, 0x65, 0x74, 0x54, 0x50, 0x53, 0x18, 0x08, 0x20, 0x01, 0x28, 0x01, 0x52, 0x09, 0x74, + 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x50, 0x53, 0x12, 0x1a, 0x0a, 0x08, 0x65, 0x72, 0x72, 0x6f, + 0x72, 0x54, 0x50, 0x53, 0x18, 0x09, 0x20, 0x01, 0x28, 0x01, 0x52, 0x08, 0x65, 0x72, 0x72, 0x6f, + 0x72, 0x54, 0x50, 0x53, 0x12, 0x2e, 0x0a, 0x12, 0x72, 0x61, 0x72, 0x65, 0x53, 0x61, 0x6d, 0x70, + 0x6c, 0x65, 0x72, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 
0x64, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x12, 0x72, 0x61, 0x72, 0x65, 0x53, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x72, 0x45, 0x6e, 0x61, + 0x62, 0x6c, 0x65, 0x64, 0x1a, 0x37, 0x0a, 0x09, 0x54, 0x61, 0x67, 0x73, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, + 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x16, 0x5a, + 0x14, 0x70, 0x6b, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x67, 0x6f, 0x2f, + 0x74, 0x72, 0x61, 0x63, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_datadog_trace_agent_payload_proto_rawDescOnce sync.Once + file_datadog_trace_agent_payload_proto_rawDescData = file_datadog_trace_agent_payload_proto_rawDesc +) + +func file_datadog_trace_agent_payload_proto_rawDescGZIP() []byte { + file_datadog_trace_agent_payload_proto_rawDescOnce.Do(func() { + file_datadog_trace_agent_payload_proto_rawDescData = protoimpl.X.CompressGZIP(file_datadog_trace_agent_payload_proto_rawDescData) + }) + return file_datadog_trace_agent_payload_proto_rawDescData +} + +var file_datadog_trace_agent_payload_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_datadog_trace_agent_payload_proto_goTypes = []interface{}{ + (*AgentPayload)(nil), // 0: datadog.trace.AgentPayload + nil, // 1: datadog.trace.AgentPayload.TagsEntry + (*TracerPayload)(nil), // 2: datadog.trace.TracerPayload +} +var file_datadog_trace_agent_payload_proto_depIdxs = []int32{ + 2, // 0: datadog.trace.AgentPayload.tracerPayloads:type_name -> datadog.trace.TracerPayload + 1, // 1: datadog.trace.AgentPayload.tags:type_name -> datadog.trace.AgentPayload.TagsEntry + 2, // [2:2] is the sub-list for method output_type + 2, // [2:2] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name +} + +func init() { file_datadog_trace_agent_payload_proto_init() } +func file_datadog_trace_agent_payload_proto_init() { + if File_datadog_trace_agent_payload_proto != nil { + return + } + file_datadog_trace_tracer_payload_proto_init() + if !protoimpl.UnsafeEnabled { + file_datadog_trace_agent_payload_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AgentPayload); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_datadog_trace_agent_payload_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_datadog_trace_agent_payload_proto_goTypes, + DependencyIndexes: file_datadog_trace_agent_payload_proto_depIdxs, + MessageInfos: file_datadog_trace_agent_payload_proto_msgTypes, + }.Build() + File_datadog_trace_agent_payload_proto = out.File + file_datadog_trace_agent_payload_proto_rawDesc = nil + file_datadog_trace_agent_payload_proto_goTypes = nil + file_datadog_trace_agent_payload_proto_depIdxs = nil +} diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace/agent_payload_gen.go b/vendor/github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace/agent_payload_gen.go new file mode 100644 index 000000000..26cefad58 --- /dev/null +++ 
b/vendor/github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace/agent_payload_gen.go @@ -0,0 +1,200 @@ +package trace + +// Code generated by github.com/tinylib/msgp DO NOT EDIT. + +import ( + "github.com/tinylib/msgp/msgp" +) + +// MarshalMsg implements msgp.Marshaler +func (z *AgentPayload) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) + // map header, size 8 + // string "HostName" + o = append(o, 0x88, 0xa8, 0x48, 0x6f, 0x73, 0x74, 0x4e, 0x61, 0x6d, 0x65) + o = msgp.AppendString(o, z.HostName) + // string "Env" + o = append(o, 0xa3, 0x45, 0x6e, 0x76) + o = msgp.AppendString(o, z.Env) + // string "TracerPayloads" + o = append(o, 0xae, 0x54, 0x72, 0x61, 0x63, 0x65, 0x72, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x73) + o = msgp.AppendArrayHeader(o, uint32(len(z.TracerPayloads))) + for za0001 := range z.TracerPayloads { + if z.TracerPayloads[za0001] == nil { + o = msgp.AppendNil(o) + } else { + o, err = z.TracerPayloads[za0001].MarshalMsg(o) + if err != nil { + err = msgp.WrapError(err, "TracerPayloads", za0001) + return + } + } + } + // string "Tags" + o = append(o, 0xa4, 0x54, 0x61, 0x67, 0x73) + o = msgp.AppendMapHeader(o, uint32(len(z.Tags))) + for za0002, za0003 := range z.Tags { + o = msgp.AppendString(o, za0002) + o = msgp.AppendString(o, za0003) + } + // string "AgentVersion" + o = append(o, 0xac, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e) + o = msgp.AppendString(o, z.AgentVersion) + // string "TargetTPS" + o = append(o, 0xa9, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x50, 0x53) + o = msgp.AppendFloat64(o, z.TargetTPS) + // string "ErrorTPS" + o = append(o, 0xa8, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x54, 0x50, 0x53) + o = msgp.AppendFloat64(o, z.ErrorTPS) + // string "RareSamplerEnabled" + o = append(o, 0xb2, 0x52, 0x61, 0x72, 0x65, 0x53, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x72, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64) + o = msgp.AppendBool(o, z.RareSamplerEnabled) + return +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *AgentPayload) UnmarshalMsg(bts []byte) (o []byte, err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "HostName": + z.HostName, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "HostName") + return + } + case "Env": + z.Env, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Env") + return + } + case "TracerPayloads": + var zb0002 uint32 + zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "TracerPayloads") + return + } + if cap(z.TracerPayloads) >= int(zb0002) { + z.TracerPayloads = (z.TracerPayloads)[:zb0002] + } else { + z.TracerPayloads = make([]*TracerPayload, zb0002) + } + for za0001 := range z.TracerPayloads { + if msgp.IsNil(bts) { + bts, err = msgp.ReadNilBytes(bts) + if err != nil { + return + } + z.TracerPayloads[za0001] = nil + } else { + if z.TracerPayloads[za0001] == nil { + z.TracerPayloads[za0001] = new(TracerPayload) + } + bts, err = z.TracerPayloads[za0001].UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, "TracerPayloads", za0001) + return + } + } + } + case "Tags": + var zb0003 uint32 + zb0003, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = 
msgp.WrapError(err, "Tags") + return + } + if z.Tags == nil { + z.Tags = make(map[string]string, zb0003) + } else if len(z.Tags) > 0 { + for key := range z.Tags { + delete(z.Tags, key) + } + } + for zb0003 > 0 { + var za0002 string + var za0003 string + zb0003-- + za0002, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Tags") + return + } + za0003, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Tags", za0002) + return + } + z.Tags[za0002] = za0003 + } + case "AgentVersion": + z.AgentVersion, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "AgentVersion") + return + } + case "TargetTPS": + z.TargetTPS, bts, err = msgp.ReadFloat64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "TargetTPS") + return + } + case "ErrorTPS": + z.ErrorTPS, bts, err = msgp.ReadFloat64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "ErrorTPS") + return + } + case "RareSamplerEnabled": + z.RareSamplerEnabled, bts, err = msgp.ReadBoolBytes(bts) + if err != nil { + err = msgp.WrapError(err, "RareSamplerEnabled") + return + } + default: + bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + o = bts + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *AgentPayload) Msgsize() (s int) { + s = 1 + 9 + msgp.StringPrefixSize + len(z.HostName) + 4 + msgp.StringPrefixSize + len(z.Env) + 15 + msgp.ArrayHeaderSize + for za0001 := range z.TracerPayloads { + if z.TracerPayloads[za0001] == nil { + s += msgp.NilSize + } else { + s += z.TracerPayloads[za0001].Msgsize() + } + } + s += 5 + msgp.MapHeaderSize + if z.Tags != nil { + for za0002, za0003 := range z.Tags { + _ = za0003 + s += msgp.StringPrefixSize + len(za0002) + msgp.StringPrefixSize + len(za0003) + } + } + s += 13 + msgp.StringPrefixSize + len(z.AgentVersion) + 10 + msgp.Float64Size + 9 + msgp.Float64Size + 19 + msgp.BoolSize + return +} diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace/agent_payload_vtproto.pb.go b/vendor/github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace/agent_payload_vtproto.pb.go new file mode 100644 index 000000000..e4d4f171b --- /dev/null +++ b/vendor/github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace/agent_payload_vtproto.pb.go @@ -0,0 +1,523 @@ +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// protoc-gen-go-vtproto version: v0.4.0 +// source: datadog/trace/agent_payload.proto + +package trace + +import ( + binary "encoding/binary" + fmt "fmt" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + io "io" + math "math" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
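+	// (EnforceVersion is a compile-time guard: the declaration only compiles
+	// while its constant argument is non-negative, so an out-of-window
+	// protobuf runtime fails the build rather than misbehaving at run time.)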
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *AgentPayload) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AgentPayload) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *AgentPayload) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.RareSamplerEnabled { + i-- + if m.RareSamplerEnabled { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x50 + } + if m.ErrorTPS != 0 { + i -= 8 + binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.ErrorTPS)))) + i-- + dAtA[i] = 0x49 + } + if m.TargetTPS != 0 { + i -= 8 + binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.TargetTPS)))) + i-- + dAtA[i] = 0x41 + } + if len(m.AgentVersion) > 0 { + i -= len(m.AgentVersion) + copy(dAtA[i:], m.AgentVersion) + i = encodeVarint(dAtA, i, uint64(len(m.AgentVersion))) + i-- + dAtA[i] = 0x3a + } + if len(m.Tags) > 0 { + for k := range m.Tags { + v := m.Tags[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarint(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x32 + } + } + if len(m.TracerPayloads) > 0 { + for iNdEx := len(m.TracerPayloads) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.TracerPayloads[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } + } + if len(m.Env) > 0 { + i -= len(m.Env) + copy(dAtA[i:], m.Env) + i = encodeVarint(dAtA, i, uint64(len(m.Env))) + i-- + dAtA[i] = 0x12 + } + if len(m.HostName) > 0 { + i -= len(m.HostName) + copy(dAtA[i:], m.HostName) + i = encodeVarint(dAtA, i, uint64(len(m.HostName))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *AgentPayload) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.HostName) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Env) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if len(m.TracerPayloads) > 0 { + for _, e := range m.TracerPayloads { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + if len(m.Tags) > 0 { + for k, v := range m.Tags { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sov(uint64(len(k))) + 1 + len(v) + sov(uint64(len(v))) + n += mapEntrySize + 1 + sov(uint64(mapEntrySize)) + } + } + l = len(m.AgentVersion) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.TargetTPS != 0 { + n += 9 + } + if m.ErrorTPS != 0 { + n += 9 + } + if m.RareSamplerEnabled { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *AgentPayload) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 
4 { + return fmt.Errorf("proto: AgentPayload: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AgentPayload: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HostName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.HostName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Env = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TracerPayloads", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TracerPayloads = append(m.TracerPayloads, &TracerPayload{}) + if err := m.TracerPayloads[len(m.TracerPayloads)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tags", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Tags == nil { + m.Tags = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + 
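+						// a cleared high (continuation) bit marks the last byte of the varint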
break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLength + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLength + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLength + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLength + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Tags[mapkey] = mapvalue + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AgentVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AgentVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 8: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetTPS", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.TargetTPS = float64(math.Float64frombits(v)) + case 9: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field ErrorTPS", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.ErrorTPS = float64(math.Float64frombits(v)) + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RareSamplerEnabled", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.RareSamplerEnabled = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
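+			// the unrecognized field was preserved above in unknownFields; now advance past it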
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace/decoder_bytes.go b/vendor/github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace/decoder_bytes.go new file mode 100644 index 000000000..50bdc4966 --- /dev/null +++ b/vendor/github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace/decoder_bytes.go @@ -0,0 +1,274 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +package trace + +import ( + "bytes" + "errors" + "math" + "strings" + "unicode/utf8" + + "github.com/tinylib/msgp/msgp" +) + +// repairUTF8 ensures all characters in s are UTF-8 by replacing non-UTF-8 characters +// with the replacement char � +func repairUTF8(s string) string { + in := strings.NewReader(s) + var out bytes.Buffer + out.Grow(len(s)) + + for { + r, _, err := in.ReadRune() + if err != nil { + // note: by contract, if `in` contains non-valid utf-8, no error is returned. Rather the utf-8 replacement + // character is returned. Therefore, the only error should usually be io.EOF indicating end of string. + // If any other error is returned by chance, we quit as well, outputting whatever part of the string we + // had already constructed. + return out.String() + } + out.WriteRune(r) + } +} + +// parseStringBytes reads the next type in the msgpack payload and +// converts the BinType or the StrType in a valid string. +func parseStringBytes(bts []byte) (string, []byte, error) { + if msgp.IsNil(bts) { + bts, err := msgp.ReadNilBytes(bts) + return "", bts, err + } + // read the generic representation type without decoding + t := msgp.NextType(bts) + + var ( + err error + i []byte + ) + switch t { + case msgp.BinType: + i, bts, err = msgp.ReadBytesZC(bts) + case msgp.StrType: + i, bts, err = msgp.ReadStringZC(bts) + default: + return "", bts, msgp.TypeError{Encoded: t, Method: msgp.StrType} + } + if err != nil { + return "", bts, err + } + if utf8.Valid(i) { + return string(i), bts, nil + } + return repairUTF8(msgp.UnsafeString(i)), bts, nil +} + +// parseFloat64Bytes parses a float64 even if the sent value is an int64 or an uint64; +// this is required because the encoding library could remove bytes from the encoded +// payload to reduce the size, if they're not needed. +func parseFloat64Bytes(bts []byte) (float64, []byte, error) { + if msgp.IsNil(bts) { + bts, err := msgp.ReadNilBytes(bts) + return 0, bts, err + } + // read the generic representation type without decoding + t := msgp.NextType(bts) + + var err error + switch t { + case msgp.IntType: + var i int64 + i, bts, err = msgp.ReadInt64Bytes(bts) + if err != nil { + return 0, bts, err + } + + return float64(i), bts, nil + case msgp.UintType: + var i uint64 + i, bts, err = msgp.ReadUint64Bytes(bts) + if err != nil { + return 0, bts, err + } + + return float64(i), bts, nil + case msgp.Float64Type: + var f float64 + f, bts, err = msgp.ReadFloat64Bytes(bts) + if err != nil { + return 0, bts, err + } + + return f, bts, nil + default: + return 0, bts, msgp.TypeError{Encoded: t, Method: msgp.Float64Type} + } +} + +// cast to int64 values that are int64 but that are sent in uint64 +// over the wire. Set to 0 if they overflow the MaxInt64 size. 
This +// cast should be used ONLY while decoding int64 values that are +// sent as uint64 to reduce the payload size, otherwise the approach +// is not correct in the general sense. +func castInt64(v uint64) (int64, bool) { + if v > math.MaxInt64 { + return 0, false + } + return int64(v), true +} + +// parseInt64Bytes parses an int64 even if the sent value is an uint64; +// this is required because the encoding library could remove bytes from the encoded +// payload to reduce the size, if they're not needed. +func parseInt64Bytes(bts []byte) (int64, []byte, error) { + if msgp.IsNil(bts) { + bts, err := msgp.ReadNilBytes(bts) + return 0, bts, err + } + // read the generic representation type without decoding + t := msgp.NextType(bts) + + var ( + i int64 + u uint64 + err error + ) + switch t { + case msgp.IntType: + i, bts, err = msgp.ReadInt64Bytes(bts) + if err != nil { + return 0, bts, err + } + return i, bts, nil + case msgp.UintType: + u, bts, err = msgp.ReadUint64Bytes(bts) + if err != nil { + return 0, bts, err + } + + // force-cast + i, ok := castInt64(u) + if !ok { + return 0, bts, errors.New("found uint64, overflows int64") + } + return i, bts, nil + default: + return 0, bts, msgp.TypeError{Encoded: t, Method: msgp.IntType} + } +} + +// parseUint64Bytes parses an uint64 even if the sent value is an int64; +// this is required because the language used for the encoding library +// may not have unsigned types. An example is early version of Java +// (and so JRuby interpreter) that encodes uint64 as int64: +// http://docs.oracle.com/javase/tutorial/java/nutsandbolts/datatypes.html +func parseUint64Bytes(bts []byte) (uint64, []byte, error) { + if msgp.IsNil(bts) { + bts, err := msgp.ReadNilBytes(bts) + return 0, bts, err + } + // read the generic representation type without decoding + t := msgp.NextType(bts) + + var ( + i int64 + u uint64 + err error + ) + switch t { + case msgp.UintType: + u, bts, err = msgp.ReadUint64Bytes(bts) + if err != nil { + return 0, bts, err + } + return u, bts, err + case msgp.IntType: + i, bts, err = msgp.ReadInt64Bytes(bts) + if err != nil { + return 0, bts, err + } + return uint64(i), bts, nil + default: + return 0, bts, msgp.TypeError{Encoded: t, Method: msgp.IntType} + } +} + +// cast to int32 values that are int32 but that are sent in uint32 +// over the wire. Set to 0 if they overflow the MaxInt32 size. This +// cast should be used ONLY while decoding int32 values that are +// sent as uint32 to reduce the payload size, otherwise the approach +// is not correct in the general sense. +func castInt32(v uint32) (int32, bool) { + if v > math.MaxInt32 { + return 0, false + } + return int32(v), true +} + +// parseInt32Bytes parses an int32 even if the sent value is an uint32; +// this is required because the encoding library could remove bytes from the encoded +// payload to reduce the size, if they're not needed. 
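+// A minimal decoding sketch (the bytes here are illustrative, not part of
+// this patch): msgpack encodes 1 as the positive fixint 0x01, which NextType
+// reports as IntType, so decoding takes the first branch:
+//
+//	v, rest, err := parseInt32Bytes([]byte{0x01}) // v == 1, rest empty, err == nil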
+func parseInt32Bytes(bts []byte) (int32, []byte, error) {
+	if msgp.IsNil(bts) {
+		bts, err := msgp.ReadNilBytes(bts)
+		return 0, bts, err
+	}
+	// read the generic representation type without decoding
+	t := msgp.NextType(bts)
+
+	var (
+		i   int32
+		u   uint32
+		err error
+	)
+	switch t {
+	case msgp.IntType:
+		i, bts, err = msgp.ReadInt32Bytes(bts)
+		if err != nil {
+			return 0, bts, err
+		}
+		return i, bts, nil
+	case msgp.UintType:
+		u, bts, err = msgp.ReadUint32Bytes(bts)
+		if err != nil {
+			return 0, bts, err
+		}
+
+		// force-cast
+		i, ok := castInt32(u)
+		if !ok {
+			return 0, bts, errors.New("found uint32, overflows int32")
+		}
+		return i, bts, nil
+	default:
+		return 0, bts, msgp.TypeError{Encoded: t, Method: msgp.IntType}
+	}
+}
+
+// parseBytes reads the next BinType in the msgpack payload.
+//
+//nolint:unused // potentially useful; was used with prior proto definitions
+func parseBytes(bts []byte) ([]byte, []byte, error) {
+	if msgp.IsNil(bts) {
+		bts, err := msgp.ReadNilBytes(bts)
+		return nil, bts, err
+	}
+	// read the generic representation type without decoding
+	t := msgp.NextType(bts)
+
+	switch t {
+	case msgp.BinType:
+		unsafeBytes, bts, err := msgp.ReadBytesZC(bts)
+		if err != nil {
+			return nil, bts, err
+		}
+		safeBytes := make([]byte, len(unsafeBytes))
+		copy(safeBytes, unsafeBytes)
+		return safeBytes, bts, nil
+	default:
+		return nil, bts, msgp.TypeError{Encoded: t, Method: msgp.BinType}
+	}
+}
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace/decoder_v05.go b/vendor/github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace/decoder_v05.go
new file mode 100644
index 000000000..b35f81692
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace/decoder_v05.go
@@ -0,0 +1,220 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package trace
+
+import (
+	"errors"
+	"fmt"
+
+	"github.com/tinylib/msgp/msgp"
+)
+
+// dictionaryString reads a uint32 index from bts and returns the string
+// at that index from dict.
+func dictionaryString(bts []byte, dict []string) (string, []byte, error) {
+	var (
+		ui  uint32
+		err error
+	)
+	ui, bts, err = msgp.ReadUint32Bytes(bts)
+	if err != nil {
+		return "", bts, err
+	}
+	idx := int(ui)
+	if idx >= len(dict) {
+		return "", bts, fmt.Errorf("dictionary index %d out of range", idx)
+	}
+	return dict[idx], bts, nil
+}
+
+// UnmarshalMsgDictionary decodes a trace using the specification from the v0.5 endpoint.
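+// The v0.5 payload is an outer msgpack array wrapping a shared string
+// dictionary and the traces themselves; every string-valued span field is
+// transmitted as a uint32 index into that dictionary, resolved by
+// dictionaryString above.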
+// For details, see the documentation for endpoint v0.5 in pkg/trace/api/version.go +func (t *Traces) UnmarshalMsgDictionary(bts []byte) error { + var err error + if _, bts, err = msgp.ReadArrayHeaderBytes(bts); err != nil { + return err + } + // read dictionary + var sz uint32 + if sz, bts, err = msgp.ReadArrayHeaderBytes(bts); err != nil { + return err + } + if sz > 25*1e6 { // Dictionary can't be larger than 25 MB + return errors.New("too long payload") + } + dict := make([]string, sz) + for i := range dict { + var str string + str, bts, err = parseStringBytes(bts) + if err != nil { + return err + } + dict[i] = str + } + // read traces + sz, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + return err + } + if cap(*t) >= int(sz) { + *t = (*t)[:sz] + } else { + *t = make(Traces, sz) + } + for i := range *t { + sz, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + return err + } + if cap((*t)[i]) >= int(sz) { + (*t)[i] = (*t)[i][:sz] + } else { + (*t)[i] = make(Trace, sz) + } + for j := range (*t)[i] { + if (*t)[i][j] == nil { + (*t)[i][j] = new(Span) + } + if bts, err = (*t)[i][j].UnmarshalMsgDictionary(bts, dict); err != nil { + return err + } + } + } + return nil +} + +// spanPropertyCount specifies the number of top-level properties that a span +// has. +const spanPropertyCount = 12 + +// UnmarshalMsgDictionary decodes a span from the given decoder dc, looking up strings +// in the given dictionary dict. For details, see the documentation for endpoint v0.5 +// in pkg/trace/api/version.go +func (z *Span) UnmarshalMsgDictionary(bts []byte, dict []string) ([]byte, error) { + var ( + sz uint32 + err error + ) + sz, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + return bts, err + } + if sz != spanPropertyCount { + return bts, errors.New("encoded span needs exactly 12 elements in array") + } + // Service (0) + z.Service, bts, err = dictionaryString(bts, dict) + if err != nil { + return bts, err + } + // Name (1) + z.Name, bts, err = dictionaryString(bts, dict) + if err != nil { + return bts, err + } + // Resource (2) + z.Resource, bts, err = dictionaryString(bts, dict) + if err != nil { + return bts, err + } + // TraceID (3) + z.TraceID, bts, err = parseUint64Bytes(bts) + if err != nil { + return bts, err + } + // SpanID (4) + z.SpanID, bts, err = parseUint64Bytes(bts) + if err != nil { + return bts, err + } + // ParentID (5) + z.ParentID, bts, err = parseUint64Bytes(bts) + if err != nil { + return bts, err + } + // Start (6) + z.Start, bts, err = parseInt64Bytes(bts) + if err != nil { + return bts, err + } + // Duration (7) + z.Duration, bts, err = parseInt64Bytes(bts) + if err != nil { + return bts, err + } + // Error (8) + z.Error, bts, err = parseInt32Bytes(bts) + if err != nil { + return bts, err + } + // Meta (9) + sz, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + return bts, err + } + if sz > 25*1e6 { // Dictionary can't be larger than 25 MB + return bts, errors.New("too long payload") + } + if z.Meta == nil && sz > 0 { + z.Meta = make(map[string]string, sz) + } else if len(z.Meta) > 0 { + for key := range z.Meta { + delete(z.Meta, key) + } + } + hook, hookok := MetaHook() + for sz > 0 { + sz-- + var key, val string + key, bts, err = dictionaryString(bts, dict) + if err != nil { + return bts, err + } + val, bts, err = dictionaryString(bts, dict) + if err != nil { + return bts, err + } + if hookok { + z.Meta[key] = hook(key, val) + } else { + z.Meta[key] = val + } + } + // Metrics (10) + sz, bts, err = msgp.ReadMapHeaderBytes(bts) 
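+	// (metric keys are dictionary indices; values tolerate int, uint, and
+	// float encodings via parseFloat64Bytes)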
+	if err != nil {
+		return bts, err
+	}
+	if z.Metrics == nil && sz > 0 {
+		z.Metrics = make(map[string]float64, sz)
+	} else if len(z.Metrics) > 0 {
+		for key := range z.Metrics {
+			delete(z.Metrics, key)
+		}
+	}
+	for sz > 0 {
+		sz--
+		var (
+			key string
+			val float64
+		)
+		key, bts, err = dictionaryString(bts, dict)
+		if err != nil {
+			return bts, err
+		}
+		val, bts, err = parseFloat64Bytes(bts)
+		if err != nil {
+			return bts, err
+		}
+		z.Metrics[key] = val
+	}
+	// Type (11)
+	z.Type, bts, err = dictionaryString(bts, dict)
+	if err != nil {
+		return bts, err
+	}
+	return bts, nil
+}
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace/hook.go b/vendor/github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace/hook.go
new file mode 100644
index 000000000..969f3daa9
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace/hook.go
@@ -0,0 +1,33 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package trace
+
+import (
+	"sync"
+)
+
+var (
+	mu       sync.RWMutex // guards metahook
+	metahook func(_, v string) string
+)
+
+// SetMetaHook registers a callback which will run upon decoding each map
+// entry in the span's Meta field. The hook has the opportunity to alter the
+// value that is assigned to span.Meta[k] at decode time. By default, if no
+// hook is defined, the behaviour is span.Meta[k] = v.
+func SetMetaHook(hook func(k, v string) string) {
+	mu.Lock()
+	defer mu.Unlock()
+	metahook = hook
+}
+
+// MetaHook returns the active meta hook. A MetaHook is a function which is run
+// for each span.Meta[k] = v value and has the opportunity to alter the final v.
+func MetaHook() (hook func(k, v string) string, ok bool) {
+	mu.RLock()
+	defer mu.RUnlock()
+	return metahook, metahook != nil
+}
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace/span.pb.go b/vendor/github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace/span.pb.go
new file mode 100644
index 000000000..6ef3f76a6
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace/span.pb.go
@@ -0,0 +1,307 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// 	protoc-gen-go v1.28.1
+// 	protoc        v3.21.5
+// source: datadog/trace/span.proto
+
+package trace
+
+import (
+	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+	reflect "reflect"
+	sync "sync"
+)
+
+const (
+	// Verify that this generated code is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+	// Verify that runtime/protoimpl is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type Span struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// service is the name of the service with which this span is associated.
+	// @gotags: json:"service" msg:"service"
+	Service string `protobuf:"bytes,1,opt,name=service,proto3" json:"service" msg:"service"`
+	// name is the operation name of this span.
+	// @gotags: json:"name" msg:"name"
+	Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name" msg:"name"`
+	// resource is the resource name of this span, also sometimes called the endpoint (for web spans).
+ // @gotags: json:"resource" msg:"resource" + Resource string `protobuf:"bytes,3,opt,name=resource,proto3" json:"resource" msg:"resource"` + // traceID is the ID of the trace to which this span belongs. + // @gotags: json:"trace_id" msg:"trace_id" + TraceID uint64 `protobuf:"varint,4,opt,name=traceID,proto3" json:"trace_id" msg:"trace_id"` + // spanID is the ID of this span. + // @gotags: json:"span_id" msg:"span_id" + SpanID uint64 `protobuf:"varint,5,opt,name=spanID,proto3" json:"span_id" msg:"span_id"` + // parentID is the ID of this span's parent, or zero if this span has no parent. + // @gotags: json:"parent_id" msg:"parent_id" + ParentID uint64 `protobuf:"varint,6,opt,name=parentID,proto3" json:"parent_id" msg:"parent_id"` + // start is the number of nanoseconds between the Unix epoch and the beginning of this span. + // @gotags: json:"start" msg:"start" + Start int64 `protobuf:"varint,7,opt,name=start,proto3" json:"start" msg:"start"` + // duration is the time length of this span in nanoseconds. + // @gotags: json:"duration" msg:"duration" + Duration int64 `protobuf:"varint,8,opt,name=duration,proto3" json:"duration" msg:"duration"` + // error is 1 if there is an error associated with this span, or 0 if there is not. + // @gotags: json:"error" msg:"error" + Error int32 `protobuf:"varint,9,opt,name=error,proto3" json:"error" msg:"error"` + // meta is a mapping from tag name to tag value for string-valued tags. + // @gotags: json:"meta,omitempty" msg:"meta,omitempty" + Meta map[string]string `protobuf:"bytes,10,rep,name=meta,proto3" json:"meta,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3" msg:"meta,omitempty"` + // metrics is a mapping from tag name to tag value for numeric-valued tags. + // @gotags: json:"metrics,omitempty" msg:"metrics,omitempty" + Metrics map[string]float64 `protobuf:"bytes,11,rep,name=metrics,proto3" json:"metrics,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"fixed64,2,opt,name=value,proto3" msg:"metrics,omitempty"` + // type is the type of the service with which this span is associated. Example values: web, db, lambda. + // @gotags: json:"type" msg:"type" + Type string `protobuf:"bytes,12,opt,name=type,proto3" json:"type" msg:"type"` + // meta_struct is a registry of structured "other" data used by, e.g., AppSec. + // @gotags: json:"meta_struct,omitempty" msg:"meta_struct,omitempty" + MetaStruct map[string][]byte `protobuf:"bytes,13,rep,name=meta_struct,json=metaStruct,proto3" json:"meta_struct,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3" msg:"meta_struct,omitempty"` +} + +func (x *Span) Reset() { + *x = Span{} + if protoimpl.UnsafeEnabled { + mi := &file_datadog_trace_span_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Span) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Span) ProtoMessage() {} + +func (x *Span) ProtoReflect() protoreflect.Message { + mi := &file_datadog_trace_span_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Span.ProtoReflect.Descriptor instead. 
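+// (Kept for backward compatibility: it returns the gzip-compressed raw file
+// descriptor together with the message's index path inside it.)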
+func (*Span) Descriptor() ([]byte, []int) { + return file_datadog_trace_span_proto_rawDescGZIP(), []int{0} +} + +func (x *Span) GetService() string { + if x != nil { + return x.Service + } + return "" +} + +func (x *Span) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Span) GetResource() string { + if x != nil { + return x.Resource + } + return "" +} + +func (x *Span) GetTraceID() uint64 { + if x != nil { + return x.TraceID + } + return 0 +} + +func (x *Span) GetSpanID() uint64 { + if x != nil { + return x.SpanID + } + return 0 +} + +func (x *Span) GetParentID() uint64 { + if x != nil { + return x.ParentID + } + return 0 +} + +func (x *Span) GetStart() int64 { + if x != nil { + return x.Start + } + return 0 +} + +func (x *Span) GetDuration() int64 { + if x != nil { + return x.Duration + } + return 0 +} + +func (x *Span) GetError() int32 { + if x != nil { + return x.Error + } + return 0 +} + +func (x *Span) GetMeta() map[string]string { + if x != nil { + return x.Meta + } + return nil +} + +func (x *Span) GetMetrics() map[string]float64 { + if x != nil { + return x.Metrics + } + return nil +} + +func (x *Span) GetType() string { + if x != nil { + return x.Type + } + return "" +} + +func (x *Span) GetMetaStruct() map[string][]byte { + if x != nil { + return x.MetaStruct + } + return nil +} + +var File_datadog_trace_span_proto protoreflect.FileDescriptor + +var file_datadog_trace_span_proto_rawDesc = []byte{ + 0x0a, 0x18, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2f, + 0x73, 0x70, 0x61, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0d, 0x64, 0x61, 0x74, 0x61, + 0x64, 0x6f, 0x67, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x22, 0xe3, 0x04, 0x0a, 0x04, 0x53, 0x70, + 0x61, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x18, 0x0a, 0x07, + 0x74, 0x72, 0x61, 0x63, 0x65, 0x49, 0x44, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x74, + 0x72, 0x61, 0x63, 0x65, 0x49, 0x44, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x70, 0x61, 0x6e, 0x49, 0x44, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x73, 0x70, 0x61, 0x6e, 0x49, 0x44, 0x12, 0x1a, + 0x0a, 0x08, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x49, 0x44, 0x18, 0x06, 0x20, 0x01, 0x28, 0x04, + 0x52, 0x08, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x49, 0x44, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, + 0x61, 0x72, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, + 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x08, 0x20, 0x01, + 0x28, 0x03, 0x52, 0x08, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, + 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x09, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x65, 0x72, 0x72, + 0x6f, 0x72, 0x12, 0x31, 0x0a, 0x04, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x1d, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, + 0x2e, 0x53, 0x70, 0x61, 0x6e, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, + 0x04, 0x6d, 0x65, 0x74, 0x61, 0x12, 0x3a, 0x0a, 0x07, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, + 0x18, 0x0b, 0x20, 0x03, 0x28, 
0x0b, 0x32, 0x20, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, + 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x53, 0x70, 0x61, 0x6e, 0x2e, 0x4d, 0x65, 0x74, 0x72, + 0x69, 0x63, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, + 0x73, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x44, 0x0a, 0x0b, 0x6d, 0x65, 0x74, 0x61, 0x5f, 0x73, 0x74, + 0x72, 0x75, 0x63, 0x74, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x64, 0x61, 0x74, + 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x53, 0x70, 0x61, 0x6e, 0x2e, + 0x4d, 0x65, 0x74, 0x61, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, + 0x0a, 0x6d, 0x65, 0x74, 0x61, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x1a, 0x37, 0x0a, 0x09, 0x4d, + 0x65, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x3a, 0x0a, 0x0c, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, + 0x1a, 0x3d, 0x0a, 0x0f, 0x4d, 0x65, 0x74, 0x61, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, + 0x16, 0x5a, 0x14, 0x70, 0x6b, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x67, + 0x6f, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_datadog_trace_span_proto_rawDescOnce sync.Once + file_datadog_trace_span_proto_rawDescData = file_datadog_trace_span_proto_rawDesc +) + +func file_datadog_trace_span_proto_rawDescGZIP() []byte { + file_datadog_trace_span_proto_rawDescOnce.Do(func() { + file_datadog_trace_span_proto_rawDescData = protoimpl.X.CompressGZIP(file_datadog_trace_span_proto_rawDescData) + }) + return file_datadog_trace_span_proto_rawDescData +} + +var file_datadog_trace_span_proto_msgTypes = make([]protoimpl.MessageInfo, 4) +var file_datadog_trace_span_proto_goTypes = []interface{}{ + (*Span)(nil), // 0: datadog.trace.Span + nil, // 1: datadog.trace.Span.MetaEntry + nil, // 2: datadog.trace.Span.MetricsEntry + nil, // 3: datadog.trace.Span.MetaStructEntry +} +var file_datadog_trace_span_proto_depIdxs = []int32{ + 1, // 0: datadog.trace.Span.meta:type_name -> datadog.trace.Span.MetaEntry + 2, // 1: datadog.trace.Span.metrics:type_name -> datadog.trace.Span.MetricsEntry + 3, // 2: datadog.trace.Span.meta_struct:type_name -> datadog.trace.Span.MetaStructEntry + 3, // [3:3] is the sub-list for method output_type + 3, // [3:3] is the sub-list for method input_type + 3, // [3:3] is the sub-list for extension type_name + 3, // [3:3] is the sub-list for extension extendee + 0, // [0:3] is the sub-list for field type_name +} + +func init() { file_datadog_trace_span_proto_init() } +func file_datadog_trace_span_proto_init() { + 
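+	// building the descriptor is one-shot: once File_datadog_trace_span_proto
+	// has been published by a previous call, there is nothing left to do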
if File_datadog_trace_span_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_datadog_trace_span_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Span); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_datadog_trace_span_proto_rawDesc, + NumEnums: 0, + NumMessages: 4, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_datadog_trace_span_proto_goTypes, + DependencyIndexes: file_datadog_trace_span_proto_depIdxs, + MessageInfos: file_datadog_trace_span_proto_msgTypes, + }.Build() + File_datadog_trace_span_proto = out.File + file_datadog_trace_span_proto_rawDesc = nil + file_datadog_trace_span_proto_goTypes = nil + file_datadog_trace_span_proto_depIdxs = nil +} diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace/span_gen.go b/vendor/github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace/span_gen.go new file mode 100644 index 000000000..9789822a9 --- /dev/null +++ b/vendor/github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace/span_gen.go @@ -0,0 +1,361 @@ +package trace + +// Code generated by github.com/tinylib/msgp DO NOT EDIT. + +import ( + "github.com/tinylib/msgp/msgp" +) + +// MarshalMsg implements msgp.Marshaler +func (z *Span) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) + // omitempty: check for empty values + zb0001Len := uint32(13) + var zb0001Mask uint16 /* 13 bits */ + if z.Meta == nil { + zb0001Len-- + zb0001Mask |= 0x200 + } + if z.Metrics == nil { + zb0001Len-- + zb0001Mask |= 0x400 + } + if z.MetaStruct == nil { + zb0001Len-- + zb0001Mask |= 0x1000 + } + // variable map header, size zb0001Len + o = append(o, 0x80|uint8(zb0001Len)) + if zb0001Len == 0 { + return + } + // string "service" + o = append(o, 0xa7, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65) + o = msgp.AppendString(o, z.Service) + // string "name" + o = append(o, 0xa4, 0x6e, 0x61, 0x6d, 0x65) + o = msgp.AppendString(o, z.Name) + // string "resource" + o = append(o, 0xa8, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65) + o = msgp.AppendString(o, z.Resource) + // string "trace_id" + o = append(o, 0xa8, 0x74, 0x72, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64) + o = msgp.AppendUint64(o, z.TraceID) + // string "span_id" + o = append(o, 0xa7, 0x73, 0x70, 0x61, 0x6e, 0x5f, 0x69, 0x64) + o = msgp.AppendUint64(o, z.SpanID) + // string "parent_id" + o = append(o, 0xa9, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64) + o = msgp.AppendUint64(o, z.ParentID) + // string "start" + o = append(o, 0xa5, 0x73, 0x74, 0x61, 0x72, 0x74) + o = msgp.AppendInt64(o, z.Start) + // string "duration" + o = append(o, 0xa8, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e) + o = msgp.AppendInt64(o, z.Duration) + // string "error" + o = append(o, 0xa5, 0x65, 0x72, 0x72, 0x6f, 0x72) + o = msgp.AppendInt32(o, z.Error) + if (zb0001Mask & 0x200) == 0 { // if not empty + // string "meta" + o = append(o, 0xa4, 0x6d, 0x65, 0x74, 0x61) + o = msgp.AppendMapHeader(o, uint32(len(z.Meta))) + for za0001, za0002 := range z.Meta { + o = msgp.AppendString(o, za0001) + o = msgp.AppendString(o, za0002) + } + } + if (zb0001Mask & 0x400) == 0 { // if not empty + // string "metrics" + o = append(o, 0xa7, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73) + o = msgp.AppendMapHeader(o, uint32(len(z.Metrics))) + for za0003, za0004 := 
range z.Metrics {
+			o = msgp.AppendString(o, za0003)
+			o = msgp.AppendFloat64(o, za0004)
+		}
+	}
+	// string "type"
+	o = append(o, 0xa4, 0x74, 0x79, 0x70, 0x65)
+	o = msgp.AppendString(o, z.Type)
+	if (zb0001Mask & 0x1000) == 0 { // if not empty
+		// string "meta_struct"
+		o = append(o, 0xab, 0x6d, 0x65, 0x74, 0x61, 0x5f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74)
+		o = msgp.AppendMapHeader(o, uint32(len(z.MetaStruct)))
+		for za0005, za0006 := range z.MetaStruct {
+			o = msgp.AppendString(o, za0005)
+			o = msgp.AppendBytes(o, za0006)
+		}
+	}
+	return
+}
+
+// UnmarshalMsg implements msgp.Unmarshaler
+func (z *Span) UnmarshalMsg(bts []byte) (o []byte, err error) {
+	var field []byte
+	_ = field
+	var zb0001 uint32
+	zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
+	if err != nil {
+		err = msgp.WrapError(err)
+		return
+	}
+	hook, hookok := MetaHook()
+	for zb0001 > 0 {
+		zb0001--
+		field, bts, err = msgp.ReadMapKeyZC(bts)
+		if err != nil {
+			err = msgp.WrapError(err)
+			return
+		}
+		switch msgp.UnsafeString(field) {
+		case "service":
+			if msgp.IsNil(bts) {
+				bts, err = msgp.ReadNilBytes(bts)
+				z.Service = ""
+				break
+			}
+			z.Service, bts, err = parseStringBytes(bts)
+			if err != nil {
+				err = msgp.WrapError(err, "Service")
+				return
+			}
+		case "name":
+			if msgp.IsNil(bts) {
+				bts, err = msgp.ReadNilBytes(bts)
+				z.Name = ""
+				break
+			}
+			z.Name, bts, err = parseStringBytes(bts)
+			if err != nil {
+				err = msgp.WrapError(err, "Name")
+				return
+			}
+		case "resource":
+			if msgp.IsNil(bts) {
+				bts, err = msgp.ReadNilBytes(bts)
+				z.Resource = ""
+				break
+			}
+			z.Resource, bts, err = parseStringBytes(bts)
+			if err != nil {
+				err = msgp.WrapError(err, "Resource")
+				return
+			}
+		case "trace_id":
+			if msgp.IsNil(bts) {
+				bts, err = msgp.ReadNilBytes(bts)
+				z.TraceID = 0
+				break
+			}
+			z.TraceID, bts, err = parseUint64Bytes(bts)
+			if err != nil {
+				err = msgp.WrapError(err, "TraceID")
+				return
+			}
+		case "span_id":
+			if msgp.IsNil(bts) {
+				bts, err = msgp.ReadNilBytes(bts)
+				z.SpanID = 0
+				break
+			}
+			z.SpanID, bts, err = parseUint64Bytes(bts)
+			if err != nil {
+				err = msgp.WrapError(err, "SpanID")
+				return
+			}
+		case "parent_id":
+			if msgp.IsNil(bts) {
+				bts, err = msgp.ReadNilBytes(bts)
+				z.ParentID = 0
+				break
+			}
+			z.ParentID, bts, err = parseUint64Bytes(bts)
+			if err != nil {
+				err = msgp.WrapError(err, "ParentID")
+				return
+			}
+		case "start":
+			if msgp.IsNil(bts) {
+				bts, err = msgp.ReadNilBytes(bts)
+				z.Start = 0
+				break
+			}
+			z.Start, bts, err = parseInt64Bytes(bts)
+			if err != nil {
+				err = msgp.WrapError(err, "Start")
+				return
+			}
+		case "duration":
+			if msgp.IsNil(bts) {
+				bts, err = msgp.ReadNilBytes(bts)
+				z.Duration = 0
+				break
+			}
+			z.Duration, bts, err = parseInt64Bytes(bts)
+			if err != nil {
+				err = msgp.WrapError(err, "Duration")
+				return
+			}
+		case "error":
+			if msgp.IsNil(bts) {
+				bts, err = msgp.ReadNilBytes(bts)
+				z.Error = 0
+				break
+			}
+			z.Error, bts, err = parseInt32Bytes(bts)
+			if err != nil {
+				err = msgp.WrapError(err, "Error")
+				return
+			}
+		case "meta":
+			if msgp.IsNil(bts) {
+				bts, err = msgp.ReadNilBytes(bts)
+				z.Meta = nil
+				break
+			}
+			var zb0002 uint32
+			zb0002, bts, err = msgp.ReadMapHeaderBytes(bts)
+			if err != nil {
+				err = msgp.WrapError(err, "Meta")
+				return
+			}
+			if z.Meta == nil && zb0002 > 0 {
+				z.Meta = make(map[string]string, zb0002)
+			} else if len(z.Meta) > 0 {
+				for key := range z.Meta {
+					delete(z.Meta, key)
+				}
+			}
+			for zb0002 > 0 {
+				var za0001 string
+				var za0002 string
+				zb0002--
+				za0001, bts, err = parseStringBytes(bts)
+				if err != nil {
+					err = msgp.WrapError(err, "Meta")
+					return
+				}
+				za0002, bts, err = parseStringBytes(bts)
+				if err != nil {
+					err = msgp.WrapError(err, "Meta", za0001)
+					return
+				}
+				if hookok {
+					z.Meta[za0001] = hook(za0001, za0002)
+				} else {
+					z.Meta[za0001] = za0002
+				}
+			}
+		case "metrics":
+			if msgp.IsNil(bts) {
+				bts, err = msgp.ReadNilBytes(bts)
+				z.Metrics = nil
+				break
+			}
+			var zb0003 uint32
+			zb0003, bts, err = msgp.ReadMapHeaderBytes(bts)
+			if err != nil {
+				err = msgp.WrapError(err, "Metrics")
+				return
+			}
+			if z.Metrics == nil && zb0003 > 0 {
+				z.Metrics = make(map[string]float64, zb0003)
+			} else if len(z.Metrics) > 0 {
+				for key := range z.Metrics {
+					delete(z.Metrics, key)
+				}
+			}
+			for zb0003 > 0 {
+				var za0003 string
+				var za0004 float64
+				zb0003--
+				za0003, bts, err = parseStringBytes(bts)
+				if err != nil {
+					err = msgp.WrapError(err, "Metrics")
+					return
+				}
+				za0004, bts, err = parseFloat64Bytes(bts)
+				if err != nil {
+					err = msgp.WrapError(err, "Metrics", za0003)
+					return
+				}
+				z.Metrics[za0003] = za0004
+			}
+		case "type":
+			if msgp.IsNil(bts) {
+				bts, err = msgp.ReadNilBytes(bts)
+				z.Type = ""
+				break
+			}
+			z.Type, bts, err = parseStringBytes(bts)
+			if err != nil {
+				err = msgp.WrapError(err, "Type")
+				return
+			}
+		case "meta_struct":
+			var zb0004 uint32
+			zb0004, bts, err = msgp.ReadMapHeaderBytes(bts)
+			if err != nil {
+				err = msgp.WrapError(err, "MetaStruct")
+				return
+			}
+			if z.MetaStruct == nil {
+				z.MetaStruct = make(map[string][]byte, zb0004)
+			} else if len(z.MetaStruct) > 0 {
+				for key := range z.MetaStruct {
+					delete(z.MetaStruct, key)
+				}
+			}
+			for zb0004 > 0 {
+				var za0005 string
+				var za0006 []byte
+				zb0004--
+				za0005, bts, err = msgp.ReadStringBytes(bts)
+				if err != nil {
+					err = msgp.WrapError(err, "MetaStruct")
+					return
+				}
+				za0006, bts, err = msgp.ReadBytesBytes(bts, za0006)
+				if err != nil {
+					err = msgp.WrapError(err, "MetaStruct", za0005)
+					return
+				}
+				z.MetaStruct[za0005] = za0006
+			}
+		default:
+			bts, err = msgp.Skip(bts)
+			if err != nil {
+				err = msgp.WrapError(err)
+				return
+			}
+		}
+	}
+	o = bts
+	return
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z *Span) Msgsize() (s int) {
+	s = 1 + 8 + msgp.StringPrefixSize + len(z.Service) + 5 + msgp.StringPrefixSize + len(z.Name) + 9 + msgp.StringPrefixSize + len(z.Resource) + 9 + msgp.Uint64Size + 8 + msgp.Uint64Size + 10 + msgp.Uint64Size + 6 + msgp.Int64Size + 9 + msgp.Int64Size + 6 + msgp.Int32Size + 5 + msgp.MapHeaderSize
+	if z.Meta != nil {
+		for za0001, za0002 := range z.Meta {
+			_ = za0002
+			s += msgp.StringPrefixSize + len(za0001) + msgp.StringPrefixSize + len(za0002)
+		}
+	}
+	s += 8 + msgp.MapHeaderSize
+	if z.Metrics != nil {
+		for za0003, za0004 := range z.Metrics {
+			_ = za0004
+			s += msgp.StringPrefixSize + len(za0003) + msgp.Float64Size
+		}
+	}
+	s += 5 + msgp.StringPrefixSize + len(z.Type) + 12 + msgp.MapHeaderSize
+	if z.MetaStruct != nil {
+		for za0005, za0006 := range z.MetaStruct {
+			_ = za0006
+			s += msgp.StringPrefixSize + len(za0005) + msgp.BytesPrefixSize + len(za0006)
+		}
+	}
+	return
+}
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace/span_utils.go b/vendor/github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace/span_utils.go
new file mode 100644
index 000000000..06a1524a2
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace/span_utils.go
@@ -0,0 +1,51 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +package trace + +// spanCopiedFields records the fields that are copied in ShallowCopy. +// This should match exactly the fields set in (*Span).ShallowCopy. +// This is used by tests to enforce the correctness of ShallowCopy. +var spanCopiedFields = map[string]struct{}{ + "Service": {}, + "Name": {}, + "Resource": {}, + "TraceID": {}, + "SpanID": {}, + "ParentID": {}, + "Start": {}, + "Duration": {}, + "Error": {}, + "Meta": {}, + "Metrics": {}, + "Type": {}, + "MetaStruct": {}, +} + +// ShallowCopy returns a shallow copy of the copy-able portion of a Span. These are the +// public fields which will have a Get* method for them. The completeness of this +// method is enforced by the init function above. Instead of using pkg/proto/utils.ProtoCopier, +// which incurs heavy reflection cost for every copy at runtime, we use reflection once at +// startup to ensure our method is complete. +func (s *Span) ShallowCopy() *Span { + if s == nil { + return &Span{} + } + return &Span{ + Service: s.Service, + Name: s.Name, + Resource: s.Resource, + TraceID: s.TraceID, + SpanID: s.SpanID, + ParentID: s.ParentID, + Start: s.Start, + Duration: s.Duration, + Error: s.Error, + Meta: s.Meta, + Metrics: s.Metrics, + Type: s.Type, + MetaStruct: s.MetaStruct, + } +} diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace/span_vtproto.pb.go b/vendor/github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace/span_vtproto.pb.go new file mode 100644 index 000000000..d313cb30a --- /dev/null +++ b/vendor/github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace/span_vtproto.pb.go @@ -0,0 +1,994 @@ +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// protoc-gen-go-vtproto version: v0.4.0 +// source: datadog/trace/span.proto + +package trace + +import ( + binary "encoding/binary" + fmt "fmt" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + io "io" + math "math" + bits "math/bits" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *Span) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Span) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *Span) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.MetaStruct) > 0 { + for k := range m.MetaStruct { + v := m.MetaStruct[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarint(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x6a + } + } + if len(m.Type) > 0 { + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarint(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0x62 + } + if len(m.Metrics) > 0 { + for k := range m.Metrics { + v := m.Metrics[k] + baseI := i + i -= 8 + binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(v)))) + i-- + dAtA[i] = 0x11 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x5a + } + } + if len(m.Meta) > 0 { + for k := range m.Meta { + v := m.Meta[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarint(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x52 + } + } + if m.Error != 0 { + i = encodeVarint(dAtA, i, uint64(m.Error)) + i-- + dAtA[i] = 0x48 + } + if m.Duration != 0 { + i = encodeVarint(dAtA, i, uint64(m.Duration)) + i-- + dAtA[i] = 0x40 + } + if m.Start != 0 { + i = encodeVarint(dAtA, i, uint64(m.Start)) + i-- + dAtA[i] = 0x38 + } + if m.ParentID != 0 { + i = encodeVarint(dAtA, i, uint64(m.ParentID)) + i-- + dAtA[i] = 0x30 + } + if m.SpanID != 0 { + i = encodeVarint(dAtA, i, uint64(m.SpanID)) + i-- + dAtA[i] = 0x28 + } + if m.TraceID != 0 { + i = encodeVarint(dAtA, i, uint64(m.TraceID)) + i-- + dAtA[i] = 0x20 + } + if len(m.Resource) > 0 { + i -= len(m.Resource) + copy(dAtA[i:], m.Resource) + i = encodeVarint(dAtA, i, uint64(len(m.Resource))) + i-- + dAtA[i] = 0x1a + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x12 + } + if len(m.Service) > 0 { + i -= len(m.Service) + copy(dAtA[i:], m.Service) + i = encodeVarint(dAtA, i, uint64(len(m.Service))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarint(dAtA []byte, offset int, v uint64) int { + offset -= sov(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Span) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Service) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Name) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Resource) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.TraceID != 0 { + n += 1 + 
sov(uint64(m.TraceID)) + } + if m.SpanID != 0 { + n += 1 + sov(uint64(m.SpanID)) + } + if m.ParentID != 0 { + n += 1 + sov(uint64(m.ParentID)) + } + if m.Start != 0 { + n += 1 + sov(uint64(m.Start)) + } + if m.Duration != 0 { + n += 1 + sov(uint64(m.Duration)) + } + if m.Error != 0 { + n += 1 + sov(uint64(m.Error)) + } + if len(m.Meta) > 0 { + for k, v := range m.Meta { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sov(uint64(len(k))) + 1 + len(v) + sov(uint64(len(v))) + n += mapEntrySize + 1 + sov(uint64(mapEntrySize)) + } + } + if len(m.Metrics) > 0 { + for k, v := range m.Metrics { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sov(uint64(len(k))) + 1 + 8 + n += mapEntrySize + 1 + sov(uint64(mapEntrySize)) + } + } + l = len(m.Type) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if len(m.MetaStruct) > 0 { + for k, v := range m.MetaStruct { + _ = k + _ = v + l = 1 + len(v) + sov(uint64(len(v))) + mapEntrySize := 1 + len(k) + sov(uint64(len(k))) + l + n += mapEntrySize + 1 + sov(uint64(mapEntrySize)) + } + } + n += len(m.unknownFields) + return n +} + +func sov(x uint64) (n int) { + return (bits.Len64(x|1) + 6) / 7 +} +func soz(x uint64) (n int) { + return sov(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Span) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Span: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Span: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Service", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Service = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 
0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resource = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TraceID", wireType) + } + m.TraceID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TraceID |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SpanID", wireType) + } + m.SpanID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.SpanID |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ParentID", wireType) + } + m.ParentID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ParentID |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Start", wireType) + } + m.Start = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Start |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Duration", wireType) + } + m.Duration = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Duration |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + m.Error = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Error |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Meta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Meta == nil { + m.Meta = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; 
shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLength + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLength + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLength + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLength + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Meta[mapkey] = mapvalue + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metrics", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Metrics == nil { + m.Metrics = make(map[string]float64) + } + var mapkey string + var mapvalue float64 + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLength + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLength + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapvaluetemp uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + mapvaluetemp = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + mapvalue = math.Float64frombits(mapvaluetemp) + } else { + iNdEx = entryPreIndex + skippy, err := 
skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Metrics[mapkey] = mapvalue + iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MetaStruct", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.MetaStruct == nil { + m.MetaStruct = make(map[string][]byte) + } + var mapkey string + var mapvalue []byte + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLength + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLength + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapbyteLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapbyteLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intMapbyteLen := int(mapbyteLen) + if intMapbyteLen < 0 { + return ErrInvalidLength + } + postbytesIndex := iNdEx + intMapbyteLen + if postbytesIndex < 0 { + return ErrInvalidLength + } + if postbytesIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = make([]byte, mapbyteLen) + copy(mapvalue, dAtA[iNdEx:postbytesIndex]) + iNdEx = postbytesIndex + } else { + iNdEx = entryPreIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.MetaStruct[mapkey] = mapvalue + iNdEx = 
postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} + +func skip(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflow + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflow + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflow + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLength + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroup + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLength + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLength = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflow = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroup = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace/stats.pb.go b/vendor/github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace/stats.pb.go new file mode 100644 index 000000000..ab840b5fe --- /dev/null +++ b/vendor/github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace/stats.pb.go @@ -0,0 +1,677 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.1 +// protoc v3.21.5 +// source: datadog/trace/stats.proto + +package trace + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// StatsPayload is the payload used to send stats from the agent to the backend. 
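+// It wraps one or more ClientStatsPayload values, each of which holds
+// time-bucketed ClientStatsBucket entries of aggregated ClientGroupedStats.
+// A minimal construction sketch showing how the three levels nest (all
+// values are illustrative, and proto is google.golang.org/protobuf/proto):
+//
+//	payload := &StatsPayload{
+//		AgentHostname: "agent-host",
+//		AgentEnv:      "prod",
+//		Stats: []*ClientStatsPayload{{
+//			Hostname: "tracer-host",
+//			Env:      "prod",
+//			Stats: []*ClientStatsBucket{{
+//				Start:    1700000000000000000, // bucket start in nanoseconds
+//				Duration: 10_000_000_000,      // 10s bucket width in nanoseconds
+//			}},
+//		}},
+//	}
+//	raw, err := proto.Marshal(payload)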
+type StatsPayload struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + AgentHostname string `protobuf:"bytes,1,opt,name=agentHostname,proto3" json:"agentHostname,omitempty"` + AgentEnv string `protobuf:"bytes,2,opt,name=agentEnv,proto3" json:"agentEnv,omitempty"` + // @gotags: json:"stats,omitempty" msg:"Stats,omitempty" + Stats []*ClientStatsPayload `protobuf:"bytes,3,rep,name=stats,proto3" json:"stats,omitempty" msg:"Stats,omitempty"` + AgentVersion string `protobuf:"bytes,4,opt,name=agentVersion,proto3" json:"agentVersion,omitempty"` + ClientComputed bool `protobuf:"varint,5,opt,name=clientComputed,proto3" json:"clientComputed,omitempty"` +} + +func (x *StatsPayload) Reset() { + *x = StatsPayload{} + if protoimpl.UnsafeEnabled { + mi := &file_datadog_trace_stats_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StatsPayload) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StatsPayload) ProtoMessage() {} + +func (x *StatsPayload) ProtoReflect() protoreflect.Message { + mi := &file_datadog_trace_stats_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StatsPayload.ProtoReflect.Descriptor instead. +func (*StatsPayload) Descriptor() ([]byte, []int) { + return file_datadog_trace_stats_proto_rawDescGZIP(), []int{0} +} + +func (x *StatsPayload) GetAgentHostname() string { + if x != nil { + return x.AgentHostname + } + return "" +} + +func (x *StatsPayload) GetAgentEnv() string { + if x != nil { + return x.AgentEnv + } + return "" +} + +func (x *StatsPayload) GetStats() []*ClientStatsPayload { + if x != nil { + return x.Stats + } + return nil +} + +func (x *StatsPayload) GetAgentVersion() string { + if x != nil { + return x.AgentVersion + } + return "" +} + +func (x *StatsPayload) GetClientComputed() bool { + if x != nil { + return x.ClientComputed + } + return false +} + +// ClientStatsPayload is the first layer of span stats aggregation. It is also +// the payload sent by tracers to the agent when stats in tracer are enabled. +type ClientStatsPayload struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Hostname is the tracer hostname. It's extracted from spans with "_dd.hostname" meta + // or set by tracer stats payload when hostname reporting is enabled. 
+ Hostname string `protobuf:"bytes,1,opt,name=hostname,proto3" json:"hostname,omitempty"` + Env string `protobuf:"bytes,2,opt,name=env,proto3" json:"env,omitempty"` // env tag set on spans or in the tracers, used for aggregation + Version string `protobuf:"bytes,3,opt,name=version,proto3" json:"version,omitempty"` // version tag set on spans or in the tracers, used for aggregation + // @gotags: json:"stats,omitempty" msg:"Stats,omitempty" + Stats []*ClientStatsBucket `protobuf:"bytes,4,rep,name=stats,proto3" json:"stats,omitempty" msg:"Stats,omitempty"` + Lang string `protobuf:"bytes,5,opt,name=lang,proto3" json:"lang,omitempty"` // informative field not used for aggregation + TracerVersion string `protobuf:"bytes,6,opt,name=tracerVersion,proto3" json:"tracerVersion,omitempty"` // informative field not used for aggregation + RuntimeID string `protobuf:"bytes,7,opt,name=runtimeID,proto3" json:"runtimeID,omitempty"` // used on stats payloads sent by the tracer to identify uniquely a message + Sequence uint64 `protobuf:"varint,8,opt,name=sequence,proto3" json:"sequence,omitempty"` // used on stats payloads sent by the tracer to identify uniquely a message + // AgentAggregation is set by the agent on tracer payloads modified by the agent aggregation layer + // characterizes counts only and distributions only payloads + AgentAggregation string `protobuf:"bytes,9,opt,name=agentAggregation,proto3" json:"agentAggregation,omitempty"` + // Service is the main service of the tracer. + // It is part of unified tagging: https://docs.datadoghq.com/getting_started/tagging/unified_service_tagging + Service string `protobuf:"bytes,10,opt,name=service,proto3" json:"service,omitempty"` + // ContainerID specifies the origin container ID. It is meant to be populated by the client and may + // be enhanced by the agent to ensure it is unique. + ContainerID string `protobuf:"bytes,11,opt,name=containerID,proto3" json:"containerID,omitempty"` + // Tags specifies a set of tags obtained from the orchestrator (where applicable) using the specified containerID. + // This field should be left empty by the client. It only applies to some specific environment. + Tags []string `protobuf:"bytes,12,rep,name=tags,proto3" json:"tags,omitempty"` +} + +func (x *ClientStatsPayload) Reset() { + *x = ClientStatsPayload{} + if protoimpl.UnsafeEnabled { + mi := &file_datadog_trace_stats_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ClientStatsPayload) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ClientStatsPayload) ProtoMessage() {} + +func (x *ClientStatsPayload) ProtoReflect() protoreflect.Message { + mi := &file_datadog_trace_stats_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ClientStatsPayload.ProtoReflect.Descriptor instead. 
+func (*ClientStatsPayload) Descriptor() ([]byte, []int) { + return file_datadog_trace_stats_proto_rawDescGZIP(), []int{1} +} + +func (x *ClientStatsPayload) GetHostname() string { + if x != nil { + return x.Hostname + } + return "" +} + +func (x *ClientStatsPayload) GetEnv() string { + if x != nil { + return x.Env + } + return "" +} + +func (x *ClientStatsPayload) GetVersion() string { + if x != nil { + return x.Version + } + return "" +} + +func (x *ClientStatsPayload) GetStats() []*ClientStatsBucket { + if x != nil { + return x.Stats + } + return nil +} + +func (x *ClientStatsPayload) GetLang() string { + if x != nil { + return x.Lang + } + return "" +} + +func (x *ClientStatsPayload) GetTracerVersion() string { + if x != nil { + return x.TracerVersion + } + return "" +} + +func (x *ClientStatsPayload) GetRuntimeID() string { + if x != nil { + return x.RuntimeID + } + return "" +} + +func (x *ClientStatsPayload) GetSequence() uint64 { + if x != nil { + return x.Sequence + } + return 0 +} + +func (x *ClientStatsPayload) GetAgentAggregation() string { + if x != nil { + return x.AgentAggregation + } + return "" +} + +func (x *ClientStatsPayload) GetService() string { + if x != nil { + return x.Service + } + return "" +} + +func (x *ClientStatsPayload) GetContainerID() string { + if x != nil { + return x.ContainerID + } + return "" +} + +func (x *ClientStatsPayload) GetTags() []string { + if x != nil { + return x.Tags + } + return nil +} + +// ClientStatsBucket is a time bucket containing aggregated stats. +type ClientStatsBucket struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Start uint64 `protobuf:"varint,1,opt,name=start,proto3" json:"start,omitempty"` // bucket start in nanoseconds + Duration uint64 `protobuf:"varint,2,opt,name=duration,proto3" json:"duration,omitempty"` // bucket duration in nanoseconds + // @gotags: json:"stats,omitempty" msg:"Stats,omitempty" + Stats []*ClientGroupedStats `protobuf:"bytes,3,rep,name=stats,proto3" json:"stats,omitempty" msg:"Stats,omitempty"` + // AgentTimeShift is the shift applied by the agent stats aggregator on bucket start + // when the received bucket start is outside of the agent aggregation window + AgentTimeShift int64 `protobuf:"varint,4,opt,name=agentTimeShift,proto3" json:"agentTimeShift,omitempty"` +} + +func (x *ClientStatsBucket) Reset() { + *x = ClientStatsBucket{} + if protoimpl.UnsafeEnabled { + mi := &file_datadog_trace_stats_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ClientStatsBucket) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ClientStatsBucket) ProtoMessage() {} + +func (x *ClientStatsBucket) ProtoReflect() protoreflect.Message { + mi := &file_datadog_trace_stats_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ClientStatsBucket.ProtoReflect.Descriptor instead. 
+func (*ClientStatsBucket) Descriptor() ([]byte, []int) { + return file_datadog_trace_stats_proto_rawDescGZIP(), []int{2} +} + +func (x *ClientStatsBucket) GetStart() uint64 { + if x != nil { + return x.Start + } + return 0 +} + +func (x *ClientStatsBucket) GetDuration() uint64 { + if x != nil { + return x.Duration + } + return 0 +} + +func (x *ClientStatsBucket) GetStats() []*ClientGroupedStats { + if x != nil { + return x.Stats + } + return nil +} + +func (x *ClientStatsBucket) GetAgentTimeShift() int64 { + if x != nil { + return x.AgentTimeShift + } + return 0 +} + +// ClientGroupedStats aggregate stats on spans grouped by service, name, resource, status_code, type +type ClientGroupedStats struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Service string `protobuf:"bytes,1,opt,name=service,proto3" json:"service,omitempty"` + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` + Resource string `protobuf:"bytes,3,opt,name=resource,proto3" json:"resource,omitempty"` + HTTPStatusCode uint32 `protobuf:"varint,4,opt,name=HTTP_status_code,json=HTTPStatusCode,proto3" json:"HTTP_status_code,omitempty"` + Type string `protobuf:"bytes,5,opt,name=type,proto3" json:"type,omitempty"` + DBType string `protobuf:"bytes,6,opt,name=DB_type,json=DBType,proto3" json:"DB_type,omitempty"` // db_type might be used in the future to help in the obfuscation step + Hits uint64 `protobuf:"varint,7,opt,name=hits,proto3" json:"hits,omitempty"` // count of all spans aggregated in the groupedstats + Errors uint64 `protobuf:"varint,8,opt,name=errors,proto3" json:"errors,omitempty"` // count of error spans aggregated in the groupedstats + Duration uint64 `protobuf:"varint,9,opt,name=duration,proto3" json:"duration,omitempty"` // total duration in nanoseconds of spans aggregated in the bucket + OkSummary []byte `protobuf:"bytes,10,opt,name=okSummary,proto3" json:"okSummary,omitempty"` // ddsketch summary of ok spans latencies encoded in protobuf + ErrorSummary []byte `protobuf:"bytes,11,opt,name=errorSummary,proto3" json:"errorSummary,omitempty"` // ddsketch summary of error spans latencies encoded in protobuf + Synthetics bool `protobuf:"varint,12,opt,name=synthetics,proto3" json:"synthetics,omitempty"` // set to true on spans generated by synthetics traffic + TopLevelHits uint64 `protobuf:"varint,13,opt,name=topLevelHits,proto3" json:"topLevelHits,omitempty"` // count of top level spans aggregated in the groupedstats + PeerService string `protobuf:"bytes,14,opt,name=peer_service,json=peerService,proto3" json:"peer_service,omitempty"` // name of the remote service that the `service` communicated with + SpanKind string `protobuf:"bytes,15,opt,name=span_kind,json=spanKind,proto3" json:"span_kind,omitempty"` // value of the span.kind tag on the span +} + +func (x *ClientGroupedStats) Reset() { + *x = ClientGroupedStats{} + if protoimpl.UnsafeEnabled { + mi := &file_datadog_trace_stats_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ClientGroupedStats) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ClientGroupedStats) ProtoMessage() {} + +func (x *ClientGroupedStats) ProtoReflect() protoreflect.Message { + mi := &file_datadog_trace_stats_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return 
mi.MessageOf(x) +} + +// Deprecated: Use ClientGroupedStats.ProtoReflect.Descriptor instead. +func (*ClientGroupedStats) Descriptor() ([]byte, []int) { + return file_datadog_trace_stats_proto_rawDescGZIP(), []int{3} +} + +func (x *ClientGroupedStats) GetService() string { + if x != nil { + return x.Service + } + return "" +} + +func (x *ClientGroupedStats) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *ClientGroupedStats) GetResource() string { + if x != nil { + return x.Resource + } + return "" +} + +func (x *ClientGroupedStats) GetHTTPStatusCode() uint32 { + if x != nil { + return x.HTTPStatusCode + } + return 0 +} + +func (x *ClientGroupedStats) GetType() string { + if x != nil { + return x.Type + } + return "" +} + +func (x *ClientGroupedStats) GetDBType() string { + if x != nil { + return x.DBType + } + return "" +} + +func (x *ClientGroupedStats) GetHits() uint64 { + if x != nil { + return x.Hits + } + return 0 +} + +func (x *ClientGroupedStats) GetErrors() uint64 { + if x != nil { + return x.Errors + } + return 0 +} + +func (x *ClientGroupedStats) GetDuration() uint64 { + if x != nil { + return x.Duration + } + return 0 +} + +func (x *ClientGroupedStats) GetOkSummary() []byte { + if x != nil { + return x.OkSummary + } + return nil +} + +func (x *ClientGroupedStats) GetErrorSummary() []byte { + if x != nil { + return x.ErrorSummary + } + return nil +} + +func (x *ClientGroupedStats) GetSynthetics() bool { + if x != nil { + return x.Synthetics + } + return false +} + +func (x *ClientGroupedStats) GetTopLevelHits() uint64 { + if x != nil { + return x.TopLevelHits + } + return 0 +} + +func (x *ClientGroupedStats) GetPeerService() string { + if x != nil { + return x.PeerService + } + return "" +} + +func (x *ClientGroupedStats) GetSpanKind() string { + if x != nil { + return x.SpanKind + } + return "" +} + +var File_datadog_trace_stats_proto protoreflect.FileDescriptor + +var file_datadog_trace_stats_proto_rawDesc = []byte{ + 0x0a, 0x19, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2f, + 0x73, 0x74, 0x61, 0x74, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0d, 0x64, 0x61, 0x74, + 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x22, 0xd5, 0x01, 0x0a, 0x0c, 0x53, + 0x74, 0x61, 0x74, 0x73, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x24, 0x0a, 0x0d, 0x61, + 0x67, 0x65, 0x6e, 0x74, 0x48, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0d, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x48, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, + 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x45, 0x6e, 0x76, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x08, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x45, 0x6e, 0x76, 0x12, 0x37, 0x0a, + 0x05, 0x73, 0x74, 0x61, 0x74, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x64, + 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x43, 0x6c, 0x69, + 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x73, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x52, + 0x05, 0x73, 0x74, 0x61, 0x74, 0x73, 0x12, 0x22, 0x0a, 0x0c, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x56, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x61, 0x67, + 0x65, 0x6e, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x26, 0x0a, 0x0e, 0x63, 0x6c, + 0x69, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6d, 0x70, 0x75, 0x74, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x0e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6d, 0x70, 
0x75, 0x74, + 0x65, 0x64, 0x22, 0x84, 0x03, 0x0a, 0x12, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, + 0x74, 0x73, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x68, 0x6f, 0x73, + 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x68, 0x6f, 0x73, + 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x76, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x03, 0x65, 0x6e, 0x76, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x12, 0x36, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x20, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, + 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x73, 0x42, 0x75, 0x63, 0x6b, + 0x65, 0x74, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x6c, 0x61, 0x6e, + 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6c, 0x61, 0x6e, 0x67, 0x12, 0x24, 0x0a, + 0x0d, 0x74, 0x72, 0x61, 0x63, 0x65, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x74, 0x72, 0x61, 0x63, 0x65, 0x72, 0x56, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x12, 0x1c, 0x0a, 0x09, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x49, 0x44, + 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x49, + 0x44, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x08, 0x20, + 0x01, 0x28, 0x04, 0x52, 0x08, 0x73, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x2a, 0x0a, + 0x10, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x41, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x41, 0x67, + 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, + 0x49, 0x44, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, + 0x6e, 0x65, 0x72, 0x49, 0x44, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x61, 0x67, 0x73, 0x18, 0x0c, 0x20, + 0x03, 0x28, 0x09, 0x52, 0x04, 0x74, 0x61, 0x67, 0x73, 0x22, 0xa6, 0x01, 0x0a, 0x11, 0x43, 0x6c, + 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x73, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, + 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, + 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x37, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x21, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, + 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x65, 0x64, 0x53, 0x74, + 0x61, 0x74, 0x73, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x73, 0x12, 0x26, 0x0a, 0x0e, 0x61, 0x67, + 0x65, 0x6e, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x68, 0x69, 0x66, 0x74, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x03, 0x52, 0x0e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x68, 0x69, + 0x66, 0x74, 0x22, 0xc3, 0x03, 0x0a, 0x12, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x47, 0x72, 0x6f, + 0x75, 0x70, 
0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x12, 0x28, 0x0a, 0x10, 0x48, 0x54, 0x54, 0x50, 0x5f, 0x73, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0e, 0x48, + 0x54, 0x54, 0x50, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x12, 0x0a, + 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, + 0x65, 0x12, 0x17, 0x0a, 0x07, 0x44, 0x42, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x06, 0x44, 0x42, 0x54, 0x79, 0x70, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x69, + 0x74, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, 0x68, 0x69, 0x74, 0x73, 0x12, 0x16, + 0x0a, 0x06, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, + 0x65, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x09, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x1c, 0x0a, 0x09, 0x6f, 0x6b, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, + 0x0a, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x6f, 0x6b, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, + 0x12, 0x22, 0x0a, 0x0c, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, + 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0c, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x53, 0x75, 0x6d, + 0x6d, 0x61, 0x72, 0x79, 0x12, 0x1e, 0x0a, 0x0a, 0x73, 0x79, 0x6e, 0x74, 0x68, 0x65, 0x74, 0x69, + 0x63, 0x73, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x73, 0x79, 0x6e, 0x74, 0x68, 0x65, + 0x74, 0x69, 0x63, 0x73, 0x12, 0x22, 0x0a, 0x0c, 0x74, 0x6f, 0x70, 0x4c, 0x65, 0x76, 0x65, 0x6c, + 0x48, 0x69, 0x74, 0x73, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0c, 0x74, 0x6f, 0x70, 0x4c, + 0x65, 0x76, 0x65, 0x6c, 0x48, 0x69, 0x74, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x70, 0x65, 0x65, 0x72, + 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, + 0x70, 0x65, 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x73, + 0x70, 0x61, 0x6e, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, + 0x73, 0x70, 0x61, 0x6e, 0x4b, 0x69, 0x6e, 0x64, 0x42, 0x16, 0x5a, 0x14, 0x70, 0x6b, 0x67, 0x2f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x67, 0x6f, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65, + 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_datadog_trace_stats_proto_rawDescOnce sync.Once + file_datadog_trace_stats_proto_rawDescData = file_datadog_trace_stats_proto_rawDesc +) + +func file_datadog_trace_stats_proto_rawDescGZIP() []byte { + file_datadog_trace_stats_proto_rawDescOnce.Do(func() { + file_datadog_trace_stats_proto_rawDescData = protoimpl.X.CompressGZIP(file_datadog_trace_stats_proto_rawDescData) + }) + return file_datadog_trace_stats_proto_rawDescData +} + +var file_datadog_trace_stats_proto_msgTypes = make([]protoimpl.MessageInfo, 4) +var file_datadog_trace_stats_proto_goTypes = []interface{}{ + (*StatsPayload)(nil), // 0: datadog.trace.StatsPayload + 
(*ClientStatsPayload)(nil), // 1: datadog.trace.ClientStatsPayload + (*ClientStatsBucket)(nil), // 2: datadog.trace.ClientStatsBucket + (*ClientGroupedStats)(nil), // 3: datadog.trace.ClientGroupedStats +} +var file_datadog_trace_stats_proto_depIdxs = []int32{ + 1, // 0: datadog.trace.StatsPayload.stats:type_name -> datadog.trace.ClientStatsPayload + 2, // 1: datadog.trace.ClientStatsPayload.stats:type_name -> datadog.trace.ClientStatsBucket + 3, // 2: datadog.trace.ClientStatsBucket.stats:type_name -> datadog.trace.ClientGroupedStats + 3, // [3:3] is the sub-list for method output_type + 3, // [3:3] is the sub-list for method input_type + 3, // [3:3] is the sub-list for extension type_name + 3, // [3:3] is the sub-list for extension extendee + 0, // [0:3] is the sub-list for field type_name +} + +func init() { file_datadog_trace_stats_proto_init() } +func file_datadog_trace_stats_proto_init() { + if File_datadog_trace_stats_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_datadog_trace_stats_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StatsPayload); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_datadog_trace_stats_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ClientStatsPayload); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_datadog_trace_stats_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ClientStatsBucket); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_datadog_trace_stats_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ClientGroupedStats); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_datadog_trace_stats_proto_rawDesc, + NumEnums: 0, + NumMessages: 4, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_datadog_trace_stats_proto_goTypes, + DependencyIndexes: file_datadog_trace_stats_proto_depIdxs, + MessageInfos: file_datadog_trace_stats_proto_msgTypes, + }.Build() + File_datadog_trace_stats_proto = out.File + file_datadog_trace_stats_proto_rawDesc = nil + file_datadog_trace_stats_proto_goTypes = nil + file_datadog_trace_stats_proto_depIdxs = nil +} diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace/stats_gen.go b/vendor/github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace/stats_gen.go new file mode 100644 index 000000000..14ce7bced --- /dev/null +++ b/vendor/github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace/stats_gen.go @@ -0,0 +1,1591 @@ +package trace + +// Code generated by github.com/tinylib/msgp DO NOT EDIT. 
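+//
+// This file provides MessagePack codecs (msgp.Encodable, msgp.Decodable,
+// msgp.Marshaler, msgp.Unmarshaler) for the stats types declared in
+// stats.pb.go. A minimal round-trip sketch (sample values are illustrative):
+//
+//	in := ClientGroupedStats{Service: "web", Hits: 42}
+//	raw, err := in.MarshalMsg(nil) // appends the MessagePack encoding to a nil buffer
+//	if err != nil {
+//		// handle error
+//	}
+//	var out ClientGroupedStats
+//	if _, err := out.UnmarshalMsg(raw); err != nil {
+//		// handle error
+//	}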
+ +import ( + "github.com/tinylib/msgp/msgp" +) + +// DecodeMsg implements msgp.Decodable +func (z *ClientGroupedStats) DecodeMsg(dc *msgp.Reader) (err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, err = dc.ReadMapHeader() + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, err = dc.ReadMapKeyPtr() + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "Service": + z.Service, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "Service") + return + } + case "Name": + z.Name, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "Name") + return + } + case "Resource": + z.Resource, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "Resource") + return + } + case "HTTPStatusCode": + z.HTTPStatusCode, err = dc.ReadUint32() + if err != nil { + err = msgp.WrapError(err, "HTTPStatusCode") + return + } + case "Type": + z.Type, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "Type") + return + } + case "DBType": + z.DBType, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "DBType") + return + } + case "Hits": + z.Hits, err = dc.ReadUint64() + if err != nil { + err = msgp.WrapError(err, "Hits") + return + } + case "Errors": + z.Errors, err = dc.ReadUint64() + if err != nil { + err = msgp.WrapError(err, "Errors") + return + } + case "Duration": + z.Duration, err = dc.ReadUint64() + if err != nil { + err = msgp.WrapError(err, "Duration") + return + } + case "OkSummary": + z.OkSummary, err = dc.ReadBytes(z.OkSummary) + if err != nil { + err = msgp.WrapError(err, "OkSummary") + return + } + case "ErrorSummary": + z.ErrorSummary, err = dc.ReadBytes(z.ErrorSummary) + if err != nil { + err = msgp.WrapError(err, "ErrorSummary") + return + } + case "Synthetics": + z.Synthetics, err = dc.ReadBool() + if err != nil { + err = msgp.WrapError(err, "Synthetics") + return + } + case "TopLevelHits": + z.TopLevelHits, err = dc.ReadUint64() + if err != nil { + err = msgp.WrapError(err, "TopLevelHits") + return + } + case "PeerService": + z.PeerService, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "PeerService") + return + } + case "SpanKind": + z.SpanKind, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "SpanKind") + return + } + default: + err = dc.Skip() + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + return +} + +// EncodeMsg implements msgp.Encodable +func (z *ClientGroupedStats) EncodeMsg(en *msgp.Writer) (err error) { + // map header, size 15 + // write "Service" + err = en.Append(0x8f, 0xa7, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65) + if err != nil { + return + } + err = en.WriteString(z.Service) + if err != nil { + err = msgp.WrapError(err, "Service") + return + } + // write "Name" + err = en.Append(0xa4, 0x4e, 0x61, 0x6d, 0x65) + if err != nil { + return + } + err = en.WriteString(z.Name) + if err != nil { + err = msgp.WrapError(err, "Name") + return + } + // write "Resource" + err = en.Append(0xa8, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65) + if err != nil { + return + } + err = en.WriteString(z.Resource) + if err != nil { + err = msgp.WrapError(err, "Resource") + return + } + // write "HTTPStatusCode" + err = en.Append(0xae, 0x48, 0x54, 0x54, 0x50, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65) + if err != nil { + return + } + err = en.WriteUint32(z.HTTPStatusCode) + if err != nil { + err = msgp.WrapError(err, 
"HTTPStatusCode") + return + } + // write "Type" + err = en.Append(0xa4, 0x54, 0x79, 0x70, 0x65) + if err != nil { + return + } + err = en.WriteString(z.Type) + if err != nil { + err = msgp.WrapError(err, "Type") + return + } + // write "DBType" + err = en.Append(0xa6, 0x44, 0x42, 0x54, 0x79, 0x70, 0x65) + if err != nil { + return + } + err = en.WriteString(z.DBType) + if err != nil { + err = msgp.WrapError(err, "DBType") + return + } + // write "Hits" + err = en.Append(0xa4, 0x48, 0x69, 0x74, 0x73) + if err != nil { + return + } + err = en.WriteUint64(z.Hits) + if err != nil { + err = msgp.WrapError(err, "Hits") + return + } + // write "Errors" + err = en.Append(0xa6, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x73) + if err != nil { + return + } + err = en.WriteUint64(z.Errors) + if err != nil { + err = msgp.WrapError(err, "Errors") + return + } + // write "Duration" + err = en.Append(0xa8, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e) + if err != nil { + return + } + err = en.WriteUint64(z.Duration) + if err != nil { + err = msgp.WrapError(err, "Duration") + return + } + // write "OkSummary" + err = en.Append(0xa9, 0x4f, 0x6b, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79) + if err != nil { + return + } + err = en.WriteBytes(z.OkSummary) + if err != nil { + err = msgp.WrapError(err, "OkSummary") + return + } + // write "ErrorSummary" + err = en.Append(0xac, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79) + if err != nil { + return + } + err = en.WriteBytes(z.ErrorSummary) + if err != nil { + err = msgp.WrapError(err, "ErrorSummary") + return + } + // write "Synthetics" + err = en.Append(0xaa, 0x53, 0x79, 0x6e, 0x74, 0x68, 0x65, 0x74, 0x69, 0x63, 0x73) + if err != nil { + return + } + err = en.WriteBool(z.Synthetics) + if err != nil { + err = msgp.WrapError(err, "Synthetics") + return + } + // write "TopLevelHits" + err = en.Append(0xac, 0x54, 0x6f, 0x70, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x48, 0x69, 0x74, 0x73) + if err != nil { + return + } + err = en.WriteUint64(z.TopLevelHits) + if err != nil { + err = msgp.WrapError(err, "TopLevelHits") + return + } + // write "PeerService" + err = en.Append(0xab, 0x50, 0x65, 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65) + if err != nil { + return + } + err = en.WriteString(z.PeerService) + if err != nil { + err = msgp.WrapError(err, "PeerService") + return + } + // write "SpanKind" + err = en.Append(0xa8, 0x53, 0x70, 0x61, 0x6e, 0x4b, 0x69, 0x6e, 0x64) + if err != nil { + return + } + err = en.WriteString(z.SpanKind) + if err != nil { + err = msgp.WrapError(err, "SpanKind") + return + } + return +} + +// MarshalMsg implements msgp.Marshaler +func (z *ClientGroupedStats) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) + // map header, size 15 + // string "Service" + o = append(o, 0x8f, 0xa7, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65) + o = msgp.AppendString(o, z.Service) + // string "Name" + o = append(o, 0xa4, 0x4e, 0x61, 0x6d, 0x65) + o = msgp.AppendString(o, z.Name) + // string "Resource" + o = append(o, 0xa8, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65) + o = msgp.AppendString(o, z.Resource) + // string "HTTPStatusCode" + o = append(o, 0xae, 0x48, 0x54, 0x54, 0x50, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65) + o = msgp.AppendUint32(o, z.HTTPStatusCode) + // string "Type" + o = append(o, 0xa4, 0x54, 0x79, 0x70, 0x65) + o = msgp.AppendString(o, z.Type) + // string "DBType" + o = append(o, 0xa6, 0x44, 0x42, 0x54, 0x79, 0x70, 0x65) + o = msgp.AppendString(o, z.DBType) + // string "Hits" + 
o = append(o, 0xa4, 0x48, 0x69, 0x74, 0x73) + o = msgp.AppendUint64(o, z.Hits) + // string "Errors" + o = append(o, 0xa6, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x73) + o = msgp.AppendUint64(o, z.Errors) + // string "Duration" + o = append(o, 0xa8, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e) + o = msgp.AppendUint64(o, z.Duration) + // string "OkSummary" + o = append(o, 0xa9, 0x4f, 0x6b, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79) + o = msgp.AppendBytes(o, z.OkSummary) + // string "ErrorSummary" + o = append(o, 0xac, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79) + o = msgp.AppendBytes(o, z.ErrorSummary) + // string "Synthetics" + o = append(o, 0xaa, 0x53, 0x79, 0x6e, 0x74, 0x68, 0x65, 0x74, 0x69, 0x63, 0x73) + o = msgp.AppendBool(o, z.Synthetics) + // string "TopLevelHits" + o = append(o, 0xac, 0x54, 0x6f, 0x70, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x48, 0x69, 0x74, 0x73) + o = msgp.AppendUint64(o, z.TopLevelHits) + // string "PeerService" + o = append(o, 0xab, 0x50, 0x65, 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65) + o = msgp.AppendString(o, z.PeerService) + // string "SpanKind" + o = append(o, 0xa8, 0x53, 0x70, 0x61, 0x6e, 0x4b, 0x69, 0x6e, 0x64) + o = msgp.AppendString(o, z.SpanKind) + return +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *ClientGroupedStats) UnmarshalMsg(bts []byte) (o []byte, err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "Service": + z.Service, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Service") + return + } + case "Name": + z.Name, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Name") + return + } + case "Resource": + z.Resource, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Resource") + return + } + case "HTTPStatusCode": + z.HTTPStatusCode, bts, err = msgp.ReadUint32Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "HTTPStatusCode") + return + } + case "Type": + z.Type, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Type") + return + } + case "DBType": + z.DBType, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "DBType") + return + } + case "Hits": + z.Hits, bts, err = msgp.ReadUint64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "Hits") + return + } + case "Errors": + z.Errors, bts, err = msgp.ReadUint64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "Errors") + return + } + case "Duration": + z.Duration, bts, err = msgp.ReadUint64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "Duration") + return + } + case "OkSummary": + z.OkSummary, bts, err = msgp.ReadBytesBytes(bts, z.OkSummary) + if err != nil { + err = msgp.WrapError(err, "OkSummary") + return + } + case "ErrorSummary": + z.ErrorSummary, bts, err = msgp.ReadBytesBytes(bts, z.ErrorSummary) + if err != nil { + err = msgp.WrapError(err, "ErrorSummary") + return + } + case "Synthetics": + z.Synthetics, bts, err = msgp.ReadBoolBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Synthetics") + return + } + case "TopLevelHits": + z.TopLevelHits, bts, err = msgp.ReadUint64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "TopLevelHits") + return + } + 
case "PeerService": + z.PeerService, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "PeerService") + return + } + case "SpanKind": + z.SpanKind, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "SpanKind") + return + } + default: + bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + o = bts + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *ClientGroupedStats) Msgsize() (s int) { + s = 1 + 8 + msgp.StringPrefixSize + len(z.Service) + 5 + msgp.StringPrefixSize + len(z.Name) + 9 + msgp.StringPrefixSize + len(z.Resource) + 15 + msgp.Uint32Size + 5 + msgp.StringPrefixSize + len(z.Type) + 7 + msgp.StringPrefixSize + len(z.DBType) + 5 + msgp.Uint64Size + 7 + msgp.Uint64Size + 9 + msgp.Uint64Size + 10 + msgp.BytesPrefixSize + len(z.OkSummary) + 13 + msgp.BytesPrefixSize + len(z.ErrorSummary) + 11 + msgp.BoolSize + 13 + msgp.Uint64Size + 12 + msgp.StringPrefixSize + len(z.PeerService) + 9 + msgp.StringPrefixSize + len(z.SpanKind) + return +} + +// DecodeMsg implements msgp.Decodable +func (z *ClientStatsBucket) DecodeMsg(dc *msgp.Reader) (err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, err = dc.ReadMapHeader() + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, err = dc.ReadMapKeyPtr() + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "Start": + z.Start, err = dc.ReadUint64() + if err != nil { + err = msgp.WrapError(err, "Start") + return + } + case "Duration": + z.Duration, err = dc.ReadUint64() + if err != nil { + err = msgp.WrapError(err, "Duration") + return + } + case "Stats": + var zb0002 uint32 + zb0002, err = dc.ReadArrayHeader() + if err != nil { + err = msgp.WrapError(err, "Stats") + return + } + if cap(z.Stats) >= int(zb0002) { + z.Stats = (z.Stats)[:zb0002] + } else { + z.Stats = make([]*ClientGroupedStats, zb0002) + } + for za0001 := range z.Stats { + if dc.IsNil() { + err = dc.ReadNil() + if err != nil { + err = msgp.WrapError(err, "Stats", za0001) + return + } + z.Stats[za0001] = nil + } else { + if z.Stats[za0001] == nil { + z.Stats[za0001] = new(ClientGroupedStats) + } + err = z.Stats[za0001].DecodeMsg(dc) + if err != nil { + err = msgp.WrapError(err, "Stats", za0001) + return + } + } + } + case "AgentTimeShift": + z.AgentTimeShift, err = dc.ReadInt64() + if err != nil { + err = msgp.WrapError(err, "AgentTimeShift") + return + } + default: + err = dc.Skip() + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + return +} + +// EncodeMsg implements msgp.Encodable +func (z *ClientStatsBucket) EncodeMsg(en *msgp.Writer) (err error) { + // omitempty: check for empty values + zb0001Len := uint32(4) + var zb0001Mask uint8 /* 4 bits */ + if z.Stats == nil { + zb0001Len-- + zb0001Mask |= 0x4 + } + // variable map header, size zb0001Len + err = en.Append(0x80 | uint8(zb0001Len)) + if err != nil { + return + } + if zb0001Len == 0 { + return + } + // write "Start" + err = en.Append(0xa5, 0x53, 0x74, 0x61, 0x72, 0x74) + if err != nil { + return + } + err = en.WriteUint64(z.Start) + if err != nil { + err = msgp.WrapError(err, "Start") + return + } + // write "Duration" + err = en.Append(0xa8, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e) + if err != nil { + return + } + err = en.WriteUint64(z.Duration) + if err != nil { + err = msgp.WrapError(err, "Duration") + return + } 
+ if (zb0001Mask & 0x4) == 0 { // if not empty + // write "Stats" + err = en.Append(0xa5, 0x53, 0x74, 0x61, 0x74, 0x73) + if err != nil { + return + } + err = en.WriteArrayHeader(uint32(len(z.Stats))) + if err != nil { + err = msgp.WrapError(err, "Stats") + return + } + for za0001 := range z.Stats { + if z.Stats[za0001] == nil { + err = en.WriteNil() + if err != nil { + return + } + } else { + err = z.Stats[za0001].EncodeMsg(en) + if err != nil { + err = msgp.WrapError(err, "Stats", za0001) + return + } + } + } + } + // write "AgentTimeShift" + err = en.Append(0xae, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x68, 0x69, 0x66, 0x74) + if err != nil { + return + } + err = en.WriteInt64(z.AgentTimeShift) + if err != nil { + err = msgp.WrapError(err, "AgentTimeShift") + return + } + return +} + +// MarshalMsg implements msgp.Marshaler +func (z *ClientStatsBucket) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) + // omitempty: check for empty values + zb0001Len := uint32(4) + var zb0001Mask uint8 /* 4 bits */ + if z.Stats == nil { + zb0001Len-- + zb0001Mask |= 0x4 + } + // variable map header, size zb0001Len + o = append(o, 0x80|uint8(zb0001Len)) + if zb0001Len == 0 { + return + } + // string "Start" + o = append(o, 0xa5, 0x53, 0x74, 0x61, 0x72, 0x74) + o = msgp.AppendUint64(o, z.Start) + // string "Duration" + o = append(o, 0xa8, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e) + o = msgp.AppendUint64(o, z.Duration) + if (zb0001Mask & 0x4) == 0 { // if not empty + // string "Stats" + o = append(o, 0xa5, 0x53, 0x74, 0x61, 0x74, 0x73) + o = msgp.AppendArrayHeader(o, uint32(len(z.Stats))) + for za0001 := range z.Stats { + if z.Stats[za0001] == nil { + o = msgp.AppendNil(o) + } else { + o, err = z.Stats[za0001].MarshalMsg(o) + if err != nil { + err = msgp.WrapError(err, "Stats", za0001) + return + } + } + } + } + // string "AgentTimeShift" + o = append(o, 0xae, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x68, 0x69, 0x66, 0x74) + o = msgp.AppendInt64(o, z.AgentTimeShift) + return +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *ClientStatsBucket) UnmarshalMsg(bts []byte) (o []byte, err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "Start": + z.Start, bts, err = msgp.ReadUint64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "Start") + return + } + case "Duration": + z.Duration, bts, err = msgp.ReadUint64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "Duration") + return + } + case "Stats": + var zb0002 uint32 + zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Stats") + return + } + if cap(z.Stats) >= int(zb0002) { + z.Stats = (z.Stats)[:zb0002] + } else { + z.Stats = make([]*ClientGroupedStats, zb0002) + } + for za0001 := range z.Stats { + if msgp.IsNil(bts) { + bts, err = msgp.ReadNilBytes(bts) + if err != nil { + return + } + z.Stats[za0001] = nil + } else { + if z.Stats[za0001] == nil { + z.Stats[za0001] = new(ClientGroupedStats) + } + bts, err = z.Stats[za0001].UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, "Stats", za0001) + return + } + } + } + case "AgentTimeShift": + z.AgentTimeShift, bts, err = msgp.ReadInt64Bytes(bts) + if err != nil { + 
err = msgp.WrapError(err, "AgentTimeShift") + return + } + default: + bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + o = bts + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *ClientStatsBucket) Msgsize() (s int) { + s = 1 + 6 + msgp.Uint64Size + 9 + msgp.Uint64Size + 6 + msgp.ArrayHeaderSize + for za0001 := range z.Stats { + if z.Stats[za0001] == nil { + s += msgp.NilSize + } else { + s += z.Stats[za0001].Msgsize() + } + } + s += 15 + msgp.Int64Size + return +} + +// DecodeMsg implements msgp.Decodable +func (z *ClientStatsPayload) DecodeMsg(dc *msgp.Reader) (err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, err = dc.ReadMapHeader() + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, err = dc.ReadMapKeyPtr() + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "Hostname": + z.Hostname, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "Hostname") + return + } + case "Env": + z.Env, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "Env") + return + } + case "Version": + z.Version, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "Version") + return + } + case "Stats": + var zb0002 uint32 + zb0002, err = dc.ReadArrayHeader() + if err != nil { + err = msgp.WrapError(err, "Stats") + return + } + if cap(z.Stats) >= int(zb0002) { + z.Stats = (z.Stats)[:zb0002] + } else { + z.Stats = make([]*ClientStatsBucket, zb0002) + } + for za0001 := range z.Stats { + if dc.IsNil() { + err = dc.ReadNil() + if err != nil { + err = msgp.WrapError(err, "Stats", za0001) + return + } + z.Stats[za0001] = nil + } else { + if z.Stats[za0001] == nil { + z.Stats[za0001] = new(ClientStatsBucket) + } + err = z.Stats[za0001].DecodeMsg(dc) + if err != nil { + err = msgp.WrapError(err, "Stats", za0001) + return + } + } + } + case "Lang": + z.Lang, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "Lang") + return + } + case "TracerVersion": + z.TracerVersion, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "TracerVersion") + return + } + case "RuntimeID": + z.RuntimeID, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "RuntimeID") + return + } + case "Sequence": + z.Sequence, err = dc.ReadUint64() + if err != nil { + err = msgp.WrapError(err, "Sequence") + return + } + case "AgentAggregation": + z.AgentAggregation, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "AgentAggregation") + return + } + case "Service": + z.Service, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "Service") + return + } + case "ContainerID": + z.ContainerID, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "ContainerID") + return + } + case "Tags": + var zb0003 uint32 + zb0003, err = dc.ReadArrayHeader() + if err != nil { + err = msgp.WrapError(err, "Tags") + return + } + if cap(z.Tags) >= int(zb0003) { + z.Tags = (z.Tags)[:zb0003] + } else { + z.Tags = make([]string, zb0003) + } + for za0002 := range z.Tags { + z.Tags[za0002], err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "Tags", za0002) + return + } + } + default: + err = dc.Skip() + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + return +} + +// EncodeMsg implements msgp.Encodable +func (z *ClientStatsPayload) EncodeMsg(en *msgp.Writer) (err error) 
{ + // omitempty: check for empty values + zb0001Len := uint32(12) + var zb0001Mask uint16 /* 12 bits */ + if z.Stats == nil { + zb0001Len-- + zb0001Mask |= 0x8 + } + // variable map header, size zb0001Len + err = en.Append(0x80 | uint8(zb0001Len)) + if err != nil { + return + } + if zb0001Len == 0 { + return + } + // write "Hostname" + err = en.Append(0xa8, 0x48, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65) + if err != nil { + return + } + err = en.WriteString(z.Hostname) + if err != nil { + err = msgp.WrapError(err, "Hostname") + return + } + // write "Env" + err = en.Append(0xa3, 0x45, 0x6e, 0x76) + if err != nil { + return + } + err = en.WriteString(z.Env) + if err != nil { + err = msgp.WrapError(err, "Env") + return + } + // write "Version" + err = en.Append(0xa7, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e) + if err != nil { + return + } + err = en.WriteString(z.Version) + if err != nil { + err = msgp.WrapError(err, "Version") + return + } + if (zb0001Mask & 0x8) == 0 { // if not empty + // write "Stats" + err = en.Append(0xa5, 0x53, 0x74, 0x61, 0x74, 0x73) + if err != nil { + return + } + err = en.WriteArrayHeader(uint32(len(z.Stats))) + if err != nil { + err = msgp.WrapError(err, "Stats") + return + } + for za0001 := range z.Stats { + if z.Stats[za0001] == nil { + err = en.WriteNil() + if err != nil { + return + } + } else { + err = z.Stats[za0001].EncodeMsg(en) + if err != nil { + err = msgp.WrapError(err, "Stats", za0001) + return + } + } + } + } + // write "Lang" + err = en.Append(0xa4, 0x4c, 0x61, 0x6e, 0x67) + if err != nil { + return + } + err = en.WriteString(z.Lang) + if err != nil { + err = msgp.WrapError(err, "Lang") + return + } + // write "TracerVersion" + err = en.Append(0xad, 0x54, 0x72, 0x61, 0x63, 0x65, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e) + if err != nil { + return + } + err = en.WriteString(z.TracerVersion) + if err != nil { + err = msgp.WrapError(err, "TracerVersion") + return + } + // write "RuntimeID" + err = en.Append(0xa9, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x49, 0x44) + if err != nil { + return + } + err = en.WriteString(z.RuntimeID) + if err != nil { + err = msgp.WrapError(err, "RuntimeID") + return + } + // write "Sequence" + err = en.Append(0xa8, 0x53, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63, 0x65) + if err != nil { + return + } + err = en.WriteUint64(z.Sequence) + if err != nil { + err = msgp.WrapError(err, "Sequence") + return + } + // write "AgentAggregation" + err = en.Append(0xb0, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x41, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e) + if err != nil { + return + } + err = en.WriteString(z.AgentAggregation) + if err != nil { + err = msgp.WrapError(err, "AgentAggregation") + return + } + // write "Service" + err = en.Append(0xa7, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65) + if err != nil { + return + } + err = en.WriteString(z.Service) + if err != nil { + err = msgp.WrapError(err, "Service") + return + } + // write "ContainerID" + err = en.Append(0xab, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x44) + if err != nil { + return + } + err = en.WriteString(z.ContainerID) + if err != nil { + err = msgp.WrapError(err, "ContainerID") + return + } + // write "Tags" + err = en.Append(0xa4, 0x54, 0x61, 0x67, 0x73) + if err != nil { + return + } + err = en.WriteArrayHeader(uint32(len(z.Tags))) + if err != nil { + err = msgp.WrapError(err, "Tags") + return + } + for za0002 := range z.Tags { + err = en.WriteString(z.Tags[za0002]) + if err != nil { + err = msgp.WrapError(err, "Tags", za0002) + return 
+ } + } + return +} + +// MarshalMsg implements msgp.Marshaler +func (z *ClientStatsPayload) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) + // omitempty: check for empty values + zb0001Len := uint32(12) + var zb0001Mask uint16 /* 12 bits */ + if z.Stats == nil { + zb0001Len-- + zb0001Mask |= 0x8 + } + // variable map header, size zb0001Len + o = append(o, 0x80|uint8(zb0001Len)) + if zb0001Len == 0 { + return + } + // string "Hostname" + o = append(o, 0xa8, 0x48, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65) + o = msgp.AppendString(o, z.Hostname) + // string "Env" + o = append(o, 0xa3, 0x45, 0x6e, 0x76) + o = msgp.AppendString(o, z.Env) + // string "Version" + o = append(o, 0xa7, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e) + o = msgp.AppendString(o, z.Version) + if (zb0001Mask & 0x8) == 0 { // if not empty + // string "Stats" + o = append(o, 0xa5, 0x53, 0x74, 0x61, 0x74, 0x73) + o = msgp.AppendArrayHeader(o, uint32(len(z.Stats))) + for za0001 := range z.Stats { + if z.Stats[za0001] == nil { + o = msgp.AppendNil(o) + } else { + o, err = z.Stats[za0001].MarshalMsg(o) + if err != nil { + err = msgp.WrapError(err, "Stats", za0001) + return + } + } + } + } + // string "Lang" + o = append(o, 0xa4, 0x4c, 0x61, 0x6e, 0x67) + o = msgp.AppendString(o, z.Lang) + // string "TracerVersion" + o = append(o, 0xad, 0x54, 0x72, 0x61, 0x63, 0x65, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e) + o = msgp.AppendString(o, z.TracerVersion) + // string "RuntimeID" + o = append(o, 0xa9, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x49, 0x44) + o = msgp.AppendString(o, z.RuntimeID) + // string "Sequence" + o = append(o, 0xa8, 0x53, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63, 0x65) + o = msgp.AppendUint64(o, z.Sequence) + // string "AgentAggregation" + o = append(o, 0xb0, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x41, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e) + o = msgp.AppendString(o, z.AgentAggregation) + // string "Service" + o = append(o, 0xa7, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65) + o = msgp.AppendString(o, z.Service) + // string "ContainerID" + o = append(o, 0xab, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x44) + o = msgp.AppendString(o, z.ContainerID) + // string "Tags" + o = append(o, 0xa4, 0x54, 0x61, 0x67, 0x73) + o = msgp.AppendArrayHeader(o, uint32(len(z.Tags))) + for za0002 := range z.Tags { + o = msgp.AppendString(o, z.Tags[za0002]) + } + return +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *ClientStatsPayload) UnmarshalMsg(bts []byte) (o []byte, err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "Hostname": + z.Hostname, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Hostname") + return + } + case "Env": + z.Env, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Env") + return + } + case "Version": + z.Version, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Version") + return + } + case "Stats": + var zb0002 uint32 + zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Stats") + return + } + if cap(z.Stats) >= int(zb0002) { + z.Stats = (z.Stats)[:zb0002] + } else { + z.Stats = 
make([]*ClientStatsBucket, zb0002) + } + for za0001 := range z.Stats { + if msgp.IsNil(bts) { + bts, err = msgp.ReadNilBytes(bts) + if err != nil { + return + } + z.Stats[za0001] = nil + } else { + if z.Stats[za0001] == nil { + z.Stats[za0001] = new(ClientStatsBucket) + } + bts, err = z.Stats[za0001].UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, "Stats", za0001) + return + } + } + } + case "Lang": + z.Lang, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Lang") + return + } + case "TracerVersion": + z.TracerVersion, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "TracerVersion") + return + } + case "RuntimeID": + z.RuntimeID, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "RuntimeID") + return + } + case "Sequence": + z.Sequence, bts, err = msgp.ReadUint64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "Sequence") + return + } + case "AgentAggregation": + z.AgentAggregation, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "AgentAggregation") + return + } + case "Service": + z.Service, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Service") + return + } + case "ContainerID": + z.ContainerID, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "ContainerID") + return + } + case "Tags": + var zb0003 uint32 + zb0003, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Tags") + return + } + if cap(z.Tags) >= int(zb0003) { + z.Tags = (z.Tags)[:zb0003] + } else { + z.Tags = make([]string, zb0003) + } + for za0002 := range z.Tags { + z.Tags[za0002], bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Tags", za0002) + return + } + } + default: + bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + o = bts + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *ClientStatsPayload) Msgsize() (s int) { + s = 1 + 9 + msgp.StringPrefixSize + len(z.Hostname) + 4 + msgp.StringPrefixSize + len(z.Env) + 8 + msgp.StringPrefixSize + len(z.Version) + 6 + msgp.ArrayHeaderSize + for za0001 := range z.Stats { + if z.Stats[za0001] == nil { + s += msgp.NilSize + } else { + s += z.Stats[za0001].Msgsize() + } + } + s += 5 + msgp.StringPrefixSize + len(z.Lang) + 14 + msgp.StringPrefixSize + len(z.TracerVersion) + 10 + msgp.StringPrefixSize + len(z.RuntimeID) + 9 + msgp.Uint64Size + 17 + msgp.StringPrefixSize + len(z.AgentAggregation) + 8 + msgp.StringPrefixSize + len(z.Service) + 12 + msgp.StringPrefixSize + len(z.ContainerID) + 5 + msgp.ArrayHeaderSize + for za0002 := range z.Tags { + s += msgp.StringPrefixSize + len(z.Tags[za0002]) + } + return +} + +// DecodeMsg implements msgp.Decodable +func (z *StatsPayload) DecodeMsg(dc *msgp.Reader) (err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, err = dc.ReadMapHeader() + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, err = dc.ReadMapKeyPtr() + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "AgentHostname": + z.AgentHostname, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "AgentHostname") + return + } + case "AgentEnv": + z.AgentEnv, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "AgentEnv") + 
return + } + case "Stats": + var zb0002 uint32 + zb0002, err = dc.ReadArrayHeader() + if err != nil { + err = msgp.WrapError(err, "Stats") + return + } + if cap(z.Stats) >= int(zb0002) { + z.Stats = (z.Stats)[:zb0002] + } else { + z.Stats = make([]*ClientStatsPayload, zb0002) + } + for za0001 := range z.Stats { + if dc.IsNil() { + err = dc.ReadNil() + if err != nil { + err = msgp.WrapError(err, "Stats", za0001) + return + } + z.Stats[za0001] = nil + } else { + if z.Stats[za0001] == nil { + z.Stats[za0001] = new(ClientStatsPayload) + } + err = z.Stats[za0001].DecodeMsg(dc) + if err != nil { + err = msgp.WrapError(err, "Stats", za0001) + return + } + } + } + case "AgentVersion": + z.AgentVersion, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "AgentVersion") + return + } + case "ClientComputed": + z.ClientComputed, err = dc.ReadBool() + if err != nil { + err = msgp.WrapError(err, "ClientComputed") + return + } + default: + err = dc.Skip() + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + return +} + +// EncodeMsg implements msgp.Encodable +func (z *StatsPayload) EncodeMsg(en *msgp.Writer) (err error) { + // omitempty: check for empty values + zb0001Len := uint32(5) + var zb0001Mask uint8 /* 5 bits */ + if z.Stats == nil { + zb0001Len-- + zb0001Mask |= 0x4 + } + // variable map header, size zb0001Len + err = en.Append(0x80 | uint8(zb0001Len)) + if err != nil { + return + } + if zb0001Len == 0 { + return + } + // write "AgentHostname" + err = en.Append(0xad, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x48, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65) + if err != nil { + return + } + err = en.WriteString(z.AgentHostname) + if err != nil { + err = msgp.WrapError(err, "AgentHostname") + return + } + // write "AgentEnv" + err = en.Append(0xa8, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x45, 0x6e, 0x76) + if err != nil { + return + } + err = en.WriteString(z.AgentEnv) + if err != nil { + err = msgp.WrapError(err, "AgentEnv") + return + } + if (zb0001Mask & 0x4) == 0 { // if not empty + // write "Stats" + err = en.Append(0xa5, 0x53, 0x74, 0x61, 0x74, 0x73) + if err != nil { + return + } + err = en.WriteArrayHeader(uint32(len(z.Stats))) + if err != nil { + err = msgp.WrapError(err, "Stats") + return + } + for za0001 := range z.Stats { + if z.Stats[za0001] == nil { + err = en.WriteNil() + if err != nil { + return + } + } else { + err = z.Stats[za0001].EncodeMsg(en) + if err != nil { + err = msgp.WrapError(err, "Stats", za0001) + return + } + } + } + } + // write "AgentVersion" + err = en.Append(0xac, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e) + if err != nil { + return + } + err = en.WriteString(z.AgentVersion) + if err != nil { + err = msgp.WrapError(err, "AgentVersion") + return + } + // write "ClientComputed" + err = en.Append(0xae, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6d, 0x70, 0x75, 0x74, 0x65, 0x64) + if err != nil { + return + } + err = en.WriteBool(z.ClientComputed) + if err != nil { + err = msgp.WrapError(err, "ClientComputed") + return + } + return +} + +// MarshalMsg implements msgp.Marshaler +func (z *StatsPayload) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) + // omitempty: check for empty values + zb0001Len := uint32(5) + var zb0001Mask uint8 /* 5 bits */ + if z.Stats == nil { + zb0001Len-- + zb0001Mask |= 0x4 + } + // variable map header, size zb0001Len + o = append(o, 0x80|uint8(zb0001Len)) + if zb0001Len == 0 { + return + } + // string "AgentHostname" + o = append(o, 0xad, 0x41, 0x67, 0x65, 0x6e, 
0x74, 0x48, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65) + o = msgp.AppendString(o, z.AgentHostname) + // string "AgentEnv" + o = append(o, 0xa8, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x45, 0x6e, 0x76) + o = msgp.AppendString(o, z.AgentEnv) + if (zb0001Mask & 0x4) == 0 { // if not empty + // string "Stats" + o = append(o, 0xa5, 0x53, 0x74, 0x61, 0x74, 0x73) + o = msgp.AppendArrayHeader(o, uint32(len(z.Stats))) + for za0001 := range z.Stats { + if z.Stats[za0001] == nil { + o = msgp.AppendNil(o) + } else { + o, err = z.Stats[za0001].MarshalMsg(o) + if err != nil { + err = msgp.WrapError(err, "Stats", za0001) + return + } + } + } + } + // string "AgentVersion" + o = append(o, 0xac, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e) + o = msgp.AppendString(o, z.AgentVersion) + // string "ClientComputed" + o = append(o, 0xae, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6d, 0x70, 0x75, 0x74, 0x65, 0x64) + o = msgp.AppendBool(o, z.ClientComputed) + return +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *StatsPayload) UnmarshalMsg(bts []byte) (o []byte, err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "AgentHostname": + z.AgentHostname, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "AgentHostname") + return + } + case "AgentEnv": + z.AgentEnv, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "AgentEnv") + return + } + case "Stats": + var zb0002 uint32 + zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Stats") + return + } + if cap(z.Stats) >= int(zb0002) { + z.Stats = (z.Stats)[:zb0002] + } else { + z.Stats = make([]*ClientStatsPayload, zb0002) + } + for za0001 := range z.Stats { + if msgp.IsNil(bts) { + bts, err = msgp.ReadNilBytes(bts) + if err != nil { + return + } + z.Stats[za0001] = nil + } else { + if z.Stats[za0001] == nil { + z.Stats[za0001] = new(ClientStatsPayload) + } + bts, err = z.Stats[za0001].UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, "Stats", za0001) + return + } + } + } + case "AgentVersion": + z.AgentVersion, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "AgentVersion") + return + } + case "ClientComputed": + z.ClientComputed, bts, err = msgp.ReadBoolBytes(bts) + if err != nil { + err = msgp.WrapError(err, "ClientComputed") + return + } + default: + bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + o = bts + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *StatsPayload) Msgsize() (s int) { + s = 1 + 14 + msgp.StringPrefixSize + len(z.AgentHostname) + 9 + msgp.StringPrefixSize + len(z.AgentEnv) + 6 + msgp.ArrayHeaderSize + for za0001 := range z.Stats { + if z.Stats[za0001] == nil { + s += msgp.NilSize + } else { + s += z.Stats[za0001].Msgsize() + } + } + s += 13 + msgp.StringPrefixSize + len(z.AgentVersion) + 15 + msgp.BoolSize + return +} diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace/stats_vtproto.pb.go b/vendor/github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace/stats_vtproto.pb.go new file mode 100644 index 000000000..d12a6f4eb --- 
/dev/null +++ b/vendor/github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace/stats_vtproto.pb.go @@ -0,0 +1,1814 @@ +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// protoc-gen-go-vtproto version: v0.4.0 +// source: datadog/trace/stats.proto + +package trace + +import ( + fmt "fmt" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + io "io" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *StatsPayload) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StatsPayload) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *StatsPayload) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.ClientComputed { + i-- + if m.ClientComputed { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + } + if len(m.AgentVersion) > 0 { + i -= len(m.AgentVersion) + copy(dAtA[i:], m.AgentVersion) + i = encodeVarint(dAtA, i, uint64(len(m.AgentVersion))) + i-- + dAtA[i] = 0x22 + } + if len(m.Stats) > 0 { + for iNdEx := len(m.Stats) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Stats[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + } + if len(m.AgentEnv) > 0 { + i -= len(m.AgentEnv) + copy(dAtA[i:], m.AgentEnv) + i = encodeVarint(dAtA, i, uint64(len(m.AgentEnv))) + i-- + dAtA[i] = 0x12 + } + if len(m.AgentHostname) > 0 { + i -= len(m.AgentHostname) + copy(dAtA[i:], m.AgentHostname) + i = encodeVarint(dAtA, i, uint64(len(m.AgentHostname))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ClientStatsPayload) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClientStatsPayload) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ClientStatsPayload) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Tags) > 0 { + for iNdEx := len(m.Tags) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Tags[iNdEx]) + copy(dAtA[i:], m.Tags[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.Tags[iNdEx]))) + i-- + dAtA[i] = 0x62 + } + } + if len(m.ContainerID) > 0 { + i -= len(m.ContainerID) + copy(dAtA[i:], m.ContainerID) + i = encodeVarint(dAtA, i, uint64(len(m.ContainerID))) + i-- + dAtA[i] = 0x5a + } + if len(m.Service) > 0 { + i -= len(m.Service) + copy(dAtA[i:], m.Service) + i = encodeVarint(dAtA, i, uint64(len(m.Service))) + i-- + dAtA[i] = 0x52 + } + if len(m.AgentAggregation) > 0 { + i -= len(m.AgentAggregation) + copy(dAtA[i:], m.AgentAggregation) + i = 
encodeVarint(dAtA, i, uint64(len(m.AgentAggregation))) + i-- + dAtA[i] = 0x4a + } + if m.Sequence != 0 { + i = encodeVarint(dAtA, i, uint64(m.Sequence)) + i-- + dAtA[i] = 0x40 + } + if len(m.RuntimeID) > 0 { + i -= len(m.RuntimeID) + copy(dAtA[i:], m.RuntimeID) + i = encodeVarint(dAtA, i, uint64(len(m.RuntimeID))) + i-- + dAtA[i] = 0x3a + } + if len(m.TracerVersion) > 0 { + i -= len(m.TracerVersion) + copy(dAtA[i:], m.TracerVersion) + i = encodeVarint(dAtA, i, uint64(len(m.TracerVersion))) + i-- + dAtA[i] = 0x32 + } + if len(m.Lang) > 0 { + i -= len(m.Lang) + copy(dAtA[i:], m.Lang) + i = encodeVarint(dAtA, i, uint64(len(m.Lang))) + i-- + dAtA[i] = 0x2a + } + if len(m.Stats) > 0 { + for iNdEx := len(m.Stats) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Stats[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + } + if len(m.Version) > 0 { + i -= len(m.Version) + copy(dAtA[i:], m.Version) + i = encodeVarint(dAtA, i, uint64(len(m.Version))) + i-- + dAtA[i] = 0x1a + } + if len(m.Env) > 0 { + i -= len(m.Env) + copy(dAtA[i:], m.Env) + i = encodeVarint(dAtA, i, uint64(len(m.Env))) + i-- + dAtA[i] = 0x12 + } + if len(m.Hostname) > 0 { + i -= len(m.Hostname) + copy(dAtA[i:], m.Hostname) + i = encodeVarint(dAtA, i, uint64(len(m.Hostname))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ClientStatsBucket) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClientStatsBucket) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ClientStatsBucket) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.AgentTimeShift != 0 { + i = encodeVarint(dAtA, i, uint64(m.AgentTimeShift)) + i-- + dAtA[i] = 0x20 + } + if len(m.Stats) > 0 { + for iNdEx := len(m.Stats) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Stats[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + } + if m.Duration != 0 { + i = encodeVarint(dAtA, i, uint64(m.Duration)) + i-- + dAtA[i] = 0x10 + } + if m.Start != 0 { + i = encodeVarint(dAtA, i, uint64(m.Start)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *ClientGroupedStats) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClientGroupedStats) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ClientGroupedStats) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.SpanKind) > 0 { + i -= len(m.SpanKind) + copy(dAtA[i:], m.SpanKind) + i = encodeVarint(dAtA, i, uint64(len(m.SpanKind))) + i-- + dAtA[i] = 0x7a + } + if len(m.PeerService) > 0 { + i -= 
len(m.PeerService) + copy(dAtA[i:], m.PeerService) + i = encodeVarint(dAtA, i, uint64(len(m.PeerService))) + i-- + dAtA[i] = 0x72 + } + if m.TopLevelHits != 0 { + i = encodeVarint(dAtA, i, uint64(m.TopLevelHits)) + i-- + dAtA[i] = 0x68 + } + if m.Synthetics { + i-- + if m.Synthetics { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x60 + } + if len(m.ErrorSummary) > 0 { + i -= len(m.ErrorSummary) + copy(dAtA[i:], m.ErrorSummary) + i = encodeVarint(dAtA, i, uint64(len(m.ErrorSummary))) + i-- + dAtA[i] = 0x5a + } + if len(m.OkSummary) > 0 { + i -= len(m.OkSummary) + copy(dAtA[i:], m.OkSummary) + i = encodeVarint(dAtA, i, uint64(len(m.OkSummary))) + i-- + dAtA[i] = 0x52 + } + if m.Duration != 0 { + i = encodeVarint(dAtA, i, uint64(m.Duration)) + i-- + dAtA[i] = 0x48 + } + if m.Errors != 0 { + i = encodeVarint(dAtA, i, uint64(m.Errors)) + i-- + dAtA[i] = 0x40 + } + if m.Hits != 0 { + i = encodeVarint(dAtA, i, uint64(m.Hits)) + i-- + dAtA[i] = 0x38 + } + if len(m.DBType) > 0 { + i -= len(m.DBType) + copy(dAtA[i:], m.DBType) + i = encodeVarint(dAtA, i, uint64(len(m.DBType))) + i-- + dAtA[i] = 0x32 + } + if len(m.Type) > 0 { + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarint(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0x2a + } + if m.HTTPStatusCode != 0 { + i = encodeVarint(dAtA, i, uint64(m.HTTPStatusCode)) + i-- + dAtA[i] = 0x20 + } + if len(m.Resource) > 0 { + i -= len(m.Resource) + copy(dAtA[i:], m.Resource) + i = encodeVarint(dAtA, i, uint64(len(m.Resource))) + i-- + dAtA[i] = 0x1a + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x12 + } + if len(m.Service) > 0 { + i -= len(m.Service) + copy(dAtA[i:], m.Service) + i = encodeVarint(dAtA, i, uint64(len(m.Service))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *StatsPayload) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.AgentHostname) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.AgentEnv) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if len(m.Stats) > 0 { + for _, e := range m.Stats { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + l = len(m.AgentVersion) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.ClientComputed { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *ClientStatsPayload) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Hostname) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Env) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Version) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if len(m.Stats) > 0 { + for _, e := range m.Stats { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + l = len(m.Lang) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.TracerVersion) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.RuntimeID) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.Sequence != 0 { + n += 1 + sov(uint64(m.Sequence)) + } + l = len(m.AgentAggregation) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Service) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.ContainerID) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if len(m.Tags) > 0 { + for _, s := range m.Tags { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *ClientStatsBucket) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Start != 0 { + n += 1 + 
sov(uint64(m.Start)) + } + if m.Duration != 0 { + n += 1 + sov(uint64(m.Duration)) + } + if len(m.Stats) > 0 { + for _, e := range m.Stats { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + if m.AgentTimeShift != 0 { + n += 1 + sov(uint64(m.AgentTimeShift)) + } + n += len(m.unknownFields) + return n +} + +func (m *ClientGroupedStats) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Service) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Name) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Resource) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.HTTPStatusCode != 0 { + n += 1 + sov(uint64(m.HTTPStatusCode)) + } + l = len(m.Type) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.DBType) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.Hits != 0 { + n += 1 + sov(uint64(m.Hits)) + } + if m.Errors != 0 { + n += 1 + sov(uint64(m.Errors)) + } + if m.Duration != 0 { + n += 1 + sov(uint64(m.Duration)) + } + l = len(m.OkSummary) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.ErrorSummary) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.Synthetics { + n += 2 + } + if m.TopLevelHits != 0 { + n += 1 + sov(uint64(m.TopLevelHits)) + } + l = len(m.PeerService) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.SpanKind) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *StatsPayload) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StatsPayload: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StatsPayload: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AgentHostname", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AgentHostname = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AgentEnv", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AgentEnv = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Stats", wireType) + } 
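+ // Wire type 2 (length-delimited): read the varint length, bounds-check it
+ // against the buffer, then decode the nested ClientStatsPayload in place.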
+ var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Stats = append(m.Stats, &ClientStatsPayload{}) + if err := m.Stats[len(m.Stats)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AgentVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AgentVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ClientComputed", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ClientComputed = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
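+ // The skipped (unknown) field was appended to unknownFields above so it
+ // survives a later re-marshal; now advance past it.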
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClientStatsPayload) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClientStatsPayload: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClientStatsPayload: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hostname", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Hostname = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Env = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Version = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Stats", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Stats = append(m.Stats, &ClientStatsBucket{}) + if err := m.Stats[len(m.Stats)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong 
wireType = %d for field Lang", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Lang = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TracerVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TracerVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RuntimeID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RuntimeID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Sequence", wireType) + } + m.Sequence = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Sequence |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AgentAggregation", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AgentAggregation = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Service", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex 
> l { + return io.ErrUnexpectedEOF + } + m.Service = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ContainerID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tags", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Tags = append(m.Tags, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClientStatsBucket) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClientStatsBucket: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClientStatsBucket: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Start", wireType) + } + m.Start = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Start |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Duration", wireType) + } + m.Duration = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Duration |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Stats", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] 
+ iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Stats = append(m.Stats, &ClientGroupedStats{}) + if err := m.Stats[len(m.Stats)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AgentTimeShift", wireType) + } + m.AgentTimeShift = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.AgentTimeShift |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClientGroupedStats) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClientGroupedStats: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClientGroupedStats: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Service", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Service = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= 
uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resource = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field HTTPStatusCode", wireType) + } + m.HTTPStatusCode = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.HTTPStatusCode |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DBType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DBType = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Hits", wireType) + } + m.Hits = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Hits |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Errors", wireType) + } + m.Errors = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Errors |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Duration", wireType) + } + m.Duration = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Duration |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OkSummary", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return 
ErrInvalidLength + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.OkSummary = append(m.OkSummary[:0], dAtA[iNdEx:postIndex]...) + if m.OkSummary == nil { + m.OkSummary = []byte{} + } + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ErrorSummary", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ErrorSummary = append(m.ErrorSummary[:0], dAtA[iNdEx:postIndex]...) + if m.ErrorSummary == nil { + m.ErrorSummary = []byte{} + } + iNdEx = postIndex + case 12: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Synthetics", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Synthetics = bool(v != 0) + case 13: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TopLevelHits", wireType) + } + m.TopLevelHits = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TopLevelHits |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 14: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PeerService", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PeerService = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 15: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SpanKind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SpanKind = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace/trace.go b/vendor/github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace/trace.go
new file mode 100644
index 000000000..184a5c6b6
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace/trace.go
@@ -0,0 +1,52 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package trace
+
+//go:generate go run github.com/tinylib/msgp -file=span.pb.go -o span_gen.go -io=false
+//go:generate go run github.com/tinylib/msgp -file=tracer_payload.pb.go -o tracer_payload_gen.go -io=false
+//go:generate go run github.com/tinylib/msgp -io=false
+
+// Trace is a collection of spans with the same trace ID
+type Trace []*Span
+
+// Traces is a list of traces. This model matters as this is what we unpack from msgp.
+type Traces []Trace
+
+// RemoveChunk removes a chunk by its index.
+func (p *TracerPayload) RemoveChunk(i int) {
+	if i < 0 || i >= len(p.Chunks) {
+		return
+	}
+	p.Chunks[i] = p.Chunks[len(p.Chunks)-1]
+	p.Chunks = p.Chunks[:len(p.Chunks)-1]
+}
+
+// Cut cuts off a new tracer payload from `p` with [0, i-1] chunks
+// and keeps [i, n-1] chunks in the original payload `p`.
+func (p *TracerPayload) Cut(i int) *TracerPayload {
+	if i < 0 {
+		i = 0
+	}
+	if i > len(p.Chunks) {
+		i = len(p.Chunks)
+	}
+	new := TracerPayload{
+		ContainerID:     p.GetContainerID(),
+		LanguageName:    p.GetLanguageName(),
+		LanguageVersion: p.GetLanguageVersion(),
+		TracerVersion:   p.GetTracerVersion(),
+		RuntimeID:       p.GetRuntimeID(),
+		Env:             p.GetEnv(),
+		Hostname:        p.GetHostname(),
+		AppVersion:      p.GetAppVersion(),
+		Tags:            p.GetTags(),
+	}
+
+	new.Chunks = p.Chunks[:i]
+	p.Chunks = p.Chunks[i:]
+
+	return &new
+}
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace/trace_gen.go b/vendor/github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace/trace_gen.go
new file mode 100644
index 000000000..2a2865f3d
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace/trace_gen.go
@@ -0,0 +1,158 @@
+package trace
+
+// Code generated by github.com/tinylib/msgp DO NOT EDIT.
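A usage note on the hand-written Cut helper added in trace.go above: because it only re-slices Chunks and copies the scalar metadata, it splits a payload without copying any span data. As a sketch (the pb import alias and the chunk cap are illustrative assumptions, not part of this patch), a sender that limits request size could peel chunks off the front of a payload like this:

    // splitPayload caps the number of chunks per outgoing payload. Each Cut
    // moves chunks [0, maxChunks-1] into a fresh payload that shares the
    // parent's metadata; the remainder stays in p.
    func splitPayload(p *pb.TracerPayload, maxChunks int) []*pb.TracerPayload {
    	if maxChunks < 1 {
    		maxChunks = 1 // guard: Cut(0) would never shrink p
    	}
    	var out []*pb.TracerPayload
    	for len(p.Chunks) > maxChunks {
    		out = append(out, p.Cut(maxChunks))
    	}
    	return append(out, p)
    }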
+ +import ( + "github.com/tinylib/msgp/msgp" +) + +// MarshalMsg implements msgp.Marshaler +func (z Trace) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) + o = msgp.AppendArrayHeader(o, uint32(len(z))) + for za0001 := range z { + if z[za0001] == nil { + o = msgp.AppendNil(o) + } else { + o, err = z[za0001].MarshalMsg(o) + if err != nil { + err = msgp.WrapError(err, za0001) + return + } + } + } + return +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *Trace) UnmarshalMsg(bts []byte) (o []byte, err error) { + var zb0002 uint32 + zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + if cap((*z)) >= int(zb0002) { + (*z) = (*z)[:zb0002] + } else { + (*z) = make(Trace, zb0002) + } + for zb0001 := range *z { + if msgp.IsNil(bts) { + bts, err = msgp.ReadNilBytes(bts) + if err != nil { + return + } + (*z)[zb0001] = nil + } else { + if (*z)[zb0001] == nil { + (*z)[zb0001] = new(Span) + } + bts, err = (*z)[zb0001].UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, zb0001) + return + } + } + } + o = bts + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z Trace) Msgsize() (s int) { + s = msgp.ArrayHeaderSize + for zb0003 := range z { + if z[zb0003] == nil { + s += msgp.NilSize + } else { + s += z[zb0003].Msgsize() + } + } + return +} + +// MarshalMsg implements msgp.Marshaler +func (z Traces) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) + o = msgp.AppendArrayHeader(o, uint32(len(z))) + for za0001 := range z { + o = msgp.AppendArrayHeader(o, uint32(len(z[za0001]))) + for za0002 := range z[za0001] { + if z[za0001][za0002] == nil { + o = msgp.AppendNil(o) + } else { + o, err = z[za0001][za0002].MarshalMsg(o) + if err != nil { + err = msgp.WrapError(err, za0001, za0002) + return + } + } + } + } + return +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *Traces) UnmarshalMsg(bts []byte) (o []byte, err error) { + var zb0003 uint32 + zb0003, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + if cap((*z)) >= int(zb0003) { + (*z) = (*z)[:zb0003] + } else { + (*z) = make(Traces, zb0003) + } + for zb0001 := range *z { + var zb0004 uint32 + zb0004, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, zb0001) + return + } + if cap((*z)[zb0001]) >= int(zb0004) { + (*z)[zb0001] = ((*z)[zb0001])[:zb0004] + } else { + (*z)[zb0001] = make(Trace, zb0004) + } + for zb0002 := range (*z)[zb0001] { + if msgp.IsNil(bts) { + bts, err = msgp.ReadNilBytes(bts) + if err != nil { + return + } + (*z)[zb0001][zb0002] = nil + } else { + if (*z)[zb0001][zb0002] == nil { + (*z)[zb0001][zb0002] = new(Span) + } + bts, err = (*z)[zb0001][zb0002].UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, zb0001, zb0002) + return + } + } + } + } + o = bts + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z Traces) Msgsize() (s int) { + s = msgp.ArrayHeaderSize + for zb0005 := range z { + s += msgp.ArrayHeaderSize + for zb0006 := range z[zb0005] { + if z[zb0005][zb0006] == nil { + s += msgp.NilSize + } else { + s += z[zb0005][zb0006].Msgsize() + } + } + } + return +} diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace/tracer_payload.pb.go b/vendor/github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace/tracer_payload.pb.go new file 
mode 100644 index 000000000..71d366004 --- /dev/null +++ b/vendor/github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace/tracer_payload.pb.go @@ -0,0 +1,391 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.1 +// protoc v3.21.5 +// source: datadog/trace/tracer_payload.proto + +package trace + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// TraceChunk represents a list of spans with the same trace ID. In other words, a chunk of a trace. +type TraceChunk struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // priority specifies sampling priority of the trace. + // @gotags: json:"priority" msg:"priority" + Priority int32 `protobuf:"varint,1,opt,name=priority,proto3" json:"priority" msg:"priority"` + // origin specifies origin product ("lambda", "rum", etc.) of the trace. + // @gotags: json:"origin" msg:"origin" + Origin string `protobuf:"bytes,2,opt,name=origin,proto3" json:"origin" msg:"origin"` + // spans specifies list of containing spans. + // @gotags: json:"spans" msg:"spans" + Spans []*Span `protobuf:"bytes,3,rep,name=spans,proto3" json:"spans" msg:"spans"` + // tags specifies tags common in all `spans`. + // @gotags: json:"tags" msg:"tags" + Tags map[string]string `protobuf:"bytes,4,rep,name=tags,proto3" json:"tags" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3" msg:"tags"` + // droppedTrace specifies whether the trace was dropped by samplers or not. + // @gotags: json:"dropped_trace" msg:"dropped_trace" + DroppedTrace bool `protobuf:"varint,5,opt,name=droppedTrace,proto3" json:"dropped_trace" msg:"dropped_trace"` +} + +func (x *TraceChunk) Reset() { + *x = TraceChunk{} + if protoimpl.UnsafeEnabled { + mi := &file_datadog_trace_tracer_payload_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TraceChunk) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TraceChunk) ProtoMessage() {} + +func (x *TraceChunk) ProtoReflect() protoreflect.Message { + mi := &file_datadog_trace_tracer_payload_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TraceChunk.ProtoReflect.Descriptor instead. 
+func (*TraceChunk) Descriptor() ([]byte, []int) { + return file_datadog_trace_tracer_payload_proto_rawDescGZIP(), []int{0} +} + +func (x *TraceChunk) GetPriority() int32 { + if x != nil { + return x.Priority + } + return 0 +} + +func (x *TraceChunk) GetOrigin() string { + if x != nil { + return x.Origin + } + return "" +} + +func (x *TraceChunk) GetSpans() []*Span { + if x != nil { + return x.Spans + } + return nil +} + +func (x *TraceChunk) GetTags() map[string]string { + if x != nil { + return x.Tags + } + return nil +} + +func (x *TraceChunk) GetDroppedTrace() bool { + if x != nil { + return x.DroppedTrace + } + return false +} + +// TracerPayload represents a payload the trace agent receives from tracers. +type TracerPayload struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // containerID specifies the ID of the container where the tracer is running on. + // @gotags: json:"container_id" msg:"container_id" + ContainerID string `protobuf:"bytes,1,opt,name=containerID,proto3" json:"container_id" msg:"container_id"` + // languageName specifies language of the tracer. + // @gotags: json:"language_name" msg:"language_name" + LanguageName string `protobuf:"bytes,2,opt,name=languageName,proto3" json:"language_name" msg:"language_name"` + // languageVersion specifies language version of the tracer. + // @gotags: json:"language_version" msg:"language_version" + LanguageVersion string `protobuf:"bytes,3,opt,name=languageVersion,proto3" json:"language_version" msg:"language_version"` + // tracerVersion specifies version of the tracer. + // @gotags: json:"tracer_version" msg:"tracer_version" + TracerVersion string `protobuf:"bytes,4,opt,name=tracerVersion,proto3" json:"tracer_version" msg:"tracer_version"` + // runtimeID specifies V4 UUID representation of a tracer session. + // @gotags: json:"runtime_id" msg:"runtime_id" + RuntimeID string `protobuf:"bytes,5,opt,name=runtimeID,proto3" json:"runtime_id" msg:"runtime_id"` + // chunks specifies list of containing trace chunks. + // @gotags: json:"chunks" msg:"chunks" + Chunks []*TraceChunk `protobuf:"bytes,6,rep,name=chunks,proto3" json:"chunks" msg:"chunks"` + // tags specifies tags common in all `chunks`. + // @gotags: json:"tags" msg:"tags" + Tags map[string]string `protobuf:"bytes,7,rep,name=tags,proto3" json:"tags" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3" msg:"tags"` + // env specifies `env` tag that set with the tracer. + // @gotags: json:"env" msg:"env" + Env string `protobuf:"bytes,8,opt,name=env,proto3" json:"env" msg:"env"` + // hostname specifies hostname of where the tracer is running. + // @gotags: json:"hostname" msg:"hostname" + Hostname string `protobuf:"bytes,9,opt,name=hostname,proto3" json:"hostname" msg:"hostname"` + // version specifies `version` tag that set with the tracer. 
+ // @gotags: json:"app_version" msg:"app_version" + AppVersion string `protobuf:"bytes,10,opt,name=appVersion,proto3" json:"app_version" msg:"app_version"` +} + +func (x *TracerPayload) Reset() { + *x = TracerPayload{} + if protoimpl.UnsafeEnabled { + mi := &file_datadog_trace_tracer_payload_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TracerPayload) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TracerPayload) ProtoMessage() {} + +func (x *TracerPayload) ProtoReflect() protoreflect.Message { + mi := &file_datadog_trace_tracer_payload_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TracerPayload.ProtoReflect.Descriptor instead. +func (*TracerPayload) Descriptor() ([]byte, []int) { + return file_datadog_trace_tracer_payload_proto_rawDescGZIP(), []int{1} +} + +func (x *TracerPayload) GetContainerID() string { + if x != nil { + return x.ContainerID + } + return "" +} + +func (x *TracerPayload) GetLanguageName() string { + if x != nil { + return x.LanguageName + } + return "" +} + +func (x *TracerPayload) GetLanguageVersion() string { + if x != nil { + return x.LanguageVersion + } + return "" +} + +func (x *TracerPayload) GetTracerVersion() string { + if x != nil { + return x.TracerVersion + } + return "" +} + +func (x *TracerPayload) GetRuntimeID() string { + if x != nil { + return x.RuntimeID + } + return "" +} + +func (x *TracerPayload) GetChunks() []*TraceChunk { + if x != nil { + return x.Chunks + } + return nil +} + +func (x *TracerPayload) GetTags() map[string]string { + if x != nil { + return x.Tags + } + return nil +} + +func (x *TracerPayload) GetEnv() string { + if x != nil { + return x.Env + } + return "" +} + +func (x *TracerPayload) GetHostname() string { + if x != nil { + return x.Hostname + } + return "" +} + +func (x *TracerPayload) GetAppVersion() string { + if x != nil { + return x.AppVersion + } + return "" +} + +var File_datadog_trace_tracer_payload_proto protoreflect.FileDescriptor + +var file_datadog_trace_tracer_payload_proto_rawDesc = []byte{ + 0x0a, 0x22, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2f, + 0x74, 0x72, 0x61, 0x63, 0x65, 0x72, 0x5f, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0d, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x74, 0x72, + 0x61, 0x63, 0x65, 0x1a, 0x18, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2f, 0x74, 0x72, 0x61, + 0x63, 0x65, 0x2f, 0x73, 0x70, 0x61, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x81, 0x02, + 0x0a, 0x0a, 0x54, 0x72, 0x61, 0x63, 0x65, 0x43, 0x68, 0x75, 0x6e, 0x6b, 0x12, 0x1a, 0x0a, 0x08, + 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, + 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x72, 0x69, 0x67, + 0x69, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, + 0x12, 0x29, 0x0a, 0x05, 0x73, 0x70, 0x61, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x13, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, + 0x53, 0x70, 0x61, 0x6e, 0x52, 0x05, 0x73, 0x70, 0x61, 0x6e, 0x73, 0x12, 0x37, 0x0a, 0x04, 0x74, + 0x61, 0x67, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x64, 
0x61, 0x74, 0x61, + 0x64, 0x6f, 0x67, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x54, 0x72, 0x61, 0x63, 0x65, 0x43, + 0x68, 0x75, 0x6e, 0x6b, 0x2e, 0x54, 0x61, 0x67, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x04, + 0x74, 0x61, 0x67, 0x73, 0x12, 0x22, 0x0a, 0x0c, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x54, + 0x72, 0x61, 0x63, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x64, 0x72, 0x6f, 0x70, + 0x70, 0x65, 0x64, 0x54, 0x72, 0x61, 0x63, 0x65, 0x1a, 0x37, 0x0a, 0x09, 0x54, 0x61, 0x67, 0x73, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, + 0x01, 0x22, 0xb9, 0x03, 0x0a, 0x0d, 0x54, 0x72, 0x61, 0x63, 0x65, 0x72, 0x50, 0x61, 0x79, 0x6c, + 0x6f, 0x61, 0x64, 0x12, 0x20, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, + 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, + 0x6e, 0x65, 0x72, 0x49, 0x44, 0x12, 0x22, 0x0a, 0x0c, 0x6c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, + 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x6c, 0x61, 0x6e, + 0x67, 0x75, 0x61, 0x67, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x28, 0x0a, 0x0f, 0x6c, 0x61, 0x6e, + 0x67, 0x75, 0x61, 0x67, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0f, 0x6c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x56, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x12, 0x24, 0x0a, 0x0d, 0x74, 0x72, 0x61, 0x63, 0x65, 0x72, 0x56, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x74, 0x72, 0x61, 0x63, + 0x65, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1c, 0x0a, 0x09, 0x72, 0x75, 0x6e, + 0x74, 0x69, 0x6d, 0x65, 0x49, 0x44, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x72, 0x75, + 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x49, 0x44, 0x12, 0x31, 0x0a, 0x06, 0x63, 0x68, 0x75, 0x6e, 0x6b, + 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, + 0x67, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x54, 0x72, 0x61, 0x63, 0x65, 0x43, 0x68, 0x75, + 0x6e, 0x6b, 0x52, 0x06, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x73, 0x12, 0x3a, 0x0a, 0x04, 0x74, 0x61, + 0x67, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, + 0x6f, 0x67, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x54, 0x72, 0x61, 0x63, 0x65, 0x72, 0x50, + 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x54, 0x61, 0x67, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x52, 0x04, 0x74, 0x61, 0x67, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x76, 0x18, 0x08, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x03, 0x65, 0x6e, 0x76, 0x12, 0x1a, 0x0a, 0x08, 0x68, 0x6f, 0x73, 0x74, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x68, 0x6f, 0x73, 0x74, + 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x61, 0x70, 0x70, 0x56, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x61, 0x70, 0x70, 0x56, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x1a, 0x37, 0x0a, 0x09, 0x54, 0x61, 0x67, 0x73, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, + 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x16, 0x5a, + 0x14, 
0x70, 0x6b, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x67, 0x6f, 0x2f, + 0x74, 0x72, 0x61, 0x63, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_datadog_trace_tracer_payload_proto_rawDescOnce sync.Once + file_datadog_trace_tracer_payload_proto_rawDescData = file_datadog_trace_tracer_payload_proto_rawDesc +) + +func file_datadog_trace_tracer_payload_proto_rawDescGZIP() []byte { + file_datadog_trace_tracer_payload_proto_rawDescOnce.Do(func() { + file_datadog_trace_tracer_payload_proto_rawDescData = protoimpl.X.CompressGZIP(file_datadog_trace_tracer_payload_proto_rawDescData) + }) + return file_datadog_trace_tracer_payload_proto_rawDescData +} + +var file_datadog_trace_tracer_payload_proto_msgTypes = make([]protoimpl.MessageInfo, 4) +var file_datadog_trace_tracer_payload_proto_goTypes = []interface{}{ + (*TraceChunk)(nil), // 0: datadog.trace.TraceChunk + (*TracerPayload)(nil), // 1: datadog.trace.TracerPayload + nil, // 2: datadog.trace.TraceChunk.TagsEntry + nil, // 3: datadog.trace.TracerPayload.TagsEntry + (*Span)(nil), // 4: datadog.trace.Span +} +var file_datadog_trace_tracer_payload_proto_depIdxs = []int32{ + 4, // 0: datadog.trace.TraceChunk.spans:type_name -> datadog.trace.Span + 2, // 1: datadog.trace.TraceChunk.tags:type_name -> datadog.trace.TraceChunk.TagsEntry + 0, // 2: datadog.trace.TracerPayload.chunks:type_name -> datadog.trace.TraceChunk + 3, // 3: datadog.trace.TracerPayload.tags:type_name -> datadog.trace.TracerPayload.TagsEntry + 4, // [4:4] is the sub-list for method output_type + 4, // [4:4] is the sub-list for method input_type + 4, // [4:4] is the sub-list for extension type_name + 4, // [4:4] is the sub-list for extension extendee + 0, // [0:4] is the sub-list for field type_name +} + +func init() { file_datadog_trace_tracer_payload_proto_init() } +func file_datadog_trace_tracer_payload_proto_init() { + if File_datadog_trace_tracer_payload_proto != nil { + return + } + file_datadog_trace_span_proto_init() + if !protoimpl.UnsafeEnabled { + file_datadog_trace_tracer_payload_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TraceChunk); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_datadog_trace_tracer_payload_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TracerPayload); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_datadog_trace_tracer_payload_proto_rawDesc, + NumEnums: 0, + NumMessages: 4, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_datadog_trace_tracer_payload_proto_goTypes, + DependencyIndexes: file_datadog_trace_tracer_payload_proto_depIdxs, + MessageInfos: file_datadog_trace_tracer_payload_proto_msgTypes, + }.Build() + File_datadog_trace_tracer_payload_proto = out.File + file_datadog_trace_tracer_payload_proto_rawDesc = nil + file_datadog_trace_tracer_payload_proto_goTypes = nil + file_datadog_trace_tracer_payload_proto_depIdxs = nil +} diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace/tracer_payload_gen.go b/vendor/github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace/tracer_payload_gen.go new file mode 100644 index 000000000..cd2b39250 --- /dev/null +++ 
b/vendor/github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace/tracer_payload_gen.go @@ -0,0 +1,384 @@ +package trace + +// Code generated by github.com/tinylib/msgp DO NOT EDIT. + +import ( + "github.com/tinylib/msgp/msgp" +) + +// MarshalMsg implements msgp.Marshaler +func (z *TraceChunk) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) + // map header, size 5 + // string "priority" + o = append(o, 0x85, 0xa8, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79) + o = msgp.AppendInt32(o, z.Priority) + // string "origin" + o = append(o, 0xa6, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e) + o = msgp.AppendString(o, z.Origin) + // string "spans" + o = append(o, 0xa5, 0x73, 0x70, 0x61, 0x6e, 0x73) + o = msgp.AppendArrayHeader(o, uint32(len(z.Spans))) + for za0001 := range z.Spans { + if z.Spans[za0001] == nil { + o = msgp.AppendNil(o) + } else { + o, err = z.Spans[za0001].MarshalMsg(o) + if err != nil { + err = msgp.WrapError(err, "Spans", za0001) + return + } + } + } + // string "tags" + o = append(o, 0xa4, 0x74, 0x61, 0x67, 0x73) + o = msgp.AppendMapHeader(o, uint32(len(z.Tags))) + for za0002, za0003 := range z.Tags { + o = msgp.AppendString(o, za0002) + o = msgp.AppendString(o, za0003) + } + // string "dropped_trace" + o = append(o, 0xad, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x74, 0x72, 0x61, 0x63, 0x65) + o = msgp.AppendBool(o, z.DroppedTrace) + return +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *TraceChunk) UnmarshalMsg(bts []byte) (o []byte, err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "priority": + z.Priority, bts, err = msgp.ReadInt32Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "Priority") + return + } + case "origin": + z.Origin, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Origin") + return + } + case "spans": + var zb0002 uint32 + zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Spans") + return + } + if cap(z.Spans) >= int(zb0002) { + z.Spans = (z.Spans)[:zb0002] + } else { + z.Spans = make([]*Span, zb0002) + } + for za0001 := range z.Spans { + if msgp.IsNil(bts) { + bts, err = msgp.ReadNilBytes(bts) + if err != nil { + return + } + z.Spans[za0001] = nil + } else { + if z.Spans[za0001] == nil { + z.Spans[za0001] = new(Span) + } + bts, err = z.Spans[za0001].UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, "Spans", za0001) + return + } + } + } + case "tags": + var zb0003 uint32 + zb0003, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Tags") + return + } + if z.Tags == nil { + z.Tags = make(map[string]string, zb0003) + } else if len(z.Tags) > 0 { + for key := range z.Tags { + delete(z.Tags, key) + } + } + for zb0003 > 0 { + var za0002 string + var za0003 string + zb0003-- + za0002, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Tags") + return + } + za0003, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Tags", za0002) + return + } + z.Tags[za0002] = za0003 + } + case "dropped_trace": + z.DroppedTrace, bts, err = msgp.ReadBoolBytes(bts) + if err != nil { + err = msgp.WrapError(err, "DroppedTrace") + return + } + default: 
+ bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + o = bts + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *TraceChunk) Msgsize() (s int) { + s = 1 + 9 + msgp.Int32Size + 7 + msgp.StringPrefixSize + len(z.Origin) + 6 + msgp.ArrayHeaderSize + for za0001 := range z.Spans { + if z.Spans[za0001] == nil { + s += msgp.NilSize + } else { + s += z.Spans[za0001].Msgsize() + } + } + s += 5 + msgp.MapHeaderSize + if z.Tags != nil { + for za0002, za0003 := range z.Tags { + _ = za0003 + s += msgp.StringPrefixSize + len(za0002) + msgp.StringPrefixSize + len(za0003) + } + } + s += 14 + msgp.BoolSize + return +} + +// MarshalMsg implements msgp.Marshaler +func (z *TracerPayload) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) + // map header, size 10 + // string "container_id" + o = append(o, 0x8a, 0xac, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64) + o = msgp.AppendString(o, z.ContainerID) + // string "language_name" + o = append(o, 0xad, 0x6c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65) + o = msgp.AppendString(o, z.LanguageName) + // string "language_version" + o = append(o, 0xb0, 0x6c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e) + o = msgp.AppendString(o, z.LanguageVersion) + // string "tracer_version" + o = append(o, 0xae, 0x74, 0x72, 0x61, 0x63, 0x65, 0x72, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e) + o = msgp.AppendString(o, z.TracerVersion) + // string "runtime_id" + o = append(o, 0xaa, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x69, 0x64) + o = msgp.AppendString(o, z.RuntimeID) + // string "chunks" + o = append(o, 0xa6, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x73) + o = msgp.AppendArrayHeader(o, uint32(len(z.Chunks))) + for za0001 := range z.Chunks { + if z.Chunks[za0001] == nil { + o = msgp.AppendNil(o) + } else { + o, err = z.Chunks[za0001].MarshalMsg(o) + if err != nil { + err = msgp.WrapError(err, "Chunks", za0001) + return + } + } + } + // string "tags" + o = append(o, 0xa4, 0x74, 0x61, 0x67, 0x73) + o = msgp.AppendMapHeader(o, uint32(len(z.Tags))) + for za0002, za0003 := range z.Tags { + o = msgp.AppendString(o, za0002) + o = msgp.AppendString(o, za0003) + } + // string "env" + o = append(o, 0xa3, 0x65, 0x6e, 0x76) + o = msgp.AppendString(o, z.Env) + // string "hostname" + o = append(o, 0xa8, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65) + o = msgp.AppendString(o, z.Hostname) + // string "app_version" + o = append(o, 0xab, 0x61, 0x70, 0x70, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e) + o = msgp.AppendString(o, z.AppVersion) + return +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *TracerPayload) UnmarshalMsg(bts []byte) (o []byte, err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "container_id": + z.ContainerID, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "ContainerID") + return + } + case "language_name": + z.LanguageName, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "LanguageName") + return + } + case "language_version": + z.LanguageVersion, bts, err = 
msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "LanguageVersion") + return + } + case "tracer_version": + z.TracerVersion, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "TracerVersion") + return + } + case "runtime_id": + z.RuntimeID, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "RuntimeID") + return + } + case "chunks": + var zb0002 uint32 + zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Chunks") + return + } + if cap(z.Chunks) >= int(zb0002) { + z.Chunks = (z.Chunks)[:zb0002] + } else { + z.Chunks = make([]*TraceChunk, zb0002) + } + for za0001 := range z.Chunks { + if msgp.IsNil(bts) { + bts, err = msgp.ReadNilBytes(bts) + if err != nil { + return + } + z.Chunks[za0001] = nil + } else { + if z.Chunks[za0001] == nil { + z.Chunks[za0001] = new(TraceChunk) + } + bts, err = z.Chunks[za0001].UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, "Chunks", za0001) + return + } + } + } + case "tags": + var zb0003 uint32 + zb0003, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Tags") + return + } + if z.Tags == nil { + z.Tags = make(map[string]string, zb0003) + } else if len(z.Tags) > 0 { + for key := range z.Tags { + delete(z.Tags, key) + } + } + for zb0003 > 0 { + var za0002 string + var za0003 string + zb0003-- + za0002, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Tags") + return + } + za0003, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Tags", za0002) + return + } + z.Tags[za0002] = za0003 + } + case "env": + z.Env, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Env") + return + } + case "hostname": + z.Hostname, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Hostname") + return + } + case "app_version": + z.AppVersion, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "AppVersion") + return + } + default: + bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + o = bts + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *TracerPayload) Msgsize() (s int) { + s = 1 + 13 + msgp.StringPrefixSize + len(z.ContainerID) + 14 + msgp.StringPrefixSize + len(z.LanguageName) + 17 + msgp.StringPrefixSize + len(z.LanguageVersion) + 15 + msgp.StringPrefixSize + len(z.TracerVersion) + 11 + msgp.StringPrefixSize + len(z.RuntimeID) + 7 + msgp.ArrayHeaderSize + for za0001 := range z.Chunks { + if z.Chunks[za0001] == nil { + s += msgp.NilSize + } else { + s += z.Chunks[za0001].Msgsize() + } + } + s += 5 + msgp.MapHeaderSize + if z.Tags != nil { + for za0002, za0003 := range z.Tags { + _ = za0003 + s += msgp.StringPrefixSize + len(za0002) + msgp.StringPrefixSize + len(za0003) + } + } + s += 4 + msgp.StringPrefixSize + len(z.Env) + 9 + msgp.StringPrefixSize + len(z.Hostname) + 12 + msgp.StringPrefixSize + len(z.AppVersion) + return +} diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace/tracer_payload_utils.go b/vendor/github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace/tracer_payload_utils.go new file mode 100644 index 000000000..9f7fabba2 --- /dev/null +++ b/vendor/github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace/tracer_payload_utils.go @@ -0,0 +1,35 @@ +// Unless explicitly stated otherwise all 
files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package trace
+
+// traceChunkCopiedFields records the fields that are copied in ShallowCopy.
+// This should match exactly the fields set in (*TraceChunk).ShallowCopy.
+// This is used by tests to enforce the correctness of ShallowCopy.
+var traceChunkCopiedFields = map[string]struct{}{
+	"Priority":     {},
+	"Origin":       {},
+	"Spans":        {},
+	"Tags":         {},
+	"DroppedTrace": {},
+}
+
+// ShallowCopy returns a shallow copy of the copy-able portion of a TraceChunk. These are the
+// public fields, each of which has a Get* method. The completeness of this
+// method is enforced by the traceChunkCopiedFields map above, which the tests
+// check via reflection. Instead of paying pkg/proto/utils.ProtoCopier's heavy
+// reflection cost on every copy at runtime, we use reflection once, in tests.
+func (t *TraceChunk) ShallowCopy() *TraceChunk {
+	if t == nil {
+		return nil
+	}
+	return &TraceChunk{
+		Priority:     t.Priority,
+		Origin:       t.Origin,
+		Spans:        t.Spans,
+		Tags:         t.Tags,
+		DroppedTrace: t.DroppedTrace,
+	}
+}
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace/tracer_payload_vtproto.pb.go b/vendor/github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace/tracer_payload_vtproto.pb.go
new file mode 100644
index 000000000..b1544fa22
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace/tracer_payload_vtproto.pb.go
@@ -0,0 +1,1066 @@
+// Code generated by protoc-gen-go-vtproto. DO NOT EDIT.
+// protoc-gen-go-vtproto version: v0.4.0
+// source: datadog/trace/tracer_payload.proto
+
+package trace
+
+import (
+	fmt "fmt"
+	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+	io "io"
+)
+
+const (
+	// Verify that this generated code is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+	// Verify that runtime/protoimpl is sufficiently up-to-date.
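One caveat about ShallowCopy above: only the field headers are copied, so the Spans slice and the Tags map remain shared between the copy and the original. A minimal sketch of the aliasing this implies (values are illustrative, assuming the vendored trace types are in scope):

    orig := &TraceChunk{
    	Priority: 1,
    	Tags:     map[string]string{"env": "prod"},
    }
    cp := orig.ShallowCopy()
    cp.Priority = 2            // scalar field: the copy diverges, orig.Priority stays 1
    cp.Tags["env"] = "staging" // map field: the change is visible through orig.Tags too

Callers that need an independent Tags map or Spans slice must clone those fields themselves.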
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *TraceChunk) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TraceChunk) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *TraceChunk) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.DroppedTrace { + i-- + if m.DroppedTrace { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + } + if len(m.Tags) > 0 { + for k := range m.Tags { + v := m.Tags[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarint(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x22 + } + } + if len(m.Spans) > 0 { + for iNdEx := len(m.Spans) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Spans[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + } + if len(m.Origin) > 0 { + i -= len(m.Origin) + copy(dAtA[i:], m.Origin) + i = encodeVarint(dAtA, i, uint64(len(m.Origin))) + i-- + dAtA[i] = 0x12 + } + if m.Priority != 0 { + i = encodeVarint(dAtA, i, uint64(m.Priority)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *TracerPayload) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TracerPayload) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *TracerPayload) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.AppVersion) > 0 { + i -= len(m.AppVersion) + copy(dAtA[i:], m.AppVersion) + i = encodeVarint(dAtA, i, uint64(len(m.AppVersion))) + i-- + dAtA[i] = 0x52 + } + if len(m.Hostname) > 0 { + i -= len(m.Hostname) + copy(dAtA[i:], m.Hostname) + i = encodeVarint(dAtA, i, uint64(len(m.Hostname))) + i-- + dAtA[i] = 0x4a + } + if len(m.Env) > 0 { + i -= len(m.Env) + copy(dAtA[i:], m.Env) + i = encodeVarint(dAtA, i, uint64(len(m.Env))) + i-- + dAtA[i] = 0x42 + } + if len(m.Tags) > 0 { + for k := range m.Tags { + v := m.Tags[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarint(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x3a + } + } + if len(m.Chunks) > 0 { + for iNdEx := len(m.Chunks) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Chunks[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x32 + } + } + if len(m.RuntimeID) > 0 { + i -= len(m.RuntimeID) + copy(dAtA[i:], 
m.RuntimeID) + i = encodeVarint(dAtA, i, uint64(len(m.RuntimeID))) + i-- + dAtA[i] = 0x2a + } + if len(m.TracerVersion) > 0 { + i -= len(m.TracerVersion) + copy(dAtA[i:], m.TracerVersion) + i = encodeVarint(dAtA, i, uint64(len(m.TracerVersion))) + i-- + dAtA[i] = 0x22 + } + if len(m.LanguageVersion) > 0 { + i -= len(m.LanguageVersion) + copy(dAtA[i:], m.LanguageVersion) + i = encodeVarint(dAtA, i, uint64(len(m.LanguageVersion))) + i-- + dAtA[i] = 0x1a + } + if len(m.LanguageName) > 0 { + i -= len(m.LanguageName) + copy(dAtA[i:], m.LanguageName) + i = encodeVarint(dAtA, i, uint64(len(m.LanguageName))) + i-- + dAtA[i] = 0x12 + } + if len(m.ContainerID) > 0 { + i -= len(m.ContainerID) + copy(dAtA[i:], m.ContainerID) + i = encodeVarint(dAtA, i, uint64(len(m.ContainerID))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *TraceChunk) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Priority != 0 { + n += 1 + sov(uint64(m.Priority)) + } + l = len(m.Origin) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if len(m.Spans) > 0 { + for _, e := range m.Spans { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + if len(m.Tags) > 0 { + for k, v := range m.Tags { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sov(uint64(len(k))) + 1 + len(v) + sov(uint64(len(v))) + n += mapEntrySize + 1 + sov(uint64(mapEntrySize)) + } + } + if m.DroppedTrace { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *TracerPayload) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ContainerID) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.LanguageName) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.LanguageVersion) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.TracerVersion) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.RuntimeID) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if len(m.Chunks) > 0 { + for _, e := range m.Chunks { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + if len(m.Tags) > 0 { + for k, v := range m.Tags { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sov(uint64(len(k))) + 1 + len(v) + sov(uint64(len(v))) + n += mapEntrySize + 1 + sov(uint64(mapEntrySize)) + } + } + l = len(m.Env) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Hostname) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.AppVersion) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *TraceChunk) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TraceChunk: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TraceChunk: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Priority", wireType) + } + m.Priority = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Priority |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType 
!= 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Origin", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Origin = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spans", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Spans = append(m.Spans, &Span{}) + if err := m.Spans[len(m.Spans)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tags", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Tags == nil { + m.Tags = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLength + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLength + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLength + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLength + } + if postStringIndexmapvalue > l { 
+ return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Tags[mapkey] = mapvalue + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DroppedTrace", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.DroppedTrace = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TracerPayload) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TracerPayload: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TracerPayload: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ContainerID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LanguageName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LanguageName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LanguageVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LanguageVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TracerVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TracerVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RuntimeID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RuntimeID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Chunks", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Chunks = append(m.Chunks, &TraceChunk{}) + if err := m.Chunks[len(m.Chunks)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tags", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Tags == nil { + m.Tags = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLength + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLength + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLength + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLength + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Tags[mapkey] = mapvalue + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Env = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hostname", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Hostname = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AppVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AppVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = 
postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/remoteconfig/state/products.go b/vendor/github.com/DataDog/datadog-agent/pkg/remoteconfig/state/products.go index 89cdb8c31..96f635df6 100644 --- a/vendor/github.com/DataDog/datadog-agent/pkg/remoteconfig/state/products.go +++ b/vendor/github.com/DataDog/datadog-agent/pkg/remoteconfig/state/products.go @@ -12,6 +12,7 @@ var validProducts = map[string]struct{}{ ProductCWSDD: {}, ProductCWSCustom: {}, ProductCWSProfiles: {}, + ProductASM: {}, ProductASMFeatures: {}, ProductASMDD: {}, ProductASMData: {}, @@ -31,6 +32,8 @@ const ( ProductCWSCustom = "CWS_CUSTOM" // ProductCWSProfiles is the cloud workload security profile product ProductCWSProfiles = "CWS_SECURITY_PROFILES" + // ProductASM is the ASM product used by customers to issue rules configurations + ProductASM = "ASM" // ProductASMFeatures is the ASM product used for ASM activation through remote config ProductASMFeatures = "ASM_FEATURES" // ProductASMDD is the application security monitoring product managed by datadog employees diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/remoteconfig/state/tuf.go b/vendor/github.com/DataDog/datadog-agent/pkg/remoteconfig/state/tuf.go index 3cc712d9c..f67ab9c19 100644 --- a/vendor/github.com/DataDog/datadog-agent/pkg/remoteconfig/state/tuf.go +++ b/vendor/github.com/DataDog/datadog-agent/pkg/remoteconfig/state/tuf.go @@ -30,7 +30,7 @@ func newTufRootsClient(root []byte) (*tufRootsClient, error) { rootRemoteStore := &rootClientRemoteStore{} rootClient := client.NewClient(rootLocalStore, rootRemoteStore) - err := rootClient.InitLocal(root) + err := rootClient.Init(root) if err != nil { return nil, err } diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/util/log/log.go b/vendor/github.com/DataDog/datadog-agent/pkg/util/log/log.go index cd7673ea7..6507da3d1 100644 --- a/vendor/github.com/DataDog/datadog-agent/pkg/util/log/log.go +++ b/vendor/github.com/DataDog/datadog-agent/pkg/util/log/log.go @@ -457,7 +457,12 @@ func logWithError(logLevel seelog.LogLevel, bufferFunc func(), logFunc func(stri addLogToBuffer(bufferFunc) } err := formatError(v...) - if fallbackStderr { + + // Originally (PR 6436) the fallbackStderr check was added to cover a small startup + // window in which error messages were lost because the Logger had not been + // initialized yet. Restrict the stderr fallback to that window: when the Logger + // exists and has suppressed the log, that suppression should be respected. + if fallbackStderr && (Logger == nil || Logger.inner == nil) { fmt.Fprintf(os.Stderr, "%s: %s\n", logLevel.String(), err.Error()) } return err @@ -482,7 +487,12 @@ func logFormatWithError(logLevel seelog.LogLevel, bufferFunc func(), logFunc fun addLogToBuffer(bufferFunc) } err := formatErrorf(format, params...) - if fallbackStderr { + + // Originally (PR 6436) the fallbackStderr check was added to cover a small startup + // window in which error messages were lost because the Logger had not been + // initialized yet. Restrict the stderr fallback to that window: when the Logger + // exists and has suppressed the log, that suppression should be respected.
+ if fallbackStderr && (Logger == nil || Logger.inner == nil) { fmt.Fprintf(os.Stderr, "%s: %s\n", logLevel.String(), err.Error()) } return err @@ -534,6 +544,18 @@ func Tracef(format string, params ...interface{}) { logFormat(seelog.TraceLvl, func() { Tracef(format, params...) }, Logger.tracef, format, params...) } +// TracefStackDepth logs with format at the trace level and the current stack depth plus the given depth +func TracefStackDepth(depth int, format string, params ...interface{}) { + currentLevel, _ := GetLogLevel() + if currentLevel > seelog.TraceLvl { + return + } + msg := fmt.Sprintf(format, params...) + Log(seelog.TraceLvl, func() { TraceStackDepth(depth, msg) }, func(s string) { + Logger.traceStackDepth(s, depth) + }, msg) +} + // TracecStackDepth logs at the trace level with context and the current stack depth plus the additional given one func TracecStackDepth(message string, depth int, context ...interface{}) { logContext(seelog.TraceLvl, func() { Tracec(message, context...) }, Logger.trace, message, depth, context...) @@ -562,6 +584,18 @@ func Debugf(format string, params ...interface{}) { logFormat(seelog.DebugLvl, func() { Debugf(format, params...) }, Logger.debugf, format, params...) } +// DebugfStackDepth logs with format at the debug level and the current stack depth plus the given depth +func DebugfStackDepth(depth int, format string, params ...interface{}) { + currentLevel, _ := GetLogLevel() + if currentLevel > seelog.DebugLvl { + return + } + msg := fmt.Sprintf(format, params...) + Log(seelog.DebugLvl, func() { DebugStackDepth(depth, msg) }, func(s string) { + Logger.debugStackDepth(s, depth) + }, msg) +} + // DebugcStackDepth logs at the debug level with context and the current stack depth plus the additional given one func DebugcStackDepth(message string, depth int, context ...interface{}) { logContext(seelog.DebugLvl, func() { Debugc(message, context...) }, Logger.debug, message, depth, context...) @@ -590,6 +624,18 @@ func Infof(format string, params ...interface{}) { logFormat(seelog.InfoLvl, func() { Infof(format, params...) }, Logger.infof, format, params...) } +// InfofStackDepth logs with format at the info level and the current stack depth plus the given depth +func InfofStackDepth(depth int, format string, params ...interface{}) { + currentLevel, _ := GetLogLevel() + if currentLevel > seelog.InfoLvl { + return + } + msg := fmt.Sprintf(format, params...) + Log(seelog.InfoLvl, func() { InfoStackDepth(depth, msg) }, func(s string) { + Logger.infoStackDepth(s, depth) + }, msg) +} + // InfocStackDepth logs at the info level with context and the current stack depth plus the additional given one func InfocStackDepth(message string, depth int, context ...interface{}) { logContext(seelog.InfoLvl, func() { Infoc(message, context...) }, Logger.info, message, depth, context...) @@ -618,6 +664,14 @@ func Warnf(format string, params ...interface{}) error { return logFormatWithError(seelog.WarnLvl, func() { Warnf(format, params...) }, Logger.warnf, format, false, params...) } +// WarnfStackDepth logs with format at the warn level and the current stack depth plus the given depth +func WarnfStackDepth(depth int, format string, params ...interface{}) error { + msg := fmt.Sprintf(format, params...) 
+ return logWithError(seelog.WarnLvl, func() { WarnStackDepth(depth, msg) }, func(s string) error { + return Logger.warnStackDepth(s, depth) + }, false, msg) +} + // WarncStackDepth logs at the warn level with context and the current stack depth plus the additional given one and returns an error containing the formatted log message func WarncStackDepth(message string, depth int, context ...interface{}) error { return logContextWithError(seelog.WarnLvl, func() { Warnc(message, context...) }, Logger.warn, message, false, depth, context...) @@ -646,6 +700,14 @@ func Errorf(format string, params ...interface{}) error { return logFormatWithError(seelog.ErrorLvl, func() { Errorf(format, params...) }, Logger.errorf, format, true, params...) } +// ErrorfStackDepth logs with format at the error level and the current stack depth plus the given depth +func ErrorfStackDepth(depth int, format string, params ...interface{}) error { + msg := fmt.Sprintf(format, params...) + return logWithError(seelog.ErrorLvl, func() { ErrorStackDepth(depth, msg) }, func(s string) error { + return Logger.errorStackDepth(s, depth) + }, false, msg) +} + // ErrorcStackDepth logs at the error level with context and the current stack depth plus the additional given one and returns an error containing the formatted log message func ErrorcStackDepth(message string, depth int, context ...interface{}) error { return logContextWithError(seelog.ErrorLvl, func() { Errorc(message, context...) }, Logger.error, message, true, depth, context...) @@ -674,6 +736,14 @@ func Criticalf(format string, params ...interface{}) error { return logFormatWithError(seelog.CriticalLvl, func() { Criticalf(format, params...) }, Logger.criticalf, format, true, params...) } +// CriticalfStackDepth logs with format at the critical level and the current stack depth plus the given depth +func CriticalfStackDepth(depth int, format string, params ...interface{}) error { + msg := fmt.Sprintf(format, params...) + return logWithError(seelog.CriticalLvl, func() { CriticalStackDepth(depth, msg) }, func(s string) error { + return Logger.criticalStackDepth(s, depth) + }, false, msg) +} + // CriticalcStackDepth logs at the critical level with context and the current stack depth plus the additional given one and returns an error containing the formatted log message func CriticalcStackDepth(message string, depth int, context ...interface{}) error { return logContextWithError(seelog.CriticalLvl, func() { Criticalc(message, context...) }, Logger.critical, message, true, depth, context...) diff --git a/vendor/github.com/DataDog/go-tuf/client/client.go b/vendor/github.com/DataDog/go-tuf/client/client.go index 8715e0f28..58196dcc3 100644 --- a/vendor/github.com/DataDog/go-tuf/client/client.go +++ b/vendor/github.com/DataDog/go-tuf/client/client.go @@ -4,8 +4,9 @@ import ( "bytes" "encoding/hex" "encoding/json" + "errors" + "fmt" "io" - "io/ioutil" "github.com/DataDog/go-tuf/data" "github.com/DataDog/go-tuf/internal/roles" @@ -107,56 +108,13 @@ func NewClient(local LocalStore, remote RemoteStore) *Client { } } -// Init initializes a local repository. -// -// The latest root.json is fetched from remote storage, verified using rootKeys -// and threshold, and then saved in local storage. It is expected that rootKeys -// were securely distributed with the software being updated. -// -// Deprecated: Use c.InitLocal and c.Update to initialize a local repository.
-func (c *Client) Init(rootKeys []*data.PublicKey, threshold int) error { - if len(rootKeys) < threshold { - return ErrInsufficientKeys - } - rootJSON, err := c.downloadMetaUnsafe("root.json", defaultRootDownloadLimit) - if err != nil { - return err - } - - // create a new key database, and add all the public `rootKeys` to it. - c.db = verify.NewDB() - rootKeyIDs := make([]string, 0, len(rootKeys)) - for _, key := range rootKeys { - for _, id := range key.IDs() { - rootKeyIDs = append(rootKeyIDs, id) - if err := c.db.AddKey(id, key); err != nil { - return err - } - } - } - - // add a mock "root" role that trusts the passed in key ids. These keys - // will be used to verify the `root.json` we just fetched. - role := &data.Role{Threshold: threshold, KeyIDs: rootKeyIDs} - if err := c.db.AddRole("root", role); err != nil { - return err - } - - // verify that the new root is valid. - if err := c.decodeRoot(rootJSON); err != nil { - return err - } - - return c.local.SetMeta("root.json", rootJSON) -} - -// InitLocal initializes a local repository from root metadata. +// Init initializes a local repository from root metadata. // // The root's keys are extracted from the root and saved in local storage. // Root expiration is not checked. // It is expected that rootJSON was securely distributed with the software // being updated. -func (c *Client) InitLocal(rootJSON []byte) error { +func (c *Client) Init(rootJSON []byte) error { err := c.loadAndVerifyRootMeta(rootJSON, true /*ignoreExpiredCheck*/) if err != nil { return err @@ -167,7 +125,7 @@ func (c *Client) InitLocal(rootJSON []byte) error { // Update downloads and verifies remote metadata and returns updated targets. // It always performs root update (5.2 and 5.3) section of the v1.0.19 spec. // -// https://DataDog.github.io/specification/v1.0.19/index.html#load-trusted-root +// https://theupdateframework.github.io/specification/v1.0.19/index.html#load-trusted-root func (c *Client) Update() (data.TargetFiles, error) { if err := c.UpdateRoots(); err != nil { if _, ok := err.(verify.ErrExpired); ok { @@ -188,7 +146,13 @@ func (c *Client) Update() (data.TargetFiles, error) { } // 5.4.(2,3 and 4) - Verify timestamp against various attacks // Returns the extracted snapshot metadata - snapshotMeta, err := c.decodeTimestamp(timestampJSON) + snapshotMeta, sameTimestampVersion, err := c.decodeTimestamp(timestampJSON) + if sameTimestampVersion { + // The new timestamp.json file had the same version; we don't need to + // update, so bail early. + return c.targets, nil + } + if err != nil { return nil, err } @@ -240,7 +204,7 @@ func (c *Client) Update() (data.TargetFiles, error) { } func (c *Client) UpdateRoots() error { - // https://DataDog.github.io/specification/v1.0.19/index.html#load-trusted-root + // https://theupdateframework.github.io/specification/v1.0.19/index.html#load-trusted-root // 5.2 Load the trusted root metadata file. We assume that a good, // trusted copy of this file was shipped with the package manager // or software updater using an out-of-band process. 
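The hunks above change how downstream code (for example newTufRootsClient in the datadog-agent remoteconfig package) bootstraps the TUF client: Init now takes the raw trusted root bytes directly (replacing InitLocal), and Update can return early when the fetched timestamp version is unchanged. A minimal sketch of the resulting flow, assuming the DataDog fork keeps upstream go-tuf's MemoryLocalStore and HTTPRemoteStore helpers; the repository URL and root file name are illustrative only, not taken from this patch:

    package main

    import (
        "fmt"
        "os"

        "github.com/DataDog/go-tuf/client"
    )

    func main() {
        // Trusted root metadata, distributed out of band with the software.
        rootJSON, err := os.ReadFile("root.json")
        if err != nil {
            panic(err)
        }

        local := client.MemoryLocalStore()
        remote, err := client.HTTPRemoteStore("https://tuf.example.com/repo", nil, nil)
        if err != nil {
            panic(err)
        }

        c := client.NewClient(local, remote)

        // Formerly InitLocal; the deprecated Init(rootKeys, threshold) variant
        // that fetched root.json over the network has been removed.
        if err := c.Init(rootJSON); err != nil {
            panic(err)
        }

        // Update bails out early (returning the already-trusted targets) when
        // the fetched timestamp has the same version as the cached one.
        targets, err := c.Update()
        if err != nil {
            panic(err)
        }
        fmt.Printf("verified %d target file(s)\n", len(targets))
    }
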
@@ -286,7 +250,7 @@ func (c *Client) UpdateRoots() error { nRootMetadata := m["root.json"] - // https://DataDog.github.io/specification/v1.0.19/index.html#update-root + // https://theupdateframework.github.io/specification/v1.0.19/index.html#update-root // 5.3.1 Since it may now be signed using entirely different keys, // the client MUST somehow be able to establish a trusted line of @@ -438,8 +402,8 @@ func (c *Client) getLocalMeta() error { } } + snapshot := &data.Snapshot{} if snapshotJSON, ok := meta["snapshot.json"]; ok { - snapshot := &data.Snapshot{} if err := c.db.UnmarshalTrusted(snapshotJSON, snapshot, "snapshot"); err != nil { loadFailed = true retErr = err @@ -463,12 +427,34 @@ func (c *Client) getLocalMeta() error { } } + if loadFailed { + // If any of the metadata failed to be verified, return the reason for that failure + // and fail fast before delegated targets + return retErr + } + + // verifiedDelegatedTargets is a set of verified delegated targets + verifiedDelegatedTargets := make(map[string]bool) for fileName := range meta { - if roles.IsDelegatedTargetsManifest(fileName) { - c.localMeta[fileName] = meta[fileName] + if !verifiedDelegatedTargets[fileName] && roles.IsDelegatedTargetsManifest(fileName) { + if delegationPath, err := c.getDelegationPathFromRaw(snapshot, meta[fileName]); err != nil { + loadFailed = true + retErr = err + } else { + // Every delegated targets in the path has been verified + // as a side effect of getDelegationPathFromRaw + for _, key := range delegationPath { + fileName := fmt.Sprintf("%s.json", key) + verifiedDelegatedTargets[fileName] = true + } + } } } + for fileName := range verifiedDelegatedTargets { + c.localMeta[fileName] = meta[fileName] + } + if loadFailed { // If any of the metadata failed to be verified, return the reason for that failure return retErr @@ -476,6 +462,55 @@ func (c *Client) getLocalMeta() error { return nil } +// getDelegationPathFromRaw verifies a delegated targets against +// a given snapshot and returns an error if it's invalid +// +// Delegation must have targets to get a path, else an empty list +// will be returned: this is because the delegation iterator is leveraged. +// +// Concrete example: +// targets +// └── a.json +//   └── b.json +//      └── c.json +//        └── target_file.txt +// +// If you try to use that function on "a.json" or "b.json", it'll return an empty list +// with no error, as neither of them declare a target file +// On the other hand, if you use that function on "c.json", it'll return & verify +// [c.json, b.json, a.json]. 
Running that function on every delegated targets +// guarantees that if a delegated targets is in the path of a target file, then it will +// appear at least once in the result +func (c *Client) getDelegationPathFromRaw(snapshot *data.Snapshot, delegatedTargetsJSON json.RawMessage) ([]string, error) { + // unmarshal the delegated targets first without verifying as + // we need at least one targets file name to leverage the + // getTargetFileMetaDelegationPath method + s := &data.Signed{} + if err := json.Unmarshal(delegatedTargetsJSON, s); err != nil { + return nil, err + } + targets := &data.Targets{} + if err := json.Unmarshal(s.Signed, targets); err != nil { + return nil, err + } + for targetPath := range targets.Targets { + // Gets target file from remote store + _, resp, err := c.getTargetFileMetaDelegationPath(targetPath, snapshot) + // We only need to test one targets file: + // - If it is valid, it means the delegated targets has been validated + // - If it is not, the delegated targets isn't valid + if errors.As(err, &ErrMissingRemoteMetadata{}) { + // As this function is used to fill the local store cache, the targets + // will be downloaded from the remote store as the local store cache is + // empty, meaning that the delegated targets may not exist anymore. In + // that case, ignore it. + return nil, nil + } + return resp, err + } + return nil, nil +} + // loadAndVerifyLocalRootMeta decodes and verifies root metadata from // local storage and loads the top-level keys. This method first clears // the DB for top-level keys and then loads the new keys. @@ -507,15 +542,7 @@ func (c *Client) loadAndVerifyRootMeta(rootJSON []byte, ignoreExpiredCheck bool) ndb := verify.NewDB() for id, k := range root.Keys { if err := ndb.AddKey(id, k); err != nil { - // TUF is considering in TAP-12 removing the - // requirement that the keyid hash algorithm be derived - // from the public key. So to be forwards compatible, - // we ignore `ErrWrongID` errors. - // - // TAP-12: https://github.com/DataDog/taps/blob/master/tap12.md - if _, ok := err.(verify.ErrWrongID); !ok { - return err - } + return err } } for name, role := range root.Roles { @@ -563,15 +590,7 @@ func (c *Client) verifyRoot(aJSON []byte, bJSON []byte) (*data.Root, error) { ndb := verify.NewDB() for id, k := range aRoot.Keys { if err := ndb.AddKey(id, k); err != nil { - // TUF is considering in TAP-12 removing the - // requirement that the keyid hash algorithm be derived - // from the public key. So to be forwards compatible, - // we ignore `ErrWrongID` errors. 
- // - // TAP-12: https://github.com/DataDog/taps/blob/master/tap12.md - if _, ok := err.(verify.ErrWrongID); !ok { - return nil, err - } + return nil, err } } for name, role := range aRoot.Roles { @@ -618,7 +637,7 @@ func (c *Client) downloadMetaUnsafe(name string, maxMetaSize int64) ([]byte, err // although the size has been checked above, use a LimitReader in case // the reported size is inaccurate, or size is -1 which indicates an // unknown length - return ioutil.ReadAll(io.LimitReader(r, maxMetaSize)) + return io.ReadAll(io.LimitReader(r, maxMetaSize)) } // remoteGetFunc is the type of function the download method uses to download @@ -689,52 +708,55 @@ func (c *Client) downloadMeta(name string, version int64, m data.FileMeta) ([]by stream = r } - return ioutil.ReadAll(stream) + return io.ReadAll(stream) } func (c *Client) downloadMetaFromSnapshot(name string, m data.SnapshotFileMeta) ([]byte, error) { - b, err := c.downloadMeta(name, m.Version, m.FileMeta) + b, err := c.downloadMeta(name, m.Version, data.FileMeta{Length: m.Length, Hashes: m.Hashes}) if err != nil { return nil, err } - meta, err := util.GenerateSnapshotFileMeta(bytes.NewReader(b), m.HashAlgorithms()...) + // 5.6.2 – Check length and hashes of fetched bytes *before* parsing metadata + if err := util.BytesMatchLenAndHashes(b, m.Length, m.Hashes); err != nil { + return nil, ErrDownloadFailed{name, err} + } + + meta, err := util.GenerateSnapshotFileMeta(bytes.NewReader(b), m.Hashes.HashAlgorithms()...) if err != nil { return nil, err } - // 5.6.2 and 5.6.4 - Check against snapshot role's targets hash and version - if err := util.SnapshotFileMetaEqual(meta, m); err != nil { + + // 5.6.4 - Check against snapshot role's version + if err := util.VersionEqual(meta.Version, m.Version); err != nil { return nil, ErrDownloadFailed{name, err} } + return b, nil } func (c *Client) downloadMetaFromTimestamp(name string, m data.TimestampFileMeta) ([]byte, error) { - b, err := c.downloadMeta(name, m.Version, m.FileMeta) + b, err := c.downloadMeta(name, m.Version, data.FileMeta{Length: m.Length, Hashes: m.Hashes}) if err != nil { return nil, err } - meta, err := util.GenerateTimestampFileMeta(bytes.NewReader(b), m.HashAlgorithms()...) + // 5.2.2. – Check length and hashes of fetched bytes *before* parsing metadata + if err := util.BytesMatchLenAndHashes(b, m.Length, m.Hashes); err != nil { + return nil, ErrDownloadFailed{name, err} + } + + meta, err := util.GenerateTimestampFileMeta(bytes.NewReader(b), m.Hashes.HashAlgorithms()...) if err != nil { return nil, err } - // 5.5.2 and 5.5.4 - Check against timestamp role's snapshot hash and version - if err := util.TimestampFileMetaEqual(meta, m); err != nil { + + // 5.5.4 - Check against timestamp role's version + if err := util.VersionEqual(meta.Version, m.Version); err != nil { return nil, ErrDownloadFailed{name, err} } - return b, nil -} -// decodeRoot decodes and verifies root metadata. -func (c *Client) decodeRoot(b json.RawMessage) error { - root := &data.Root{} - if err := c.db.Unmarshal(b, root, "root", c.rootVer); err != nil { - return ErrDecodeFailed{"root.json", err} - } - c.rootVer = root.Version - c.consistentSnapshot = root.ConsistentSnapshot - return nil + return b, nil } // decodeSnapshot decodes and verifies snapshot metadata, and returns the new @@ -805,22 +827,31 @@ func (c *Client) decodeTargets(b json.RawMessage) (data.TargetFiles, error) { } // decodeTimestamp decodes and verifies timestamp metadata, and returns the -// new snapshot file meta. 
-func (c *Client) decodeTimestamp(b json.RawMessage) (data.TimestampFileMeta, error) { +// new snapshot file meta and signals whether the update should be aborted early +// (the new timestamp has the same version as the old one, so there's no need to +// complete the update). +func (c *Client) decodeTimestamp(b json.RawMessage) (data.TimestampFileMeta, bool, error) { timestamp := &data.Timestamp{} + if err := c.db.Unmarshal(b, timestamp, "timestamp", c.timestampVer); err != nil { - return data.TimestampFileMeta{}, ErrDecodeFailed{"timestamp.json", err} + return data.TimestampFileMeta{}, false, ErrDecodeFailed{"timestamp.json", err} + } + // 5.4.3.1 - Check for timestamp rollback attack + // We already checked for timestamp.Version < c.timestampVer in the Unmarshal call above. + // Here, we're checking for version equality, which indicates that we can abandon this update. + if timestamp.Version == c.timestampVer { + return data.TimestampFileMeta{}, true, nil } // 5.4.3.2 - Check for snapshot rollback attack // Verify that the current snapshot meta version is less than or equal to the new one if timestamp.Meta["snapshot.json"].Version < c.snapshotVer { - return data.TimestampFileMeta{}, verify.ErrLowVersion{Actual: timestamp.Meta["snapshot.json"].Version, Current: c.snapshotVer} + return data.TimestampFileMeta{}, false, verify.ErrLowVersion{Actual: timestamp.Meta["snapshot.json"].Version, Current: c.snapshotVer} } - // At this point we can trust the new timestamp and the snaphost version it refers to + // At this point we can trust the new timestamp and the snapshot version it refers to // so we can update the client's trusted versions and proceed with persisting the new timestamp c.timestampVer = timestamp.Version c.snapshotVer = timestamp.Meta["snapshot.json"].Version - return timestamp.Meta["snapshot.json"], nil + return timestamp.Meta["snapshot.json"], false, nil } // hasMetaFromSnapshot checks whether local metadata has the given meta @@ -835,7 +866,7 @@ func (c *Client) localMetaFromSnapshot(name string, m data.SnapshotFileMeta) (js if !ok { return nil, false } - meta, err := util.GenerateSnapshotFileMeta(bytes.NewReader(b), m.HashAlgorithms()...) + meta, err := util.GenerateSnapshotFileMeta(bytes.NewReader(b), m.Hashes.HashAlgorithms()...) if err != nil { return nil, false } @@ -843,38 +874,6 @@ func (c *Client) localMetaFromSnapshot(name string, m data.SnapshotFileMeta) (js return b, err == nil } -// hasTargetsMeta checks whether local metadata has the given snapshot meta -// -//lint:ignore U1000 unused -func (c *Client) hasTargetsMeta(m data.SnapshotFileMeta) bool { - b, ok := c.localMeta["targets.json"] - if !ok { - return false - } - meta, err := util.GenerateSnapshotFileMeta(bytes.NewReader(b), m.HashAlgorithms()...) - if err != nil { - return false - } - err = util.SnapshotFileMetaEqual(meta, m) - return err == nil -} - -// hasSnapshotMeta checks whether local metadata has the given meta -// -//lint:ignore U1000 unused -func (c *Client) hasMetaFromTimestamp(name string, m data.TimestampFileMeta) bool { - b, ok := c.localMeta[name] - if !ok { - return false - } - meta, err := util.GenerateTimestampFileMeta(bytes.NewReader(b), m.HashAlgorithms()...) 
- if err != nil { - return false - } - err = util.TimestampFileMetaEqual(meta, m) - return err == nil -} - type Destination interface { io.Writer Delete() error @@ -889,6 +888,8 @@ type Destination interface { // - The target does not exist in any targets // - Metadata cannot be generated for the downloaded data // - Generated metadata does not match local metadata for the given file +// - Size of the download does not match if the reported size is known and +// incorrect func (c *Client) Download(name string, dest Destination) (err error) { // delete dest if there is an error defer func() { diff --git a/vendor/github.com/DataDog/go-tuf/client/delegations.go b/vendor/github.com/DataDog/go-tuf/client/delegations.go index ac3a31914..4cf540455 100644 --- a/vendor/github.com/DataDog/go-tuf/client/delegations.go +++ b/vendor/github.com/DataDog/go-tuf/client/delegations.go @@ -8,13 +8,23 @@ import ( // getTargetFileMeta searches for a verified TargetFileMeta matching a target // Requires a local snapshot to be loaded and is locked to the snapshot versions. -// Searches through delegated targets following TUF spec 1.0.19 section 5.6. func (c *Client) getTargetFileMeta(target string) (data.TargetFileMeta, error) { snapshot, err := c.loadLocalSnapshot() if err != nil { return data.TargetFileMeta{}, err } + targetFileMeta, _, err := c.getTargetFileMetaDelegationPath(target, snapshot) + if err != nil { + return data.TargetFileMeta{}, err + } + return targetFileMeta, nil +} + +// getTargetFileMetaDelegationPath searches for a verified TargetFileMeta matching a target +// Requires snapshot to be passed and is locked to that specific snapshot versions. +// Searches through delegated targets following TUF spec 1.0.19 section 5.6. +func (c *Client) getTargetFileMetaDelegationPath(target string, snapshot *data.Snapshot) (data.TargetFileMeta, []string, error) { // delegationsIterator covers 5.6.7 // - pre-order depth-first search starting with the top targets // - filter delegations with paths or path_hash_prefixes matching searched target @@ -22,50 +32,75 @@ func (c *Client) getTargetFileMeta(target string) (data.TargetFileMeta, error) { // - 5.6.7.2 terminations delegations, err := targets.NewDelegationsIterator(target, c.db) if err != nil { - return data.TargetFileMeta{}, err + return data.TargetFileMeta{}, nil, err } + targetFileMeta := data.TargetFileMeta{} + delegationRole := "" + for i := 0; i < c.MaxDelegations; i++ { d, ok := delegations.Next() if !ok { - return data.TargetFileMeta{}, ErrUnknownTarget{target, snapshot.Version} + return data.TargetFileMeta{}, nil, ErrUnknownTarget{target, snapshot.Version} } // covers 5.6.{1,2,3,4,5,6} targets, err := c.loadDelegatedTargets(snapshot, d.Delegatee.Name, d.DB) if err != nil { - return data.TargetFileMeta{}, err + return data.TargetFileMeta{}, nil, err } // stop when the searched TargetFileMeta is found if m, ok := targets.Targets[target]; ok { - return m, nil + delegationRole = d.Delegatee.Name + targetFileMeta = m + break } if targets.Delegations != nil { delegationsDB, err := verify.NewDBFromDelegations(targets.Delegations) if err != nil { - return data.TargetFileMeta{}, err + return data.TargetFileMeta{}, nil, err } err = delegations.Add(targets.Delegations.Roles, d.Delegatee.Name, delegationsDB) if err != nil { - return data.TargetFileMeta{}, err + return data.TargetFileMeta{}, nil, err } } } - return data.TargetFileMeta{}, ErrMaxDelegations{ + if len(delegationRole) > 0 { + return targetFileMeta, buildPath(delegations.Parent, delegationRole, ""), 
nil + } + + return data.TargetFileMeta{}, nil, ErrMaxDelegations{ Target: target, MaxDelegations: c.MaxDelegations, SnapshotVersion: snapshot.Version, } } +func buildPath(parent func(string) string, start string, end string) []string { + if start == end { + return nil + } + + path := []string{start} + current := start + for { + current = parent(current) + if current == end { + break + } + path = append(path, current) + } + return path +} + func (c *Client) loadLocalSnapshot() (*data.Snapshot, error) { if err := c.getLocalMeta(); err != nil { return nil, err } - rawS, ok := c.localMeta["snapshot.json"] if !ok { return nil, ErrNoLocalSnapshot diff --git a/vendor/github.com/DataDog/go-tuf/client/file_store.go b/vendor/github.com/DataDog/go-tuf/client/file_store.go new file mode 100644 index 000000000..520bbe73a --- /dev/null +++ b/vendor/github.com/DataDog/go-tuf/client/file_store.go @@ -0,0 +1,90 @@ +package client + +import ( + "bytes" + "errors" + "fmt" + "io" + "io/fs" +) + +// FileRemoteStore provides a RemoteStore interface compatible +// implementation that can be used where the RemoteStore is backed by a +// fs.FS. This is useful for example in air-gapped environments where there's no +// possibility to make outbound network connections. +// Backing this with a fs.FS instead of real directories allows the repository +// to be backed by something that's not persisted to disk. +func NewFileRemoteStore(fsys fs.FS, targetDir string) (*FileRemoteStore, error) { + if fsys == nil { + return nil, errors.New("nil fs.FS") + } + t := targetDir + if t == "" { + t = "targets" + } + // Make sure directory exists + d, err := fsys.Open(t) + if err != nil { + return nil, fmt.Errorf("failed to open targets directory %s: %w", t, err) + } + fi, err := d.Stat() + if err != nil { + return nil, fmt.Errorf("failed to stat targets directory %s: %w", t, err) + } + if !fi.IsDir() { + return nil, fmt.Errorf("targets directory not a directory %s", t) + } + + fsysT, err := fs.Sub(fsys, t) + if err != nil { + return nil, fmt.Errorf("failed to open targets directory %s: %w", t, err) + } + return &FileRemoteStore{fsys: fsys, targetDir: fsysT}, nil +} + +type FileRemoteStore struct { + // Meta directory fs + fsys fs.FS + // Target directory fs. + targetDir fs.FS + // In order to be able to make write operations (create, delete) we can't + // use fs.FS for it (it's read only), so we have to know the underlying + // directory that add/delete test methods can use. This is only necessary + // for testing purposes. + testDir string +} + +func (f *FileRemoteStore) GetMeta(name string) (io.ReadCloser, int64, error) { + rc, b, err := f.get(f.fsys, name) + return handleErrors(name, rc, b, err) +} + +func (f *FileRemoteStore) GetTarget(name string) (io.ReadCloser, int64, error) { + rc, b, err := f.get(f.targetDir, name) + return handleErrors(name, rc, b, err) +} + +func (f *FileRemoteStore) get(fsys fs.FS, s string) (io.ReadCloser, int64, error) { + if !fs.ValidPath(s) { + return nil, 0, fmt.Errorf("invalid path %s", s) + } + + b, err := fs.ReadFile(fsys, s) + if err != nil { + return nil, -1, err + } + return io.NopCloser(bytes.NewReader(b)), int64(len(b)), nil +} + +// handleErrors converts NotFound errors to something that TUF knows how to +// handle properly. For example, when looking for n+1 root files, this is a +// signal that it will stop looking.
+func handleErrors(name string, rc io.ReadCloser, b int64, err error) (io.ReadCloser, int64, error) { + if err == nil { + return rc, b, err + } + if errors.Is(err, fs.ErrNotExist) { + return rc, b, ErrNotFound{name} + } + return rc, b, err +} diff --git a/vendor/github.com/DataDog/go-tuf/data/types.go b/vendor/github.com/DataDog/go-tuf/data/types.go index 44d9bf13c..eb00489b6 100644 --- a/vendor/github.com/DataDog/go-tuf/data/types.go +++ b/vendor/github.com/DataDog/go-tuf/data/types.go @@ -1,12 +1,13 @@ package data import ( + "bytes" "crypto/sha256" "encoding/hex" "encoding/json" "errors" "fmt" - "path/filepath" + "path" "strings" "sync" "time" @@ -14,18 +15,32 @@ import ( "github.com/secure-systems-lab/go-securesystemslib/cjson" ) +type KeyType string + +type KeyScheme string + +type HashAlgorithm string + const ( - KeyIDLength = sha256.Size * 2 - KeyTypeEd25519 = "ed25519" - KeyTypeECDSA_SHA2_P256 = "ecdsa-sha2-nistp256" - KeySchemeEd25519 = "ed25519" - KeySchemeECDSA_SHA2_P256 = "ecdsa-sha2-nistp256" - KeyTypeRSASSA_PSS_SHA256 = "rsa" - KeySchemeRSASSA_PSS_SHA256 = "rsassa-pss-sha256" + KeyIDLength = sha256.Size * 2 + + KeyTypeEd25519 KeyType = "ed25519" + // From version 1.0.32, the reference implementation defines 'ecdsa', + // not 'ecdsa-sha2-nistp256' for NIST P-256 curves. + KeyTypeECDSA_SHA2_P256 KeyType = "ecdsa" + KeyTypeECDSA_SHA2_P256_OLD_FMT KeyType = "ecdsa-sha2-nistp256" + KeyTypeRSASSA_PSS_SHA256 KeyType = "rsa" + + KeySchemeEd25519 KeyScheme = "ed25519" + KeySchemeECDSA_SHA2_P256 KeyScheme = "ecdsa-sha2-nistp256" + KeySchemeRSASSA_PSS_SHA256 KeyScheme = "rsassa-pss-sha256" + + HashAlgorithmSHA256 HashAlgorithm = "sha256" + HashAlgorithmSHA512 HashAlgorithm = "sha512" ) var ( - HashAlgorithms = []string{"sha256", "sha512"} + HashAlgorithms = []HashAlgorithm{HashAlgorithmSHA256, HashAlgorithmSHA512} ErrPathsAndPathHashesSet = errors.New("tuf: failed validation of delegated target: paths and path_hash_prefixes are both set") ) @@ -40,9 +55,9 @@ type Signature struct { } type PublicKey struct { - Type string `json:"keytype"` - Scheme string `json:"scheme"` - Algorithms []string `json:"keyid_hash_algorithms,omitempty"` + Type KeyType `json:"keytype"` + Scheme KeyScheme `json:"scheme"` + Algorithms []HashAlgorithm `json:"keyid_hash_algorithms,omitempty"` Value json.RawMessage `json:"keyval"` ids []string @@ -50,9 +65,9 @@ type PublicKey struct { } type PrivateKey struct { - Type string `json:"keytype"` - Scheme string `json:"scheme,omitempty"` - Algorithms []string `json:"keyid_hash_algorithms,omitempty"` + Type KeyType `json:"keytype"` + Scheme KeyScheme `json:"scheme,omitempty"` + Algorithms []HashAlgorithm `json:"keyid_hash_algorithms,omitempty"` Value json.RawMessage `json:"keyval"` } @@ -147,29 +162,29 @@ func (r *Role) AddKeyIDs(ids []string) bool { return changed } -type Files map[string]FileMeta - -type FileMeta struct { - Length int64 `json:"length,omitempty"` - Hashes Hashes `json:"hashes,omitempty"` - Custom *json.RawMessage `json:"custom,omitempty"` -} +type Files map[string]TargetFileMeta type Hashes map[string]HexBytes -func (f FileMeta) HashAlgorithms() []string { - funcs := make([]string, 0, len(f.Hashes)) - for name := range f.Hashes { +func (f Hashes) HashAlgorithms() []string { + funcs := make([]string, 0, len(f)) + for name := range f { funcs = append(funcs, name) } return funcs } -type SnapshotFileMeta struct { - FileMeta - Version int64 `json:"version"` +type metapathFileMeta struct { + Length int64 `json:"length,omitempty"` + Hashes Hashes 
`json:"hashes,omitempty"` + Version int64 `json:"version"` + Custom *json.RawMessage `json:"custom,omitempty"` } +// SnapshotFileMeta is the meta field of a snapshot +// Note: Contains a `custom` field +type SnapshotFileMeta metapathFileMeta + type SnapshotFiles map[string]SnapshotFileMeta type Snapshot struct { @@ -190,14 +205,20 @@ func NewSnapshot() *Snapshot { } } +type FileMeta struct { + Length int64 `json:"length"` + Hashes Hashes `json:"hashes"` +} + type TargetFiles map[string]TargetFileMeta type TargetFileMeta struct { FileMeta + Custom *json.RawMessage `json:"custom,omitempty"` } func (f TargetFileMeta) HashAlgorithms() []string { - return f.FileMeta.HashAlgorithms() + return f.FileMeta.Hashes.HashAlgorithms() } type Targets struct { @@ -237,7 +258,7 @@ func (d *DelegatedRole) MatchesPath(file string) (bool, error) { } for _, pattern := range d.Paths { - if matched, _ := filepath.Match(pattern, file); matched { + if matched, _ := path.Match(pattern, file); matched { return true, nil } } @@ -253,7 +274,7 @@ func (d *DelegatedRole) MatchesPath(file string) (bool, error) { } // validatePaths enforces the spec -// https://DataDog.github.io/specification/v1.0.19/index.html#file-formats-targets +// https://theupdateframework.github.io/specification/v1.0.19/index.html#file-formats-targets // 'role MUST specify only one of the "path_hash_prefixes" or "paths"' // Marshalling and unmarshalling JSON will fail and return // ErrPathsAndPathHashesSet if both fields are set and not empty. @@ -284,7 +305,11 @@ func (d *DelegatedRole) MarshalJSON() ([]byte, error) { func (d *DelegatedRole) UnmarshalJSON(b []byte) error { type delegatedRoleAlias DelegatedRole - if err := json.Unmarshal(b, (*delegatedRoleAlias)(d)); err != nil { + // Prepare decoder + dec := json.NewDecoder(bytes.NewReader(b)) + + // Unmarshal delegated role + if err := dec.Decode((*delegatedRoleAlias)(d)); err != nil { return err } @@ -300,10 +325,7 @@ func NewTargets() *Targets { } } -type TimestampFileMeta struct { - FileMeta - Version int64 `json:"version"` -} +type TimestampFileMeta metapathFileMeta type TimestampFiles map[string]TimestampFileMeta diff --git a/vendor/github.com/DataDog/go-tuf/internal/roles/roles.go b/vendor/github.com/DataDog/go-tuf/internal/roles/roles.go index f7841c268..0b134b2a0 100644 --- a/vendor/github.com/DataDog/go-tuf/internal/roles/roles.go +++ b/vendor/github.com/DataDog/go-tuf/internal/roles/roles.go @@ -22,6 +22,13 @@ func IsDelegatedTargetsRole(name string) bool { } func IsTopLevelManifest(name string) bool { + if IsVersionedManifest(name) { + var found bool + _, name, found = strings.Cut(name, ".") + if !found { + panic("expected a versioned manifest of the form x.role.json") + } + } return IsTopLevelRole(strings.TrimSuffix(name, ".json")) } diff --git a/vendor/github.com/DataDog/go-tuf/pkg/keys/deprecated_ecdsa.go b/vendor/github.com/DataDog/go-tuf/pkg/keys/deprecated_ecdsa.go new file mode 100644 index 000000000..6c4c20682 --- /dev/null +++ b/vendor/github.com/DataDog/go-tuf/pkg/keys/deprecated_ecdsa.go @@ -0,0 +1,101 @@ +package keys + +import ( + "bytes" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/sha256" + "encoding/json" + "errors" + "fmt" + "io" + + "github.com/DataDog/go-tuf/data" +) + +func NewDeprecatedEcdsaVerifier() Verifier { + return &ecdsaVerifierWithDeprecatedSupport{} +} + +type ecdsaVerifierWithDeprecatedSupport struct { + key *data.PublicKey + // This will switch based on whether this is a PEM-encoded key + // or a deprecated hex-encoded key. 
+ Verifier +} + +func (p *ecdsaVerifierWithDeprecatedSupport) UnmarshalPublicKey(key *data.PublicKey) error { + p.key = key + pemVerifier := &EcdsaVerifier{} + if err := pemVerifier.UnmarshalPublicKey(key); err != nil { + // Try the deprecated hex-encoded verifier + hexVerifier := &deprecatedP256Verifier{} + if err := hexVerifier.UnmarshalPublicKey(key); err != nil { + return err + } + p.Verifier = hexVerifier + return nil + } + p.Verifier = pemVerifier + return nil +} + +/* + Deprecated ecdsaVerifier that used hex-encoded public keys. + This MAY be used to verify existing metadata that used this + old format. This will be deprecated soon; ensure that repositories + are re-signed and clients receive a fully compliant root. +*/ + +type deprecatedP256Verifier struct { + PublicKey data.HexBytes `json:"public"` + key *data.PublicKey +} + +func (p *deprecatedP256Verifier) Public() string { + return p.PublicKey.String() +} + +func (p *deprecatedP256Verifier) Verify(msg, sigBytes []byte) error { + x, y := elliptic.Unmarshal(elliptic.P256(), p.PublicKey) + k := &ecdsa.PublicKey{ + Curve: elliptic.P256(), + X: x, + Y: y, + } + + hash := sha256.Sum256(msg) + + if !ecdsa.VerifyASN1(k, hash[:], sigBytes) { + return errors.New("tuf: deprecated ecdsa signature verification failed") + } + return nil +} + +func (p *deprecatedP256Verifier) MarshalPublicKey() *data.PublicKey { + return p.key +} + +func (p *deprecatedP256Verifier) UnmarshalPublicKey(key *data.PublicKey) error { + // Prepare decoder limited to 512Kb + dec := json.NewDecoder(io.LimitReader(bytes.NewReader(key.Value), MaxJSONKeySize)) + + // Unmarshal key value + if err := dec.Decode(p); err != nil { + if errors.Is(err, io.EOF) || errors.Is(err, io.ErrUnexpectedEOF) { + return fmt.Errorf("tuf: the public key is truncated or too large: %w", err) + } + return err + } + + curve := elliptic.P256() + + // Parse as uncompressed marshalled point. + x, _ := elliptic.Unmarshal(curve, p.PublicKey) + if x == nil { + return errors.New("tuf: invalid ecdsa public key point") + } + + p.key = key + return nil +} diff --git a/vendor/github.com/DataDog/go-tuf/pkg/keys/ecdsa.go b/vendor/github.com/DataDog/go-tuf/pkg/keys/ecdsa.go index bfd9b69c2..ea75b97e8 100644 --- a/vendor/github.com/DataDog/go-tuf/pkg/keys/ecdsa.go +++ b/vendor/github.com/DataDog/go-tuf/pkg/keys/ecdsa.go @@ -1,71 +1,173 @@ package keys import ( + "bytes" "crypto/ecdsa" "crypto/elliptic" + "crypto/rand" "crypto/sha256" - "encoding/asn1" + "crypto/x509" "encoding/json" + "encoding/pem" "errors" - "math/big" + "fmt" + "io" "github.com/DataDog/go-tuf/data" ) func init() { - VerifierMap.Store(data.KeyTypeECDSA_SHA2_P256, NewEcdsaVerifier) + // Note: we use LoadOrStore here to prevent accidentally overriding + // an explicit deprecated ECDSA verifier. + // TODO: When deprecated ECDSA is removed, this can switch back to Store.
+ VerifierMap.LoadOrStore(data.KeyTypeECDSA_SHA2_P256_OLD_FMT, NewEcdsaVerifier) + VerifierMap.LoadOrStore(data.KeyTypeECDSA_SHA2_P256, NewEcdsaVerifier) + SignerMap.Store(data.KeyTypeECDSA_SHA2_P256_OLD_FMT, newEcdsaSigner) + SignerMap.Store(data.KeyTypeECDSA_SHA2_P256, newEcdsaSigner) } func NewEcdsaVerifier() Verifier { - return &p256Verifier{} + return &EcdsaVerifier{} } -type ecdsaSignature struct { - R, S *big.Int +func newEcdsaSigner() Signer { + return &ecdsaSigner{} } -type p256Verifier struct { - PublicKey data.HexBytes `json:"public"` +type EcdsaVerifier struct { + PublicKey *PKIXPublicKey `json:"public"` + ecdsaKey *ecdsa.PublicKey key *data.PublicKey } -func (p *p256Verifier) Public() string { - return p.PublicKey.String() +func (p *EcdsaVerifier) Public() string { + // This is already verified to succeed when unmarshalling a public key. + r, err := x509.MarshalPKIXPublicKey(p.ecdsaKey) + if err != nil { + // TODO: Gracefully handle these errors. + // See https://github.com/DataDog/go-tuf/issues/363 + panic(err) + } + return string(r) } -func (p *p256Verifier) Verify(msg, sigBytes []byte) error { - x, y := elliptic.Unmarshal(elliptic.P256(), p.PublicKey) - k := &ecdsa.PublicKey{ - Curve: elliptic.P256(), - X: x, - Y: y, +func (p *EcdsaVerifier) Verify(msg, sigBytes []byte) error { + hash := sha256.Sum256(msg) + + if !ecdsa.VerifyASN1(p.ecdsaKey, hash[:], sigBytes) { + return errors.New("tuf: ecdsa signature verification failed") } + return nil +} + +func (p *EcdsaVerifier) MarshalPublicKey() *data.PublicKey { + return p.key +} + +func (p *EcdsaVerifier) UnmarshalPublicKey(key *data.PublicKey) error { + // Prepare decoder limited to 512Kb + dec := json.NewDecoder(io.LimitReader(bytes.NewReader(key.Value), MaxJSONKeySize)) - var sig ecdsaSignature - if _, err := asn1.Unmarshal(sigBytes, &sig); err != nil { + // Unmarshal key value + if err := dec.Decode(p); err != nil { + if errors.Is(err, io.EOF) || errors.Is(err, io.ErrUnexpectedEOF) { + return fmt.Errorf("tuf: the public key is truncated or too large: %w", err) + } return err } - hash := sha256.Sum256(msg) + ecdsaKey, ok := p.PublicKey.PublicKey.(*ecdsa.PublicKey) + if !ok { + return fmt.Errorf("invalid public key") + } - if !ecdsa.Verify(k, hash[:], sig.R, sig.S) { - return errors.New("tuf: ecdsa signature verification failed") + if _, err := x509.MarshalPKIXPublicKey(ecdsaKey); err != nil { + return fmt.Errorf("marshalling to PKIX key: invalid public key") } + + p.ecdsaKey = ecdsaKey + p.key = key return nil } -func (p *p256Verifier) MarshalPublicKey() *data.PublicKey { - return p.key +type ecdsaSigner struct { + *ecdsa.PrivateKey +} + +type ecdsaPrivateKeyValue struct { + Private string `json:"private"` + Public *PKIXPublicKey `json:"public"` } -func (p *p256Verifier) UnmarshalPublicKey(key *data.PublicKey) error { - if err := json.Unmarshal(key.Value, p); err != nil { +func (s *ecdsaSigner) PublicData() *data.PublicKey { + // This uses a trusted public key JSON format with a trusted Public value. 
+ keyValBytes, _ := json.Marshal(EcdsaVerifier{PublicKey: &PKIXPublicKey{PublicKey: s.Public()}}) + return &data.PublicKey{ + Type: data.KeyTypeECDSA_SHA2_P256, + Scheme: data.KeySchemeECDSA_SHA2_P256, + Algorithms: data.HashAlgorithms, + Value: keyValBytes, + } +} + +func (s *ecdsaSigner) SignMessage(message []byte) ([]byte, error) { + hash := sha256.Sum256(message) + return ecdsa.SignASN1(rand.Reader, s.PrivateKey, hash[:]) +} + +func (s *ecdsaSigner) MarshalPrivateKey() (*data.PrivateKey, error) { + priv, err := x509.MarshalECPrivateKey(s.PrivateKey) + if err != nil { + return nil, err + } + pemKey := pem.EncodeToMemory(&pem.Block{Type: "EC PRIVATE KEY", Bytes: priv}) + val, err := json.Marshal(ecdsaPrivateKeyValue{ + Private: string(pemKey), + Public: &PKIXPublicKey{PublicKey: s.Public()}, + }) + if err != nil { + return nil, err + } + return &data.PrivateKey{ + Type: data.KeyTypeECDSA_SHA2_P256, + Scheme: data.KeySchemeECDSA_SHA2_P256, + Algorithms: data.HashAlgorithms, + Value: val, + }, nil +} + +func (s *ecdsaSigner) UnmarshalPrivateKey(key *data.PrivateKey) error { + val := ecdsaPrivateKeyValue{} + if err := json.Unmarshal(key.Value, &val); err != nil { return err } - x, _ := elliptic.Unmarshal(elliptic.P256(), p.PublicKey) - if x == nil { - return errors.New("tuf: invalid ecdsa public key point") + block, _ := pem.Decode([]byte(val.Private)) + if block == nil { + return errors.New("invalid PEM value") } - p.key = key + if block.Type != "EC PRIVATE KEY" { + return fmt.Errorf("invalid block type: %s", block.Type) + } + k, err := x509.ParseECPrivateKey(block.Bytes) + if err != nil { + return err + } + if k.Curve != elliptic.P256() { + return errors.New("unsupported ecdsa curve") + } + if _, err := json.Marshal(EcdsaVerifier{ + PublicKey: &PKIXPublicKey{PublicKey: k.Public()}}); err != nil { + return fmt.Errorf("invalid public key: %s", err) + } + + s.PrivateKey = k return nil } + +func GenerateEcdsaKey() (*ecdsaSigner, error) { + privkey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + if err != nil { + return nil, err + } + return &ecdsaSigner{privkey}, nil +} diff --git a/vendor/github.com/DataDog/go-tuf/pkg/keys/ed25519.go b/vendor/github.com/DataDog/go-tuf/pkg/keys/ed25519.go index 130f786b7..4667147fd 100644 --- a/vendor/github.com/DataDog/go-tuf/pkg/keys/ed25519.go +++ b/vendor/github.com/DataDog/go-tuf/pkg/keys/ed25519.go @@ -1,25 +1,29 @@ package keys import ( + "bytes" "crypto" "crypto/ed25519" "crypto/rand" + "crypto/subtle" "encoding/json" "errors" + "fmt" + "io" "github.com/DataDog/go-tuf/data" ) func init() { - SignerMap.Store(data.KeySchemeEd25519, NewP256Signer) - VerifierMap.Store(data.KeySchemeEd25519, NewP256Verifier) + SignerMap.Store(data.KeyTypeEd25519, NewEd25519Signer) + VerifierMap.Store(data.KeyTypeEd25519, NewEd25519Verifier) } -func NewP256Signer() Signer { +func NewEd25519Signer() Signer { return &ed25519Signer{} } -func NewP256Verifier() Verifier { +func NewEd25519Verifier() Verifier { return &ed25519Verifier{} } @@ -45,11 +49,19 @@ func (e *ed25519Verifier) MarshalPublicKey() *data.PublicKey { func (e *ed25519Verifier) UnmarshalPublicKey(key *data.PublicKey) error { e.key = key - if err := json.Unmarshal(key.Value, e); err != nil { + + // Prepare decoder limited to 512Kb + dec := json.NewDecoder(io.LimitReader(bytes.NewReader(key.Value), MaxJSONKeySize)) + + // Unmarshal key value + if err := dec.Decode(e); err != nil { + if errors.Is(err, io.EOF) || errors.Is(err, io.ErrUnexpectedEOF) { + return fmt.Errorf("tuf: the public key is truncated or 
too large: %w", err) + } return err } - if len(e.PublicKey) != ed25519.PublicKeySize { - return errors.New("tuf: unexpected public key length for ed25519 key") + if n := len(e.PublicKey); n != ed25519.PublicKeySize { + return fmt.Errorf("tuf: unexpected public key length for ed25519 key, expected %d, got %d", ed25519.PublicKeySize, n) } return nil } @@ -61,10 +73,6 @@ type Ed25519PrivateKeyValue struct { type ed25519Signer struct { ed25519.PrivateKey - - keyType string - keyScheme string - keyAlgorithms []string } func GenerateEd25519Key() (*ed25519Signer, error) { @@ -76,19 +84,13 @@ func GenerateEd25519Key() (*ed25519Signer, error) { return nil, err } return &ed25519Signer{ - PrivateKey: ed25519.PrivateKey(data.HexBytes(private)), - keyType: data.KeyTypeEd25519, - keyScheme: data.KeySchemeEd25519, - keyAlgorithms: data.HashAlgorithms, + PrivateKey: ed25519.PrivateKey(data.HexBytes(private)), }, nil } -func NewEd25519Signer(keyValue Ed25519PrivateKeyValue) *ed25519Signer { +func NewEd25519SignerFromKey(keyValue Ed25519PrivateKeyValue) *ed25519Signer { return &ed25519Signer{ - PrivateKey: ed25519.PrivateKey(data.HexBytes(keyValue.Private)), - keyType: data.KeyTypeEd25519, - keyScheme: data.KeySchemeEd25519, - keyAlgorithms: data.HashAlgorithms, + PrivateKey: ed25519.PrivateKey(data.HexBytes(keyValue.Private)), } } @@ -105,23 +107,45 @@ func (e *ed25519Signer) MarshalPrivateKey() (*data.PrivateKey, error) { return nil, err } return &data.PrivateKey{ - Type: e.keyType, - Scheme: e.keyScheme, - Algorithms: e.keyAlgorithms, + Type: data.KeyTypeEd25519, + Scheme: data.KeySchemeEd25519, + Algorithms: data.HashAlgorithms, Value: valueBytes, }, nil } func (e *ed25519Signer) UnmarshalPrivateKey(key *data.PrivateKey) error { keyValue := &Ed25519PrivateKeyValue{} - if err := json.Unmarshal(key.Value, keyValue); err != nil { - return err + + // Prepare decoder limited to 512Kb + dec := json.NewDecoder(io.LimitReader(bytes.NewReader(key.Value), MaxJSONKeySize)) + + // Unmarshal key value + if err := dec.Decode(keyValue); err != nil { + if errors.Is(err, io.EOF) || errors.Is(err, io.ErrUnexpectedEOF) { + return fmt.Errorf("tuf: the private key is truncated or too large: %w", err) + } + } + + // Check private key length + if n := len(keyValue.Private); n != ed25519.PrivateKeySize { + return fmt.Errorf("tuf: invalid ed25519 private key length, expected %d, got %d", ed25519.PrivateKeySize, n) + } + + // Generate public key from private key + pub, _, err := ed25519.GenerateKey(bytes.NewReader(keyValue.Private)) + if err != nil { + return fmt.Errorf("tuf: unable to derive public key from private key: %w", err) } + + // Compare keys + if subtle.ConstantTimeCompare(keyValue.Public, pub) != 1 { + return errors.New("tuf: public and private keys don't match") + } + + // Prepare signer *e = ed25519Signer{ - PrivateKey: ed25519.PrivateKey(data.HexBytes(keyValue.Private)), - keyType: key.Type, - keyScheme: key.Scheme, - keyAlgorithms: key.Algorithms, + PrivateKey: ed25519.PrivateKey(data.HexBytes(keyValue.Private)), } return nil } @@ -129,9 +153,9 @@ func (e *ed25519Signer) UnmarshalPrivateKey(key *data.PrivateKey) error { func (e *ed25519Signer) PublicData() *data.PublicKey { keyValBytes, _ := json.Marshal(ed25519Verifier{PublicKey: []byte(e.PrivateKey.Public().(ed25519.PublicKey))}) return &data.PublicKey{ - Type: e.keyType, - Scheme: e.keyScheme, - Algorithms: e.keyAlgorithms, + Type: data.KeyTypeEd25519, + Scheme: data.KeySchemeEd25519, + Algorithms: data.HashAlgorithms, Value: keyValBytes, } } diff --git 
a/vendor/github.com/DataDog/go-tuf/pkg/keys/keys.go b/vendor/github.com/DataDog/go-tuf/pkg/keys/keys.go index b8ef3f24c..7fc25316e 100644 --- a/vendor/github.com/DataDog/go-tuf/pkg/keys/keys.go +++ b/vendor/github.com/DataDog/go-tuf/pkg/keys/keys.go @@ -8,6 +8,9 @@ import ( "github.com/DataDog/go-tuf/data" ) +// MaxJSONKeySize defines the maximum length of a JSON payload. +const MaxJSONKeySize = 512 * 1024 // 512Kb + // SignerMap stores mapping between key type strings and signer constructors. var SignerMap sync.Map diff --git a/vendor/github.com/DataDog/go-tuf/pkg/keys/pkix.go b/vendor/github.com/DataDog/go-tuf/pkg/keys/pkix.go new file mode 100644 index 000000000..e58d4c9f8 --- /dev/null +++ b/vendor/github.com/DataDog/go-tuf/pkg/keys/pkix.go @@ -0,0 +1,56 @@ +package keys + +import ( + "bytes" + "crypto" + "crypto/x509" + "encoding/json" + "encoding/pem" + "errors" + "fmt" + "io" +) + +type PKIXPublicKey struct { + crypto.PublicKey +} + +func (p *PKIXPublicKey) MarshalJSON() ([]byte, error) { + bytes, err := x509.MarshalPKIXPublicKey(p.PublicKey) + if err != nil { + return nil, err + } + pemBytes := pem.EncodeToMemory(&pem.Block{ + Type: "PUBLIC KEY", + Bytes: bytes, + }) + return json.Marshal(string(pemBytes)) +} + +func (p *PKIXPublicKey) UnmarshalJSON(b []byte) error { + var pemValue string + // Prepare decoder limited to 512Kb + dec := json.NewDecoder(io.LimitReader(bytes.NewReader(b), MaxJSONKeySize)) + + // Unmarshal key value + if err := dec.Decode(&pemValue); err != nil { + if errors.Is(err, io.EOF) || errors.Is(err, io.ErrUnexpectedEOF) { + return fmt.Errorf("tuf: the public key is truncated or too large: %w", err) + } + return err + } + + block, _ := pem.Decode([]byte(pemValue)) + if block == nil { + return errors.New("invalid PEM value") + } + if block.Type != "PUBLIC KEY" { + return fmt.Errorf("invalid block type: %s", block.Type) + } + pub, err := x509.ParsePKIXPublicKey(block.Bytes) + if err != nil { + return err + } + p.PublicKey = pub + return nil +} diff --git a/vendor/github.com/DataDog/go-tuf/pkg/keys/rsa.go b/vendor/github.com/DataDog/go-tuf/pkg/keys/rsa.go index 3c73e6d80..17d7690a7 100644 --- a/vendor/github.com/DataDog/go-tuf/pkg/keys/rsa.go +++ b/vendor/github.com/DataDog/go-tuf/pkg/keys/rsa.go @@ -1,6 +1,7 @@ package keys import ( + "bytes" "crypto" "crypto/rand" "crypto/rsa" @@ -9,36 +10,38 @@ import ( "encoding/json" "encoding/pem" "errors" + "fmt" + "io" "github.com/DataDog/go-tuf/data" ) func init() { - VerifierMap.Store(data.KeyTypeRSASSA_PSS_SHA256, NewRsaVerifier) - SignerMap.Store(data.KeyTypeRSASSA_PSS_SHA256, NewRsaSigner) + VerifierMap.Store(data.KeyTypeRSASSA_PSS_SHA256, newRsaVerifier) + SignerMap.Store(data.KeyTypeRSASSA_PSS_SHA256, newRsaSigner) } -func NewRsaVerifier() Verifier { +func newRsaVerifier() Verifier { return &rsaVerifier{} } -func NewRsaSigner() Signer { +func newRsaSigner() Signer { return &rsaSigner{} } type rsaVerifier struct { - PublicKey string `json:"public"` + PublicKey *PKIXPublicKey `json:"public"` rsaKey *rsa.PublicKey key *data.PublicKey } func (p *rsaVerifier) Public() string { - // Unique public key identifier, use a uniform encodng + // This is already verified to succeed when unmarshalling a public key. r, err := x509.MarshalPKIXPublicKey(p.rsaKey) if err != nil { - // This shouldn't happen with a valid rsa key, but fallback on the - // JSON public key string - return string(p.PublicKey) + // TODO: Gracefully handle these errors. 
+ // See https://github.com/DataDog/go-tuf/issues/363 + panic(err) } return string(r) } @@ -54,56 +57,42 @@ func (p *rsaVerifier) MarshalPublicKey() *data.PublicKey { } func (p *rsaVerifier) UnmarshalPublicKey(key *data.PublicKey) error { - if err := json.Unmarshal(key.Value, p); err != nil { - return err - } - var err error - p.rsaKey, err = parseKey(p.PublicKey) - if err != nil { + // Prepare decoder limited to 512Kb + dec := json.NewDecoder(io.LimitReader(bytes.NewReader(key.Value), MaxJSONKeySize)) + + // Unmarshal key value + if err := dec.Decode(p); err != nil { + if errors.Is(err, io.EOF) || errors.Is(err, io.ErrUnexpectedEOF) { + return fmt.Errorf("tuf: the public key is truncated or too large: %w", err) + } return err } - p.key = key - return nil -} -// parseKey tries to parse a PEM []byte slice by attempting PKCS1 and PKIX in order. -func parseKey(data string) (*rsa.PublicKey, error) { - block, _ := pem.Decode([]byte(data)) - if block == nil { - return nil, errors.New("tuf: pem decoding public key failed") + rsaKey, ok := p.PublicKey.PublicKey.(*rsa.PublicKey) + if !ok { + return fmt.Errorf("invalid public key") } - rsaPub, err := x509.ParsePKCS1PublicKey(block.Bytes) - if err == nil { - return rsaPub, nil - } - key, err := x509.ParsePKIXPublicKey(block.Bytes) - if err == nil { - rsaPub, ok := key.(*rsa.PublicKey) - if !ok { - return nil, errors.New("tuf: invalid rsa key") - } - return rsaPub, nil + + if _, err := x509.MarshalPKIXPublicKey(rsaKey); err != nil { + return fmt.Errorf("marshalling to PKIX key: invalid public key") } - return nil, errors.New("tuf: error unmarshalling rsa key") + + p.rsaKey = rsaKey + p.key = key + return nil } type rsaSigner struct { *rsa.PrivateKey } -type rsaPublic struct { - // PEM encoded public key. - PublicKey string `json:"public"` +type rsaPrivateKeyValue struct { + Private string `json:"private"` + Public *PKIXPublicKey `json:"public"` } func (s *rsaSigner) PublicData() *data.PublicKey { - pub, _ := x509.MarshalPKIXPublicKey(s.Public().(*rsa.PublicKey)) - pubBytes := pem.EncodeToMemory(&pem.Block{ - Type: "RSA PUBLIC KEY", - Bytes: pub, - }) - - keyValBytes, _ := json.Marshal(rsaPublic{PublicKey: string(pubBytes)}) + keyValBytes, _ := json.Marshal(rsaVerifier{PublicKey: &PKIXPublicKey{PublicKey: s.Public()}}) return &data.PublicKey{ Type: data.KeyTypeRSASSA_PSS_SHA256, Scheme: data.KeySchemeRSASSA_PSS_SHA256, @@ -122,11 +111,46 @@ func (s *rsaSigner) ContainsID(id string) bool { } func (s *rsaSigner) MarshalPrivateKey() (*data.PrivateKey, error) { - return nil, errors.New("not implemented for test") + priv := x509.MarshalPKCS1PrivateKey(s.PrivateKey) + pemKey := pem.EncodeToMemory(&pem.Block{Type: "RSA PRIVATE KEY", Bytes: priv}) + val, err := json.Marshal(rsaPrivateKeyValue{ + Private: string(pemKey), + Public: &PKIXPublicKey{PublicKey: s.Public()}, + }) + if err != nil { + return nil, err + } + return &data.PrivateKey{ + Type: data.KeyTypeRSASSA_PSS_SHA256, + Scheme: data.KeySchemeRSASSA_PSS_SHA256, + Algorithms: data.HashAlgorithms, + Value: val, + }, nil } func (s *rsaSigner) UnmarshalPrivateKey(key *data.PrivateKey) error { - return errors.New("not implemented for test") + val := rsaPrivateKeyValue{} + if err := json.Unmarshal(key.Value, &val); err != nil { + return err + } + block, _ := pem.Decode([]byte(val.Private)) + if block == nil { + return errors.New("invalid PEM value") + } + if block.Type != "RSA PRIVATE KEY" { + return fmt.Errorf("invalid block type: %s", block.Type) + } + k, err := x509.ParsePKCS1PrivateKey(block.Bytes) + if 
err != nil { + return err + } + if _, err := json.Marshal(rsaVerifier{ + PublicKey: &PKIXPublicKey{PublicKey: k.Public()}}); err != nil { + return fmt.Errorf("invalid public key: %s", err) + } + + s.PrivateKey = k + return nil } func GenerateRsaKey() (*rsaSigner, error) { diff --git a/vendor/github.com/DataDog/go-tuf/pkg/targets/delegation.go b/vendor/github.com/DataDog/go-tuf/pkg/targets/delegation.go index 22d2ceece..d155d070f 100644 --- a/vendor/github.com/DataDog/go-tuf/pkg/targets/delegation.go +++ b/vendor/github.com/DataDog/go-tuf/pkg/targets/delegation.go @@ -18,6 +18,7 @@ type delegationsIterator struct { stack []Delegation target string visitedRoles map[string]struct{} + parents map[string]string } var ErrTopLevelTargetsRoleMissing = errors.New("tuf: top level targets role missing from top level keys DB") @@ -43,6 +44,7 @@ func NewDelegationsIterator(target string, topLevelKeysDB *verify.DB) (*delegati }, }, visitedRoles: make(map[string]struct{}), + parents: make(map[string]string), } return i, nil } @@ -64,7 +66,7 @@ func (d *delegationsIterator) Next() (value Delegation, ok bool) { // 5.6.7.2 trim delegations to visit, only the current role and its delegations // will be considered - // https://github.com/DataDog/specification/issues/168 + // https://github.com/theupdateframework/specification/issues/168 if delegation.Delegatee.Terminating { // Empty the stack. d.stack = d.stack[0:0] @@ -88,8 +90,13 @@ func (d *delegationsIterator) Add(roles []data.DelegatedRole, delegator string, DB: db, } d.stack = append(d.stack, delegation) + d.parents[r.Name] = delegator } } return nil } + +func (d *delegationsIterator) Parent(role string) string { + return d.parents[role] +} diff --git a/vendor/github.com/DataDog/go-tuf/util/util.go b/vendor/github.com/DataDog/go-tuf/util/util.go index 4c1c0dff4..a9b23b007 100644 --- a/vendor/github.com/DataDog/go-tuf/util/util.go +++ b/vendor/github.com/DataDog/go-tuf/util/util.go @@ -10,7 +10,6 @@ import ( "fmt" "hash" "io" - "io/ioutil" "os" "path" "path/filepath" @@ -86,6 +85,32 @@ func FileMetaEqual(actual data.FileMeta, expected data.FileMeta) error { return nil } +func BytesMatchLenAndHashes(fetched []byte, length int64, hashes data.Hashes) error { + flen := int64(len(fetched)) + if length != 0 && flen != length { + return ErrWrongLength{length, flen} + } + + for alg, expected := range hashes { + var h hash.Hash + switch alg { + case "sha256": + h = sha256.New() + case "sha512": + h = sha512.New() + default: + return ErrUnknownHashAlgorithm{alg} + } + h.Write(fetched) + hash := h.Sum(nil) + if !hmac.Equal(hash, expected) { + return ErrWrongHash{alg, expected, hash} + } + } + + return nil +} + func hashEqual(actual data.Hashes, expected data.Hashes) error { hashChecked := false for typ, hash := range expected { @@ -102,7 +127,7 @@ func hashEqual(actual data.Hashes, expected data.Hashes) error { return nil } -func versionEqual(actual int64, expected int64) error { +func VersionEqual(actual int64, expected int64) error { if actual != expected { return ErrWrongVersion{expected, actual} } @@ -114,7 +139,7 @@ func SnapshotFileMetaEqual(actual data.SnapshotFileMeta, expected data.SnapshotF // member of snapshots. 
However they are considering requiring hashes // for delegated roles to avoid an attack described in Section 5.6 of // the Mercury paper: - // https://github.com/DataDog/specification/pull/40 + // https://github.com/theupdateframework/specification/pull/40 if expected.Length != 0 && actual.Length != expected.Length { return ErrWrongLength{expected.Length, actual.Length} } @@ -125,7 +150,7 @@ func SnapshotFileMetaEqual(actual data.SnapshotFileMeta, expected data.SnapshotF } } // 5.6.4 - Check against snapshot role's snapshot version - if err := versionEqual(actual.Version, expected.Version); err != nil { + if err := VersionEqual(actual.Version, expected.Version); err != nil { return err } @@ -149,7 +174,7 @@ func TimestampFileMetaEqual(actual data.TimestampFileMeta, expected data.Timesta } } // 5.5.4 - Check against timestamp role's snapshot version - if err := versionEqual(actual.Version, expected.Version); err != nil { + if err := VersionEqual(actual.Version, expected.Version); err != nil { return err } @@ -176,7 +201,7 @@ func GenerateFileMeta(r io.Reader, hashAlgorithms ...string) (data.FileMeta, err hashes[hashAlgorithm] = h r = io.TeeReader(r, h) } - n, err := io.Copy(ioutil.Discard, r) + n, err := io.Copy(io.Discard, r) if err != nil { return data.FileMeta{}, err } @@ -192,7 +217,7 @@ type versionedMeta struct { } func generateVersionedFileMeta(r io.Reader, hashAlgorithms ...string) (data.FileMeta, int64, error) { - b, err := ioutil.ReadAll(r) + b, err := io.ReadAll(r) if err != nil { return data.FileMeta{}, 0, err } @@ -221,8 +246,9 @@ func GenerateSnapshotFileMeta(r io.Reader, hashAlgorithms ...string) (data.Snaps return data.SnapshotFileMeta{}, err } return data.SnapshotFileMeta{ - FileMeta: m, - Version: v, + Length: m.Length, + Hashes: m.Hashes, + Version: v, }, nil } @@ -242,8 +268,9 @@ func GenerateTimestampFileMeta(r io.Reader, hashAlgorithms ...string) (data.Time return data.TimestampFileMeta{}, err } return data.TimestampFileMeta{ - FileMeta: m, - Version: v, + Length: m.Length, + Hashes: m.Hashes, + Version: v, }, nil } @@ -273,7 +300,7 @@ func HashedPaths(p string, hashes data.Hashes) []string { func AtomicallyWriteFile(filename string, data []byte, perm os.FileMode) error { dir, name := filepath.Split(filename) - f, err := ioutil.TempFile(dir, name) + f, err := os.CreateTemp(dir, name) if err != nil { return err } diff --git a/vendor/github.com/DataDog/go-tuf/verify/db.go b/vendor/github.com/DataDog/go-tuf/verify/db.go index 02b20063a..1961c98fb 100644 --- a/vendor/github.com/DataDog/go-tuf/verify/db.go +++ b/vendor/github.com/DataDog/go-tuf/verify/db.go @@ -53,13 +53,23 @@ func NewDBFromDelegations(d *data.Delegations) (*DB, error) { } func (db *DB) AddKey(id string, k *data.PublicKey) error { - if !k.ContainsID(id) { - return ErrWrongID{} - } verifier, err := keys.GetVerifier(k) if err != nil { - return ErrInvalidKey + return err // ErrInvalidKey + } + + // TUF is considering in TAP-12 removing the + // requirement that the keyid hash algorithm be derived + // from the public key. So to be forwards compatible, + // we allow any key ID, rather than checking k.ContainsID(id) + // + // AddKey should be idempotent, so we allow re-adding the same PublicKey. 
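The BytesMatchLenAndHashes helper added in the util.go hunk above is easy to exercise in isolation; the following minimal, dependency-free Go sketch mirrors the same length-then-hashes check. checkLenAndHashes and the plain map[string][]byte in place of data.Hashes are illustrative assumptions, not code from this patch. As in the helper, a zero expected length means the length check is skipped, and hmac.Equal supplies a constant-time comparison.

```go
package main

import (
	"crypto/hmac"
	"crypto/sha256"
	"crypto/sha512"
	"fmt"
)

// checkLenAndHashes mirrors the shape of BytesMatchLenAndHashes: verify
// an expected length (0 means "no expected length recorded") and every
// expected digest, using a constant-time comparison.
func checkLenAndHashes(fetched []byte, length int64, hashes map[string][]byte) error {
	if length != 0 && int64(len(fetched)) != length {
		return fmt.Errorf("wrong length: want %d, got %d", length, len(fetched))
	}
	for alg, want := range hashes {
		var got []byte
		switch alg {
		case "sha256":
			sum := sha256.Sum256(fetched)
			got = sum[:]
		case "sha512":
			sum := sha512.Sum512(fetched)
			got = sum[:]
		default:
			return fmt.Errorf("unknown hash algorithm %q", alg)
		}
		if !hmac.Equal(got, want) {
			return fmt.Errorf("wrong %s hash", alg)
		}
	}
	return nil
}

func main() {
	payload := []byte("hello tuf")
	sum := sha256.Sum256(payload)
	hashes := map[string][]byte{"sha256": sum[:]}
	fmt.Println(checkLenAndHashes(payload, int64(len(payload)), hashes)) // <nil>
}
```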
+ // + // TAP-12: https://github.com/theupdateframework/taps/blob/master/tap12.md + if oldVerifier, exists := db.verifiers[id]; exists && oldVerifier.Public() != verifier.Public() { + return ErrRepeatID{id} } + db.verifiers[id] = verifier return nil } @@ -74,9 +84,6 @@ func (db *DB) AddRole(name string, r *data.Role) error { Threshold: r.Threshold, } for _, id := range r.KeyIDs { - if len(id) != data.KeyIDLength { - return ErrInvalidKeyID - } role.KeyIDs[id] = struct{}{} } diff --git a/vendor/github.com/DataDog/go-tuf/verify/errors.go b/vendor/github.com/DataDog/go-tuf/verify/errors.go index f94321e29..f71d4bda9 100644 --- a/vendor/github.com/DataDog/go-tuf/verify/errors.go +++ b/vendor/github.com/DataDog/go-tuf/verify/errors.go @@ -21,10 +21,12 @@ var ( ErrMissingTargetFile = errors.New("tuf: missing previously listed targets metadata file") ) -type ErrWrongID struct{} +type ErrRepeatID struct { + KeyID string +} -func (ErrWrongID) Error() string { - return "tuf: key id mismatch" +func (e ErrRepeatID) Error() string { + return fmt.Sprintf("tuf: duplicate key id (%s)", e.KeyID) } type ErrUnknownRole struct { diff --git a/vendor/github.com/DataDog/go-tuf/verify/verify.go b/vendor/github.com/DataDog/go-tuf/verify/verify.go index e892c537a..b0cf333ca 100644 --- a/vendor/github.com/DataDog/go-tuf/verify/verify.go +++ b/vendor/github.com/DataDog/go-tuf/verify/verify.go @@ -7,6 +7,7 @@ import ( "github.com/DataDog/go-tuf/data" "github.com/DataDog/go-tuf/internal/roles" + "github.com/DataDog/go-tuf/pkg/keys" "github.com/secure-systems-lab/go-securesystemslib/cjson" ) @@ -16,6 +17,22 @@ type signedMeta struct { Version int64 `json:"version"` } +// VerifySignature takes a signed JSON message, a signature, and a +// verifier and verifies the given signature on the JSON message +// using the verifier. It returns an error if verification fails. +func VerifySignature(signed json.RawMessage, sig data.HexBytes, + verifier keys.Verifier) error { + var decoded map[string]interface{} + if err := json.Unmarshal(signed, &decoded); err != nil { + return err + } + msg, err := cjson.EncodeCanonical(decoded) + if err != nil { + return err + } + return verifier.Verify(msg, sig) +} + func (db *DB) VerifyIgnoreExpiredCheck(s *data.Signed, role string, minVersion int64) error { if err := db.VerifySignatures(s, role); err != nil { return err @@ -80,20 +97,11 @@ func (db *DB) VerifySignatures(s *data.Signed, role string) error { return ErrUnknownRole{role} } - var decoded map[string]interface{} - if err := json.Unmarshal(s.Signed, &decoded); err != nil { - return err - } - msg, err := cjson.EncodeCanonical(decoded) - if err != nil { - return err - } - // Verify that a threshold of keys signed the data. Since keys can have // multiple key ids, we need to protect against multiple attached // signatures that just differ on the key id. - seen := make(map[string]struct{}) - valid := 0 + verifiedKeyIDs := make(map[string]struct{}) + numVerifiedKeys := 0 for _, sig := range s.Signatures { if !roleData.ValidKey(sig.KeyID) { continue @@ -103,22 +111,37 @@ func (db *DB) VerifySignatures(s *data.Signed, role string) error { continue } - if err := verifier.Verify(msg, sig.Signature); err != nil { - return ErrInvalid + if err := VerifySignature(s.Signed, sig.Signature, verifier); err != nil { + // If a signature fails verification, don't count it towards the + // threshold but also don't return early and error out immediately. 
+ // Note: Because of this, it is impossible to distinguish between + // an error of an invalid signature and a threshold not achieved. + // Invalid signatures lead to not achieving the threshold. + continue } // Only consider this key valid if we haven't seen any of its // key ids before. - if _, ok := seen[sig.KeyID]; !ok { - for _, id := range verifier.MarshalPublicKey().IDs() { - seen[id] = struct{}{} + // Careful: we must not rely on the key IDs _declared in the file_, + // instead we get to decide what key IDs this key corresponds to. + // XXX dangerous; better stop supporting multiple key IDs altogether. + keyIDs := verifier.MarshalPublicKey().IDs() + wasKeySeen := false + for _, keyID := range keyIDs { + if _, present := verifiedKeyIDs[keyID]; present { + wasKeySeen = true + } + } + if !wasKeySeen { + for _, id := range keyIDs { + verifiedKeyIDs[id] = struct{}{} } - valid++ + numVerifiedKeys++ } } - if valid < roleData.Threshold { - return ErrRoleThreshold{roleData.Threshold, valid} + if numVerifiedKeys < roleData.Threshold { + return ErrRoleThreshold{roleData.Threshold, numVerifiedKeys} } return nil diff --git a/vendor/github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/logs/logs_translator.go b/vendor/github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/logs/logs_translator.go index bf0ba81cb..02e6b09e7 100644 --- a/vendor/github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/logs/logs_translator.go +++ b/vendor/github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/logs/logs_translator.go @@ -60,9 +60,6 @@ const ( logLevelFatal = "fatal" ) -// otelTag specifies a tag to be added to all logs sent from the Datadog exporter -const otelTag = "otel_source:datadog_exporter" - // Transform converts the log record in lr, which came in with the resource in res, to a Datadog log item. // The variable specifies if the log body should be sent as an attribute or as a plain message. 
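A note on the reserved-keyword handling that the Transform body below introduces: resource attributes named "hostname" or "service" would collide with fields of datadogV2.HTTPLogItem, so they are copied under an "otel." prefix instead. A minimal sketch of just that renaming rule, with plain maps standing in for pcommon.Map and AdditionalProperties; addResourceAttrs is a hypothetical name, not part of this patch.

```go
package main

import "fmt"

// addResourceAttrs copies resource attributes into a log item's
// additional properties, prefixing the reserved keys "hostname" and
// "service" so they are not overwritten when marshalling.
func addResourceAttrs(props map[string]string, attrs map[string]string) {
	for k, v := range attrs {
		if k == "hostname" || k == "service" {
			props["otel."+k] = v
		} else {
			props[k] = v
		}
	}
}

func main() {
	props := map[string]string{}
	addResourceAttrs(props, map[string]string{
		"hostname":        "web-1",
		"service":         "checkout",
		"deployment.name": "prod",
	})
	fmt.Println(props)
	// map[deployment.name:prod otel.hostname:web-1 otel.service:checkout]
}
```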
func Transform(lr plog.LogRecord, res pcommon.Resource, logger *zap.Logger) datadogV2.HTTPLogItem { @@ -113,7 +110,7 @@ func Transform(lr plog.LogRecord, res pcommon.Resource, logger *zap.Logger) data l.AdditionalProperties[otelSpanID] = v.AsString() } case "ddtags": - var tags = append(attributes.TagsFromAttributes(res.Attributes()), v.AsString(), otelTag) + var tags = append(attributes.TagsFromAttributes(res.Attributes()), v.AsString()) tagStr := strings.Join(tags, ",") l.Ddtags = datadog.PtrString(tagStr) default: @@ -121,6 +118,16 @@ func Transform(lr plog.LogRecord, res pcommon.Resource, logger *zap.Logger) data } return true }) + res.Attributes().Range(func(k string, v pcommon.Value) bool { + // "hostname" and "service" are reserved keywords in HTTPLogItem + // Prefix the keys so they aren't overwritten when marshalling + if k == "hostname" || k == "service" { + l.AdditionalProperties["otel."+k] = v.AsString() + } else { + l.AdditionalProperties[k] = v.AsString() + } + return true + }) if traceID := lr.TraceID(); !traceID.IsEmpty() { l.AdditionalProperties[ddTraceID] = strconv.FormatUint(traceIDToUint64(traceID), 10) l.AdditionalProperties[otelTraceID] = hex.EncodeToString(traceID[:]) @@ -157,7 +164,7 @@ func Transform(lr plog.LogRecord, res pcommon.Resource, logger *zap.Logger) data } if !l.HasDdtags() { - var tags = append(attributes.TagsFromAttributes(res.Attributes()), otelTag) + var tags = attributes.TagsFromAttributes(res.Attributes()) tagStr := strings.Join(tags, ",") l.Ddtags = datadog.PtrString(tagStr) } diff --git a/vendor/github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics/consumer.go b/vendor/github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics/consumer.go index 58c477f62..56e023f3f 100644 --- a/vendor/github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics/consumer.go +++ b/vendor/github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics/consumer.go @@ -19,7 +19,7 @@ import ( "encoding" "fmt" - "github.com/DataDog/datadog-agent/pkg/trace/pb" + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" "github.com/DataDog/opentelemetry-mapping-go/pkg/quantile" ) @@ -95,7 +95,7 @@ type Consumer interface { // a Translator. type APMStatsConsumer interface { // ConsumeAPMStats consumes the given StatsPayload. - ConsumeAPMStats(pb.ClientStatsPayload) + ConsumeAPMStats(*pb.ClientStatsPayload) } // HostConsumer is a hostname consumer. diff --git a/vendor/github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics/metrics_translator.go b/vendor/github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics/metrics_translator.go index b84b31601..586534c35 100644 --- a/vendor/github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics/metrics_translator.go +++ b/vendor/github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics/metrics_translator.go @@ -244,8 +244,17 @@ func (t *Translator) getSketchBuckets( startTs := uint64(p.StartTimestamp()) ts := uint64(p.Timestamp()) as := &quantile.Agent{} + + // After the loop, + // - minBound contains the lower bound of the lowest nonzero bucket, + // - maxBound contains the upper bound of the highest nonzero bucket + // - minBoundSet indicates if the minBound is set, effectively because + // there was at least a nonzero bucket. 
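The bound tracking described in the comment above reduces to a small self-contained loop. A minimal sketch follows, assuming buckets are ordered by increasing bound as in OTLP explicit-bounds histograms; the bucket type and nonZeroBounds are hypothetical names, not part of opentelemetry-mapping-go.

```go
package main

import (
	"fmt"
	"math"
)

// bucket is a hypothetical stand-in for one explicit-bounds histogram
// bucket: its bounds plus its (delta) count for the current window.
type bucket struct {
	lower, upper float64
	count        uint64
}

// nonZeroBounds returns the lower bound of the lowest nonzero bucket,
// the upper bound of the highest nonzero bucket, and whether any
// bucket had a nonzero count at all.
func nonZeroBounds(buckets []bucket) (minBound, maxBound float64, set bool) {
	for _, b := range buckets {
		if b.count == 0 {
			continue
		}
		if !set {
			minBound = b.lower
			set = true
		}
		maxBound = b.upper
	}
	return minBound, maxBound, set
}

func main() {
	buckets := []bucket{
		{math.Inf(-1), 10, 0},
		{10, 20, 3},
		{20, 30, 0},
		{30, 40, 1},
		{40, math.Inf(1), 0},
	}
	lo, hi, ok := nonZeroBounds(buckets)
	fmt.Println(lo, hi, ok) // 10 40 true
	// As in the hunk below, an infinite bound would be left alone when
	// overriding the sketch's Basic.Min/Basic.Max.
}
```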
+ var minBound, maxBound float64 + var minBoundSet bool for j := 0; j < p.BucketCounts().Len(); j++ { lowerBound, upperBound := getBounds(p, j) + originalLowerBound, originalUpperBound := lowerBound, upperBound // Compute temporary bucketTags to have unique keys in the t.prevPts cache for each bucket // The bucketTags are computed from the bounds before the InsertInterpolate fix is done, @@ -265,12 +274,22 @@ func (t *Translator) getSketchBuckets( } count := p.BucketCounts().At(j) + var nonZeroBucket bool if delta { + nonZeroBucket = count > 0 as.InsertInterpolate(lowerBound, upperBound, uint(count)) } else if dx, ok := t.prevPts.Diff(bucketDims, startTs, ts, float64(count)); ok { + nonZeroBucket = dx > 0 as.InsertInterpolate(lowerBound, upperBound, uint(dx)) } + if nonZeroBucket { + if !minBoundSet { + minBound = originalLowerBound + minBoundSet = true + } + maxBound = originalUpperBound + } } sketch := as.Finish() @@ -282,6 +301,17 @@ func (t *Translator) getSketchBuckets( sketch.Basic.Avg = sketch.Basic.Sum / float64(sketch.Basic.Cnt) } + // If there is at least one bucket with nonzero count, + // override min/max with bounds if they are not infinite. + if minBoundSet { + if !math.IsInf(minBound, 0) { + sketch.Basic.Min = minBound + } + if !math.IsInf(maxBound, 0) { + sketch.Basic.Max = maxBound + } + } + if histInfo.hasMinFromLastTimeWindow { // We know exact minimum for the last time window. sketch.Basic.Min = p.Min() @@ -644,6 +674,7 @@ func (t *Translator) MapMetrics(ctx context.Context, md pmetric.Metrics, consume // Fetch tags from attributes. attributeTags := attributes.TagsFromAttributes(rm.Resource().Attributes()) ilms := rm.ScopeMetrics() + rattrs := rm.Resource().Attributes() for j := 0; j < ilms.Len(); j++ { ilm := ilms.At(j) metricsArray := ilm.Metrics() @@ -657,6 +688,7 @@ func (t *Translator) MapMetrics(ctx context.Context, md pmetric.Metrics, consume additionalTags = attributeTags } + newMetrics := pmetric.NewMetricSlice() for k := 0; k < metricsArray.Len(); k++ { md := metricsArray.At(k) if v, ok := runtimeMetricsMappings[md.Name()]; ok { @@ -664,86 +696,91 @@ func (t *Translator) MapMetrics(ctx context.Context, md pmetric.Metrics, consume for _, mp := range v { if mp.attributes == nil { // duplicate runtime metrics as Datadog runtime metrics - cp := metricsArray.AppendEmpty() + cp := newMetrics.AppendEmpty() md.CopyTo(cp) cp.SetName(mp.mappedName) break } if md.Type() == pmetric.MetricTypeSum { - mapSumRuntimeMetricWithAttributes(md, metricsArray, mp) + mapSumRuntimeMetricWithAttributes(md, newMetrics, mp) } else if md.Type() == pmetric.MetricTypeGauge { - mapGaugeRuntimeMetricWithAttributes(md, metricsArray, mp) + mapGaugeRuntimeMetricWithAttributes(md, newMetrics, mp) } else if md.Type() == pmetric.MetricTypeHistogram { - mapHistogramRuntimeMetricWithAttributes(md, metricsArray, mp) + mapHistogramRuntimeMetricWithAttributes(md, newMetrics, mp) } } } if t.cfg.withRemapping { - remapMetrics(metricsArray, md) - } - baseDims := &Dimensions{ - name: md.Name(), - tags: additionalTags, - host: host, - originID: attributes.OriginIDFromAttributes(rm.Resource().Attributes()), - } - switch md.Type() { - case pmetric.MetricTypeGauge: - t.mapNumberMetrics(ctx, consumer, baseDims, Gauge, md.Gauge().DataPoints()) - case pmetric.MetricTypeSum: - switch md.Sum().AggregationTemporality() { - case pmetric.AggregationTemporalityCumulative: - if isCumulativeMonotonic(md) { - switch t.cfg.NumberMode { - case NumberModeCumulativeToDelta: - t.mapNumberMonotonicMetrics(ctx, consumer, 
baseDims, md.Sum().DataPoints()) - case NumberModeRawValue: - t.mapNumberMetrics(ctx, consumer, baseDims, Gauge, md.Sum().DataPoints()) - } - } else { // delta and cumulative non-monotonic sums - t.mapNumberMetrics(ctx, consumer, baseDims, Gauge, md.Sum().DataPoints()) - } - case pmetric.AggregationTemporalityDelta: - t.mapNumberMetrics(ctx, consumer, baseDims, Count, md.Sum().DataPoints()) - default: // pmetric.AggregationTemporalityUnspecified or any other not supported type - t.logger.Debug("Unknown or unsupported aggregation temporality", - zap.String(metricName, md.Name()), - zap.Any("aggregation temporality", md.Sum().AggregationTemporality()), - ) - continue - } - case pmetric.MetricTypeHistogram: - switch md.Histogram().AggregationTemporality() { - case pmetric.AggregationTemporalityCumulative, pmetric.AggregationTemporalityDelta: - delta := md.Histogram().AggregationTemporality() == pmetric.AggregationTemporalityDelta - t.mapHistogramMetrics(ctx, consumer, baseDims, md.Histogram().DataPoints(), delta) - default: // pmetric.AggregationTemporalityUnspecified or any other not supported type - t.logger.Debug("Unknown or unsupported aggregation temporality", - zap.String("metric name", md.Name()), - zap.Any("aggregation temporality", md.Histogram().AggregationTemporality()), - ) - continue - } - case pmetric.MetricTypeExponentialHistogram: - switch md.ExponentialHistogram().AggregationTemporality() { - case pmetric.AggregationTemporalityDelta: - delta := md.ExponentialHistogram().AggregationTemporality() == pmetric.AggregationTemporalityDelta - t.mapExponentialHistogramMetrics(ctx, consumer, baseDims, md.ExponentialHistogram().DataPoints(), delta) - default: // pmetric.AggregationTemporalityCumulative, pmetric.AggregationTemporalityUnspecified or any other not supported type - t.logger.Debug("Unknown or unsupported aggregation temporality", - zap.String("metric name", md.Name()), - zap.Any("aggregation temporality", md.ExponentialHistogram().AggregationTemporality()), - ) - continue - } - case pmetric.MetricTypeSummary: - t.mapSummaryMetrics(ctx, consumer, baseDims, md.Summary().DataPoints()) - default: // pmetric.MetricDataTypeNone or any other not supported type - t.logger.Debug("Unknown or unsupported metric type", zap.String(metricName, md.Name()), zap.Any("data type", md.Type())) - continue + remapMetrics(newMetrics, md) } + t.mapToDDFormat(ctx, md, consumer, additionalTags, host, rattrs) + } + + for k := 0; k < newMetrics.Len(); k++ { + md := newMetrics.At(k) + t.mapToDDFormat(ctx, md, consumer, additionalTags, host, rattrs) } } } return metadata, nil } + +func (t *Translator) mapToDDFormat(ctx context.Context, md pmetric.Metric, consumer Consumer, additionalTags []string, host string, rattrs pcommon.Map) { + baseDims := &Dimensions{ + name: md.Name(), + tags: additionalTags, + host: host, + originID: attributes.OriginIDFromAttributes(rattrs), + } + switch md.Type() { + case pmetric.MetricTypeGauge: + t.mapNumberMetrics(ctx, consumer, baseDims, Gauge, md.Gauge().DataPoints()) + case pmetric.MetricTypeSum: + switch md.Sum().AggregationTemporality() { + case pmetric.AggregationTemporalityCumulative: + if isCumulativeMonotonic(md) { + switch t.cfg.NumberMode { + case NumberModeCumulativeToDelta: + t.mapNumberMonotonicMetrics(ctx, consumer, baseDims, md.Sum().DataPoints()) + case NumberModeRawValue: + t.mapNumberMetrics(ctx, consumer, baseDims, Gauge, md.Sum().DataPoints()) + } + } else { // delta and cumulative non-monotonic sums + t.mapNumberMetrics(ctx, consumer, baseDims, Gauge, 
md.Sum().DataPoints()) + } + case pmetric.AggregationTemporalityDelta: + t.mapNumberMetrics(ctx, consumer, baseDims, Count, md.Sum().DataPoints()) + default: // pmetric.AggregationTemporalityUnspecified or any other not supported type + t.logger.Debug("Unknown or unsupported aggregation temporality", + zap.String(metricName, md.Name()), + zap.Any("aggregation temporality", md.Sum().AggregationTemporality()), + ) + } + case pmetric.MetricTypeHistogram: + switch md.Histogram().AggregationTemporality() { + case pmetric.AggregationTemporalityCumulative, pmetric.AggregationTemporalityDelta: + delta := md.Histogram().AggregationTemporality() == pmetric.AggregationTemporalityDelta + t.mapHistogramMetrics(ctx, consumer, baseDims, md.Histogram().DataPoints(), delta) + default: // pmetric.AggregationTemporalityUnspecified or any other not supported type + t.logger.Debug("Unknown or unsupported aggregation temporality", + zap.String("metric name", md.Name()), + zap.Any("aggregation temporality", md.Histogram().AggregationTemporality()), + ) + } + case pmetric.MetricTypeExponentialHistogram: + switch md.ExponentialHistogram().AggregationTemporality() { + case pmetric.AggregationTemporalityDelta: + delta := md.ExponentialHistogram().AggregationTemporality() == pmetric.AggregationTemporalityDelta + t.mapExponentialHistogramMetrics(ctx, consumer, baseDims, md.ExponentialHistogram().DataPoints(), delta) + default: // pmetric.AggregationTemporalityCumulative, pmetric.AggregationTemporalityUnspecified or any other not supported type + t.logger.Debug("Unknown or unsupported aggregation temporality", + zap.String("metric name", md.Name()), + zap.Any("aggregation temporality", md.ExponentialHistogram().AggregationTemporality()), + ) + } + case pmetric.MetricTypeSummary: + t.mapSummaryMetrics(ctx, consumer, baseDims, md.Summary().DataPoints()) + default: // pmetric.MetricDataTypeNone or any other not supported type + t.logger.Debug("Unknown or unsupported metric type", zap.String(metricName, md.Name()), zap.Any("data type", md.Type())) + } +} diff --git a/vendor/github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics/runtime_metric_mappings.go b/vendor/github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics/runtime_metric_mappings.go index c90ba0db2..dbb87a6e4 100644 --- a/vendor/github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics/runtime_metric_mappings.go +++ b/vendor/github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics/runtime_metric_mappings.go @@ -91,7 +91,7 @@ var dotnetRuntimeMetricsMappings = runtimeMetricMappingList{ var javaRuntimeMetricsMappings = runtimeMetricMappingList{ "process.runtime.jvm.threads.count": {{mappedName: "jvm.thread_count"}}, - "process.runtime.jvm.classes.loaded": {{mappedName: "jvm.loaded_classes"}}, + "process.runtime.jvm.classes.current_loaded": {{mappedName: "jvm.loaded_classes"}}, "process.runtime.jvm.system.cpu.utilization": {{mappedName: "jvm.cpu_load.system"}}, "process.runtime.jvm.cpu.utilization": {{mappedName: "jvm.cpu_load.process"}}, "process.runtime.jvm.gc.duration": {{mappedName: "jvm.gc.parnew.time"}}, diff --git a/vendor/github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics/statspayload.go b/vendor/github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics/statspayload.go index 7ce98ea67..dd6d7617f 100644 --- a/vendor/github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics/statspayload.go +++ b/vendor/github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics/statspayload.go @@ -27,7 +27,7 @@ import ( 
"go.opentelemetry.io/collector/pdata/pmetric" "go.uber.org/zap" - "github.com/DataDog/datadog-agent/pkg/trace/pb" + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" "github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes/source" ) @@ -68,7 +68,7 @@ const ( ) // StatsPayloadToMetrics converts an APM Stats Payload to a set of OTLP Metrics. -func (t *Translator) StatsPayloadToMetrics(sp pb.StatsPayload) pmetric.Metrics { +func (t *Translator) StatsPayloadToMetrics(sp *pb.StatsPayload) pmetric.Metrics { mmx := pmetric.NewMetrics() // We ignore Agent{Hostname,Env,Version} and fill those in later. We want those // values to be consistent with the ones that appear on traces and logs. They are @@ -103,12 +103,12 @@ func (t *Translator) StatsPayloadToMetrics(sp pb.StatsPayload) pmetric.Metrics { metricNameDuration: cgs.Duration, metricNameTopLevelHits: cgs.TopLevelHits, } { - appendSum(mxs, name, int64(val), sb.Start, sb.Start+sb.Duration, &cgs) + appendSum(mxs, name, int64(val), sb.Start, sb.Start+sb.Duration, cgs) } - if err := appendSketch(mxs, metricNameOkSummary, cgs.OkSummary, sb.Start, sb.Start+sb.Duration, &cgs); err != nil { + if err := appendSketch(mxs, metricNameOkSummary, cgs.OkSummary, sb.Start, sb.Start+sb.Duration, cgs); err != nil { t.logger.Error("Error exporting APM Stats ok_summary", zap.Error(err)) } - if err := appendSketch(mxs, metricNameErrorSummary, cgs.ErrorSummary, sb.Start, sb.Start+sb.Duration, &cgs); err != nil { + if err := appendSketch(mxs, metricNameErrorSummary, cgs.ErrorSummary, sb.Start, sb.Start+sb.Duration, cgs); err != nil { t.logger.Error("Error exporting APM Stats error_summary", zap.Error(err)) } } @@ -286,10 +286,10 @@ func (a *aggregations) Value(m pcommon.Map) *aggregationValue { // Stats returns the set of pb.ClientGroupedStats based on all the aggregated key/value // pairs. -func (a *aggregations) Stats() []pb.ClientGroupedStats { - cgs := make([]pb.ClientGroupedStats, 0, len(a.agg)) +func (a *aggregations) Stats() []*pb.ClientGroupedStats { + cgs := make([]*pb.ClientGroupedStats, 0, len(a.agg)) for k, v := range a.agg { - cgs = append(cgs, pb.ClientGroupedStats{ + cgs = append(cgs, &pb.ClientGroupedStats{ Service: k.Service, Name: k.Name, Resource: k.Resource, @@ -319,17 +319,17 @@ func (a *aggregations) Stats() []pb.ClientGroupedStats { const UnsetHostnamePlaceholder = "__unset__" // statsPayloadFromMetrics converts Resource Metrics to an APM Client Stats Payload. -func (t *Translator) statsPayloadFromMetrics(rmx pmetric.ResourceMetrics) (pb.ClientStatsPayload, error) { +func (t *Translator) statsPayloadFromMetrics(rmx pmetric.ResourceMetrics) (*pb.ClientStatsPayload, error) { attr := rmx.Resource().Attributes() if v, ok := attr.Get(keyAPMStats); !ok || !v.Bool() { - return pb.ClientStatsPayload{}, fmt.Errorf("was asked to convert metrics to stats payload, but identifier key %q was not present. Skipping.", keyAPMStats) + return &pb.ClientStatsPayload{}, fmt.Errorf("was asked to convert metrics to stats payload, but identifier key %q was not present. 
Skipping.", keyAPMStats) } hostname := getStr(attr, statsKeyHostname) tags := strings.Split(getStr(attr, statsKeyTags), ",") if hostname == UnsetHostnamePlaceholder { src, err := t.source(attr) if err != nil { - return pb.ClientStatsPayload{}, err + return &pb.ClientStatsPayload{}, err } switch src.Kind { case source.HostnameKind: @@ -339,7 +339,7 @@ func (t *Translator) statsPayloadFromMetrics(rmx pmetric.ResourceMetrics) (pb.Cl tags = append(tags, src.Tag()) } } - cp := pb.ClientStatsPayload{ + cp := &pb.ClientStatsPayload{ Hostname: hostname, Env: getStr(attr, statsKeyEnv), Version: getStr(attr, statsKeyVersion), @@ -383,11 +383,11 @@ func (t *Translator) statsPayloadFromMetrics(rmx pmetric.ResourceMetrics) (pb.Cl agg.Value(key).ErrorSummary = val } default: - return pb.ClientStatsPayload{}, fmt.Errorf(`metric named %q in Stats Payload should be of type "Sum" or "ExponentialHistogram" but is %q instead`, m.Name(), m.Type()) + return &pb.ClientStatsPayload{}, fmt.Errorf(`metric named %q in Stats Payload should be of type "Sum" or "ExponentialHistogram" but is %q instead`, m.Name(), m.Type()) } } buck.Stats = agg.Stats() - cp.Stats = append(cp.Stats, buck) + cp.Stats = append(cp.Stats, &buck) } return cp, nil } diff --git a/vendor/github.com/DataDog/opentelemetry-mapping-go/pkg/quantile/agent.go b/vendor/github.com/DataDog/opentelemetry-mapping-go/pkg/quantile/agent.go index 0f9bf6d63..14f09f73d 100644 --- a/vendor/github.com/DataDog/opentelemetry-mapping-go/pkg/quantile/agent.go +++ b/vendor/github.com/DataDog/opentelemetry-mapping-go/pkg/quantile/agent.go @@ -14,9 +14,9 @@ var agentConfig = Default() // An Agent sketch is an insert optimized version of the sketch for use in the // datadog-agent. type Agent struct { - Sketch Sketch Buf []Key CountBuf []KeyCount + Sketch Sketch } // IsEmpty returns true if the sketch is empty diff --git a/vendor/github.com/Shopify/sarama/.gitignore b/vendor/github.com/IBM/sarama/.gitignore similarity index 100% rename from vendor/github.com/Shopify/sarama/.gitignore rename to vendor/github.com/IBM/sarama/.gitignore diff --git a/vendor/github.com/Shopify/sarama/.golangci.yml b/vendor/github.com/IBM/sarama/.golangci.yml similarity index 86% rename from vendor/github.com/Shopify/sarama/.golangci.yml rename to vendor/github.com/IBM/sarama/.golangci.yml index 0b419abbf..3d87645c1 100644 --- a/vendor/github.com/Shopify/sarama/.golangci.yml +++ b/vendor/github.com/IBM/sarama/.golangci.yml @@ -19,7 +19,7 @@ linters-settings: misspell: locale: US goimports: - local-prefixes: github.com/Shopify/sarama + local-prefixes: github.com/IBM/sarama gocritic: enabled-tags: - diagnostic @@ -39,11 +39,18 @@ linters-settings: lines: 300 statements: 300 + depguard: + rules: + main: + deny: + - pkg: "io/ioutil" + desc: Use the "io" and "os" packages instead. 
+ linters: disable-all: true enable: - bodyclose - - deadcode + # - deadcode - depguard - exportloopref - dogsled @@ -68,12 +75,12 @@ linters: # - paralleltest # - scopelint - staticcheck - - structcheck + # - structcheck # - stylecheck - typecheck - unconvert - unused - - varcheck + # - varcheck - whitespace issues: diff --git a/vendor/github.com/IBM/sarama/CHANGELOG.md b/vendor/github.com/IBM/sarama/CHANGELOG.md new file mode 100644 index 000000000..3737fc327 --- /dev/null +++ b/vendor/github.com/IBM/sarama/CHANGELOG.md @@ -0,0 +1,1511 @@ +# Changelog + +## Version 1.40.0 (2023-07-17) + +## What's Changed + +Note: this is the first release after the transition of Sarama ownership from Shopify to IBM in https://github.com/IBM/sarama/issues/2461 + +### :rotating_light: Breaking Changes + +- chore: migrate module to github.com/IBM/sarama by @dnwe in https://github.com/IBM/sarama/pull/2492 +- fix: restore (\*OffsetCommitRequest) AddBlock func by @dnwe in https://github.com/IBM/sarama/pull/2494 + +### :bug: Fixes + +- fix(consumer): don't retry FindCoordinator forever by @dnwe in https://github.com/IBM/sarama/pull/2427 +- fix(metrics): fix race condition when calling Broker.Open() twice by @vincentbernat in https://github.com/IBM/sarama/pull/2428 +- fix: use version 4 of DescribeGroupsRequest only if kafka broker version is >= 2.4 by @faillefer in https://github.com/IBM/sarama/pull/2451 +- Fix HighWaterMarkOffset of mocks partition consumer by @gr8web in https://github.com/IBM/sarama/pull/2447 +- fix: prevent data race in balance strategy by @napallday in https://github.com/IBM/sarama/pull/2453 + +### :package: Dependency updates + +- chore(deps): bump golang.org/x/net from 0.5.0 to 0.7.0 by @dependabot in https://github.com/IBM/sarama/pull/2452 + +### :wrench: Maintenance + +- chore: add kafka 3.3.2 by @dnwe in https://github.com/IBM/sarama/pull/2434 +- chore(ci): remove Shopify/shopify-cla-action by @dnwe in https://github.com/IBM/sarama/pull/2489 +- chore: bytes.Equal instead bytes.Compare by @testwill in https://github.com/IBM/sarama/pull/2485 + +## New Contributors + +- @dependabot made their first contribution in https://github.com/IBM/sarama/pull/2452 +- @gr8web made their first contribution in https://github.com/IBM/sarama/pull/2447 +- @testwill made their first contribution in https://github.com/IBM/sarama/pull/2485 + +**Full Changelog**: https://github.com/IBM/sarama/compare/v1.38.1...v1.40.0 + +## Version 1.38.1 (2023-01-22) + +## What's Changed +### :bug: Fixes +* fix(example): correct `records-number` param in txn producer readme by @diallo-han in https://github.com/IBM/sarama/pull/2420 +* fix: use newConsumer method in newConsumerGroup method by @Lumotheninja in https://github.com/IBM/sarama/pull/2424 +### :package: Dependency updates +* chore(deps): bump module github.com/klauspost/compress to v1.15.14 by @dnwe in https://github.com/IBM/sarama/pull/2410 +* chore(deps): bump module golang.org/x/net to v0.5.0 by @dnwe in https://github.com/IBM/sarama/pull/2413 +* chore(deps): bump module github.com/stretchr/testify to v1.8.1 by @dnwe in https://github.com/IBM/sarama/pull/2411 +* chore(deps): bump module github.com/xdg-go/scram to v1.1.2 by @dnwe in https://github.com/IBM/sarama/pull/2412 +* chore(deps): bump module golang.org/x/sync to v0.1.0 by @dnwe in https://github.com/IBM/sarama/pull/2414 +* chore(deps): bump github.com/eapache/go-xerial-snappy digest to bf00bc1 by @dnwe in https://github.com/IBM/sarama/pull/2418 + +## New Contributors +* @diallo-han made their first 
contribution in https://github.com/IBM/sarama/pull/2420 +* @Lumotheninja made their first contribution in https://github.com/IBM/sarama/pull/2424 + +**Full Changelog**: https://github.com/IBM/sarama/compare/v1.38.0...v1.38.1 + +## Version 1.38.0 (2023-01-08) + +## What's Changed +### :tada: New Features / Improvements +* feat(producer): improve memory usage of zstd encoder by using our own pool management by @rtreffer in https://github.com/IBM/sarama/pull/2375 +* feat(proto): implement and use MetadataRequest v7 by @dnwe in https://github.com/IBM/sarama/pull/2388 +* feat(metrics): add protocol-requests-rate metric by @auntan in https://github.com/IBM/sarama/pull/2373 +### :bug: Fixes +* fix(proto): track and supply leader epoch to FetchRequest by @dnwe in https://github.com/IBM/sarama/pull/2389 +* fix(example): improve arg name used for tls skip verify by @michaeljmarshall in https://github.com/IBM/sarama/pull/2385 +* fix(zstd): default back to GOMAXPROCS concurrency by @bgreenlee in https://github.com/IBM/sarama/pull/2404 +* fix(producer): add nil check while producer is retrying by @hsweif in https://github.com/IBM/sarama/pull/2387 +* fix(producer): return errors for every message in retryBatch to avoid producer hang forever by @cch123 in https://github.com/IBM/sarama/pull/2378 +* fix(metrics): fix race when accessing metric registry by @vincentbernat in https://github.com/IBM/sarama/pull/2409 +### :package: Dependency updates +* chore(deps): bump golang.org/x/net to v0.4.0 by @dnwe in https://github.com/IBM/sarama/pull/2403 +### :wrench: Maintenance +* chore(ci): replace set-output command in GH Action by @dnwe in https://github.com/IBM/sarama/pull/2390 +* chore(ci): include kafka 3.3.1 in testing matrix by @dnwe in https://github.com/IBM/sarama/pull/2406 + +## New Contributors +* @michaeljmarshall made their first contribution in https://github.com/IBM/sarama/pull/2385 +* @bgreenlee made their first contribution in https://github.com/IBM/sarama/pull/2404 +* @hsweif made their first contribution in https://github.com/IBM/sarama/pull/2387 +* @cch123 made their first contribution in https://github.com/IBM/sarama/pull/2378 + +**Full Changelog**: https://github.com/IBM/sarama/compare/v1.37.2...v1.38.0 + +## Version 1.37.2 (2022-10-04) + +## What's Changed +### :bug: Fixes +* fix: ensure updateMetaDataMs is 64-bit aligned by @dnwe in https://github.com/IBM/sarama/pull/2356 +### :heavy_plus_sign: Other Changes +* fix: bump go.mod specification to go 1.17 by @dnwe in https://github.com/IBM/sarama/pull/2357 + + +**Full Changelog**: https://github.com/IBM/sarama/compare/v1.37.1...v1.37.2 + +## Version 1.37.1 (2022-10-04) + +## What's Changed +### :bug: Fixes +* fix: support existing deprecated Rebalance.Strategy field usage by @spongecaptain in https://github.com/IBM/sarama/pull/2352 +* fix(test): consumer group rebalance strategy compatibility by @Jacob-bzx in https://github.com/IBM/sarama/pull/2353 +* fix(producer): replace time.After with time.Timer to avoid high memory usage by @Jacob-bzx in https://github.com/IBM/sarama/pull/2355 + +## New Contributors +* @spongecaptain made their first contribution in https://github.com/IBM/sarama/pull/2352 + +**Full Changelog**: https://github.com/IBM/sarama/compare/v1.37.0...v1.37.1 + +## Version 1.37.0 (2022-09-28) + +## What's Changed + +### :rotating_light: Breaking Changes +* Due to a change in [github.com/klauspost/compress v1.15.10](https://github.com/klauspost/compress/releases/tag/v1.15.10), Sarama v1.37.0 requires Go 1.17 going forward, 
unfortunately due to an oversight this wasn't reflected in the go.mod declaration at time of release. + +### :tada: New Features / Improvements +* feat(consumer): support multiple balance strategies by @Jacob-bzx in https://github.com/IBM/sarama/pull/2339 +* feat(producer): transactional API by @ryarnyah in https://github.com/IBM/sarama/pull/2295 +* feat(mocks): support key in MockFetchResponse. by @Skandalik in https://github.com/IBM/sarama/pull/2328 +### :bug: Fixes +* fix: avoid panic when Metadata.RefreshFrequency is 0 by @Jacob-bzx in https://github.com/IBM/sarama/pull/2329 +* fix(consumer): avoid pushing unrelated responses to paused children by @pkoutsovasilis in https://github.com/IBM/sarama/pull/2317 +* fix: prevent metrics leak with cleanup by @auntan in https://github.com/IBM/sarama/pull/2340 +* fix: race condition(may panic) when closing consumer group by @Jacob-bzx in https://github.com/IBM/sarama/pull/2331 +* fix(consumer): default ResetInvalidOffsets to true by @dnwe in https://github.com/IBM/sarama/pull/2345 +* Validate the `Config` when creating a mock producer/consumer by @joewreschnig in https://github.com/IBM/sarama/pull/2327 +### :package: Dependency updates +* chore(deps): bump module github.com/pierrec/lz4/v4 to v4.1.16 by @dnwe in https://github.com/IBM/sarama/pull/2335 +* chore(deps): bump golang.org/x/net digest to bea034e by @dnwe in https://github.com/IBM/sarama/pull/2333 +* chore(deps): bump golang.org/x/sync digest to 7f9b162 by @dnwe in https://github.com/IBM/sarama/pull/2334 +* chore(deps): bump golang.org/x/net digest to f486391 by @dnwe in https://github.com/IBM/sarama/pull/2348 +* chore(deps): bump module github.com/shopify/toxiproxy/v2 to v2.5.0 by @dnwe in https://github.com/IBM/sarama/pull/2336 +* chore(deps): bump module github.com/klauspost/compress to v1.15.11 by @dnwe in https://github.com/IBM/sarama/pull/2349 +* chore(deps): bump module github.com/pierrec/lz4/v4 to v4.1.17 by @dnwe in https://github.com/IBM/sarama/pull/2350 +### :wrench: Maintenance +* chore(ci): bump kafka-versions to latest by @dnwe in https://github.com/IBM/sarama/pull/2346 +* chore(ci): bump go-versions to N and N-1 by @dnwe in https://github.com/IBM/sarama/pull/2347 + +## New Contributors +* @Jacob-bzx made their first contribution in https://github.com/IBM/sarama/pull/2329 +* @pkoutsovasilis made their first contribution in https://github.com/IBM/sarama/pull/2317 +* @Skandalik made their first contribution in https://github.com/IBM/sarama/pull/2328 +* @auntan made their first contribution in https://github.com/IBM/sarama/pull/2340 +* @ryarnyah made their first contribution in https://github.com/IBM/sarama/pull/2295 + +**Full Changelog**: https://github.com/IBM/sarama/compare/v1.36.0...v1.37.0 + +## Version 1.36.0 (2022-08-11) + +## What's Changed +### :tada: New Features / Improvements +* feat: add option to propagate OffsetOutOfRange error by @dkolistratova in https://github.com/IBM/sarama/pull/2252 +* feat(producer): expose ProducerMessage.byteSize() function by @k8scat in https://github.com/IBM/sarama/pull/2315 +* feat(metrics): track consumer fetch request rates by @dnwe in https://github.com/IBM/sarama/pull/2299 +### :bug: Fixes +* fix(consumer): avoid submitting empty fetch requests when paused by @raulnegreiros in https://github.com/IBM/sarama/pull/2143 +### :package: Dependency updates +* chore(deps): bump module github.com/klauspost/compress to v1.15.9 by @dnwe in https://github.com/IBM/sarama/pull/2304 +* chore(deps): bump golang.org/x/net digest to c7608f3 by 
@dnwe in https://github.com/IBM/sarama/pull/2301 +* chore(deps): bump golangci/golangci-lint-action action to v3 by @dnwe in https://github.com/IBM/sarama/pull/2311 +* chore(deps): bump golang.org/x/net digest to 07c6da5 by @dnwe in https://github.com/IBM/sarama/pull/2307 +* chore(deps): bump github actions versions (major) by @dnwe in https://github.com/IBM/sarama/pull/2313 +* chore(deps): bump module github.com/jcmturner/gofork to v1.7.6 by @dnwe in https://github.com/IBM/sarama/pull/2305 +* chore(deps): bump golang.org/x/sync digest to 886fb93 by @dnwe in https://github.com/IBM/sarama/pull/2302 +* chore(deps): bump module github.com/jcmturner/gokrb5/v8 to v8.4.3 by @dnwe in https://github.com/IBM/sarama/pull/2303 +### :wrench: Maintenance +* chore: add kafka 3.1.1 to the version matrix by @dnwe in https://github.com/IBM/sarama/pull/2300 +### :heavy_plus_sign: Other Changes +* Migrate off probot-CLA to new GitHub Action by @cursedcoder in https://github.com/IBM/sarama/pull/2294 +* Forgot to remove cla probot by @cursedcoder in https://github.com/IBM/sarama/pull/2297 +* chore(lint): re-enable a small amount of go-critic by @dnwe in https://github.com/IBM/sarama/pull/2312 + +## New Contributors +* @cursedcoder made their first contribution in https://github.com/IBM/sarama/pull/2294 +* @dkolistratova made their first contribution in https://github.com/IBM/sarama/pull/2252 +* @k8scat made their first contribution in https://github.com/IBM/sarama/pull/2315 + +**Full Changelog**: https://github.com/IBM/sarama/compare/v1.35.0...v1.36.0 + +## Version 1.35.0 (2022-07-22) + +## What's Changed +### :bug: Fixes +* fix: fix metadata retry backoff invalid when get metadata failed by @Stephan14 in https://github.com/IBM/sarama/pull/2256 +* fix(balance): sort and de-duplicate memberIDs by @dnwe in https://github.com/IBM/sarama/pull/2285 +* fix: prevent DescribeLogDirs hang in admin client by @zerowidth in https://github.com/IBM/sarama/pull/2269 +* fix: include assignment-less members in SyncGroup by @dnwe in https://github.com/IBM/sarama/pull/2292 +### :package: Dependency updates +* chore(deps): bump module github.com/stretchr/testify to v1.8.0 by @dnwe in https://github.com/IBM/sarama/pull/2284 +* chore(deps): bump module github.com/eapache/go-resiliency to v1.3.0 by @dnwe in https://github.com/IBM/sarama/pull/2283 +* chore(deps): bump golang.org/x/net digest to 1185a90 by @dnwe in https://github.com/IBM/sarama/pull/2279 +* chore(deps): bump module github.com/pierrec/lz4/v4 to v4.1.15 by @dnwe in https://github.com/IBM/sarama/pull/2281 +* chore(deps): bump module github.com/klauspost/compress to v1.15.8 by @dnwe in https://github.com/IBM/sarama/pull/2280 +### :wrench: Maintenance +* chore: rename `any` func to avoid identifier by @dnwe in https://github.com/IBM/sarama/pull/2272 +* chore: add and test against kafka 3.2.0 by @dnwe in https://github.com/IBM/sarama/pull/2288 +* chore: document Fetch protocol fields by @dnwe in https://github.com/IBM/sarama/pull/2289 +### :heavy_plus_sign: Other Changes +* chore(ci): fix redirect with GITHUB_STEP_SUMMARY by @dnwe in https://github.com/IBM/sarama/pull/2286 +* fix(test): permit ECONNRESET in TestInitProducerID by @dnwe in https://github.com/IBM/sarama/pull/2287 +* fix: ensure empty or devel version valid by @dnwe in https://github.com/IBM/sarama/pull/2291 + +## New Contributors +* @zerowidth made their first contribution in https://github.com/IBM/sarama/pull/2269 + +**Full Changelog**: https://github.com/IBM/sarama/compare/v1.34.1...v1.35.0 + +## Version 
1.34.1 (2022-06-07) + +## What's Changed +### :bug: Fixes +* fix(examples): check session.Context().Done() in examples/consumergroup by @zxc111 in https://github.com/IBM/sarama/pull/2240 +* fix(protocol): move AuthorizedOperations into GroupDescription of DescribeGroupsResponse by @aiquestion in https://github.com/IBM/sarama/pull/2247 +* fix(protocol): tidyup DescribeGroupsResponse by @dnwe in https://github.com/IBM/sarama/pull/2248 +* fix(consumer): range balance strategy not like reference by @njhartwell in https://github.com/IBM/sarama/pull/2245 +### :wrench: Maintenance +* chore(ci): experiment with using tparse by @dnwe in https://github.com/IBM/sarama/pull/2236 +* chore(deps): bump thirdparty dependencies to latest releases by @dnwe in https://github.com/IBM/sarama/pull/2242 + +## New Contributors +* @zxc111 made their first contribution in https://github.com/IBM/sarama/pull/2240 +* @njhartwell made their first contribution in https://github.com/IBM/sarama/pull/2245 + +**Full Changelog**: https://github.com/IBM/sarama/compare/v1.34.0...v1.34.1 + +## Version 1.34.0 (2022-05-30) + +## What's Changed +### :tada: New Features / Improvements +* KIP-345: support static membership by @aiquestion in https://github.com/IBM/sarama/pull/2230 +### :bug: Fixes +* fix: KIP-368 use receiver goroutine to process all sasl v1 responses by @k-wall in https://github.com/IBM/sarama/pull/2234 +### :wrench: Maintenance +* chore(deps): bump module github.com/pierrec/lz4 to v4 by @dnwe in https://github.com/IBM/sarama/pull/2231 +* chore(deps): bump golang.org/x/net digest to 2e3eb7b by @dnwe in https://github.com/IBM/sarama/pull/2232 + +## New Contributors +* @aiquestion made their first contribution in https://github.com/IBM/sarama/pull/2230 + +**Full Changelog**: https://github.com/IBM/sarama/compare/v1.33.0...v1.34.0 + +## Version 1.33.0 (2022-05-11) + +## What's Changed +### :rotating_light: Breaking Changes + +**Note: with this change, the user of Sarama is required to use Go 1.13's errors.Is etc (rather than ==) when forming conditionals returned by this library.** +* feat: make `ErrOutOfBrokers` wrap the underlying error that prevented connections to the brokers by @k-wall in https://github.com/IBM/sarama/pull/2131 + + +### :tada: New Features / Improvements +* feat(message): add UnmarshalText method to CompressionCodec by @vincentbernat in https://github.com/IBM/sarama/pull/2172 +* KIP-368 : Allow SASL Connections to Periodically Re-Authenticate by @k-wall in https://github.com/IBM/sarama/pull/2197 +* feat: add batched CreateACLs func to ClusterAdmin by @nkostoulas in https://github.com/IBM/sarama/pull/2191 +### :bug: Fixes +* fix: TestRecordBatchDecoding failing sporadically by @k-wall in https://github.com/IBM/sarama/pull/2154 +* feat(test): add an fvt for broker deadlock by @dnwe in https://github.com/IBM/sarama/pull/2144 +* fix: avoid starvation in subscriptionManager by @dnwe in https://github.com/IBM/sarama/pull/2109 +* fix: remove "Is your cluster reachable?" 
from msg by @dnwe in https://github.com/IBM/sarama/pull/2165 +* fix: remove trailing fullstop from error strings by @dnwe in https://github.com/IBM/sarama/pull/2166 +* fix: return underlying sasl error message by @dnwe in https://github.com/IBM/sarama/pull/2164 +* fix: potential data race on a global variable by @pior in https://github.com/IBM/sarama/pull/2171 +* fix: AdminClient | CreateACLs | check for error in response, return error if needed by @omris94 in https://github.com/IBM/sarama/pull/2185 +* producer: ensure that the management message (fin) is never "leaked" by @niamster in https://github.com/IBM/sarama/pull/2182 +* fix: prevent RefreshBrokers leaking old brokers by @k-wall in https://github.com/IBM/sarama/pull/2203 +* fix: prevent RefreshController leaking controller by @k-wall in https://github.com/IBM/sarama/pull/2204 +* fix: prevent AsyncProducer retryBatch from leaking by @k-wall in https://github.com/IBM/sarama/pull/2208 +* fix: prevent metrics leak when authenticate fails by @Stephan14 in https://github.com/IBM/sarama/pull/2205 +* fix: prevent deadlock between subscription manager and consumer goroutines by @niamster in https://github.com/IBM/sarama/pull/2194 +* fix: prevent idempotent producer epoch exhaustion by @ladislavmacoun in https://github.com/IBM/sarama/pull/2178 +* fix(test): mockbroker offsetResponse vers behavior by @dnwe in https://github.com/IBM/sarama/pull/2213 +* fix: cope with OffsetsLoadInProgress on Join+Sync by @dnwe in https://github.com/IBM/sarama/pull/2214 +* fix: make default MaxWaitTime 500ms by @dnwe in https://github.com/IBM/sarama/pull/2227 +### :package: Dependency updates +* chore(deps): bump xdg-go/scram and klauspost/compress by @dnwe in https://github.com/IBM/sarama/pull/2170 +### :wrench: Maintenance +* fix(test): skip TestReadOnlyAndAllCommittedMessages by @dnwe in https://github.com/IBM/sarama/pull/2161 +* fix(test): remove t.Parallel() by @dnwe in https://github.com/IBM/sarama/pull/2162 +* chore(ci): bump along to Go 1.17+1.18 and bump golangci-lint by @dnwe in https://github.com/IBM/sarama/pull/2183 +* chore: switch to multi-arch compatible docker images by @dnwe in https://github.com/IBM/sarama/pull/2210 +### :heavy_plus_sign: Other Changes +* Remediate a number go-routine leaks (mainly test issues) by @k-wall in https://github.com/IBM/sarama/pull/2198 +* chore: retract v1.32.0 due to #2150 by @dnwe in https://github.com/IBM/sarama/pull/2199 +* chore: bump functional test timeout to 12m by @dnwe in https://github.com/IBM/sarama/pull/2200 +* fix(admin): make DeleteRecords err consistent by @dnwe in https://github.com/IBM/sarama/pull/2226 + +## New Contributors +* @k-wall made their first contribution in https://github.com/IBM/sarama/pull/2154 +* @pior made their first contribution in https://github.com/IBM/sarama/pull/2171 +* @omris94 made their first contribution in https://github.com/IBM/sarama/pull/2185 +* @vincentbernat made their first contribution in https://github.com/IBM/sarama/pull/2172 +* @niamster made their first contribution in https://github.com/IBM/sarama/pull/2182 +* @ladislavmacoun made their first contribution in https://github.com/IBM/sarama/pull/2178 +* @nkostoulas made their first contribution in https://github.com/IBM/sarama/pull/2191 + +**Full Changelog**: https://github.com/IBM/sarama/compare/v1.32.0...v1.33.0 + +## Version 1.32.0 (2022-02-24) + +### ⚠️ This release has been superseded by v1.33.0 and should _not_ be used. 
+ +* chore: retract v1.32.0 due to #2150 by @dnwe in https://github.com/IBM/sarama/pull/2199 + +--- + +## What's Changed +### :bug: Fixes +* Fix deadlock when closing Broker in brokerProducer by @slaunay in https://github.com/IBM/sarama/pull/2133 +### :package: Dependency updates +* chore: refresh dependencies to latest by @dnwe in https://github.com/IBM/sarama/pull/2159 +### :wrench: Maintenance +* fix: rework RebalancingMultiplePartitions test by @dnwe in https://github.com/IBM/sarama/pull/2130 +* fix(test): use Sarama transactional producer by @dnwe in https://github.com/IBM/sarama/pull/1939 +* chore: enable t.Parallel() wherever possible by @dnwe in https://github.com/IBM/sarama/pull/2138 +### :heavy_plus_sign: Other Changes +* chore: restrict to 1 testbinary at once by @dnwe in https://github.com/IBM/sarama/pull/2145 +* chore: restrict to 1 parallel test at once by @dnwe in https://github.com/IBM/sarama/pull/2146 +* Remove myself from codeowners by @bai in https://github.com/IBM/sarama/pull/2147 +* chore: add retractions for known bad versions by @dnwe in https://github.com/IBM/sarama/pull/2160 + + +**Full Changelog**: https://github.com/IBM/sarama/compare/v1.31.1...v1.32.0 + +## Version 1.31.1 (2022-02-01) + +- #2126 - @bai - Populate missing kafka versions +- #2124 - @bai - Add Kafka 3.1.0 to CI matrix, migrate to bitnami kafka image +- #2123 - @bai - Update klauspost/compress to 0.14 +- #2122 - @dnwe - fix(test): make it simpler to re-use toxiproxy +- #2119 - @bai - Add Kafka 3.1.0 version number +- #2005 - @raulnegreiros - feat: add methods to pause/resume consumer's consumption +- #2051 - @seveas - Expose the TLS connection state of a broker connection +- #2117 - @wuhuizuo - feat: add method MockApiVersionsResponse.SetApiKeys +- #2110 - @dnwe - fix: ensure heartbeats only stop after cleanup +- #2113 - @mosceo - Fix typo + +## Version 1.31.0 (2022-01-18) + +## What's Changed +### :tada: New Features / Improvements +* feat: expose IncrementalAlterConfigs API in admin.go by @fengyinqiao in https://github.com/IBM/sarama/pull/2088 +* feat: allow AsyncProducer to have MaxOpenRequests inflight produce requests per broker by @xujianhai666 in https://github.com/IBM/sarama/pull/1686 +* Support request pipelining in AsyncProducer by @slaunay in https://github.com/IBM/sarama/pull/2094 +### :bug: Fixes +* fix(test): add fluent interface for mocks where missing by @grongor in https://github.com/IBM/sarama/pull/2080 +* fix(test): test for ConsumePartition with OffsetOldest by @grongor in https://github.com/IBM/sarama/pull/2081 +* fix: set HWMO during creation of partitionConsumer (fix incorrect HWMO before first fetch) by @grongor in https://github.com/IBM/sarama/pull/2082 +* fix: ignore non-nil but empty error strings in Describe/Alter client quotas responses by @agriffaut in https://github.com/IBM/sarama/pull/2096 +* fix: skip over KIP-482 tagged fields by @dnwe in https://github.com/IBM/sarama/pull/2107 +* fix: clear preferredReadReplica if broker shutdown by @dnwe in https://github.com/IBM/sarama/pull/2108 +* fix(test): correct wrong offsets in mock Consumer by @grongor in https://github.com/IBM/sarama/pull/2078 +* fix: correct bugs in DescribeGroupsResponse by @dnwe in https://github.com/IBM/sarama/pull/2111 +### :wrench: Maintenance +* chore: bump runtime and test dependencies by @dnwe in https://github.com/IBM/sarama/pull/2100 +### :memo: Documentation +* docs: refresh README.md for Kafka 3.0.0 by @dnwe in https://github.com/IBM/sarama/pull/2099 +### :heavy_plus_sign: Other Changes +* 
Fix typo by @mosceo in https://github.com/IBM/sarama/pull/2084 + +## New Contributors +* @grongor made their first contribution in https://github.com/IBM/sarama/pull/2080 +* @fengyinqiao made their first contribution in https://github.com/IBM/sarama/pull/2088 +* @xujianhai666 made their first contribution in https://github.com/IBM/sarama/pull/1686 +* @mosceo made their first contribution in https://github.com/IBM/sarama/pull/2084 + +**Full Changelog**: https://github.com/IBM/sarama/compare/v1.30.1...v1.31.0 + +## Version 1.30.1 (2021-12-04) + +## What's Changed +### :tada: New Features / Improvements +* feat(zstd): pass level param through to compress/zstd encoder by @lizthegrey in https://github.com/IBM/sarama/pull/2045 +### :bug: Fixes +* fix: set min-go-version to 1.16 by @troyanov in https://github.com/IBM/sarama/pull/2048 +* logger: fix debug logs' formatting directives by @utrack in https://github.com/IBM/sarama/pull/2054 +* fix: stuck on the batch with zero records length by @pachmu in https://github.com/IBM/sarama/pull/2057 +* fix: only update preferredReadReplica if valid by @dnwe in https://github.com/IBM/sarama/pull/2076 +### :wrench: Maintenance +* chore: add release notes configuration by @dnwe in https://github.com/IBM/sarama/pull/2046 +* chore: confluent platform version bump by @lizthegrey in https://github.com/IBM/sarama/pull/2070 + +## Notes +* ℹ️ from Sarama 1.30.x onward the minimum version of Go toolchain required is 1.16.x + +## New Contributors +* @troyanov made their first contribution in https://github.com/IBM/sarama/pull/2048 +* @lizthegrey made their first contribution in https://github.com/IBM/sarama/pull/2045 +* @utrack made their first contribution in https://github.com/IBM/sarama/pull/2054 +* @pachmu made their first contribution in https://github.com/IBM/sarama/pull/2057 + +**Full Changelog**: https://github.com/IBM/sarama/compare/v1.30.0...v1.30.1 + +## Version 1.30.0 (2021-09-29) + +⚠️ This release has been superseded by v1.30.1 and should _not_ be used. 
+ +**regression**: enabling rackawareness causes severe throughput drops (#2071) — fixed in v1.30.1 via #2076 + +--- + +ℹ️ **Note: from Sarama 1.30.0 the minimum version of Go toolchain required is 1.16.x** + +--- + +# New Features / Improvements + +- #1983 - @zifengyu - allow configure AllowAutoTopicCreation argument in metadata refresh +- #2000 - @matzew - Using xdg-go module for SCRAM +- #2003 - @gdm85 - feat: add counter metrics for consumer group join/sync and their failures +- #1992 - @zhaomoran - feat: support SaslHandshakeRequest v0 for SCRAM +- #2006 - @faillefer - Add support for DeleteOffsets operation +- #1909 - @agriffaut - KIP-546 Client quota APIs +- #1633 - @aldelucca1 - feat: allow balance strategies to provide initial state +- #1275 - @dnwe - log: add a DebugLogger that proxies to Logger +- #2018 - @dnwe - feat: use DebugLogger reference for goldenpath log +- #2019 - @dnwe - feat: add logging & a metric for producer throttle +- #2023 - @dnwe - feat: add Controller() to ClusterAdmin interface +- #2025 - @dnwe - feat: support ApiVersionsRequest V3 protocol +- #2028 - @dnwe - feat: send ApiVersionsRequest on broker open +- #2034 - @bai - Add support for kafka 3.0.0 + +# Fixes + +- #1990 - @doxsch - fix: correctly pass ValidateOnly through to CreatePartitionsRequest +- #1988 - @LubergAlexander - fix: correct WithCustomFallbackPartitioner implementation +- #2001 - @HurSungYun - docs: inform AsyncProducer Close pitfalls +- #1973 - @qiangmzsx - fix: metrics still taking up too much memory when metrics.UseNilMetrics=true +- #2007 - @bai - Add support for Go 1.17 +- #2009 - @dnwe - fix: enable nilerr linter and fix iferr checks +- #2010 - @dnwe - chore: enable exportloopref and misspell linters +- #2013 - @faillefer - fix(test): disable encoded response/request check when map contains multiple elements +- #2015 - @bai - Change default branch to main +- #1718 - @crivera-fastly - fix: correct the error handling in client.InitProducerID() +- #1984 - @null-sleep - fix(test): bump confluentPlatformVersion from 6.1.1 to 6.2.0 +- #2016 - @dnwe - chore: replace deprecated Go calls +- #2017 - @dnwe - chore: delete legacy vagrant script +- #2020 - @dnwe - fix(test): remove testLogger from TrackLeader test +- #2024 - @dnwe - chore: bump toxiproxy container to v2.1.5 +- #2033 - @bai - Update dependencies +- #2031 - @gdm85 - docs: do not mention buffered messages in sync producer Close method +- #2035 - @dnwe - chore: populate the missing kafka versions +- #2038 - @dnwe - feat: add a fuzzing workflow to github actions + +## New Contributors +* @zifengyu made their first contribution in https://github.com/IBM/sarama/pull/1983 +* @doxsch made their first contribution in https://github.com/IBM/sarama/pull/1990 +* @LubergAlexander made their first contribution in https://github.com/IBM/sarama/pull/1988 +* @HurSungYun made their first contribution in https://github.com/IBM/sarama/pull/2001 +* @gdm85 made their first contribution in https://github.com/IBM/sarama/pull/2003 +* @qiangmzsx made their first contribution in https://github.com/IBM/sarama/pull/1973 +* @zhaomoran made their first contribution in https://github.com/IBM/sarama/pull/1992 +* @faillefer made their first contribution in https://github.com/IBM/sarama/pull/2006 +* @crivera-fastly made their first contribution in https://github.com/IBM/sarama/pull/1718 +* @null-sleep made their first contribution in https://github.com/IBM/sarama/pull/1984 + +**Full Changelog**: https://github.com/IBM/sarama/compare/v1.29.1...v1.30.0 + +## Version 
1.29.1 (2021-06-24)
+
+### New Features / Improvements
+
+- #1966 - @ajanikow - KIP-339: Add Incremental Config updates API
+- #1964 - @ajanikow - Add DelegationToken ResourceType
+
+### Fixes
+
+- #1962 - @hanxiaolin - fix(consumer): call interceptors when MaxProcessingTime expire
+- #1971 - @KerryJava - fix kafka-producer-performance throughput panic
+- #1968 - @dnwe - chore: bump golang.org/x versions
+- #1956 - @joewreschnig - Allow checking the entire `ProducerMessage` in the mock producers
+- #1963 - @dnwe - fix: ensure backoff timer is re-used
+- #1949 - @dnwe - fix: explicitly use uint64 for payload length
+
+## Version 1.29.0 (2021-05-07)
+
+### New Features / Improvements
+
+- #1917 - @arkady-emelyanov - KIP-554: Add Broker-side SCRAM Config API
+- #1869 - @wyndhblb - zstd: encode+decode performance improvements
+- #1541 - @izolight - add String, (Un)MarshalText for acl types.
+- #1921 - @bai - Add support for Kafka 2.8.0
+
+### Fixes
+- #1936 - @dnwe - fix(consumer): follow preferred broker
+- #1933 - @ozzieba - Use gofork for encoding/asn1 to fix ASN errors during Kerberos authentication
+- #1929 - @celrenheit - Handle isolation level in Offset(Request|Response) and require stable offset in FetchOffset(Request|Response)
+- #1926 - @dnwe - fix: correct initial CodeQL findings
+- #1925 - @bai - Test out CodeQL
+- #1923 - @bestgopher - Remove redundant switch-case, fix doc typos
+- #1922 - @bai - Update go dependencies
+- #1898 - @mmaslankaprv - Parsing only known control batches value
+- #1887 - @withshubh - Fix: issues affecting code quality
+
+## Version 1.28.0 (2021-02-15)
+
+**Note that with this release we change `RoundRobinBalancer` strategy to match Java client behavior. See #1788 for details.**
+
+- #1870 - @kvch - Update Kerberos library to latest major
+- #1876 - @bai - Update docs, reference pkg.go.dev
+- #1846 - @wclaeys - Do not ignore Consumer.Offsets.AutoCommit.Enable config on Close
+- #1747 - @XSAM - fix: mock sync producer does not handle the offset while sending messages
+- #1863 - @bai - Add support for Kafka 2.7.0 + update lz4 and klauspost/compress dependencies
+- #1788 - @kzinglzy - feat[balance_strategy]: announcing a new round robin balance strategy
+- #1862 - @bai - Fix CI setenv permissions issues
+- #1832 - @ilyakaznacheev - Update Godoc link to pkg.go.dev
+- #1822 - @danp - KIP-392: Allow consumers to fetch from closest replica
+
+## Version 1.27.2 (2020-10-21)
+
+### Improvements
+
+#1750 - @krantideep95 Adds missing mock responses for mocking consumer group
+
+### Fixes
+
+#1817 - reverts #1785 - Add private method to Client interface to prevent implementation
+
+## Version 1.27.1 (2020-10-07)
+
+### Improvements
+
+#1775 - @d1egoaz - Adds a Producer Interceptor example
+#1781 - @justin-chen - Refresh brokers given list of seed brokers
+#1784 - @justin-chen - Add randomize seed broker method
+#1790 - @d1egoaz - remove example binary
+#1798 - @bai - Test against Go 1.15
+#1785 - @justin-chen - Add private method to Client interface to prevent implementation
+#1802 - @uvw - Support Go 1.13 error unwrapping
+
+### Fixes
+
+#1791 - @stanislavkozlovski - bump default version to 1.0.0
+
+## Version 1.27.0 (2020-08-11)
+
+### Improvements
+
+#1466 - @rubenvp8510 - Expose kerberos fast negotiation configuration
+#1695 - @KJTsanaktsidis - Use docker-compose to run the functional tests
+#1699 - @wclaeys - Consumer group support for manually committing offsets
+#1714 - @bai - Bump Go to version 1.14.3, golangci-lint to 1.27.0
+#1726 - @d1egoaz - Include zstd on the functional tests
+#1730 - @d1egoaz - KIP-42 Add producer and consumer interceptors
+#1738 - @varun06 - fixed variable names that are named same as some std lib package names
+#1741 - @varun06 - updated zstd dependency to latest v1.10.10
+#1743 - @varun06 - Fixed declaration dependencies and other lint issues in code base
+#1763 - @alrs - remove deprecated tls options from test
+#1769 - @bai - Add support for Kafka 2.6.0
+
+### Fixes
+
+#1697 - @kvch - Use gofork for encoding/asn1 to fix ASN errors during Kerberos authentication
+#1744 - @alrs - Fix isBalanced Function Signature
+
+## Version 1.26.4 (2020-05-19)
+
+### Fixes
+
+- #1701 - @d1egoaz - Set server name only for the current broker
+- #1694 - @dnwe - testfix: set KAFKA_HEAP_OPTS for zk and kafka
+
+## Version 1.26.3 (2020-05-07)
+
+### Fixes
+
+- #1692 - @d1egoaz - Set tls ServerName to fix issue: either ServerName or InsecureSkipVerify must be specified in the tls.Config (see the configuration sketch below)
+
+## Version 1.26.2 (2020-05-06)
+
+### ⚠️ Known Issues
+
+This release has been marked as not ready for production and may be unstable, please use v1.26.4.
+
+### Improvements
+
+- #1560 - @iyacontrol - add sync pool for gzip 1-9
+- #1605 - @dnwe - feat: protocol support for V11 fetch w/ rackID
+- #1617 - @sladkoff / @dwi-di / @random-dwi - Add support for alter/list partition reassignments APIs
+- #1632 - @bai - Add support for Go 1.14
+- #1640 - @random-dwi - Feature/fix list partition reassignments
+- #1646 - @mimaison - Add DescribeLogDirs to admin client
+- #1667 - @bai - Add support for kafka 2.5.0
+
+### Fixes
+
+- #1594 - @sladkoff - Sets ConfigEntry.Default flag in addition to the ConfigEntry.Source for Kafka versions > V1_1_0_0
+- #1601 - @alrs - fix: remove use of testing.T.FailNow() inside goroutine
+- #1602 - @d1egoaz - adds a note about consumer groups Consume method
+- #1607 - @darklore - Fix memory leak when Broker.Open and Broker.Close called repeatedly
+- #1613 - @wblakecaldwell - Updated "retrying" log message when BackoffFunc implemented
+- #1614 - @alrs - produce_response.go: Remove Unused Functions
+- #1619 - @alrs - tools/kafka-producer-performance: prune unused flag variables
+- #1639 - @agriffaut - Handle errors with no message but error code
+- #1643 - @kzinglzy - fix `config.net.keepalive`
+- #1644 - @KJTsanaktsidis - Fix brokers continually allocating new Session IDs
+- #1645 - @Stephan14 - Remove broker(s) which no longer exist in metadata
+- #1650 - @lavoiesl - Return the response error in heartbeatLoop
+- #1661 - @KJTsanaktsidis - Fix "broker received out of order sequence" when brokers die
+- #1666 - @KevinJCross - Bugfix: Allow TLS connections to work over socks proxy.
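+
+The v1.26.3 entry above (#1692) concerns TLS configuration on the client side. As a minimal sketch (the hostname and port are placeholder assumptions, not values from this changelog), enabling TLS with an explicit `ServerName` looks roughly like this; from v1.26.3/v1.26.4 onward Sarama also fills in `ServerName` per broker automatically:
+
+```go
+package main
+
+import (
+	"crypto/tls"
+	"log"
+
+	"github.com/IBM/sarama"
+)
+
+func main() {
+	cfg := sarama.NewConfig()
+	cfg.Net.TLS.Enable = true
+	// Go's crypto/tls requires either ServerName or InsecureSkipVerify;
+	// setting ServerName explicitly avoids the error quoted above.
+	cfg.Net.TLS.Config = &tls.Config{ServerName: "kafka.example.com"}
+
+	client, err := sarama.NewClient([]string{"kafka.example.com:9093"}, cfg)
+	if err != nil {
+		log.Fatalln(err)
+	}
+	defer client.Close()
+}
+```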
+ +## Version 1.26.1 (2020-02-04) + +Improvements: +- Add requests-in-flight metric ([1539](https://github.com/IBM/sarama/pull/1539)) +- Fix misleading example for cluster admin ([1595](https://github.com/IBM/sarama/pull/1595)) +- Replace Travis with GitHub Actions, linters housekeeping ([1573](https://github.com/IBM/sarama/pull/1573)) +- Allow BalanceStrategy to provide custom assignment data ([1592](https://github.com/IBM/sarama/pull/1592)) + +Bug Fixes: +- Adds back Consumer.Offsets.CommitInterval to fix API ([1590](https://github.com/IBM/sarama/pull/1590)) +- Fix error message s/CommitInterval/AutoCommit.Interval ([1589](https://github.com/IBM/sarama/pull/1589)) + +## Version 1.26.0 (2020-01-24) + +New Features: +- Enable zstd compression + ([1574](https://github.com/IBM/sarama/pull/1574), + [1582](https://github.com/IBM/sarama/pull/1582)) +- Support headers in tools kafka-console-producer + ([1549](https://github.com/IBM/sarama/pull/1549)) + +Improvements: +- Add SASL AuthIdentity to SASL frames (authzid) + ([1585](https://github.com/IBM/sarama/pull/1585)). + +Bug Fixes: +- Sending messages with ZStd compression enabled fails in multiple ways + ([1252](https://github.com/IBM/sarama/issues/1252)). +- Use the broker for any admin on BrokerConfig + ([1571](https://github.com/IBM/sarama/pull/1571)). +- Set DescribeConfigRequest Version field + ([1576](https://github.com/IBM/sarama/pull/1576)). +- ConsumerGroup flooding logs with client/metadata update req + ([1578](https://github.com/IBM/sarama/pull/1578)). +- MetadataRequest version in DescribeCluster + ([1580](https://github.com/IBM/sarama/pull/1580)). +- Fix deadlock in consumer group handleError + ([1581](https://github.com/IBM/sarama/pull/1581)) +- Fill in the Fetch{Request,Response} protocol + ([1582](https://github.com/IBM/sarama/pull/1582)). +- Retry topic request on ControllerNotAvailable + ([1586](https://github.com/IBM/sarama/pull/1586)). + +## Version 1.25.0 (2020-01-13) + +New Features: +- Support TLS protocol in kafka-producer-performance + ([1538](https://github.com/IBM/sarama/pull/1538)). +- Add support for kafka 2.4.0 + ([1552](https://github.com/IBM/sarama/pull/1552)). + +Improvements: +- Allow the Consumer to disable auto-commit offsets + ([1164](https://github.com/IBM/sarama/pull/1164)). +- Produce records with consistent timestamps + ([1455](https://github.com/IBM/sarama/pull/1455)). + +Bug Fixes: +- Fix incorrect SetTopicMetadata name mentions + ([1534](https://github.com/IBM/sarama/pull/1534)). +- Fix client.tryRefreshMetadata Println + ([1535](https://github.com/IBM/sarama/pull/1535)). +- Fix panic on calling updateMetadata on closed client + ([1531](https://github.com/IBM/sarama/pull/1531)). +- Fix possible faulty metrics in TestFuncProducing + ([1545](https://github.com/IBM/sarama/pull/1545)). + +## Version 1.24.1 (2019-10-31) + +New Features: +- Add DescribeLogDirs Request/Response pair + ([1520](https://github.com/IBM/sarama/pull/1520)). + +Bug Fixes: +- Fix ClusterAdmin returning invalid controller ID on DescribeCluster + ([1518](https://github.com/IBM/sarama/pull/1518)). +- Fix issue with consumergroup not rebalancing when new partition is added + ([1525](https://github.com/IBM/sarama/pull/1525)). +- Ensure consistent use of read/write deadlines + ([1529](https://github.com/IBM/sarama/pull/1529)). + +## Version 1.24.0 (2019-10-09) + +New Features: +- Add sticky partition assignor + ([1416](https://github.com/IBM/sarama/pull/1416)). 
+- Switch from cgo zstd package to pure Go implementation + ([1477](https://github.com/IBM/sarama/pull/1477)). + +Improvements: +- Allow creating ClusterAdmin from client + ([1415](https://github.com/IBM/sarama/pull/1415)). +- Set KafkaVersion in ListAcls method + ([1452](https://github.com/IBM/sarama/pull/1452)). +- Set request version in CreateACL ClusterAdmin method + ([1458](https://github.com/IBM/sarama/pull/1458)). +- Set request version in DeleteACL ClusterAdmin method + ([1461](https://github.com/IBM/sarama/pull/1461)). +- Handle missed error codes on TopicMetaDataRequest and GroupCoordinatorRequest + ([1464](https://github.com/IBM/sarama/pull/1464)). +- Remove direct usage of gofork + ([1465](https://github.com/IBM/sarama/pull/1465)). +- Add support for Go 1.13 + ([1478](https://github.com/IBM/sarama/pull/1478)). +- Improve behavior of NewMockListAclsResponse + ([1481](https://github.com/IBM/sarama/pull/1481)). + +Bug Fixes: +- Fix race condition in consumergroup example + ([1434](https://github.com/IBM/sarama/pull/1434)). +- Fix brokerProducer goroutine leak + ([1442](https://github.com/IBM/sarama/pull/1442)). +- Use released version of lz4 library + ([1469](https://github.com/IBM/sarama/pull/1469)). +- Set correct version in MockDeleteTopicsResponse + ([1484](https://github.com/IBM/sarama/pull/1484)). +- Fix CLI help message typo + ([1494](https://github.com/IBM/sarama/pull/1494)). + +Known Issues: +- Please **don't** use Zstd, as it doesn't work right now. + See https://github.com/IBM/sarama/issues/1252 + +## Version 1.23.1 (2019-07-22) + +Bug Fixes: +- Fix fetch delete bug record + ([1425](https://github.com/IBM/sarama/pull/1425)). +- Handle SASL/OAUTHBEARER token rejection + ([1428](https://github.com/IBM/sarama/pull/1428)). + +## Version 1.23.0 (2019-07-02) + +New Features: +- Add support for Kafka 2.3.0 + ([1418](https://github.com/IBM/sarama/pull/1418)). +- Add support for ListConsumerGroupOffsets v2 + ([1374](https://github.com/IBM/sarama/pull/1374)). +- Add support for DeleteConsumerGroup + ([1417](https://github.com/IBM/sarama/pull/1417)). +- Add support for SASLVersion configuration + ([1410](https://github.com/IBM/sarama/pull/1410)). +- Add kerberos support + ([1366](https://github.com/IBM/sarama/pull/1366)). + +Improvements: +- Improve sasl_scram_client example + ([1406](https://github.com/IBM/sarama/pull/1406)). +- Fix shutdown and race-condition in consumer-group example + ([1404](https://github.com/IBM/sarama/pull/1404)). +- Add support for error codes 77—81 + ([1397](https://github.com/IBM/sarama/pull/1397)). +- Pool internal objects allocated per message + ([1385](https://github.com/IBM/sarama/pull/1385)). +- Reduce packet decoder allocations + ([1373](https://github.com/IBM/sarama/pull/1373)). +- Support timeout when fetching metadata + ([1359](https://github.com/IBM/sarama/pull/1359)). + +Bug Fixes: +- Fix fetch size integer overflow + ([1376](https://github.com/IBM/sarama/pull/1376)). +- Handle and log throttled FetchResponses + ([1383](https://github.com/IBM/sarama/pull/1383)). +- Refactor misspelled word Resouce to Resource + ([1368](https://github.com/IBM/sarama/pull/1368)). + +## Version 1.22.1 (2019-04-29) + +Improvements: +- Use zstd 1.3.8 + ([1350](https://github.com/IBM/sarama/pull/1350)). +- Add support for SaslHandshakeRequest v1 + ([1354](https://github.com/IBM/sarama/pull/1354)). + +Bug Fixes: +- Fix V5 MetadataRequest nullable topics array + ([1353](https://github.com/IBM/sarama/pull/1353)). 
+- Use a different SCRAM client for each broker connection
+  ([1349](https://github.com/IBM/sarama/pull/1349)).
+- Fix AllowAutoTopicCreation for MetadataRequest greater than v3
+  ([1344](https://github.com/IBM/sarama/pull/1344)).
+
+## Version 1.22.0 (2019-04-09)
+
+New Features:
+- Add Offline Replicas Operation to Client
+  ([1318](https://github.com/IBM/sarama/pull/1318)).
+- Allow using proxy when connecting to broker
+  ([1326](https://github.com/IBM/sarama/pull/1326)).
+- Implement ReadCommitted
+  ([1307](https://github.com/IBM/sarama/pull/1307)).
+- Add support for Kafka 2.2.0
+  ([1331](https://github.com/IBM/sarama/pull/1331)).
+- Add SASL SCRAM-SHA-512 and SCRAM-SHA-256 mechanisms
+  ([1295](https://github.com/IBM/sarama/pull/1295)).
+
+Improvements:
+- Unregister all broker metrics on broker stop
+  ([1232](https://github.com/IBM/sarama/pull/1232)).
+- Add SCRAM authentication example
+  ([1303](https://github.com/IBM/sarama/pull/1303)).
+- Add consumergroup examples
+  ([1304](https://github.com/IBM/sarama/pull/1304)).
+- Expose consumer batch size metric
+  ([1296](https://github.com/IBM/sarama/pull/1296)).
+- Add TLS options to console producer and consumer
+  ([1300](https://github.com/IBM/sarama/pull/1300)).
+- Reduce client close bookkeeping
+  ([1297](https://github.com/IBM/sarama/pull/1297)).
+- Satisfy error interface in create responses
+  ([1154](https://github.com/IBM/sarama/pull/1154)).
+- Please lint gods
+  ([1346](https://github.com/IBM/sarama/pull/1346)).
+
+Bug Fixes:
+- Fix multi consumer group instance crash
+  ([1338](https://github.com/IBM/sarama/pull/1338)).
+- Update lz4 to latest version
+  ([1347](https://github.com/IBM/sarama/pull/1347)).
+- Retry ErrNotCoordinatorForConsumer in new consumergroup session
+  ([1231](https://github.com/IBM/sarama/pull/1231)).
+- Fix cleanup error handler
+  ([1332](https://github.com/IBM/sarama/pull/1332)).
+- Fix race condition in PartitionConsumer
+  ([1156](https://github.com/IBM/sarama/pull/1156)).
+
+## Version 1.21.0 (2019-02-24)
+
+New Features:
+- Add CreateAclRequest, DescribeAclRequest, DeleteAclRequest
+  ([1236](https://github.com/IBM/sarama/pull/1236)).
+- Add DescribeTopic, DescribeConsumerGroup, ListConsumerGroups, ListConsumerGroupOffsets admin requests
+  ([1178](https://github.com/IBM/sarama/pull/1178)).
+- Implement SASL/OAUTHBEARER
+  ([1240](https://github.com/IBM/sarama/pull/1240)).
+
+Improvements:
+- Add Go mod support
+  ([1282](https://github.com/IBM/sarama/pull/1282)).
+- Add error codes 73—76
+  ([1239](https://github.com/IBM/sarama/pull/1239)).
+- Add retry backoff function (see the sketch after this section)
+  ([1160](https://github.com/IBM/sarama/pull/1160)).
+- Maintain metadata in the producer even when retries are disabled
+  ([1189](https://github.com/IBM/sarama/pull/1189)).
+- Include ReplicaAssignment in ListTopics
+  ([1274](https://github.com/IBM/sarama/pull/1274)).
+- Add producer performance tool
+  ([1222](https://github.com/IBM/sarama/pull/1222)).
+- Add support for LogAppend timestamps
+  ([1258](https://github.com/IBM/sarama/pull/1258)).
+
+Bug Fixes:
+- Fix potential deadlock when a heartbeat request fails
+  ([1286](https://github.com/IBM/sarama/pull/1286)).
+- Fix consuming compacted topic
+  ([1227](https://github.com/IBM/sarama/pull/1227)).
+- Set correct Kafka version for DescribeConfigsRequest v1
+  ([1277](https://github.com/IBM/sarama/pull/1277)).
+- Update kafka test version
+  ([1273](https://github.com/IBM/sarama/pull/1273)).
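+
+As a rough illustration of the retry backoff function added in v1.21.0 (#1160, referenced above), the sketch below plugs an assumed exponential policy into the producer config; the specific durations are arbitrary examples, not library defaults:
+
+```go
+package main
+
+import (
+	"time"
+
+	"github.com/IBM/sarama"
+)
+
+func main() {
+	cfg := sarama.NewConfig()
+	cfg.Producer.Retry.Max = 5
+	// When set, BackoffFunc takes precedence over the fixed
+	// Producer.Retry.Backoff duration: 100ms, 200ms, 400ms, ...
+	cfg.Producer.Retry.BackoffFunc = func(retries, maxRetries int) time.Duration {
+		return time.Duration(100<<retries) * time.Millisecond
+	}
+	// Pass cfg to sarama.NewSyncProducer / sarama.NewAsyncProducer as usual.
+}
+```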
+
+## Version 1.20.1 (2019-01-10)
+
+New Features:
+- Add optional replica id in offset request
+  ([1100](https://github.com/IBM/sarama/pull/1100)).
+
+Improvements:
+- Implement DescribeConfigs Request + Response v1 & v2
+  ([1230](https://github.com/IBM/sarama/pull/1230)).
+- Reuse compression objects
+  ([1185](https://github.com/IBM/sarama/pull/1185)).
+- Switch from png to svg for GoDoc link in README
+  ([1243](https://github.com/IBM/sarama/pull/1243)).
+- Fix typo in deprecation notice for FetchResponseBlock.Records
+  ([1242](https://github.com/IBM/sarama/pull/1242)).
+- Fix typos in consumer metadata response file
+  ([1244](https://github.com/IBM/sarama/pull/1244)).
+
+Bug Fixes:
+- Revert to individual msg retries for non-idempotent
+  ([1203](https://github.com/IBM/sarama/pull/1203)).
+- Respect MaxMessageBytes limit for uncompressed messages
+  ([1141](https://github.com/IBM/sarama/pull/1141)).
+
+## Version 1.20.0 (2018-12-10)
+
+New Features:
+ - Add support for zstd compression
+   ([#1170](https://github.com/IBM/sarama/pull/1170)).
+ - Add support for Idempotent Producer
+   ([#1152](https://github.com/IBM/sarama/pull/1152)).
+ - Add support for Kafka 2.1.0
+   ([#1229](https://github.com/IBM/sarama/pull/1229)).
+ - Add support for OffsetCommit request/response pairs versions v1 to v5
+   ([#1201](https://github.com/IBM/sarama/pull/1201)).
+ - Add support for OffsetFetch request/response pair up to version v5
+   ([#1198](https://github.com/IBM/sarama/pull/1198)).
+
+Improvements:
+ - Export broker's Rack setting
+   ([#1173](https://github.com/IBM/sarama/pull/1173)).
+ - Always use latest patch version of Go on CI
+   ([#1202](https://github.com/IBM/sarama/pull/1202)).
+ - Add error codes 61 to 72
+   ([#1195](https://github.com/IBM/sarama/pull/1195)).
+
+Bug Fixes:
+ - Fix build without cgo
+   ([#1182](https://github.com/IBM/sarama/pull/1182)).
+ - Fix go vet suggestion in consumer group file
+   ([#1209](https://github.com/IBM/sarama/pull/1209)).
+ - Fix typos in code and comments
+   ([#1228](https://github.com/IBM/sarama/pull/1228)).
+
+## Version 1.19.0 (2018-09-27)
+
+New Features:
+ - Implement a higher-level consumer group
+   ([#1099](https://github.com/IBM/sarama/pull/1099)).
+
+Improvements:
+ - Add support for Go 1.11
+   ([#1176](https://github.com/IBM/sarama/pull/1176)).
+
+Bug Fixes:
+ - Fix encoding of `MetadataResponse` with version 2 and higher
+   ([#1174](https://github.com/IBM/sarama/pull/1174)).
+ - Fix race condition in mock async producer
+   ([#1174](https://github.com/IBM/sarama/pull/1174)).
+
+## Version 1.18.0 (2018-09-07)
+
+New Features:
+ - Make `Partitioner.RequiresConsistency` vary per-message
+   ([#1112](https://github.com/IBM/sarama/pull/1112)).
+ - Add customizable partitioner
+   ([#1118](https://github.com/IBM/sarama/pull/1118)).
+ - Add `ClusterAdmin` support for `CreateTopic`, `DeleteTopic`, `CreatePartitions`,
+   `DeleteRecords`, `DescribeConfig`, `AlterConfig`, `CreateACL`, `ListAcls`, `DeleteACL`
+   ([#1055](https://github.com/IBM/sarama/pull/1055)).
+
+Improvements:
+ - Add support for Kafka 2.0.0
+   ([#1149](https://github.com/IBM/sarama/pull/1149)).
+ - Allow setting `LocalAddr` when dialing an address to support multi-homed hosts
+   ([#1123](https://github.com/IBM/sarama/pull/1123)).
+ - Simpler offset management
+   ([#1127](https://github.com/IBM/sarama/pull/1127)).
+
+Bug Fixes:
+ - Fix mutation of `ProducerMessage.MetaData` when producing to Kafka
+   ([#1110](https://github.com/IBM/sarama/pull/1110)).
+ - Fix consumer block when response did not contain all the
+   expected topic/partition blocks
+   ([#1086](https://github.com/IBM/sarama/pull/1086)).
+ - Fix consumer block when response contains only control messages
+   ([#1115](https://github.com/IBM/sarama/pull/1115)).
+ - Add timeout config for ClusterAdmin requests
+   ([#1142](https://github.com/IBM/sarama/pull/1142)).
+ - Add version check when producing message with headers
+   ([#1117](https://github.com/IBM/sarama/pull/1117)).
+ - Fix `MetadataRequest` for empty list of topics
+   ([#1132](https://github.com/IBM/sarama/pull/1132)).
+ - Fix producer topic metadata on-demand fetch when topic error happens in metadata response
+   ([#1125](https://github.com/IBM/sarama/pull/1125)).
+
+## Version 1.17.0 (2018-05-30)
+
+New Features:
+ - Add support for gzip compression levels
+   ([#1044](https://github.com/IBM/sarama/pull/1044)).
+ - Add support for Metadata request/response pairs versions v1 to v5
+   ([#1047](https://github.com/IBM/sarama/pull/1047),
+   [#1069](https://github.com/IBM/sarama/pull/1069)).
+ - Add versioning to JoinGroup request/response pairs
+   ([#1098](https://github.com/IBM/sarama/pull/1098))
+ - Add support for CreatePartitions, DeleteGroups, DeleteRecords request/response pairs
+   ([#1065](https://github.com/IBM/sarama/pull/1065),
+   [#1096](https://github.com/IBM/sarama/pull/1096),
+   [#1027](https://github.com/IBM/sarama/pull/1027)).
+ - Add `Controller()` method to Client interface
+   ([#1063](https://github.com/IBM/sarama/pull/1063)).
+
+Improvements:
+ - ConsumerMetadataReq/Resp has been migrated to FindCoordinatorReq/Resp
+   ([#1010](https://github.com/IBM/sarama/pull/1010)).
+ - Expose missing protocol parts: `msgSet` and `recordBatch`
+   ([#1049](https://github.com/IBM/sarama/pull/1049)).
+ - Add support for v1 DeleteTopics Request
+   ([#1052](https://github.com/IBM/sarama/pull/1052)).
+ - Add support for Go 1.10
+   ([#1064](https://github.com/IBM/sarama/pull/1064)).
+ - Claim support for Kafka 1.1.0
+   ([#1073](https://github.com/IBM/sarama/pull/1073)).
+
+Bug Fixes:
+ - Fix FindCoordinatorResponse.encode to allow nil Coordinator
+   ([#1050](https://github.com/IBM/sarama/pull/1050),
+   [#1051](https://github.com/IBM/sarama/pull/1051)).
+ - Clear all metadata when we have the latest topic info
+   ([#1033](https://github.com/IBM/sarama/pull/1033)).
+ - Make `PartitionConsumer.Close` idempotent
+   ([#1092](https://github.com/IBM/sarama/pull/1092)).
+
+## Version 1.16.0 (2018-02-12)
+
+New Features:
+ - Add support for the Create/Delete Topics request/response pairs
+   ([#1007](https://github.com/IBM/sarama/pull/1007),
+   [#1008](https://github.com/IBM/sarama/pull/1008)).
+ - Add support for the Describe/Create/Delete ACL request/response pairs
+   ([#1009](https://github.com/IBM/sarama/pull/1009)).
+ - Add support for the five transaction-related request/response pairs
+   ([#1016](https://github.com/IBM/sarama/pull/1016)).
+
+Improvements:
+ - Permit setting version on mock producer responses
+   ([#999](https://github.com/IBM/sarama/pull/999)).
+ - Add `NewMockBrokerListener` helper for testing TLS connections
+   ([#1019](https://github.com/IBM/sarama/pull/1019)).
+ - Changed the default value for `Consumer.Fetch.Default` from 32KiB to 1MiB
+   which results in much higher throughput in most cases
+   ([#1024](https://github.com/IBM/sarama/pull/1024)).
+ - Reuse the `time.Ticker` across fetch requests in the PartitionConsumer to
+   reduce CPU and memory usage when processing many partitions
+   ([#1028](https://github.com/IBM/sarama/pull/1028)).
+ - Assign relative offsets to messages in the producer to save the brokers a + recompression pass + ([#1002](https://github.com/IBM/sarama/pull/1002), + [#1015](https://github.com/IBM/sarama/pull/1015)). + +Bug Fixes: + - Fix producing uncompressed batches with the new protocol format + ([#1032](https://github.com/IBM/sarama/issues/1032)). + - Fix consuming compacted topics with the new protocol format + ([#1005](https://github.com/IBM/sarama/issues/1005)). + - Fix consuming topics with a mix of protocol formats + ([#1021](https://github.com/IBM/sarama/issues/1021)). + - Fix consuming when the broker includes multiple batches in a single response + ([#1022](https://github.com/IBM/sarama/issues/1022)). + - Fix detection of `PartialTrailingMessage` when the partial message was + truncated before the magic value indicating its version + ([#1030](https://github.com/IBM/sarama/pull/1030)). + - Fix expectation-checking in the mock of `SyncProducer.SendMessages` + ([#1035](https://github.com/IBM/sarama/pull/1035)). + +## Version 1.15.0 (2017-12-08) + +New Features: + - Claim official support for Kafka 1.0, though it did already work + ([#984](https://github.com/IBM/sarama/pull/984)). + - Helper methods for Kafka version numbers to/from strings + ([#989](https://github.com/IBM/sarama/pull/989)). + - Implement CreatePartitions request/response + ([#985](https://github.com/IBM/sarama/pull/985)). + +Improvements: + - Add error codes 45-60 + ([#986](https://github.com/IBM/sarama/issues/986)). + +Bug Fixes: + - Fix slow consuming for certain Kafka 0.11/1.0 configurations + ([#982](https://github.com/IBM/sarama/pull/982)). + - Correctly determine when a FetchResponse contains the new message format + ([#990](https://github.com/IBM/sarama/pull/990)). + - Fix producing with multiple headers + ([#996](https://github.com/IBM/sarama/pull/996)). + - Fix handling of truncated record batches + ([#998](https://github.com/IBM/sarama/pull/998)). + - Fix leaking metrics when closing brokers + ([#991](https://github.com/IBM/sarama/pull/991)). + +## Version 1.14.0 (2017-11-13) + +New Features: + - Add support for the new Kafka 0.11 record-batch format, including the wire + protocol and the necessary behavioural changes in the producer and consumer. + Transactions and idempotency are not yet supported, but producing and + consuming should work with all the existing bells and whistles (batching, + compression, etc) as well as the new custom headers. Thanks to Vlad Hanciuta + of Arista Networks for this work. Part of + ([#901](https://github.com/IBM/sarama/issues/901)). + +Bug Fixes: + - Fix encoding of ProduceResponse versions in test + ([#970](https://github.com/IBM/sarama/pull/970)). + - Return partial replicas list when we have it + ([#975](https://github.com/IBM/sarama/pull/975)). + +## Version 1.13.0 (2017-10-04) + +New Features: + - Support for FetchRequest version 3 + ([#905](https://github.com/IBM/sarama/pull/905)). + - Permit setting version on mock FetchResponses + ([#939](https://github.com/IBM/sarama/pull/939)). + - Add a configuration option to support storing only minimal metadata for + extremely large clusters + ([#937](https://github.com/IBM/sarama/pull/937)). + - Add `PartitionOffsetManager.ResetOffset` for backtracking tracked offsets + ([#932](https://github.com/IBM/sarama/pull/932)). + +Improvements: + - Provide the block-level timestamp when consuming compressed messages + ([#885](https://github.com/IBM/sarama/issues/885)). 
+ - `Client.Replicas` and `Client.InSyncReplicas` now respect the order returned + by the broker, which can be meaningful + ([#930](https://github.com/IBM/sarama/pull/930)). + - Use a `Ticker` to reduce consumer timer overhead at the cost of higher + variance in the actual timeout + ([#933](https://github.com/IBM/sarama/pull/933)). + +Bug Fixes: + - Gracefully handle messages with negative timestamps + ([#907](https://github.com/IBM/sarama/pull/907)). + - Raise a proper error when encountering an unknown message version + ([#940](https://github.com/IBM/sarama/pull/940)). + +## Version 1.12.0 (2017-05-08) + +New Features: + - Added support for the `ApiVersions` request and response pair, and Kafka + version 0.10.2 ([#867](https://github.com/IBM/sarama/pull/867)). Note + that you still need to specify the Kafka version in the Sarama configuration + for the time being. + - Added a `Brokers` method to the Client which returns the complete set of + active brokers ([#813](https://github.com/IBM/sarama/pull/813)). + - Added an `InSyncReplicas` method to the Client which returns the set of all + in-sync broker IDs for the given partition, now that the Kafka versions for + which this was misleading are no longer in our supported set + ([#872](https://github.com/IBM/sarama/pull/872)). + - Added a `NewCustomHashPartitioner` method which allows constructing a hash + partitioner with a custom hash method in case the default (FNV-1a) is not + suitable + ([#837](https://github.com/IBM/sarama/pull/837), + [#841](https://github.com/IBM/sarama/pull/841)). + +Improvements: + - Recognize more Kafka error codes + ([#859](https://github.com/IBM/sarama/pull/859)). + +Bug Fixes: + - Fix an issue where decoding a malformed FetchRequest would not return the + correct error ([#818](https://github.com/IBM/sarama/pull/818)). + - Respect ordering of group protocols in JoinGroupRequests. This fix is + transparent if you're using the `AddGroupProtocol` or + `AddGroupProtocolMetadata` helpers; otherwise you will need to switch from + the `GroupProtocols` field (now deprecated) to use `OrderedGroupProtocols` + ([#812](https://github.com/IBM/sarama/issues/812)). + - Fix an alignment-related issue with atomics on 32-bit architectures + ([#859](https://github.com/IBM/sarama/pull/859)). + +## Version 1.11.0 (2016-12-20) + +_Important:_ As of Sarama 1.11 it is necessary to set the config value of +`Producer.Return.Successes` to true in order to use the SyncProducer. Previous +versions would silently override this value when instantiating a SyncProducer +which led to unexpected values and data races. + +New Features: + - Metrics! Thanks to Sébastien Launay for all his work on this feature + ([#701](https://github.com/IBM/sarama/pull/701), + [#746](https://github.com/IBM/sarama/pull/746), + [#766](https://github.com/IBM/sarama/pull/766)). + - Add support for LZ4 compression + ([#786](https://github.com/IBM/sarama/pull/786)). + - Add support for ListOffsetRequest v1 and Kafka 0.10.1 + ([#775](https://github.com/IBM/sarama/pull/775)). + - Added a `HighWaterMarks` method to the Consumer which aggregates the + `HighWaterMarkOffset` values of its child topic/partitions + ([#769](https://github.com/IBM/sarama/pull/769)). + +Bug Fixes: + - Fixed producing when using timestamps, compression and Kafka 0.10 + ([#759](https://github.com/IBM/sarama/pull/759)). + - Added missing decoder methods to DescribeGroups response + ([#756](https://github.com/IBM/sarama/pull/756)). 
+ - Fix producer shutdown when `Return.Errors` is disabled + ([#787](https://github.com/IBM/sarama/pull/787)). + - Don't mutate configuration in SyncProducer + ([#790](https://github.com/IBM/sarama/pull/790)). + - Fix crash on SASL initialization failure + ([#795](https://github.com/IBM/sarama/pull/795)). + +## Version 1.10.1 (2016-08-30) + +Bug Fixes: + - Fix the documentation for `HashPartitioner` which was incorrect + ([#717](https://github.com/IBM/sarama/pull/717)). + - Permit client creation even when it is limited by ACLs + ([#722](https://github.com/IBM/sarama/pull/722)). + - Several fixes to the consumer timer optimization code, regressions introduced + in v1.10.0. Go's timers are finicky + ([#730](https://github.com/IBM/sarama/pull/730), + [#733](https://github.com/IBM/sarama/pull/733), + [#734](https://github.com/IBM/sarama/pull/734)). + - Handle consuming compressed relative offsets with Kafka 0.10 + ([#735](https://github.com/IBM/sarama/pull/735)). + +## Version 1.10.0 (2016-08-02) + +_Important:_ As of Sarama 1.10 it is necessary to tell Sarama the version of +Kafka you are running against (via the `config.Version` value) in order to use +features that may not be compatible with old Kafka versions. If you don't +specify this value it will default to 0.8.2 (the minimum supported), and trying +to use more recent features (like the offset manager) will fail with an error. + +_Also:_ The offset-manager's behaviour has been changed to match the upstream +java consumer (see [#705](https://github.com/IBM/sarama/pull/705) and +[#713](https://github.com/IBM/sarama/pull/713)). If you use the +offset-manager, please ensure that you are committing one *greater* than the +last consumed message offset or else you may end up consuming duplicate +messages. + +New Features: + - Support for Kafka 0.10 + ([#672](https://github.com/IBM/sarama/pull/672), + [#678](https://github.com/IBM/sarama/pull/678), + [#681](https://github.com/IBM/sarama/pull/681), and others). + - Support for configuring the target Kafka version + ([#676](https://github.com/IBM/sarama/pull/676)). + - Batch producing support in the SyncProducer + ([#677](https://github.com/IBM/sarama/pull/677)). + - Extend producer mock to allow setting expectations on message contents + ([#667](https://github.com/IBM/sarama/pull/667)). + +Improvements: + - Support `nil` compressed messages for deleting in compacted topics + ([#634](https://github.com/IBM/sarama/pull/634)). + - Pre-allocate decoding errors, greatly reducing heap usage and GC time against + misbehaving brokers ([#690](https://github.com/IBM/sarama/pull/690)). + - Re-use consumer expiry timers, removing one allocation per consumed message + ([#707](https://github.com/IBM/sarama/pull/707)). + +Bug Fixes: + - Actually default the client ID to "sarama" like we say we do + ([#664](https://github.com/IBM/sarama/pull/664)). + - Fix a rare issue where `Client.Leader` could return the wrong error + ([#685](https://github.com/IBM/sarama/pull/685)). + - Fix a possible tight loop in the consumer + ([#693](https://github.com/IBM/sarama/pull/693)). + - Match upstream's offset-tracking behaviour + ([#705](https://github.com/IBM/sarama/pull/705)). + - Report UnknownTopicOrPartition errors from the offset manager + ([#706](https://github.com/IBM/sarama/pull/706)). + - Fix possible negative partition value from the HashPartitioner + ([#709](https://github.com/IBM/sarama/pull/709)). 
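+
+The migration notes above (`config.Version` from v1.10, `Producer.Return.Successes` from v1.11, and committing one greater than the last consumed offset) translate to roughly the following sketch; the broker address, group, topic, and offset are placeholder assumptions:
+
+```go
+package main
+
+import (
+	"log"
+
+	"github.com/IBM/sarama"
+)
+
+func main() {
+	cfg := sarama.NewConfig()
+	cfg.Version = sarama.V0_10_0_0       // declare the broker version in use (v1.10+)
+	cfg.Producer.Return.Successes = true // required when using the SyncProducer (v1.11+)
+
+	client, err := sarama.NewClient([]string{"localhost:9092"}, cfg)
+	if err != nil {
+		log.Fatalln(err)
+	}
+	defer client.Close()
+
+	om, err := sarama.NewOffsetManagerFromClient("example-group", client)
+	if err != nil {
+		log.Fatalln(err)
+	}
+	pom, err := om.ManagePartition("example-topic", 0)
+	if err != nil {
+		log.Fatalln(err)
+	}
+	defer pom.Close()
+
+	var lastConsumed int64 = 41 // offset of the last message actually processed
+	// Mark one *greater* than the last consumed offset, per the note above.
+	pom.MarkOffset(lastConsumed+1, "")
+}
+```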
+
+## Version 1.9.0 (2016-05-16)
+
+New Features:
+ - Add support for custom offset manager retention durations
+   ([#602](https://github.com/IBM/sarama/pull/602)).
+ - Publish low-level mocks to enable testing of third-party producer/consumer
+   implementations ([#570](https://github.com/IBM/sarama/pull/570)).
+ - Declare support for Golang 1.6
+   ([#611](https://github.com/IBM/sarama/pull/611)).
+ - Support for SASL plain-text auth
+   ([#648](https://github.com/IBM/sarama/pull/648)).
+
+Improvements:
+ - Simplified broker locking scheme slightly
+   ([#604](https://github.com/IBM/sarama/pull/604)).
+ - Documentation cleanup
+   ([#605](https://github.com/IBM/sarama/pull/605),
+   [#621](https://github.com/IBM/sarama/pull/621),
+   [#654](https://github.com/IBM/sarama/pull/654)).
+
+Bug Fixes:
+ - Fix race condition shutting down the OffsetManager
+   ([#658](https://github.com/IBM/sarama/pull/658)).
+
+## Version 1.8.0 (2016-02-01)
+
+New Features:
+ - Full support for Kafka 0.9:
+   - All protocol messages and fields
+     ([#586](https://github.com/IBM/sarama/pull/586),
+     [#588](https://github.com/IBM/sarama/pull/588),
+     [#590](https://github.com/IBM/sarama/pull/590)).
+   - Verified that TLS support works
+     ([#581](https://github.com/IBM/sarama/pull/581)).
+   - Fixed the OffsetManager compatibility
+     ([#585](https://github.com/IBM/sarama/pull/585)).
+
+Improvements:
+ - Optimize for fewer system calls when reading from the network
+   ([#584](https://github.com/IBM/sarama/pull/584)).
+ - Automatically retry `InvalidMessage` errors to match upstream behaviour
+   ([#589](https://github.com/IBM/sarama/pull/589)).
+
+## Version 1.7.0 (2015-12-11)
+
+New Features:
+ - Preliminary support for Kafka 0.9
+   ([#572](https://github.com/IBM/sarama/pull/572)). This comes with several
+   caveats:
+   - Protocol-layer support is mostly in place
+     ([#577](https://github.com/IBM/sarama/pull/577)), however Kafka 0.9
+     renamed some messages and fields, which we did not rename in order to
+     preserve API compatibility.
+   - The producer and consumer work against 0.9, but the offset manager does
+     not ([#573](https://github.com/IBM/sarama/pull/573)).
+   - TLS support may or may not work
+     ([#581](https://github.com/IBM/sarama/pull/581)).
+
+Improvements:
+ - Don't wait for request timeouts on dead brokers, greatly speeding recovery
+   when the TCP connection is left hanging
+   ([#548](https://github.com/IBM/sarama/pull/548)).
+ - Refactored part of the producer. The new version provides a much more elegant
+   solution to [#449](https://github.com/IBM/sarama/pull/449). It is also
+   slightly more efficient, and much more precise in calculating batch sizes
+   when compression is used
+   ([#549](https://github.com/IBM/sarama/pull/549),
+   [#550](https://github.com/IBM/sarama/pull/550),
+   [#551](https://github.com/IBM/sarama/pull/551)).
+
+Bug Fixes:
+ - Fix race condition in consumer test mock
+   ([#553](https://github.com/IBM/sarama/pull/553)).
+
+## Version 1.6.1 (2015-09-25)
+
+Bug Fixes:
+ - Fix panic that could occur if a user-supplied message value failed to encode
+   ([#449](https://github.com/IBM/sarama/pull/449)).
+
+## Version 1.6.0 (2015-09-04)
+
+New Features:
+ - Implementation of a consumer offset manager using the APIs introduced in
+   Kafka 0.8.2. The API is designed mainly for integration into a future
+   high-level consumer, not for direct use, although it is *possible* to use it
+   directly.
+   ([#461](https://github.com/IBM/sarama/pull/461)).
+ +Improvements: + - CRC32 calculation is much faster on machines with SSE4.2 instructions, + removing a major hotspot from most profiles + ([#255](https://github.com/IBM/sarama/pull/255)). + +Bug Fixes: + - Make protocol decoding more robust against some malformed packets generated + by go-fuzz ([#523](https://github.com/IBM/sarama/pull/523), + [#525](https://github.com/IBM/sarama/pull/525)) or found in other ways + ([#528](https://github.com/IBM/sarama/pull/528)). + - Fix a potential race condition panic in the consumer on shutdown + ([#529](https://github.com/IBM/sarama/pull/529)). + +## Version 1.5.0 (2015-08-17) + +New Features: + - TLS-encrypted network connections are now supported. This feature is subject + to change when Kafka releases built-in TLS support, but for now this is + enough to work with TLS-terminating proxies + ([#154](https://github.com/IBM/sarama/pull/154)). + +Improvements: + - The consumer will not block if a single partition is not drained by the user; + all other partitions will continue to consume normally + ([#485](https://github.com/IBM/sarama/pull/485)). + - Formatting of error strings has been much improved + ([#495](https://github.com/IBM/sarama/pull/495)). + - Internal refactoring of the producer for code cleanliness and to enable + future work ([#300](https://github.com/IBM/sarama/pull/300)). + +Bug Fixes: + - Fix a potential deadlock in the consumer on shutdown + ([#475](https://github.com/IBM/sarama/pull/475)). + +## Version 1.4.3 (2015-07-21) + +Bug Fixes: + - Don't include the partitioner in the producer's "fetch partitions" + circuit-breaker ([#466](https://github.com/IBM/sarama/pull/466)). + - Don't retry messages until the broker is closed when abandoning a broker in + the producer ([#468](https://github.com/IBM/sarama/pull/468)). + - Update the import path for snappy-go, it has moved again and the API has + changed slightly ([#486](https://github.com/IBM/sarama/pull/486)). + +## Version 1.4.2 (2015-05-27) + +Bug Fixes: + - Update the import path for snappy-go, it has moved from google code to github + ([#456](https://github.com/IBM/sarama/pull/456)). + +## Version 1.4.1 (2015-05-25) + +Improvements: + - Optimizations when decoding snappy messages, thanks to John Potocny + ([#446](https://github.com/IBM/sarama/pull/446)). + +Bug Fixes: + - Fix hypothetical race conditions on producer shutdown + ([#450](https://github.com/IBM/sarama/pull/450), + [#451](https://github.com/IBM/sarama/pull/451)). + +## Version 1.4.0 (2015-05-01) + +New Features: + - The consumer now implements `Topics()` and `Partitions()` methods to enable + users to dynamically choose what topics/partitions to consume without + instantiating a full client + ([#431](https://github.com/IBM/sarama/pull/431)). + - The partition-consumer now exposes the high water mark offset value returned + by the broker via the `HighWaterMarkOffset()` method ([#339](https://github.com/IBM/sarama/pull/339)). + - Added a `kafka-console-consumer` tool capable of handling multiple + partitions, and deprecated the now-obsolete `kafka-console-partitionConsumer` + ([#439](https://github.com/IBM/sarama/pull/439), + [#442](https://github.com/IBM/sarama/pull/442)). + +Improvements: + - The producer's logging during retry scenarios is more consistent, more + useful, and slightly less verbose + ([#429](https://github.com/IBM/sarama/pull/429)). 
+ - The client now shuffles its initial list of seed brokers in order to prevent + thundering herd on the first broker in the list + ([#441](https://github.com/IBM/sarama/pull/441)). + +Bug Fixes: + - The producer now correctly manages its state if retries occur when it is + shutting down, fixing several instances of confusing behaviour and at least + one potential deadlock ([#419](https://github.com/IBM/sarama/pull/419)). + - The consumer now handles messages for different partitions asynchronously, + making it much more resilient to specific user code ordering + ([#325](https://github.com/IBM/sarama/pull/325)). + +## Version 1.3.0 (2015-04-16) + +New Features: + - The client now tracks consumer group coordinators using + ConsumerMetadataRequests similar to how it tracks partition leadership using + regular MetadataRequests ([#411](https://github.com/IBM/sarama/pull/411)). + This adds two methods to the client API: + - `Coordinator(consumerGroup string) (*Broker, error)` + - `RefreshCoordinator(consumerGroup string) error` + +Improvements: + - ConsumerMetadataResponses now automatically create a Broker object out of the + ID/address/port combination for the Coordinator; accessing the fields + individually has been deprecated + ([#413](https://github.com/IBM/sarama/pull/413)). + - Much improved handling of `OffsetOutOfRange` errors in the consumer. + Consumers will fail to start if the provided offset is out of range + ([#418](https://github.com/IBM/sarama/pull/418)) + and they will automatically shut down if the offset falls out of range + ([#424](https://github.com/IBM/sarama/pull/424)). + - Small performance improvement in encoding and decoding protocol messages + ([#427](https://github.com/IBM/sarama/pull/427)). + +Bug Fixes: + - Fix a rare race condition in the client's background metadata refresher if + it happens to be activated while the client is being closed + ([#422](https://github.com/IBM/sarama/pull/422)). + +## Version 1.2.0 (2015-04-07) + +Improvements: + - The producer's behaviour when `Flush.Frequency` is set is now more intuitive + ([#389](https://github.com/IBM/sarama/pull/389)). + - The producer is now somewhat more memory-efficient during and after retrying + messages due to an improved queue implementation + ([#396](https://github.com/IBM/sarama/pull/396)). + - The consumer produces much more useful logging output when leadership + changes ([#385](https://github.com/IBM/sarama/pull/385)). + - The client's `GetOffset` method will now automatically refresh metadata and + retry once in the event of stale information or similar + ([#394](https://github.com/IBM/sarama/pull/394)). + - Broker connections now have support for using TCP keepalives + ([#407](https://github.com/IBM/sarama/issues/407)). + +Bug Fixes: + - The OffsetCommitRequest message now correctly implements all three possible + API versions ([#390](https://github.com/IBM/sarama/pull/390), + [#400](https://github.com/IBM/sarama/pull/400)). + +## Version 1.1.0 (2015-03-20) + +Improvements: + - Wrap the producer's partitioner call in a circuit-breaker so that repeatedly + broken topics don't choke throughput + ([#373](https://github.com/IBM/sarama/pull/373)). + +Bug Fixes: + - Fix the producer's internal reference counting in certain unusual scenarios + ([#367](https://github.com/IBM/sarama/pull/367)). + - Fix the consumer's internal reference counting in certain unusual scenarios + ([#369](https://github.com/IBM/sarama/pull/369)). 
+ - Fix a condition where the producer's internal control messages could have
+   gotten stuck ([#368](https://github.com/IBM/sarama/pull/368)).
+ - Fix an issue where invalid partition lists would be cached when asking for
+   metadata for a non-existent topic ([#372](https://github.com/IBM/sarama/pull/372)).
+
+
+## Version 1.0.0 (2015-03-17)
+
+Version 1.0.0 is the first tagged version, and is almost a complete rewrite. The primary differences with previous untagged versions are:
+
+- The producer has been rewritten; there is now a `SyncProducer` with a blocking API, and an `AsyncProducer` that is non-blocking.
+- The consumer has been rewritten to only open one connection per broker instead of one connection per partition.
+- The main types of Sarama are now interfaces to make dependency injection easy; mock implementations for `Consumer`, `SyncProducer` and `AsyncProducer` are provided in the `github.com/IBM/sarama/mocks` package.
+- For most use cases, it is no longer necessary to open a `Client`; this will be done for you.
+- All the configuration values have been unified in the `Config` struct.
+- Much improved test suite.
diff --git a/vendor/github.com/IBM/sarama/CONTRIBUTING.md b/vendor/github.com/IBM/sarama/CONTRIBUTING.md
new file mode 100644
index 000000000..173b2a384
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/CONTRIBUTING.md
@@ -0,0 +1,46 @@
+## Contributing
+
+[fork]: https://github.com/IBM/sarama/fork
+[pr]: https://github.com/IBM/sarama/compare
+[released]: https://help.github.com/articles/github-terms-of-service/#6-contributions-under-repository-license
+
+Hi there! We are thrilled that you would like to contribute to Sarama.
+Your help is essential for keeping it great.
+
+Contributions to this project are [released][released] to the public under the project's [open source license](LICENSE.md).
+By contributing to this project you agree to the [Developer Certificate of Origin](https://developercertificate.org/) (DCO).
+The DCO was created by the Linux Kernel community and is a simple statement that you, as a contributor, wrote or otherwise have the legal right to contribute those changes.
+
+Contributors must _sign off_ that they adhere to these requirements by adding a `Signed-off-by` line to all commit messages with an email address that matches the commit author:
+
+```
+feat: this is my commit message
+
+Signed-off-by: Random J Developer
+```
+
+Git even has a `-s` command line option to append this automatically to your commit message:
+
+```
+$ git commit -s -m 'This is my commit message'
+```
+
+## Submitting a pull request
+
+0. [Fork][fork] and clone the repository
+1. Create a new branch: `git checkout -b my-branch-name`
+2. Make your change, push to your fork and [submit a pull request][pr]
+3. Wait for your pull request to be reviewed and merged.
+
+Here are a few things you can do that will increase the likelihood of your pull request being accepted:
+
+- Keep your change as focused as possible. If there are multiple changes you would like to make that are not dependent upon each other, consider submitting them as separate pull requests.
+- Write a [good commit message](http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html).
+ +## Further Reading + +- [Developer Certificate of Origin versus Contributor License Agreements](https://julien.ponge.org/blog/developer-certificate-of-origin-versus-contributor-license-agreements/) +- [The most powerful contributor agreement](https://lwn.net/Articles/592503/) +- [How to Contribute to Open Source](https://opensource.guide/how-to-contribute/) +- [Using Pull Requests](https://help.github.com/articles/about-pull-requests/) +- [GitHub Help](https://help.github.com) diff --git a/vendor/github.com/Shopify/sarama/Dockerfile.kafka b/vendor/github.com/IBM/sarama/Dockerfile.kafka similarity index 82% rename from vendor/github.com/Shopify/sarama/Dockerfile.kafka rename to vendor/github.com/IBM/sarama/Dockerfile.kafka index 48a9c178a..90fdb1669 100644 --- a/vendor/github.com/Shopify/sarama/Dockerfile.kafka +++ b/vendor/github.com/IBM/sarama/Dockerfile.kafka @@ -3,7 +3,8 @@ FROM registry.access.redhat.com/ubi8/ubi-minimal:latest USER root RUN microdnf update \ - && microdnf install curl gzip java-11-openjdk-headless tar \ + && microdnf install curl gzip java-11-openjdk-headless tar tzdata-java \ + && microdnf reinstall tzdata \ && microdnf clean all ENV JAVA_HOME=/usr/lib/jvm/jre-11 @@ -20,7 +21,7 @@ ARG KAFKA_MIRROR="https://s3-us-west-2.amazonaws.com/kafka-packages" RUN mkdir -p "/opt/kafka-2.8.2" && chmod a+rw /opt/kafka-2.8.2 && curl -s "$KAFKA_MIRROR/kafka_2.12-2.8.2.tgz" | tar xz --strip-components=1 -C "/opt/kafka-2.8.2" RUN mkdir -p "/opt/kafka-3.1.2" && chmod a+rw /opt/kafka-3.1.2 && curl -s "$KAFKA_MIRROR/kafka_2.12-3.1.2.tgz" | tar xz --strip-components=1 -C "/opt/kafka-3.1.2" RUN mkdir -p "/opt/kafka-3.2.3" && chmod a+rw /opt/kafka-3.2.3 && curl -s "$KAFKA_MIRROR/kafka_2.12-3.2.3.tgz" | tar xz --strip-components=1 -C "/opt/kafka-3.2.3" -RUN mkdir -p "/opt/kafka-3.3.1" && chmod a+rw /opt/kafka-3.3.1 && curl -s "$KAFKA_MIRROR/kafka_2.12-3.3.1.tgz" | tar xz --strip-components=1 -C "/opt/kafka-3.3.1" +RUN mkdir -p "/opt/kafka-3.3.2" && chmod a+rw /opt/kafka-3.3.2 && curl -s "$KAFKA_MIRROR/kafka_2.12-3.3.2.tgz" | tar xz --strip-components=1 -C "/opt/kafka-3.3.2" COPY entrypoint.sh / diff --git a/vendor/github.com/Shopify/sarama/LICENSE b/vendor/github.com/IBM/sarama/LICENSE.md similarity index 95% rename from vendor/github.com/Shopify/sarama/LICENSE rename to vendor/github.com/IBM/sarama/LICENSE.md index d2bf4352f..f2c7f0c5b 100644 --- a/vendor/github.com/Shopify/sarama/LICENSE +++ b/vendor/github.com/IBM/sarama/LICENSE.md @@ -1,4 +1,7 @@ +# MIT License + Copyright (c) 2013 Shopify +Copyright (c) 2023 IBM Corporation Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the diff --git a/vendor/github.com/Shopify/sarama/Makefile b/vendor/github.com/IBM/sarama/Makefile similarity index 100% rename from vendor/github.com/Shopify/sarama/Makefile rename to vendor/github.com/IBM/sarama/Makefile diff --git a/vendor/github.com/Shopify/sarama/README.md b/vendor/github.com/IBM/sarama/README.md similarity index 72% rename from vendor/github.com/Shopify/sarama/README.md rename to vendor/github.com/IBM/sarama/README.md index 0ee6e6a7f..a1f6137e5 100644 --- a/vendor/github.com/Shopify/sarama/README.md +++ b/vendor/github.com/IBM/sarama/README.md @@ -1,18 +1,17 @@ # sarama -[![Go Reference](https://pkg.go.dev/badge/github.com/Shopify/sarama.svg)](https://pkg.go.dev/github.com/Shopify/sarama) -[![Coverage](https://codecov.io/gh/Shopify/sarama/branch/main/graph/badge.svg)](https://codecov.io/gh/Shopify/sarama) +[![Go 
Reference](https://pkg.go.dev/badge/github.com/IBM/sarama.svg)](https://pkg.go.dev/github.com/IBM/sarama) Sarama is an MIT-licensed Go client library for [Apache Kafka](https://kafka.apache.org/). ## Getting started -- API documentation and examples are available via [pkg.go.dev](https://pkg.go.dev/github.com/Shopify/sarama). +- API documentation and examples are available via [pkg.go.dev](https://pkg.go.dev/github.com/IBM/sarama). - Mocks for testing are available in the [mocks](./mocks) subpackage. - The [examples](./examples) directory contains more elaborate example applications. - The [tools](./tools) directory contains command line tools that can be useful for testing, diagnostics, and instrumentation. -You might also want to look at the [Frequently Asked Questions](https://github.com/Shopify/sarama/wiki/Frequently-Asked-Questions). +You might also want to look at the [Frequently Asked Questions](https://github.com/IBM/sarama/wiki/Frequently-Asked-Questions). ## Compatibility and API stability @@ -21,13 +20,13 @@ the two latest stable releases of Kafka and Go, and we provide a two month grace period for older releases. However, older releases of Kafka are still likely to work. Sarama follows semantic versioning and provides API stability via the gopkg.in service. -You can import a version with a guaranteed stable API via http://gopkg.in/Shopify/sarama.v1. +You can import a version with a guaranteed stable API via http://gopkg.in/IBM/sarama.v1. A changelog is available [here](CHANGELOG.md). ## Contributing -- Get started by checking our [contribution guidelines](https://github.com/Shopify/sarama/blob/main/.github/CONTRIBUTING.md). -- Read the [Sarama wiki](https://github.com/Shopify/sarama/wiki) for more technical and design details. +- Get started by checking our [contribution guidelines](https://github.com/IBM/sarama/blob/main/.github/CONTRIBUTING.md). +- Read the [Sarama wiki](https://github.com/IBM/sarama/wiki) for more technical and design details. - The [Kafka Protocol Specification](https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol) contains a wealth of useful information. - For more general issues, there is [a google group](https://groups.google.com/forum/#!forum/kafka-clients) for Kafka client developers. - If you have any questions, just ask! 
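+
+As a quick illustrative sketch (the broker address and topic name below are placeholder assumptions, not part of the library), producing a single message with the renamed `github.com/IBM/sarama` module looks roughly like this:
+
+```go
+package main
+
+import (
+	"log"
+
+	"github.com/IBM/sarama" // formerly github.com/Shopify/sarama
+)
+
+func main() {
+	cfg := sarama.NewConfig()
+	cfg.Producer.Return.Successes = true // required by the SyncProducer
+
+	producer, err := sarama.NewSyncProducer([]string{"localhost:9092"}, cfg)
+	if err != nil {
+		log.Fatalln(err)
+	}
+	defer producer.Close()
+
+	partition, offset, err := producer.SendMessage(&sarama.ProducerMessage{
+		Topic: "example",
+		Value: sarama.StringEncoder("hello"),
+	})
+	if err != nil {
+		log.Fatalln(err)
+	}
+	log.Printf("message stored at partition=%d offset=%d", partition, offset)
+}
+```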
diff --git a/vendor/github.com/Shopify/sarama/Vagrantfile b/vendor/github.com/IBM/sarama/Vagrantfile similarity index 100% rename from vendor/github.com/Shopify/sarama/Vagrantfile rename to vendor/github.com/IBM/sarama/Vagrantfile diff --git a/vendor/github.com/Shopify/sarama/acl_bindings.go b/vendor/github.com/IBM/sarama/acl_bindings.go similarity index 100% rename from vendor/github.com/Shopify/sarama/acl_bindings.go rename to vendor/github.com/IBM/sarama/acl_bindings.go diff --git a/vendor/github.com/Shopify/sarama/acl_create_request.go b/vendor/github.com/IBM/sarama/acl_create_request.go similarity index 100% rename from vendor/github.com/Shopify/sarama/acl_create_request.go rename to vendor/github.com/IBM/sarama/acl_create_request.go diff --git a/vendor/github.com/Shopify/sarama/acl_create_response.go b/vendor/github.com/IBM/sarama/acl_create_response.go similarity index 100% rename from vendor/github.com/Shopify/sarama/acl_create_response.go rename to vendor/github.com/IBM/sarama/acl_create_response.go diff --git a/vendor/github.com/Shopify/sarama/acl_delete_request.go b/vendor/github.com/IBM/sarama/acl_delete_request.go similarity index 100% rename from vendor/github.com/Shopify/sarama/acl_delete_request.go rename to vendor/github.com/IBM/sarama/acl_delete_request.go diff --git a/vendor/github.com/Shopify/sarama/acl_delete_response.go b/vendor/github.com/IBM/sarama/acl_delete_response.go similarity index 100% rename from vendor/github.com/Shopify/sarama/acl_delete_response.go rename to vendor/github.com/IBM/sarama/acl_delete_response.go diff --git a/vendor/github.com/Shopify/sarama/acl_describe_request.go b/vendor/github.com/IBM/sarama/acl_describe_request.go similarity index 93% rename from vendor/github.com/Shopify/sarama/acl_describe_request.go rename to vendor/github.com/IBM/sarama/acl_describe_request.go index e0fe9023a..98edb6740 100644 --- a/vendor/github.com/Shopify/sarama/acl_describe_request.go +++ b/vendor/github.com/IBM/sarama/acl_describe_request.go @@ -1,6 +1,6 @@ package sarama -// DescribeAclsRequest is a secribe acl request type +// DescribeAclsRequest is a describe acl request type type DescribeAclsRequest struct { Version int AclFilter diff --git a/vendor/github.com/Shopify/sarama/acl_describe_response.go b/vendor/github.com/IBM/sarama/acl_describe_response.go similarity index 100% rename from vendor/github.com/Shopify/sarama/acl_describe_response.go rename to vendor/github.com/IBM/sarama/acl_describe_response.go diff --git a/vendor/github.com/Shopify/sarama/acl_filter.go b/vendor/github.com/IBM/sarama/acl_filter.go similarity index 100% rename from vendor/github.com/Shopify/sarama/acl_filter.go rename to vendor/github.com/IBM/sarama/acl_filter.go diff --git a/vendor/github.com/Shopify/sarama/acl_types.go b/vendor/github.com/IBM/sarama/acl_types.go similarity index 93% rename from vendor/github.com/Shopify/sarama/acl_types.go rename to vendor/github.com/IBM/sarama/acl_types.go index c3ba8ddcf..62bb5342a 100644 --- a/vendor/github.com/Shopify/sarama/acl_types.go +++ b/vendor/github.com/IBM/sarama/acl_types.go @@ -60,7 +60,7 @@ func (a *AclOperation) MarshalText() ([]byte, error) { return []byte(a.String()), nil } -// UnmarshalText takes a text reprentation of the operation and converts it to an AclOperation +// UnmarshalText takes a text representation of the operation and converts it to an AclOperation func (a *AclOperation) UnmarshalText(text []byte) error { normalized := strings.ToLower(string(text)) mapping := map[string]AclOperation{ @@ -114,7 +114,7 @@ func 
(a *AclPermissionType) MarshalText() ([]byte, error) { return []byte(a.String()), nil } -// UnmarshalText takes a text reprentation of the permission type and converts it to an AclPermissionType +// UnmarshalText takes a text representation of the permission type and converts it to an AclPermissionType func (a *AclPermissionType) UnmarshalText(text []byte) error { normalized := strings.ToLower(string(text)) mapping := map[string]AclPermissionType{ @@ -166,7 +166,7 @@ func (a *AclResourceType) MarshalText() ([]byte, error) { return []byte(a.String()), nil } -// UnmarshalText takes a text reprentation of the resource type and converts it to an AclResourceType +// UnmarshalText takes a text representation of the resource type and converts it to an AclResourceType func (a *AclResourceType) UnmarshalText(text []byte) error { normalized := strings.ToLower(string(text)) mapping := map[string]AclResourceType{ @@ -217,7 +217,7 @@ func (a *AclResourcePatternType) MarshalText() ([]byte, error) { return []byte(a.String()), nil } -// UnmarshalText takes a text reprentation of the resource pattern type and converts it to an AclResourcePatternType +// UnmarshalText takes a text representation of the resource pattern type and converts it to an AclResourcePatternType func (a *AclResourcePatternType) UnmarshalText(text []byte) error { normalized := strings.ToLower(string(text)) mapping := map[string]AclResourcePatternType{ diff --git a/vendor/github.com/Shopify/sarama/add_offsets_to_txn_request.go b/vendor/github.com/IBM/sarama/add_offsets_to_txn_request.go similarity index 100% rename from vendor/github.com/Shopify/sarama/add_offsets_to_txn_request.go rename to vendor/github.com/IBM/sarama/add_offsets_to_txn_request.go diff --git a/vendor/github.com/Shopify/sarama/add_offsets_to_txn_response.go b/vendor/github.com/IBM/sarama/add_offsets_to_txn_response.go similarity index 100% rename from vendor/github.com/Shopify/sarama/add_offsets_to_txn_response.go rename to vendor/github.com/IBM/sarama/add_offsets_to_txn_response.go diff --git a/vendor/github.com/Shopify/sarama/add_partitions_to_txn_request.go b/vendor/github.com/IBM/sarama/add_partitions_to_txn_request.go similarity index 96% rename from vendor/github.com/Shopify/sarama/add_partitions_to_txn_request.go rename to vendor/github.com/IBM/sarama/add_partitions_to_txn_request.go index 57ecf6488..1d6da75f5 100644 --- a/vendor/github.com/Shopify/sarama/add_partitions_to_txn_request.go +++ b/vendor/github.com/IBM/sarama/add_partitions_to_txn_request.go @@ -1,6 +1,6 @@ package sarama -// AddPartitionsToTxnRequest is a add paartition request +// AddPartitionsToTxnRequest is a add partition request type AddPartitionsToTxnRequest struct { TransactionalID string ProducerID int64 diff --git a/vendor/github.com/Shopify/sarama/add_partitions_to_txn_response.go b/vendor/github.com/IBM/sarama/add_partitions_to_txn_response.go similarity index 100% rename from vendor/github.com/Shopify/sarama/add_partitions_to_txn_response.go rename to vendor/github.com/IBM/sarama/add_partitions_to_txn_response.go diff --git a/vendor/github.com/Shopify/sarama/admin.go b/vendor/github.com/IBM/sarama/admin.go similarity index 96% rename from vendor/github.com/Shopify/sarama/admin.go rename to vendor/github.com/IBM/sarama/admin.go index a334daff5..29eeca1c6 100644 --- a/vendor/github.com/Shopify/sarama/admin.go +++ b/vendor/github.com/IBM/sarama/admin.go @@ -207,19 +207,17 @@ func isErrNoController(err error) bool { // provided retryable func) up to the maximum number of tries permitted by 
// the admin client configuration func (ca *clusterAdmin) retryOnError(retryable func(error) bool, fn func() error) error { - var err error - for attempt := 0; attempt < ca.conf.Admin.Retry.Max; attempt++ { - err = fn() - if err == nil || !retryable(err) { + for attemptsRemaining := ca.conf.Admin.Retry.Max + 1; ; { + err := fn() + attemptsRemaining-- + if err == nil || attemptsRemaining <= 0 || !retryable(err) { return err } Logger.Printf( "admin/request retrying after %dms... (%d attempts remaining)\n", - ca.conf.Admin.Retry.Backoff/time.Millisecond, ca.conf.Admin.Retry.Max-attempt) + ca.conf.Admin.Retry.Backoff/time.Millisecond, attemptsRemaining) time.Sleep(ca.conf.Admin.Retry.Backoff) - continue } - return err } func (ca *clusterAdmin) CreateTopic(topic string, detail *TopicDetail, validateOnly bool) error { @@ -275,13 +273,19 @@ func (ca *clusterAdmin) CreateTopic(topic string, detail *TopicDetail, validateO } func (ca *clusterAdmin) DescribeTopics(topics []string) (metadata []*TopicMetadata, err error) { - controller, err := ca.Controller() - if err != nil { - return nil, err - } - - request := NewMetadataRequest(ca.conf.Version, topics) - response, err := controller.GetMetadata(request) + var response *MetadataResponse + err = ca.retryOnError(isErrNoController, func() error { + controller, err := ca.Controller() + if err != nil { + return err + } + request := NewMetadataRequest(ca.conf.Version, topics) + response, err = controller.GetMetadata(request) + if isErrNoController(err) { + _, _ = ca.refreshController() + } + return err + }) if err != nil { return nil, err } @@ -289,13 +293,20 @@ func (ca *clusterAdmin) DescribeTopics(topics []string) (metadata []*TopicMetada } func (ca *clusterAdmin) DescribeCluster() (brokers []*Broker, controllerID int32, err error) { - controller, err := ca.Controller() - if err != nil { - return nil, int32(0), err - } + var response *MetadataResponse + err = ca.retryOnError(isErrNoController, func() error { + controller, err := ca.Controller() + if err != nil { + return err + } - request := NewMetadataRequest(ca.conf.Version, nil) - response, err := controller.GetMetadata(request) + request := NewMetadataRequest(ca.conf.Version, nil) + response, err = controller.GetMetadata(request) + if isErrNoController(err) { + _, _ = ca.refreshController() + } + return err + }) if err != nil { return nil, int32(0), err } @@ -545,13 +556,20 @@ func (ca *clusterAdmin) ListPartitionReassignments(topic string, partitions []in request.AddBlock(topic, partitions) - b, err := ca.Controller() - if err != nil { - return nil, err - } - _ = b.Open(ca.client.Config()) + var rsp *ListPartitionReassignmentsResponse + err = ca.retryOnError(isErrNoController, func() error { + b, err := ca.Controller() + if err != nil { + return err + } + _ = b.Open(ca.client.Config()) - rsp, err := b.ListPartitionReassignments(request) + rsp, err = b.ListPartitionReassignments(request) + if isErrNoController(err) { + _, _ = ca.refreshController() + } + return err + }) if err == nil && rsp != nil { return rsp.TopicStatus, nil @@ -891,7 +909,7 @@ func (ca *clusterAdmin) DescribeConsumerGroups(groups []string) (result []*Group describeReq := &DescribeGroupsRequest{ Groups: brokerGroups, } - if ca.conf.Version.IsAtLeast(V2_3_0_0) { + if ca.conf.Version.IsAtLeast(V2_4_0_0) { describeReq.Version = 4 } response, err := broker.DescribeGroups(describeReq) diff --git a/vendor/github.com/Shopify/sarama/alter_client_quotas_request.go b/vendor/github.com/IBM/sarama/alter_client_quotas_request.go similarity 
index 100% rename from vendor/github.com/Shopify/sarama/alter_client_quotas_request.go rename to vendor/github.com/IBM/sarama/alter_client_quotas_request.go diff --git a/vendor/github.com/Shopify/sarama/alter_client_quotas_response.go b/vendor/github.com/IBM/sarama/alter_client_quotas_response.go similarity index 100% rename from vendor/github.com/Shopify/sarama/alter_client_quotas_response.go rename to vendor/github.com/IBM/sarama/alter_client_quotas_response.go diff --git a/vendor/github.com/Shopify/sarama/alter_configs_request.go b/vendor/github.com/IBM/sarama/alter_configs_request.go similarity index 100% rename from vendor/github.com/Shopify/sarama/alter_configs_request.go rename to vendor/github.com/IBM/sarama/alter_configs_request.go diff --git a/vendor/github.com/Shopify/sarama/alter_configs_response.go b/vendor/github.com/IBM/sarama/alter_configs_response.go similarity index 100% rename from vendor/github.com/Shopify/sarama/alter_configs_response.go rename to vendor/github.com/IBM/sarama/alter_configs_response.go diff --git a/vendor/github.com/Shopify/sarama/alter_partition_reassignments_request.go b/vendor/github.com/IBM/sarama/alter_partition_reassignments_request.go similarity index 100% rename from vendor/github.com/Shopify/sarama/alter_partition_reassignments_request.go rename to vendor/github.com/IBM/sarama/alter_partition_reassignments_request.go diff --git a/vendor/github.com/Shopify/sarama/alter_partition_reassignments_response.go b/vendor/github.com/IBM/sarama/alter_partition_reassignments_response.go similarity index 100% rename from vendor/github.com/Shopify/sarama/alter_partition_reassignments_response.go rename to vendor/github.com/IBM/sarama/alter_partition_reassignments_response.go diff --git a/vendor/github.com/Shopify/sarama/alter_user_scram_credentials_request.go b/vendor/github.com/IBM/sarama/alter_user_scram_credentials_request.go similarity index 100% rename from vendor/github.com/Shopify/sarama/alter_user_scram_credentials_request.go rename to vendor/github.com/IBM/sarama/alter_user_scram_credentials_request.go diff --git a/vendor/github.com/Shopify/sarama/alter_user_scram_credentials_response.go b/vendor/github.com/IBM/sarama/alter_user_scram_credentials_response.go similarity index 100% rename from vendor/github.com/Shopify/sarama/alter_user_scram_credentials_response.go rename to vendor/github.com/IBM/sarama/alter_user_scram_credentials_response.go diff --git a/vendor/github.com/Shopify/sarama/api_versions_request.go b/vendor/github.com/IBM/sarama/api_versions_request.go similarity index 100% rename from vendor/github.com/Shopify/sarama/api_versions_request.go rename to vendor/github.com/IBM/sarama/api_versions_request.go diff --git a/vendor/github.com/Shopify/sarama/api_versions_response.go b/vendor/github.com/IBM/sarama/api_versions_response.go similarity index 100% rename from vendor/github.com/Shopify/sarama/api_versions_response.go rename to vendor/github.com/IBM/sarama/api_versions_response.go diff --git a/vendor/github.com/Shopify/sarama/async_producer.go b/vendor/github.com/IBM/sarama/async_producer.go similarity index 99% rename from vendor/github.com/Shopify/sarama/async_producer.go rename to vendor/github.com/IBM/sarama/async_producer.go index 50f226f8e..dfd891237 100644 --- a/vendor/github.com/Shopify/sarama/async_producer.go +++ b/vendor/github.com/IBM/sarama/async_producer.go @@ -50,7 +50,7 @@ type AsyncProducer interface { // errors to be returned. 
Errors() <-chan *ProducerError - // IsTransactional return true when current producer is is transactional. + // IsTransactional return true when current producer is transactional. IsTransactional() bool // TxnStatus return current producer transaction status. diff --git a/vendor/github.com/Shopify/sarama/balance_strategy.go b/vendor/github.com/IBM/sarama/balance_strategy.go similarity index 95% rename from vendor/github.com/Shopify/sarama/balance_strategy.go rename to vendor/github.com/IBM/sarama/balance_strategy.go index 4594df6f6..8635bdf7d 100644 --- a/vendor/github.com/Shopify/sarama/balance_strategy.go +++ b/vendor/github.com/IBM/sarama/balance_strategy.go @@ -57,7 +57,8 @@ type BalanceStrategy interface { // -------------------------------------------------------------------- -// BalanceStrategyRange is the default and assigns partitions as ranges to consumer group members. +// NewBalanceStrategyRange returns a range balance strategy, +// which is the default and assigns partitions as ranges to consumer group members. // This follows the same logic as // https://kafka.apache.org/31/javadoc/org/apache/kafka/clients/consumer/RangeAssignor.html // @@ -65,27 +66,33 @@ type BalanceStrategy interface { // // M1: {T1: [0, 1, 2], T2: [0, 1, 2]} // M2: {T2: [3, 4, 5], T2: [3, 4, 5]} -var BalanceStrategyRange = &balanceStrategy{ - name: RangeBalanceStrategyName, - coreFn: func(plan BalanceStrategyPlan, memberIDs []string, topic string, partitions []int32) { - partitionsPerConsumer := len(partitions) / len(memberIDs) - consumersWithExtraPartition := len(partitions) % len(memberIDs) - - sort.Strings(memberIDs) - - for i, memberID := range memberIDs { - min := i*partitionsPerConsumer + int(math.Min(float64(consumersWithExtraPartition), float64(i))) - extra := 0 - if i < consumersWithExtraPartition { - extra = 1 +func NewBalanceStrategyRange() BalanceStrategy { + return &balanceStrategy{ + name: RangeBalanceStrategyName, + coreFn: func(plan BalanceStrategyPlan, memberIDs []string, topic string, partitions []int32) { + partitionsPerConsumer := len(partitions) / len(memberIDs) + consumersWithExtraPartition := len(partitions) % len(memberIDs) + + sort.Strings(memberIDs) + + for i, memberID := range memberIDs { + min := i*partitionsPerConsumer + int(math.Min(float64(consumersWithExtraPartition), float64(i))) + extra := 0 + if i < consumersWithExtraPartition { + extra = 1 + } + max := min + partitionsPerConsumer + extra + plan.Add(memberID, topic, partitions[min:max]...) } - max := min + partitionsPerConsumer + extra - plan.Add(memberID, topic, partitions[min:max]...) - } - }, + }, + } } -// BalanceStrategySticky assigns partitions to members with an attempt to preserve earlier assignments +// Deprecated: use NewBalanceStrategyRange to avoid data race issue +var BalanceStrategyRange = NewBalanceStrategyRange() + +// NewBalanceStrategySticky returns a sticky balance strategy, +// which assigns partitions to members with an attempt to preserve earlier assignments // while maintain a balanced partition distribution. 
// Example with topic T with six partitions (0..5) and two members (M1, M2): // @@ -97,13 +104,18 @@ var BalanceStrategyRange = &balanceStrategy{ // M1: {T: [0, 2]} // M2: {T: [1, 3]} // M3: {T: [4, 5]} -var BalanceStrategySticky = &stickyBalanceStrategy{} +func NewBalanceStrategySticky() BalanceStrategy { + return &stickyBalanceStrategy{} +} + +// Deprecated: use NewBalanceStrategySticky to avoid data race issue +var BalanceStrategySticky = NewBalanceStrategySticky() // -------------------------------------------------------------------- type balanceStrategy struct { - name string coreFn func(plan BalanceStrategyPlan, memberIDs []string, topic string, partitions []int32) + name string } // Name implements BalanceStrategy. @@ -171,10 +183,7 @@ func (s *stickyBalanceStrategy) Plan(members map[string]ConsumerGroupMemberMetad } // determine if we're dealing with a completely fresh assignment, or if there's existing assignment state - isFreshAssignment := false - if len(currentAssignment) == 0 { - isFreshAssignment = true - } + isFreshAssignment := len(currentAssignment) == 0 // create a mapping of all current topic partitions and the consumers that can be assigned to them partition2AllPotentialConsumers := make(map[topicPartitionAssignment][]string) @@ -281,10 +290,7 @@ func strsContains(s []string, value string) bool { // Balance assignments across consumers for maximum fairness and stickiness. func (s *stickyBalanceStrategy) balance(currentAssignment map[string][]topicPartitionAssignment, prevAssignment map[topicPartitionAssignment]consumerGenerationPair, sortedPartitions []topicPartitionAssignment, unassignedPartitions []topicPartitionAssignment, sortedCurrentSubscriptions []string, consumer2AllPotentialPartitions map[string][]topicPartitionAssignment, partition2AllPotentialConsumers map[topicPartitionAssignment][]string, currentPartitionConsumer map[topicPartitionAssignment]string) { - initializing := false - if len(sortedCurrentSubscriptions) == 0 || len(currentAssignment[sortedCurrentSubscriptions[0]]) == 0 { - initializing = true - } + initializing := len(sortedCurrentSubscriptions) == 0 || len(currentAssignment[sortedCurrentSubscriptions[0]]) == 0 // assign all unassigned partitions for _, partition := range unassignedPartitions { @@ -337,11 +343,17 @@ func (s *stickyBalanceStrategy) balance(currentAssignment map[string][]topicPart } } -// BalanceStrategyRoundRobin assigns partitions to members in alternating order. +// NewBalanceStrategyRoundRobin returns a round-robin balance strategy, +// which assigns partitions to members in alternating order. 
// For example, there are two topics (t0, t1) and two consumer (m0, m1), and each topic has three partitions (p0, p1, p2): // M0: [t0p0, t0p2, t1p1] // M1: [t0p1, t1p0, t1p2] -var BalanceStrategyRoundRobin = new(roundRobinBalancer) +func NewBalanceStrategyRoundRobin() BalanceStrategy { + return new(roundRobinBalancer) +} + +// Deprecated: use NewBalanceStrategyRoundRobin to avoid data race issue +var BalanceStrategyRoundRobin = NewBalanceStrategyRoundRobin() type roundRobinBalancer struct{} @@ -414,8 +426,8 @@ func (tp *topicAndPartition) comparedValue() string { } type memberAndTopic struct { - memberID string topics map[string]struct{} + memberID string } func (m *memberAndTopic) hasTopic(topic string) bool { @@ -681,11 +693,8 @@ func sortPartitions(currentAssignment map[string][]topicPartitionAssignment, par } heap.Init(&pq) - for { - // loop until no consumer-group members remain - if pq.Len() == 0 { - break - } + // loop until no consumer-group members remain + for pq.Len() != 0 { member := pq[0] // partitions that were assigned to a different consumer last time @@ -1106,7 +1115,7 @@ type assignmentPriorityQueue []*consumerGroupMember func (pq assignmentPriorityQueue) Len() int { return len(pq) } func (pq assignmentPriorityQueue) Less(i, j int) bool { - // order asssignment priority queue in descending order using assignment-count/member-id + // order assignment priority queue in descending order using assignment-count/member-id if len(pq[i].assignments) == len(pq[j].assignments) { return strings.Compare(pq[i].id, pq[j].id) > 0 } diff --git a/vendor/github.com/Shopify/sarama/broker.go b/vendor/github.com/IBM/sarama/broker.go similarity index 99% rename from vendor/github.com/Shopify/sarama/broker.go rename to vendor/github.com/IBM/sarama/broker.go index d049e9b47..7ed987fe3 100644 --- a/vendor/github.com/Shopify/sarama/broker.go +++ b/vendor/github.com/IBM/sarama/broker.go @@ -175,7 +175,9 @@ func (b *Broker) Open(conf *Config) error { b.lock.Lock() - b.metricRegistry = newCleanupRegistry(conf.MetricRegistry) + if b.metricRegistry == nil { + b.metricRegistry = newCleanupRegistry(conf.MetricRegistry) + } go withRecover(func() { defer func() { @@ -453,7 +455,7 @@ func (b *Broker) AsyncProduce(request *ProduceRequest, cb ProduceCallback) error return } - // Wellformed response + // Well-formed response b.updateThrottleMetric(res.ThrottleTime) cb(res, nil) }, diff --git a/vendor/github.com/Shopify/sarama/client.go b/vendor/github.com/IBM/sarama/client.go similarity index 98% rename from vendor/github.com/Shopify/sarama/client.go rename to vendor/github.com/IBM/sarama/client.go index f7872a1b3..d9fb77d64 100644 --- a/vendor/github.com/Shopify/sarama/client.go +++ b/vendor/github.com/IBM/sarama/client.go @@ -50,7 +50,7 @@ type Client interface { // topic/partition, as determined by querying the cluster metadata. Leader(topic string, partitionID int32) (*Broker, error) - // LeaderAndEpoch returns the the leader and its epoch for the current + // LeaderAndEpoch returns the leader and its epoch for the current // topic/partition, as determined by querying the cluster metadata. LeaderAndEpoch(topic string, partitionID int32) (*Broker, int32, error) @@ -132,10 +132,10 @@ const ( ) type client struct { - // updateMetaDataMs stores the time at which metadata was lasted updated. + // updateMetadataMs stores the time at which metadata was lasted updated. 
// Note: this accessed atomically so must be the first word in the struct // as per golang/go#41970 - updateMetaDataMs int64 + updateMetadataMs int64 conf *Config closer, closed chan none // for shutting down background metadata updater @@ -513,7 +513,7 @@ func (client *client) RefreshMetadata(topics ...string) error { // Prior to 0.8.2, Kafka will throw exceptions on an empty topic and not return a proper // error. This handles the case by returning an error instead of sending it - // off to Kafka. See: https://github.com/Shopify/sarama/pull/38#issuecomment-26362310 + // off to Kafka. See: https://github.com/IBM/sarama/pull/38#issuecomment-26362310 for _, topic := range topics { if topic == "" { return ErrInvalidTopic // this is the error that 0.8.2 and later correctly return @@ -975,13 +975,14 @@ func (client *client) tryRefreshMetadata(topics []string, attemptsRemaining int, time.Sleep(backoff) } - t := atomic.LoadInt64(&client.updateMetaDataMs) - if time.Since(time.Unix(t/1e3, 0)) < backoff { + t := atomic.LoadInt64(&client.updateMetadataMs) + if time.Since(time.UnixMilli(t)) < backoff { return err } + attemptsRemaining-- Logger.Printf("client/metadata retrying after %dms... (%d attempts remaining)\n", backoff/time.Millisecond, attemptsRemaining) - return client.tryRefreshMetadata(topics, attemptsRemaining-1, deadline) + return client.tryRefreshMetadata(topics, attemptsRemaining, deadline) } return err } @@ -999,10 +1000,7 @@ func (client *client) tryRefreshMetadata(topics []string, attemptsRemaining int, req := NewMetadataRequest(client.conf.Version, topics) req.AllowAutoTopicCreation = allowAutoTopicCreation - t := atomic.LoadInt64(&client.updateMetaDataMs) - if !atomic.CompareAndSwapInt64(&client.updateMetaDataMs, t, time.Now().UnixNano()/int64(time.Millisecond)) { - return nil - } + atomic.StoreInt64(&client.updateMetadataMs, time.Now().UnixMilli()) response, err := broker.GetMetadata(req) var kerror KError @@ -1160,9 +1158,10 @@ func (client *client) findCoordinator(coordinatorKey string, coordinatorType Coo retry := func(err error) (*FindCoordinatorResponse, error) { if attemptsRemaining > 0 { backoff := client.computeBackoff(attemptsRemaining) + attemptsRemaining-- Logger.Printf("client/coordinator retrying after %dms... (%d attempts remaining)\n", backoff/time.Millisecond, attemptsRemaining) time.Sleep(backoff) - return client.findCoordinator(coordinatorKey, coordinatorType, attemptsRemaining-1) + return client.findCoordinator(coordinatorKey, coordinatorType, attemptsRemaining) } return nil, err } diff --git a/vendor/github.com/Shopify/sarama/compress.go b/vendor/github.com/IBM/sarama/compress.go similarity index 100% rename from vendor/github.com/Shopify/sarama/compress.go rename to vendor/github.com/IBM/sarama/compress.go diff --git a/vendor/github.com/Shopify/sarama/config.go b/vendor/github.com/IBM/sarama/config.go similarity index 98% rename from vendor/github.com/Shopify/sarama/config.go rename to vendor/github.com/IBM/sarama/config.go index b07034434..eb27d98ac 100644 --- a/vendor/github.com/Shopify/sarama/config.go +++ b/vendor/github.com/IBM/sarama/config.go @@ -294,7 +294,7 @@ type Config struct { Interval time.Duration } Rebalance struct { - // Strategy for allocating topic partitions to members (default BalanceStrategyRange) + // Strategy for allocating topic partitions to members. // Deprecated: Strategy exists for historical compatibility // and should not be used. Please use GroupStrategies. 
Strategy BalanceStrategy @@ -302,7 +302,7 @@ type Config struct { // GroupStrategies is the priority-ordered list of client-side consumer group // balancing strategies that will be offered to the coordinator. The first // strategy that all group members support will be chosen by the leader. - // default: [BalanceStrategyRange] + // default: [ NewBalanceStrategyRange() ] GroupStrategies []BalanceStrategy // The maximum allowed time for each worker to join the group once a rebalance has begun. @@ -539,7 +539,7 @@ func NewConfig() *Config { c.Consumer.Group.Session.Timeout = 10 * time.Second c.Consumer.Group.Heartbeat.Interval = 3 * time.Second - c.Consumer.Group.Rebalance.GroupStrategies = []BalanceStrategy{BalanceStrategyRange} + c.Consumer.Group.Rebalance.GroupStrategies = []BalanceStrategy{NewBalanceStrategyRange()} c.Consumer.Group.Rebalance.Timeout = 60 * time.Second c.Consumer.Group.Rebalance.Retry.Max = 4 c.Consumer.Group.Rebalance.Retry.Backoff = 2 * time.Second @@ -650,19 +650,26 @@ func (c *Config) Validate() error { return ConfigurationError("Net.SASL.GSSAPI.ServiceName must not be empty when GSS-API mechanism is used") } - if c.Net.SASL.GSSAPI.AuthType == KRB5_USER_AUTH { + switch c.Net.SASL.GSSAPI.AuthType { + case KRB5_USER_AUTH: if c.Net.SASL.GSSAPI.Password == "" { return ConfigurationError("Net.SASL.GSSAPI.Password must not be empty when GSS-API " + "mechanism is used and Net.SASL.GSSAPI.AuthType = KRB5_USER_AUTH") } - } else if c.Net.SASL.GSSAPI.AuthType == KRB5_KEYTAB_AUTH { + case KRB5_KEYTAB_AUTH: if c.Net.SASL.GSSAPI.KeyTabPath == "" { return ConfigurationError("Net.SASL.GSSAPI.KeyTabPath must not be empty when GSS-API mechanism is used" + - " and Net.SASL.GSSAPI.AuthType = KRB5_KEYTAB_AUTH") + " and Net.SASL.GSSAPI.AuthType = KRB5_KEYTAB_AUTH") } - } else { - return ConfigurationError("Net.SASL.GSSAPI.AuthType is invalid. Possible values are KRB5_USER_AUTH and KRB5_KEYTAB_AUTH") + case KRB5_CCACHE_AUTH: + if c.Net.SASL.GSSAPI.CCachePath == "" { + return ConfigurationError("Net.SASL.GSSAPI.CCachePath must not be empty when GSS-API mechanism is used" + + " and Net.SASL.GSSAPI.AuthType = KRB5_CCACHE_AUTH") + } + default: + return ConfigurationError("Net.SASL.GSSAPI.AuthType is invalid. Possible values are KRB5_USER_AUTH, KRB5_KEYTAB_AUTH, and KRB5_CCACHE_AUTH") } + if c.Net.SASL.GSSAPI.KerberosConfigPath == "" { return ConfigurationError("Net.SASL.GSSAPI.KerberosConfigPath must not be empty when GSS-API mechanism is used") } diff --git a/vendor/github.com/Shopify/sarama/config_resource_type.go b/vendor/github.com/IBM/sarama/config_resource_type.go similarity index 100% rename from vendor/github.com/Shopify/sarama/config_resource_type.go rename to vendor/github.com/IBM/sarama/config_resource_type.go diff --git a/vendor/github.com/Shopify/sarama/consumer.go b/vendor/github.com/IBM/sarama/consumer.go similarity index 99% rename from vendor/github.com/Shopify/sarama/consumer.go rename to vendor/github.com/IBM/sarama/consumer.go index eb27df8d7..4d08b3dda 100644 --- a/vendor/github.com/Shopify/sarama/consumer.go +++ b/vendor/github.com/IBM/sarama/consumer.go @@ -85,13 +85,13 @@ type Consumer interface { // New calls to the broker will return records from these partitions if there are any to be fetched. Resume(topicPartitions map[string][]int32) - // Pause suspends fetching from all partitions. Future calls to the broker will not return any + // PauseAll suspends fetching from all partitions. 
Future calls to the broker will not return any // records from these partitions until they have been resumed using Resume()/ResumeAll(). // Note that this method does not affect partition subscription. // In particular, it does not cause a group rebalance when automatic assignment is used. PauseAll() - // Resume resumes all partitions which have been paused with Pause()/PauseAll(). + // ResumeAll resumes all partitions which have been paused with Pause()/PauseAll(). // New calls to the broker will return records from these partitions if there are any to be fetched. ResumeAll() } diff --git a/vendor/github.com/Shopify/sarama/consumer_group.go b/vendor/github.com/IBM/sarama/consumer_group.go similarity index 99% rename from vendor/github.com/Shopify/sarama/consumer_group.go rename to vendor/github.com/IBM/sarama/consumer_group.go index ecdbcfa68..68f463976 100644 --- a/vendor/github.com/Shopify/sarama/consumer_group.go +++ b/vendor/github.com/IBM/sarama/consumer_group.go @@ -252,7 +252,10 @@ func (c *consumerGroup) retryNewSession(ctx context.Context, topics []string, ha if refreshCoordinator { err := c.client.RefreshCoordinator(c.groupID) if err != nil { - return c.retryNewSession(ctx, topics, handler, retries, true) + if retries <= 0 { + return nil, err + } + return c.retryNewSession(ctx, topics, handler, retries-1, true) } } @@ -403,7 +406,7 @@ func (c *consumerGroup) newSession(ctx context.Context, topics []string, handler claims = members.Topics // in the case of stateful balance strategies, hold on to the returned - // assignment metadata, otherwise, reset the statically defined conusmer + // assignment metadata, otherwise, reset the statically defined consumer // group metadata if members.UserData != nil { c.userData = members.UserData diff --git a/vendor/github.com/Shopify/sarama/consumer_group_members.go b/vendor/github.com/IBM/sarama/consumer_group_members.go similarity index 100% rename from vendor/github.com/Shopify/sarama/consumer_group_members.go rename to vendor/github.com/IBM/sarama/consumer_group_members.go diff --git a/vendor/github.com/Shopify/sarama/consumer_metadata_request.go b/vendor/github.com/IBM/sarama/consumer_metadata_request.go similarity index 100% rename from vendor/github.com/Shopify/sarama/consumer_metadata_request.go rename to vendor/github.com/IBM/sarama/consumer_metadata_request.go diff --git a/vendor/github.com/Shopify/sarama/consumer_metadata_response.go b/vendor/github.com/IBM/sarama/consumer_metadata_response.go similarity index 100% rename from vendor/github.com/Shopify/sarama/consumer_metadata_response.go rename to vendor/github.com/IBM/sarama/consumer_metadata_response.go diff --git a/vendor/github.com/Shopify/sarama/control_record.go b/vendor/github.com/IBM/sarama/control_record.go similarity index 100% rename from vendor/github.com/Shopify/sarama/control_record.go rename to vendor/github.com/IBM/sarama/control_record.go diff --git a/vendor/github.com/Shopify/sarama/crc32_field.go b/vendor/github.com/IBM/sarama/crc32_field.go similarity index 100% rename from vendor/github.com/Shopify/sarama/crc32_field.go rename to vendor/github.com/IBM/sarama/crc32_field.go diff --git a/vendor/github.com/Shopify/sarama/create_partitions_request.go b/vendor/github.com/IBM/sarama/create_partitions_request.go similarity index 100% rename from vendor/github.com/Shopify/sarama/create_partitions_request.go rename to vendor/github.com/IBM/sarama/create_partitions_request.go diff --git a/vendor/github.com/Shopify/sarama/create_partitions_response.go 
b/vendor/github.com/IBM/sarama/create_partitions_response.go similarity index 100% rename from vendor/github.com/Shopify/sarama/create_partitions_response.go rename to vendor/github.com/IBM/sarama/create_partitions_response.go diff --git a/vendor/github.com/Shopify/sarama/create_topics_request.go b/vendor/github.com/IBM/sarama/create_topics_request.go similarity index 100% rename from vendor/github.com/Shopify/sarama/create_topics_request.go rename to vendor/github.com/IBM/sarama/create_topics_request.go diff --git a/vendor/github.com/Shopify/sarama/create_topics_response.go b/vendor/github.com/IBM/sarama/create_topics_response.go similarity index 100% rename from vendor/github.com/Shopify/sarama/create_topics_response.go rename to vendor/github.com/IBM/sarama/create_topics_response.go diff --git a/vendor/github.com/IBM/sarama/decompress.go b/vendor/github.com/IBM/sarama/decompress.go new file mode 100644 index 000000000..a01cefaa5 --- /dev/null +++ b/vendor/github.com/IBM/sarama/decompress.go @@ -0,0 +1,98 @@ +package sarama + +import ( + "bytes" + "compress/gzip" + "fmt" + "sync" + + snappy "github.com/eapache/go-xerial-snappy" + "github.com/pierrec/lz4/v4" +) + +var ( + lz4ReaderPool = sync.Pool{ + New: func() interface{} { + return lz4.NewReader(nil) + }, + } + + gzipReaderPool sync.Pool + + bufferPool = sync.Pool{ + New: func() interface{} { + return new(bytes.Buffer) + }, + } + + bytesPool = sync.Pool{ + New: func() interface{} { + res := make([]byte, 0, 4096) + return &res + }, + } +) + +func decompress(cc CompressionCodec, data []byte) ([]byte, error) { + switch cc { + case CompressionNone: + return data, nil + case CompressionGZIP: + var err error + reader, ok := gzipReaderPool.Get().(*gzip.Reader) + if !ok { + reader, err = gzip.NewReader(bytes.NewReader(data)) + } else { + err = reader.Reset(bytes.NewReader(data)) + } + + if err != nil { + return nil, err + } + + buffer := bufferPool.Get().(*bytes.Buffer) + _, err = buffer.ReadFrom(reader) + // copy the buffer to a new slice with the correct length + // reuse gzipReader and buffer + gzipReaderPool.Put(reader) + res := make([]byte, buffer.Len()) + copy(res, buffer.Bytes()) + buffer.Reset() + bufferPool.Put(buffer) + + return res, err + case CompressionSnappy: + return snappy.Decode(data) + case CompressionLZ4: + reader, ok := lz4ReaderPool.Get().(*lz4.Reader) + if !ok { + reader = lz4.NewReader(bytes.NewReader(data)) + } else { + reader.Reset(bytes.NewReader(data)) + } + buffer := bufferPool.Get().(*bytes.Buffer) + _, err := buffer.ReadFrom(reader) + // copy the buffer to a new slice with the correct length + // reuse lz4Reader and buffer + lz4ReaderPool.Put(reader) + res := make([]byte, buffer.Len()) + copy(res, buffer.Bytes()) + buffer.Reset() + bufferPool.Put(buffer) + + return res, err + case CompressionZSTD: + buffer := *bytesPool.Get().(*[]byte) + var err error + buffer, err = zstdDecompress(ZstdDecoderParams{}, buffer, data) + // copy the buffer to a new slice with the correct length and reuse buffer + res := make([]byte, len(buffer)) + copy(res, buffer) + buffer = buffer[:0] + bytesPool.Put(&buffer) + + return res, err + default: + return nil, PacketDecodingError{fmt.Sprintf("invalid compression specified (%d)", cc)} + } +} diff --git a/vendor/github.com/Shopify/sarama/delete_groups_request.go b/vendor/github.com/IBM/sarama/delete_groups_request.go similarity index 100% rename from vendor/github.com/Shopify/sarama/delete_groups_request.go rename to vendor/github.com/IBM/sarama/delete_groups_request.go diff --git 
a/vendor/github.com/Shopify/sarama/delete_groups_response.go b/vendor/github.com/IBM/sarama/delete_groups_response.go similarity index 100% rename from vendor/github.com/Shopify/sarama/delete_groups_response.go rename to vendor/github.com/IBM/sarama/delete_groups_response.go diff --git a/vendor/github.com/Shopify/sarama/delete_offsets_request.go b/vendor/github.com/IBM/sarama/delete_offsets_request.go similarity index 100% rename from vendor/github.com/Shopify/sarama/delete_offsets_request.go rename to vendor/github.com/IBM/sarama/delete_offsets_request.go diff --git a/vendor/github.com/Shopify/sarama/delete_offsets_response.go b/vendor/github.com/IBM/sarama/delete_offsets_response.go similarity index 100% rename from vendor/github.com/Shopify/sarama/delete_offsets_response.go rename to vendor/github.com/IBM/sarama/delete_offsets_response.go diff --git a/vendor/github.com/Shopify/sarama/delete_records_request.go b/vendor/github.com/IBM/sarama/delete_records_request.go similarity index 100% rename from vendor/github.com/Shopify/sarama/delete_records_request.go rename to vendor/github.com/IBM/sarama/delete_records_request.go diff --git a/vendor/github.com/Shopify/sarama/delete_records_response.go b/vendor/github.com/IBM/sarama/delete_records_response.go similarity index 100% rename from vendor/github.com/Shopify/sarama/delete_records_response.go rename to vendor/github.com/IBM/sarama/delete_records_response.go diff --git a/vendor/github.com/Shopify/sarama/delete_topics_request.go b/vendor/github.com/IBM/sarama/delete_topics_request.go similarity index 100% rename from vendor/github.com/Shopify/sarama/delete_topics_request.go rename to vendor/github.com/IBM/sarama/delete_topics_request.go diff --git a/vendor/github.com/Shopify/sarama/delete_topics_response.go b/vendor/github.com/IBM/sarama/delete_topics_response.go similarity index 100% rename from vendor/github.com/Shopify/sarama/delete_topics_response.go rename to vendor/github.com/IBM/sarama/delete_topics_response.go diff --git a/vendor/github.com/Shopify/sarama/describe_client_quotas_request.go b/vendor/github.com/IBM/sarama/describe_client_quotas_request.go similarity index 100% rename from vendor/github.com/Shopify/sarama/describe_client_quotas_request.go rename to vendor/github.com/IBM/sarama/describe_client_quotas_request.go diff --git a/vendor/github.com/Shopify/sarama/describe_client_quotas_response.go b/vendor/github.com/IBM/sarama/describe_client_quotas_response.go similarity index 100% rename from vendor/github.com/Shopify/sarama/describe_client_quotas_response.go rename to vendor/github.com/IBM/sarama/describe_client_quotas_response.go diff --git a/vendor/github.com/Shopify/sarama/describe_configs_request.go b/vendor/github.com/IBM/sarama/describe_configs_request.go similarity index 100% rename from vendor/github.com/Shopify/sarama/describe_configs_request.go rename to vendor/github.com/IBM/sarama/describe_configs_request.go diff --git a/vendor/github.com/Shopify/sarama/describe_configs_response.go b/vendor/github.com/IBM/sarama/describe_configs_response.go similarity index 100% rename from vendor/github.com/Shopify/sarama/describe_configs_response.go rename to vendor/github.com/IBM/sarama/describe_configs_response.go diff --git a/vendor/github.com/Shopify/sarama/describe_groups_request.go b/vendor/github.com/IBM/sarama/describe_groups_request.go similarity index 92% rename from vendor/github.com/Shopify/sarama/describe_groups_request.go rename to vendor/github.com/IBM/sarama/describe_groups_request.go index f81f69ac4..fc8e6b588 
100644 --- a/vendor/github.com/Shopify/sarama/describe_groups_request.go +++ b/vendor/github.com/IBM/sarama/describe_groups_request.go @@ -44,8 +44,14 @@ func (r *DescribeGroupsRequest) headerVersion() int16 { func (r *DescribeGroupsRequest) requiredVersion() KafkaVersion { switch r.Version { - case 1, 2, 3, 4: + case 1: + return V1_1_0_0 + case 2: + return V2_0_0_0 + case 3: return V2_3_0_0 + case 4: + return V2_4_0_0 } return V0_9_0_0 } diff --git a/vendor/github.com/Shopify/sarama/describe_groups_response.go b/vendor/github.com/IBM/sarama/describe_groups_response.go similarity index 98% rename from vendor/github.com/Shopify/sarama/describe_groups_response.go rename to vendor/github.com/IBM/sarama/describe_groups_response.go index 09052e431..12bf93e15 100644 --- a/vendor/github.com/Shopify/sarama/describe_groups_response.go +++ b/vendor/github.com/IBM/sarama/describe_groups_response.go @@ -65,8 +65,14 @@ func (r *DescribeGroupsResponse) headerVersion() int16 { func (r *DescribeGroupsResponse) requiredVersion() KafkaVersion { switch r.Version { - case 1, 2, 3, 4: + case 1: + return V1_1_0_0 + case 2: + return V2_0_0_0 + case 3: return V2_3_0_0 + case 4: + return V2_4_0_0 } return V0_9_0_0 } diff --git a/vendor/github.com/Shopify/sarama/describe_log_dirs_request.go b/vendor/github.com/IBM/sarama/describe_log_dirs_request.go similarity index 100% rename from vendor/github.com/Shopify/sarama/describe_log_dirs_request.go rename to vendor/github.com/IBM/sarama/describe_log_dirs_request.go diff --git a/vendor/github.com/Shopify/sarama/describe_log_dirs_response.go b/vendor/github.com/IBM/sarama/describe_log_dirs_response.go similarity index 100% rename from vendor/github.com/Shopify/sarama/describe_log_dirs_response.go rename to vendor/github.com/IBM/sarama/describe_log_dirs_response.go diff --git a/vendor/github.com/Shopify/sarama/describe_user_scram_credentials_request.go b/vendor/github.com/IBM/sarama/describe_user_scram_credentials_request.go similarity index 100% rename from vendor/github.com/Shopify/sarama/describe_user_scram_credentials_request.go rename to vendor/github.com/IBM/sarama/describe_user_scram_credentials_request.go diff --git a/vendor/github.com/Shopify/sarama/describe_user_scram_credentials_response.go b/vendor/github.com/IBM/sarama/describe_user_scram_credentials_response.go similarity index 100% rename from vendor/github.com/Shopify/sarama/describe_user_scram_credentials_response.go rename to vendor/github.com/IBM/sarama/describe_user_scram_credentials_response.go diff --git a/vendor/github.com/Shopify/sarama/dev.yml b/vendor/github.com/IBM/sarama/dev.yml similarity index 100% rename from vendor/github.com/Shopify/sarama/dev.yml rename to vendor/github.com/IBM/sarama/dev.yml diff --git a/vendor/github.com/Shopify/sarama/docker-compose.yml b/vendor/github.com/IBM/sarama/docker-compose.yml similarity index 96% rename from vendor/github.com/Shopify/sarama/docker-compose.yml rename to vendor/github.com/IBM/sarama/docker-compose.yml index e1119c87f..22ee21bf9 100644 --- a/vendor/github.com/Shopify/sarama/docker-compose.yml +++ b/vendor/github.com/IBM/sarama/docker-compose.yml @@ -40,7 +40,7 @@ services: dockerfile: Dockerfile.kafka restart: always environment: - KAFKA_VERSION: ${KAFKA_VERSION:-3.3.1} + KAFKA_VERSION: ${KAFKA_VERSION:-3.3.2} KAFKA_CFG_ZOOKEEPER_CONNECT: 'zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181' KAFKA_CFG_LISTENERS: 'LISTENER_INTERNAL://:9091,LISTENER_LOCAL://:29091' KAFKA_CFG_ADVERTISED_LISTENERS: 
'LISTENER_INTERNAL://kafka-1:9091,LISTENER_LOCAL://localhost:29091' @@ -62,7 +62,7 @@ services: dockerfile: Dockerfile.kafka restart: always environment: - KAFKA_VERSION: ${KAFKA_VERSION:-3.3.1} + KAFKA_VERSION: ${KAFKA_VERSION:-3.3.2} KAFKA_CFG_ZOOKEEPER_CONNECT: 'zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181' KAFKA_CFG_LISTENERS: 'LISTENER_INTERNAL://:9091,LISTENER_LOCAL://:29092' KAFKA_CFG_ADVERTISED_LISTENERS: 'LISTENER_INTERNAL://kafka-2:9091,LISTENER_LOCAL://localhost:29092' @@ -84,7 +84,7 @@ services: dockerfile: Dockerfile.kafka restart: always environment: - KAFKA_VERSION: ${KAFKA_VERSION:-3.3.1} + KAFKA_VERSION: ${KAFKA_VERSION:-3.3.2} KAFKA_CFG_ZOOKEEPER_CONNECT: 'zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181' KAFKA_CFG_LISTENERS: 'LISTENER_INTERNAL://:9091,LISTENER_LOCAL://:29093' KAFKA_CFG_ADVERTISED_LISTENERS: 'LISTENER_INTERNAL://kafka-3:9091,LISTENER_LOCAL://localhost:29093' @@ -106,7 +106,7 @@ services: dockerfile: Dockerfile.kafka restart: always environment: - KAFKA_VERSION: ${KAFKA_VERSION:-3.3.1} + KAFKA_VERSION: ${KAFKA_VERSION:-3.3.2} KAFKA_CFG_ZOOKEEPER_CONNECT: 'zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181' KAFKA_CFG_LISTENERS: 'LISTENER_INTERNAL://:9091,LISTENER_LOCAL://:29094' KAFKA_CFG_ADVERTISED_LISTENERS: 'LISTENER_INTERNAL://kafka-4:9091,LISTENER_LOCAL://localhost:29094' @@ -128,7 +128,7 @@ services: dockerfile: Dockerfile.kafka restart: always environment: - KAFKA_VERSION: ${KAFKA_VERSION:-3.3.1} + KAFKA_VERSION: ${KAFKA_VERSION:-3.3.2} KAFKA_CFG_ZOOKEEPER_CONNECT: 'zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181' KAFKA_CFG_LISTENERS: 'LISTENER_INTERNAL://:9091,LISTENER_LOCAL://:29095' KAFKA_CFG_ADVERTISED_LISTENERS: 'LISTENER_INTERNAL://kafka-5:9091,LISTENER_LOCAL://localhost:29095' diff --git a/vendor/github.com/Shopify/sarama/encoder_decoder.go b/vendor/github.com/IBM/sarama/encoder_decoder.go similarity index 100% rename from vendor/github.com/Shopify/sarama/encoder_decoder.go rename to vendor/github.com/IBM/sarama/encoder_decoder.go diff --git a/vendor/github.com/Shopify/sarama/end_txn_request.go b/vendor/github.com/IBM/sarama/end_txn_request.go similarity index 100% rename from vendor/github.com/Shopify/sarama/end_txn_request.go rename to vendor/github.com/IBM/sarama/end_txn_request.go diff --git a/vendor/github.com/Shopify/sarama/end_txn_response.go b/vendor/github.com/IBM/sarama/end_txn_response.go similarity index 100% rename from vendor/github.com/Shopify/sarama/end_txn_response.go rename to vendor/github.com/IBM/sarama/end_txn_response.go diff --git a/vendor/github.com/Shopify/sarama/entrypoint.sh b/vendor/github.com/IBM/sarama/entrypoint.sh similarity index 94% rename from vendor/github.com/Shopify/sarama/entrypoint.sh rename to vendor/github.com/IBM/sarama/entrypoint.sh index 8cd2efcb9..7b344fae8 100644 --- a/vendor/github.com/Shopify/sarama/entrypoint.sh +++ b/vendor/github.com/IBM/sarama/entrypoint.sh @@ -1,6 +1,6 @@ #!/bin/bash -KAFKA_VERSION="${KAFKA_VERSION:-3.3.1}" +KAFKA_VERSION="${KAFKA_VERSION:-3.3.2}" KAFKA_HOME="/opt/kafka-${KAFKA_VERSION}" if [ ! 
-d "${KAFKA_HOME}" ]; then diff --git a/vendor/github.com/Shopify/sarama/errors.go b/vendor/github.com/IBM/sarama/errors.go similarity index 99% rename from vendor/github.com/Shopify/sarama/errors.go rename to vendor/github.com/IBM/sarama/errors.go index 27977f166..8d1d16834 100644 --- a/vendor/github.com/Shopify/sarama/errors.go +++ b/vendor/github.com/IBM/sarama/errors.go @@ -79,7 +79,7 @@ var ErrTransactionNotReady = errors.New("transaction manager: transaction is not // ErrNonTransactedProducer when calling BeginTxn, CommitTxn or AbortTxn on a non transactional producer. var ErrNonTransactedProducer = errors.New("transaction manager: you need to add TransactionalID to producer") -// ErrTransitionNotAllowed when txnmgr state transiion is not valid. +// ErrTransitionNotAllowed when txnmgr state transition is not valid. var ErrTransitionNotAllowed = errors.New("transaction manager: invalid transition attempted") // ErrCannotTransitionNilError when transition is attempted with an nil error. @@ -89,7 +89,7 @@ var ErrCannotTransitionNilError = errors.New("transaction manager: cannot transi var ErrTxnUnableToParseResponse = errors.New("transaction manager: unable to parse response") // MultiErrorFormat specifies the formatter applied to format multierrors. The -// default implementation is a consensed version of the hashicorp/go-multierror +// default implementation is a condensed version of the hashicorp/go-multierror // default one var MultiErrorFormat multierror.ErrorFormatFunc = func(es []error) string { if len(es) == 1 { diff --git a/vendor/github.com/Shopify/sarama/fetch_request.go b/vendor/github.com/IBM/sarama/fetch_request.go similarity index 100% rename from vendor/github.com/Shopify/sarama/fetch_request.go rename to vendor/github.com/IBM/sarama/fetch_request.go diff --git a/vendor/github.com/Shopify/sarama/fetch_response.go b/vendor/github.com/IBM/sarama/fetch_response.go similarity index 100% rename from vendor/github.com/Shopify/sarama/fetch_response.go rename to vendor/github.com/IBM/sarama/fetch_response.go diff --git a/vendor/github.com/Shopify/sarama/find_coordinator_request.go b/vendor/github.com/IBM/sarama/find_coordinator_request.go similarity index 100% rename from vendor/github.com/Shopify/sarama/find_coordinator_request.go rename to vendor/github.com/IBM/sarama/find_coordinator_request.go diff --git a/vendor/github.com/Shopify/sarama/find_coordinator_response.go b/vendor/github.com/IBM/sarama/find_coordinator_response.go similarity index 100% rename from vendor/github.com/Shopify/sarama/find_coordinator_response.go rename to vendor/github.com/IBM/sarama/find_coordinator_response.go diff --git a/vendor/github.com/Shopify/sarama/gssapi_kerberos.go b/vendor/github.com/IBM/sarama/gssapi_kerberos.go similarity index 99% rename from vendor/github.com/Shopify/sarama/gssapi_kerberos.go rename to vendor/github.com/IBM/sarama/gssapi_kerberos.go index ab8b70196..8abbcdc38 100644 --- a/vendor/github.com/Shopify/sarama/gssapi_kerberos.go +++ b/vendor/github.com/IBM/sarama/gssapi_kerberos.go @@ -23,6 +23,7 @@ const ( GSS_API_GENERIC_TAG = 0x60 KRB5_USER_AUTH = 1 KRB5_KEYTAB_AUTH = 2 + KRB5_CCACHE_AUTH = 3 GSS_API_INITIAL = 1 GSS_API_VERIFY = 2 GSS_API_FINISH = 3 @@ -31,6 +32,7 @@ const ( type GSSAPIConfig struct { AuthType int KeyTabPath string + CCachePath string KerberosConfigPath string ServiceName string Username string diff --git a/vendor/github.com/Shopify/sarama/heartbeat_request.go b/vendor/github.com/IBM/sarama/heartbeat_request.go similarity index 100% rename from 
vendor/github.com/Shopify/sarama/heartbeat_request.go rename to vendor/github.com/IBM/sarama/heartbeat_request.go diff --git a/vendor/github.com/Shopify/sarama/heartbeat_response.go b/vendor/github.com/IBM/sarama/heartbeat_response.go similarity index 100% rename from vendor/github.com/Shopify/sarama/heartbeat_response.go rename to vendor/github.com/IBM/sarama/heartbeat_response.go diff --git a/vendor/github.com/Shopify/sarama/incremental_alter_configs_request.go b/vendor/github.com/IBM/sarama/incremental_alter_configs_request.go similarity index 100% rename from vendor/github.com/Shopify/sarama/incremental_alter_configs_request.go rename to vendor/github.com/IBM/sarama/incremental_alter_configs_request.go diff --git a/vendor/github.com/Shopify/sarama/incremental_alter_configs_response.go b/vendor/github.com/IBM/sarama/incremental_alter_configs_response.go similarity index 100% rename from vendor/github.com/Shopify/sarama/incremental_alter_configs_response.go rename to vendor/github.com/IBM/sarama/incremental_alter_configs_response.go diff --git a/vendor/github.com/Shopify/sarama/init_producer_id_request.go b/vendor/github.com/IBM/sarama/init_producer_id_request.go similarity index 100% rename from vendor/github.com/Shopify/sarama/init_producer_id_request.go rename to vendor/github.com/IBM/sarama/init_producer_id_request.go diff --git a/vendor/github.com/Shopify/sarama/init_producer_id_response.go b/vendor/github.com/IBM/sarama/init_producer_id_response.go similarity index 100% rename from vendor/github.com/Shopify/sarama/init_producer_id_response.go rename to vendor/github.com/IBM/sarama/init_producer_id_response.go diff --git a/vendor/github.com/Shopify/sarama/interceptors.go b/vendor/github.com/IBM/sarama/interceptors.go similarity index 100% rename from vendor/github.com/Shopify/sarama/interceptors.go rename to vendor/github.com/IBM/sarama/interceptors.go diff --git a/vendor/github.com/Shopify/sarama/join_group_request.go b/vendor/github.com/IBM/sarama/join_group_request.go similarity index 100% rename from vendor/github.com/Shopify/sarama/join_group_request.go rename to vendor/github.com/IBM/sarama/join_group_request.go diff --git a/vendor/github.com/Shopify/sarama/join_group_response.go b/vendor/github.com/IBM/sarama/join_group_response.go similarity index 100% rename from vendor/github.com/Shopify/sarama/join_group_response.go rename to vendor/github.com/IBM/sarama/join_group_response.go diff --git a/vendor/github.com/Shopify/sarama/kerberos_client.go b/vendor/github.com/IBM/sarama/kerberos_client.go similarity index 79% rename from vendor/github.com/Shopify/sarama/kerberos_client.go rename to vendor/github.com/IBM/sarama/kerberos_client.go index 01a53193b..289126879 100644 --- a/vendor/github.com/Shopify/sarama/kerberos_client.go +++ b/vendor/github.com/IBM/sarama/kerberos_client.go @@ -3,6 +3,7 @@ package sarama import ( krb5client "github.com/jcmturner/gokrb5/v8/client" krb5config "github.com/jcmturner/gokrb5/v8/config" + "github.com/jcmturner/gokrb5/v8/credentials" "github.com/jcmturner/gokrb5/v8/keytab" "github.com/jcmturner/gokrb5/v8/types" ) @@ -32,13 +33,23 @@ func NewKerberosClient(config *GSSAPIConfig) (KerberosClient, error) { func createClient(config *GSSAPIConfig, cfg *krb5config.Config) (KerberosClient, error) { var client *krb5client.Client - if config.AuthType == KRB5_KEYTAB_AUTH { + switch config.AuthType { + case KRB5_KEYTAB_AUTH: kt, err := keytab.Load(config.KeyTabPath) if err != nil { return nil, err } client = krb5client.NewWithKeytab(config.Username, 
config.Realm, kt, cfg, krb5client.DisablePAFXFAST(config.DisablePAFXFAST)) - } else { + case KRB5_CCACHE_AUTH: + cc, err := credentials.LoadCCache(config.CCachePath) + if err != nil { + return nil, err + } + client, err = krb5client.NewFromCCache(cc, cfg, krb5client.DisablePAFXFAST(config.DisablePAFXFAST)) + if err != nil { + return nil, err + } + default: client = krb5client.NewWithPassword(config.Username, config.Realm, config.Password, cfg, krb5client.DisablePAFXFAST(config.DisablePAFXFAST)) } diff --git a/vendor/github.com/Shopify/sarama/leave_group_request.go b/vendor/github.com/IBM/sarama/leave_group_request.go similarity index 100% rename from vendor/github.com/Shopify/sarama/leave_group_request.go rename to vendor/github.com/IBM/sarama/leave_group_request.go diff --git a/vendor/github.com/Shopify/sarama/leave_group_response.go b/vendor/github.com/IBM/sarama/leave_group_response.go similarity index 100% rename from vendor/github.com/Shopify/sarama/leave_group_response.go rename to vendor/github.com/IBM/sarama/leave_group_response.go diff --git a/vendor/github.com/Shopify/sarama/length_field.go b/vendor/github.com/IBM/sarama/length_field.go similarity index 100% rename from vendor/github.com/Shopify/sarama/length_field.go rename to vendor/github.com/IBM/sarama/length_field.go diff --git a/vendor/github.com/Shopify/sarama/list_groups_request.go b/vendor/github.com/IBM/sarama/list_groups_request.go similarity index 100% rename from vendor/github.com/Shopify/sarama/list_groups_request.go rename to vendor/github.com/IBM/sarama/list_groups_request.go diff --git a/vendor/github.com/Shopify/sarama/list_groups_response.go b/vendor/github.com/IBM/sarama/list_groups_response.go similarity index 100% rename from vendor/github.com/Shopify/sarama/list_groups_response.go rename to vendor/github.com/IBM/sarama/list_groups_response.go diff --git a/vendor/github.com/Shopify/sarama/list_partition_reassignments_request.go b/vendor/github.com/IBM/sarama/list_partition_reassignments_request.go similarity index 100% rename from vendor/github.com/Shopify/sarama/list_partition_reassignments_request.go rename to vendor/github.com/IBM/sarama/list_partition_reassignments_request.go diff --git a/vendor/github.com/Shopify/sarama/list_partition_reassignments_response.go b/vendor/github.com/IBM/sarama/list_partition_reassignments_response.go similarity index 100% rename from vendor/github.com/Shopify/sarama/list_partition_reassignments_response.go rename to vendor/github.com/IBM/sarama/list_partition_reassignments_response.go diff --git a/vendor/github.com/Shopify/sarama/message.go b/vendor/github.com/IBM/sarama/message.go similarity index 100% rename from vendor/github.com/Shopify/sarama/message.go rename to vendor/github.com/IBM/sarama/message.go diff --git a/vendor/github.com/Shopify/sarama/message_set.go b/vendor/github.com/IBM/sarama/message_set.go similarity index 100% rename from vendor/github.com/Shopify/sarama/message_set.go rename to vendor/github.com/IBM/sarama/message_set.go diff --git a/vendor/github.com/Shopify/sarama/metadata_request.go b/vendor/github.com/IBM/sarama/metadata_request.go similarity index 100% rename from vendor/github.com/Shopify/sarama/metadata_request.go rename to vendor/github.com/IBM/sarama/metadata_request.go diff --git a/vendor/github.com/Shopify/sarama/metadata_response.go b/vendor/github.com/IBM/sarama/metadata_response.go similarity index 100% rename from vendor/github.com/Shopify/sarama/metadata_response.go rename to vendor/github.com/IBM/sarama/metadata_response.go diff 
--git a/vendor/github.com/Shopify/sarama/metrics.go b/vendor/github.com/IBM/sarama/metrics.go similarity index 100% rename from vendor/github.com/Shopify/sarama/metrics.go rename to vendor/github.com/IBM/sarama/metrics.go diff --git a/vendor/github.com/Shopify/sarama/mockbroker.go b/vendor/github.com/IBM/sarama/mockbroker.go similarity index 100% rename from vendor/github.com/Shopify/sarama/mockbroker.go rename to vendor/github.com/IBM/sarama/mockbroker.go diff --git a/vendor/github.com/Shopify/sarama/mockkerberos.go b/vendor/github.com/IBM/sarama/mockkerberos.go similarity index 100% rename from vendor/github.com/Shopify/sarama/mockkerberos.go rename to vendor/github.com/IBM/sarama/mockkerberos.go diff --git a/vendor/github.com/Shopify/sarama/mockresponses.go b/vendor/github.com/IBM/sarama/mockresponses.go similarity index 100% rename from vendor/github.com/Shopify/sarama/mockresponses.go rename to vendor/github.com/IBM/sarama/mockresponses.go diff --git a/vendor/github.com/Shopify/sarama/offset_commit_request.go b/vendor/github.com/IBM/sarama/offset_commit_request.go similarity index 95% rename from vendor/github.com/Shopify/sarama/offset_commit_request.go rename to vendor/github.com/IBM/sarama/offset_commit_request.go index 5dd88220d..ed0566fe6 100644 --- a/vendor/github.com/Shopify/sarama/offset_commit_request.go +++ b/vendor/github.com/IBM/sarama/offset_commit_request.go @@ -220,7 +220,11 @@ func (r *OffsetCommitRequest) requiredVersion() KafkaVersion { } } -func (r *OffsetCommitRequest) AddBlock(topic string, partitionID int32, offset int64, leaderEpoch int32, timestamp int64, metadata string) { +func (r *OffsetCommitRequest) AddBlock(topic string, partitionID int32, offset int64, timestamp int64, metadata string) { + r.AddBlockWithLeaderEpoch(topic, partitionID, offset, 0, timestamp, metadata) +} + +func (r *OffsetCommitRequest) AddBlockWithLeaderEpoch(topic string, partitionID int32, offset int64, leaderEpoch int32, timestamp int64, metadata string) { if r.blocks == nil { r.blocks = make(map[string]map[int32]*offsetCommitRequestBlock) } diff --git a/vendor/github.com/Shopify/sarama/offset_commit_response.go b/vendor/github.com/IBM/sarama/offset_commit_response.go similarity index 100% rename from vendor/github.com/Shopify/sarama/offset_commit_response.go rename to vendor/github.com/IBM/sarama/offset_commit_response.go diff --git a/vendor/github.com/Shopify/sarama/offset_fetch_request.go b/vendor/github.com/IBM/sarama/offset_fetch_request.go similarity index 100% rename from vendor/github.com/Shopify/sarama/offset_fetch_request.go rename to vendor/github.com/IBM/sarama/offset_fetch_request.go diff --git a/vendor/github.com/Shopify/sarama/offset_fetch_response.go b/vendor/github.com/IBM/sarama/offset_fetch_response.go similarity index 100% rename from vendor/github.com/Shopify/sarama/offset_fetch_response.go rename to vendor/github.com/IBM/sarama/offset_fetch_response.go diff --git a/vendor/github.com/Shopify/sarama/offset_manager.go b/vendor/github.com/IBM/sarama/offset_manager.go similarity index 98% rename from vendor/github.com/Shopify/sarama/offset_manager.go rename to vendor/github.com/IBM/sarama/offset_manager.go index 1ea15ff93..9b7960599 100644 --- a/vendor/github.com/Shopify/sarama/offset_manager.go +++ b/vendor/github.com/IBM/sarama/offset_manager.go @@ -304,7 +304,7 @@ func (om *offsetManager) constructRequest() *OffsetCommitRequest { for _, pom := range topicManagers { pom.lock.Lock() if pom.dirty { - r.AddBlock(pom.topic, pom.partition, pom.offset, pom.leaderEpoch, 
perPartitionTimestamp, pom.metadata) + r.AddBlockWithLeaderEpoch(pom.topic, pom.partition, pom.offset, pom.leaderEpoch, perPartitionTimestamp, pom.metadata) } pom.lock.Unlock() } @@ -359,13 +359,13 @@ func (om *offsetManager) handleResponse(broker *Broker, req *OffsetCommitRequest // nothing wrong but we didn't commit, we'll get it next time round case ErrFencedInstancedId: pom.handleError(err) - // TODO close the whole consumer for instacne fenced.... + // TODO close the whole consumer for instance fenced.... om.tryCancelSession() case ErrUnknownTopicOrPartition: // let the user know *and* try redispatching - if topic-auto-create is // enabled, redispatching should trigger a metadata req and create the // topic; if not then re-dispatching won't help, but we've let the user - // know and it shouldn't hurt either (see https://github.com/Shopify/sarama/issues/706) + // know and it shouldn't hurt either (see https://github.com/IBM/sarama/issues/706) fallthrough default: // dunno, tell the user and try redispatching diff --git a/vendor/github.com/Shopify/sarama/offset_request.go b/vendor/github.com/IBM/sarama/offset_request.go similarity index 100% rename from vendor/github.com/Shopify/sarama/offset_request.go rename to vendor/github.com/IBM/sarama/offset_request.go diff --git a/vendor/github.com/Shopify/sarama/offset_response.go b/vendor/github.com/IBM/sarama/offset_response.go similarity index 100% rename from vendor/github.com/Shopify/sarama/offset_response.go rename to vendor/github.com/IBM/sarama/offset_response.go diff --git a/vendor/github.com/Shopify/sarama/packet_decoder.go b/vendor/github.com/IBM/sarama/packet_decoder.go similarity index 98% rename from vendor/github.com/Shopify/sarama/packet_decoder.go rename to vendor/github.com/IBM/sarama/packet_decoder.go index b8cae5350..526e0f42f 100644 --- a/vendor/github.com/Shopify/sarama/packet_decoder.go +++ b/vendor/github.com/IBM/sarama/packet_decoder.go @@ -55,7 +55,7 @@ type pushDecoder interface { // Saves the offset into the input buffer as the location to actually read the calculated value when able. saveOffset(in int) - // Returns the length of data to reserve for the input of this encoder (eg 4 bytes for a CRC32). + // Returns the length of data to reserve for the input of this encoder (e.g. 4 bytes for a CRC32). reserveLength() int // Indicates that all required data is now available to calculate and check the field. 
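The kerberos_client.go hunk above adds a third auth path: loading credentials from an existing Kerberos ticket cache instead of a keytab or password. A minimal sketch of a client opting into it; the broker address, realm, and cache path are placeholders, and only the CCachePath field and the KRB5_CCACHE_AUTH constant come from the vendored change, everything else is standard sarama configuration.

```go
package main

import (
	"log"

	"github.com/IBM/sarama"
)

func main() {
	cfg := sarama.NewConfig()
	cfg.Net.SASL.Enable = true
	cfg.Net.SASL.Mechanism = sarama.SASLTypeGSSAPI
	cfg.Net.SASL.GSSAPI = sarama.GSSAPIConfig{
		AuthType:           sarama.KRB5_CCACHE_AUTH, // new: reuse an existing ticket cache
		CCachePath:         "/tmp/krb5cc_1000",      // placeholder, e.g. produced by kinit
		KerberosConfigPath: "/etc/krb5.conf",
		ServiceName:        "kafka",
		Realm:              "EXAMPLE.COM", // placeholder realm
	}

	client, err := sarama.NewClient([]string{"broker-1:9093"}, cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()
}
```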
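The offset_commit_request.go hunk above restores AddBlock to its earlier signature without a leader epoch and moves the epoch-aware variant to a new AddBlockWithLeaderEpoch method, with the old entry point delegating with an epoch of 0. A small sketch of the two call shapes, using dummy group, topic, partition, and offset values:

```go
package main

import "github.com/IBM/sarama"

func main() {
	req := &sarama.OffsetCommitRequest{
		ConsumerGroup: "example-group", // dummy group
		Version:       2,
	}

	// Existing five-argument callers keep compiling: leader epoch defaults to 0.
	req.AddBlock("events", 0, 42, sarama.ReceiveTime, "")

	// Callers that track the leader epoch pass it explicitly.
	req.AddBlockWithLeaderEpoch("events", 1, 42, 5, sarama.ReceiveTime, "")
}
```

This keeps callers source-compatible while letting the offset manager (see the constructRequest hunk above) forward pom.leaderEpoch explicitly.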
diff --git a/vendor/github.com/Shopify/sarama/packet_encoder.go b/vendor/github.com/IBM/sarama/packet_encoder.go similarity index 100% rename from vendor/github.com/Shopify/sarama/packet_encoder.go rename to vendor/github.com/IBM/sarama/packet_encoder.go diff --git a/vendor/github.com/Shopify/sarama/partitioner.go b/vendor/github.com/IBM/sarama/partitioner.go similarity index 100% rename from vendor/github.com/Shopify/sarama/partitioner.go rename to vendor/github.com/IBM/sarama/partitioner.go diff --git a/vendor/github.com/Shopify/sarama/prep_encoder.go b/vendor/github.com/IBM/sarama/prep_encoder.go similarity index 100% rename from vendor/github.com/Shopify/sarama/prep_encoder.go rename to vendor/github.com/IBM/sarama/prep_encoder.go diff --git a/vendor/github.com/Shopify/sarama/produce_request.go b/vendor/github.com/IBM/sarama/produce_request.go similarity index 100% rename from vendor/github.com/Shopify/sarama/produce_request.go rename to vendor/github.com/IBM/sarama/produce_request.go diff --git a/vendor/github.com/Shopify/sarama/produce_response.go b/vendor/github.com/IBM/sarama/produce_response.go similarity index 100% rename from vendor/github.com/Shopify/sarama/produce_response.go rename to vendor/github.com/IBM/sarama/produce_response.go diff --git a/vendor/github.com/Shopify/sarama/produce_set.go b/vendor/github.com/IBM/sarama/produce_set.go similarity index 100% rename from vendor/github.com/Shopify/sarama/produce_set.go rename to vendor/github.com/IBM/sarama/produce_set.go diff --git a/vendor/github.com/Shopify/sarama/quota_types.go b/vendor/github.com/IBM/sarama/quota_types.go similarity index 100% rename from vendor/github.com/Shopify/sarama/quota_types.go rename to vendor/github.com/IBM/sarama/quota_types.go diff --git a/vendor/github.com/Shopify/sarama/real_decoder.go b/vendor/github.com/IBM/sarama/real_decoder.go similarity index 100% rename from vendor/github.com/Shopify/sarama/real_decoder.go rename to vendor/github.com/IBM/sarama/real_decoder.go diff --git a/vendor/github.com/Shopify/sarama/real_encoder.go b/vendor/github.com/IBM/sarama/real_encoder.go similarity index 100% rename from vendor/github.com/Shopify/sarama/real_encoder.go rename to vendor/github.com/IBM/sarama/real_encoder.go diff --git a/vendor/github.com/Shopify/sarama/record.go b/vendor/github.com/IBM/sarama/record.go similarity index 100% rename from vendor/github.com/Shopify/sarama/record.go rename to vendor/github.com/IBM/sarama/record.go diff --git a/vendor/github.com/Shopify/sarama/record_batch.go b/vendor/github.com/IBM/sarama/record_batch.go similarity index 100% rename from vendor/github.com/Shopify/sarama/record_batch.go rename to vendor/github.com/IBM/sarama/record_batch.go diff --git a/vendor/github.com/Shopify/sarama/records.go b/vendor/github.com/IBM/sarama/records.go similarity index 100% rename from vendor/github.com/Shopify/sarama/records.go rename to vendor/github.com/IBM/sarama/records.go diff --git a/vendor/github.com/Shopify/sarama/request.go b/vendor/github.com/IBM/sarama/request.go similarity index 100% rename from vendor/github.com/Shopify/sarama/request.go rename to vendor/github.com/IBM/sarama/request.go diff --git a/vendor/github.com/Shopify/sarama/response_header.go b/vendor/github.com/IBM/sarama/response_header.go similarity index 100% rename from vendor/github.com/Shopify/sarama/response_header.go rename to vendor/github.com/IBM/sarama/response_header.go diff --git a/vendor/github.com/Shopify/sarama/sarama.go b/vendor/github.com/IBM/sarama/sarama.go similarity index 99% rename 
from vendor/github.com/Shopify/sarama/sarama.go rename to vendor/github.com/IBM/sarama/sarama.go index a42bc075a..4d5f60a66 100644 --- a/vendor/github.com/Shopify/sarama/sarama.go +++ b/vendor/github.com/IBM/sarama/sarama.go @@ -91,7 +91,7 @@ import ( var ( // Logger is the instance of a StdLogger interface that Sarama writes connection - // management events to. By default it is set to discard all log messages via ioutil.Discard, + // management events to. By default it is set to discard all log messages via io.Discard, // but you can set it to redirect wherever you want. Logger StdLogger = log.New(io.Discard, "[Sarama] ", log.LstdFlags) diff --git a/vendor/github.com/Shopify/sarama/sasl_authenticate_request.go b/vendor/github.com/IBM/sarama/sasl_authenticate_request.go similarity index 100% rename from vendor/github.com/Shopify/sarama/sasl_authenticate_request.go rename to vendor/github.com/IBM/sarama/sasl_authenticate_request.go diff --git a/vendor/github.com/Shopify/sarama/sasl_authenticate_response.go b/vendor/github.com/IBM/sarama/sasl_authenticate_response.go similarity index 100% rename from vendor/github.com/Shopify/sarama/sasl_authenticate_response.go rename to vendor/github.com/IBM/sarama/sasl_authenticate_response.go diff --git a/vendor/github.com/Shopify/sarama/sasl_handshake_request.go b/vendor/github.com/IBM/sarama/sasl_handshake_request.go similarity index 100% rename from vendor/github.com/Shopify/sarama/sasl_handshake_request.go rename to vendor/github.com/IBM/sarama/sasl_handshake_request.go diff --git a/vendor/github.com/Shopify/sarama/sasl_handshake_response.go b/vendor/github.com/IBM/sarama/sasl_handshake_response.go similarity index 100% rename from vendor/github.com/Shopify/sarama/sasl_handshake_response.go rename to vendor/github.com/IBM/sarama/sasl_handshake_response.go diff --git a/vendor/github.com/Shopify/sarama/scram_formatter.go b/vendor/github.com/IBM/sarama/scram_formatter.go similarity index 100% rename from vendor/github.com/Shopify/sarama/scram_formatter.go rename to vendor/github.com/IBM/sarama/scram_formatter.go diff --git a/vendor/github.com/Shopify/sarama/sticky_assignor_user_data.go b/vendor/github.com/IBM/sarama/sticky_assignor_user_data.go similarity index 100% rename from vendor/github.com/Shopify/sarama/sticky_assignor_user_data.go rename to vendor/github.com/IBM/sarama/sticky_assignor_user_data.go diff --git a/vendor/github.com/Shopify/sarama/sync_group_request.go b/vendor/github.com/IBM/sarama/sync_group_request.go similarity index 100% rename from vendor/github.com/Shopify/sarama/sync_group_request.go rename to vendor/github.com/IBM/sarama/sync_group_request.go diff --git a/vendor/github.com/Shopify/sarama/sync_group_response.go b/vendor/github.com/IBM/sarama/sync_group_response.go similarity index 100% rename from vendor/github.com/Shopify/sarama/sync_group_response.go rename to vendor/github.com/IBM/sarama/sync_group_response.go diff --git a/vendor/github.com/Shopify/sarama/sync_producer.go b/vendor/github.com/IBM/sarama/sync_producer.go similarity index 98% rename from vendor/github.com/Shopify/sarama/sync_producer.go rename to vendor/github.com/IBM/sarama/sync_producer.go index 8765ac336..3119baa6d 100644 --- a/vendor/github.com/Shopify/sarama/sync_producer.go +++ b/vendor/github.com/IBM/sarama/sync_producer.go @@ -33,7 +33,7 @@ type SyncProducer interface { // TxnStatus return current producer transaction status. TxnStatus() ProducerTxnStatusFlag - // IsTransactional return true when current producer is is transactional. 
+ // IsTransactional return true when current producer is transactional. IsTransactional() bool // BeginTxn mark current transaction as ready. diff --git a/vendor/github.com/Shopify/sarama/timestamp.go b/vendor/github.com/IBM/sarama/timestamp.go similarity index 100% rename from vendor/github.com/Shopify/sarama/timestamp.go rename to vendor/github.com/IBM/sarama/timestamp.go diff --git a/vendor/github.com/Shopify/sarama/transaction_manager.go b/vendor/github.com/IBM/sarama/transaction_manager.go similarity index 99% rename from vendor/github.com/Shopify/sarama/transaction_manager.go rename to vendor/github.com/IBM/sarama/transaction_manager.go index e18abecd3..e1bcda3f9 100644 --- a/vendor/github.com/Shopify/sarama/transaction_manager.go +++ b/vendor/github.com/IBM/sarama/transaction_manager.go @@ -14,7 +14,7 @@ type ProducerTxnStatusFlag int16 const ( // ProducerTxnFlagUninitialized when txnmgr is created ProducerTxnFlagUninitialized ProducerTxnStatusFlag = 1 << iota - // ProducerTxnFlagInitializing when txnmgr is initilizing + // ProducerTxnFlagInitializing when txnmgr is initializing ProducerTxnFlagInitializing // ProducerTxnFlagReady when is ready to receive transaction ProducerTxnFlagReady @@ -22,7 +22,7 @@ const ( ProducerTxnFlagInTransaction // ProducerTxnFlagEndTransaction when transaction will be committed ProducerTxnFlagEndTransaction - // ProducerTxnFlagInError whan having abortable or fatal error + // ProducerTxnFlagInError when having abortable or fatal error ProducerTxnFlagInError // ProducerTxnFlagCommittingTransaction when committing txn ProducerTxnFlagCommittingTransaction @@ -117,13 +117,13 @@ var producerTxnTransitions = map[ProducerTxnStatusFlag][]ProducerTxnStatusFlag{ ProducerTxnFlagReady, ProducerTxnFlagInError, }, - // When we need are initilizing + // When we need are initializing ProducerTxnFlagInitializing: { ProducerTxnFlagInitializing, ProducerTxnFlagReady, ProducerTxnFlagInError, }, - // When we have initilized transactional producer + // When we have initialized transactional producer ProducerTxnFlagReady: { ProducerTxnFlagInTransaction, }, @@ -660,7 +660,7 @@ func (t *transactionManager) finishTransaction(commit bool) error { t.mutex.Lock() defer t.mutex.Unlock() - // Ensure no error when committing or abording + // Ensure no error when committing or aborting if commit && t.currentTxnStatus()&ProducerTxnFlagInError != 0 { return t.lastError } else if !commit && t.currentTxnStatus()&ProducerTxnFlagFatalError != 0 { diff --git a/vendor/github.com/Shopify/sarama/txn_offset_commit_request.go b/vendor/github.com/IBM/sarama/txn_offset_commit_request.go similarity index 100% rename from vendor/github.com/Shopify/sarama/txn_offset_commit_request.go rename to vendor/github.com/IBM/sarama/txn_offset_commit_request.go diff --git a/vendor/github.com/Shopify/sarama/txn_offset_commit_response.go b/vendor/github.com/IBM/sarama/txn_offset_commit_response.go similarity index 100% rename from vendor/github.com/Shopify/sarama/txn_offset_commit_response.go rename to vendor/github.com/IBM/sarama/txn_offset_commit_response.go diff --git a/vendor/github.com/Shopify/sarama/utils.go b/vendor/github.com/IBM/sarama/utils.go similarity index 98% rename from vendor/github.com/Shopify/sarama/utils.go rename to vendor/github.com/IBM/sarama/utils.go index 819b6597c..4526543d6 100644 --- a/vendor/github.com/Shopify/sarama/utils.go +++ b/vendor/github.com/IBM/sarama/utils.go @@ -193,6 +193,7 @@ var ( V3_2_3_0 = newKafkaVersion(3, 2, 3, 0) V3_3_0_0 = newKafkaVersion(3, 3, 0, 0) V3_3_1_0 = 
newKafkaVersion(3, 3, 1, 0) + V3_3_2_0 = newKafkaVersion(3, 3, 2, 0) SupportedVersions = []KafkaVersion{ V0_8_2_0, @@ -248,9 +249,10 @@ var ( V3_2_3_0, V3_3_0_0, V3_3_1_0, + V3_3_2_0, } MinVersion = V0_8_2_0 - MaxVersion = V3_3_1_0 + MaxVersion = V3_3_2_0 DefaultVersion = V1_0_0_0 // reduced set of versions to matrix test @@ -266,7 +268,7 @@ var ( V2_8_2_0, V3_1_2_0, V3_2_3_0, - V3_3_1_0, + V3_3_2_0, } ) diff --git a/vendor/github.com/Shopify/sarama/version.go b/vendor/github.com/IBM/sarama/version.go similarity index 100% rename from vendor/github.com/Shopify/sarama/version.go rename to vendor/github.com/IBM/sarama/version.go diff --git a/vendor/github.com/Shopify/sarama/zstd.go b/vendor/github.com/IBM/sarama/zstd.go similarity index 100% rename from vendor/github.com/Shopify/sarama/zstd.go rename to vendor/github.com/IBM/sarama/zstd.go diff --git a/vendor/github.com/Shopify/sarama/CHANGELOG.md b/vendor/github.com/Shopify/sarama/CHANGELOG.md deleted file mode 100644 index c2f92ec9a..000000000 --- a/vendor/github.com/Shopify/sarama/CHANGELOG.md +++ /dev/null @@ -1,1187 +0,0 @@ -# Changelog - -## Version 1.31.1 (2022-02-01) - -- #2126 - @bai - Populate missing kafka versions -- #2124 - @bai - Add Kafka 3.1.0 to CI matrix, migrate to bitnami kafka image -- #2123 - @bai - Update klauspost/compress to 0.14 -- #2122 - @dnwe - fix(test): make it simpler to re-use toxiproxy -- #2119 - @bai - Add Kafka 3.1.0 version number -- #2005 - @raulnegreiros - feat: add methods to pause/resume consumer's consumption -- #2051 - @seveas - Expose the TLS connection state of a broker connection -- #2117 - @wuhuizuo - feat: add method MockApiVersionsResponse.SetApiKeys -- #2110 - @dnwe - fix: ensure heartbeats only stop after cleanup -- #2113 - @mosceo - Fix typo - -## Version 1.31.0 (2022-01-18) - -## What's Changed -### :tada: New Features / Improvements -* feat: expose IncrementalAlterConfigs API in admin.go by @fengyinqiao in https://github.com/Shopify/sarama/pull/2088 -* feat: allow AsyncProducer to have MaxOpenRequests inflight produce requests per broker by @xujianhai666 in https://github.com/Shopify/sarama/pull/1686 -* Support request pipelining in AsyncProducer by @slaunay in https://github.com/Shopify/sarama/pull/2094 -### :bug: Fixes -* fix(test): add fluent interface for mocks where missing by @grongor in https://github.com/Shopify/sarama/pull/2080 -* fix(test): test for ConsumePartition with OffsetOldest by @grongor in https://github.com/Shopify/sarama/pull/2081 -* fix: set HWMO during creation of partitionConsumer (fix incorrect HWMO before first fetch) by @grongor in https://github.com/Shopify/sarama/pull/2082 -* fix: ignore non-nil but empty error strings in Describe/Alter client quotas responses by @agriffaut in https://github.com/Shopify/sarama/pull/2096 -* fix: skip over KIP-482 tagged fields by @dnwe in https://github.com/Shopify/sarama/pull/2107 -* fix: clear preferredReadReplica if broker shutdown by @dnwe in https://github.com/Shopify/sarama/pull/2108 -* fix(test): correct wrong offsets in mock Consumer by @grongor in https://github.com/Shopify/sarama/pull/2078 -* fix: correct bugs in DescribeGroupsResponse by @dnwe in https://github.com/Shopify/sarama/pull/2111 -### :wrench: Maintenance -* chore: bump runtime and test dependencies by @dnwe in https://github.com/Shopify/sarama/pull/2100 -### :memo: Documentation -* docs: refresh README.md for Kafka 3.0.0 by @dnwe in https://github.com/Shopify/sarama/pull/2099 -### :heavy_plus_sign: Other Changes -* Fix typo by @mosceo in 
https://github.com/Shopify/sarama/pull/2084 - -## New Contributors -* @grongor made their first contribution in https://github.com/Shopify/sarama/pull/2080 -* @fengyinqiao made their first contribution in https://github.com/Shopify/sarama/pull/2088 -* @xujianhai666 made their first contribution in https://github.com/Shopify/sarama/pull/1686 -* @mosceo made their first contribution in https://github.com/Shopify/sarama/pull/2084 - -**Full Changelog**: https://github.com/Shopify/sarama/compare/v1.30.1...v1.31.0 - -## Version 1.30.1 (2021-12-04) - -## What's Changed -### :tada: New Features / Improvements -* feat(zstd): pass level param through to compress/zstd encoder by @lizthegrey in https://github.com/Shopify/sarama/pull/2045 -### :bug: Fixes -* fix: set min-go-version to 1.16 by @troyanov in https://github.com/Shopify/sarama/pull/2048 -* logger: fix debug logs' formatting directives by @utrack in https://github.com/Shopify/sarama/pull/2054 -* fix: stuck on the batch with zero records length by @pachmu in https://github.com/Shopify/sarama/pull/2057 -* fix: only update preferredReadReplica if valid by @dnwe in https://github.com/Shopify/sarama/pull/2076 -### :wrench: Maintenance -* chore: add release notes configuration by @dnwe in https://github.com/Shopify/sarama/pull/2046 -* chore: confluent platform version bump by @lizthegrey in https://github.com/Shopify/sarama/pull/2070 - -## Notes -* ℹ️ from Sarama 1.30.x onward the minimum version of Go toolchain required is 1.16.x - -## New Contributors -* @troyanov made their first contribution in https://github.com/Shopify/sarama/pull/2048 -* @lizthegrey made their first contribution in https://github.com/Shopify/sarama/pull/2045 -* @utrack made their first contribution in https://github.com/Shopify/sarama/pull/2054 -* @pachmu made their first contribution in https://github.com/Shopify/sarama/pull/2057 - -**Full Changelog**: https://github.com/Shopify/sarama/compare/v1.30.0...v1.30.1 - -## Version 1.30.0 (2021-09-29) - -⚠️ This release has been superseded by v1.30.1 and should _not_ be used. 
- -**regression**: enabling rackawareness causes severe throughput drops (#2071) — fixed in v1.30.1 via #2076 - ---- - -ℹ️ **Note: from Sarama 1.30.0 the minimum version of Go toolchain required is 1.16.x** - ---- - -# New Features / Improvements - -- #1983 - @zifengyu - allow configure AllowAutoTopicCreation argument in metadata refresh -- #2000 - @matzew - Using xdg-go module for SCRAM -- #2003 - @gdm85 - feat: add counter metrics for consumer group join/sync and their failures -- #1992 - @zhaomoran - feat: support SaslHandshakeRequest v0 for SCRAM -- #2006 - @faillefer - Add support for DeleteOffsets operation -- #1909 - @agriffaut - KIP-546 Client quota APIs -- #1633 - @aldelucca1 - feat: allow balance strategies to provide initial state -- #1275 - @dnwe - log: add a DebugLogger that proxies to Logger -- #2018 - @dnwe - feat: use DebugLogger reference for goldenpath log -- #2019 - @dnwe - feat: add logging & a metric for producer throttle -- #2023 - @dnwe - feat: add Controller() to ClusterAdmin interface -- #2025 - @dnwe - feat: support ApiVersionsRequest V3 protocol -- #2028 - @dnwe - feat: send ApiVersionsRequest on broker open -- #2034 - @bai - Add support for kafka 3.0.0 - -# Fixes - -- #1990 - @doxsch - fix: correctly pass ValidateOnly through to CreatePartitionsRequest -- #1988 - @LubergAlexander - fix: correct WithCustomFallbackPartitioner implementation -- #2001 - @HurSungYun - docs: inform AsyncProducer Close pitfalls -- #1973 - @qiangmzsx - fix: metrics still taking up too much memory when metrics.UseNilMetrics=true -- #2007 - @bai - Add support for Go 1.17 -- #2009 - @dnwe - fix: enable nilerr linter and fix iferr checks -- #2010 - @dnwe - chore: enable exportloopref and misspell linters -- #2013 - @faillefer - fix(test): disable encoded response/request check when map contains multiple elements -- #2015 - @bai - Change default branch to main -- #1718 - @crivera-fastly - fix: correct the error handling in client.InitProducerID() -- #1984 - @null-sleep - fix(test): bump confluentPlatformVersion from 6.1.1 to 6.2.0 -- #2016 - @dnwe - chore: replace deprecated Go calls -- #2017 - @dnwe - chore: delete legacy vagrant script -- #2020 - @dnwe - fix(test): remove testLogger from TrackLeader test -- #2024 - @dnwe - chore: bump toxiproxy container to v2.1.5 -- #2033 - @bai - Update dependencies -- #2031 - @gdm85 - docs: do not mention buffered messages in sync producer Close method -- #2035 - @dnwe - chore: populate the missing kafka versions -- #2038 - @dnwe - feat: add a fuzzing workflow to github actions - -## New Contributors -* @zifengyu made their first contribution in https://github.com/Shopify/sarama/pull/1983 -* @doxsch made their first contribution in https://github.com/Shopify/sarama/pull/1990 -* @LubergAlexander made their first contribution in https://github.com/Shopify/sarama/pull/1988 -* @HurSungYun made their first contribution in https://github.com/Shopify/sarama/pull/2001 -* @gdm85 made their first contribution in https://github.com/Shopify/sarama/pull/2003 -* @qiangmzsx made their first contribution in https://github.com/Shopify/sarama/pull/1973 -* @zhaomoran made their first contribution in https://github.com/Shopify/sarama/pull/1992 -* @faillefer made their first contribution in https://github.com/Shopify/sarama/pull/2006 -* @crivera-fastly made their first contribution in https://github.com/Shopify/sarama/pull/1718 -* @null-sleep made their first contribution in https://github.com/Shopify/sarama/pull/1984 - -**Full Changelog**: 
https://github.com/Shopify/sarama/compare/v1.29.1...v1.30.0 - -## Version 1.29.1 (2021-06-24) - -# New Features / Improvements - -- #1966 - @ajanikow - KIP-339: Add Incremental Config updates API -- #1964 - @ajanikow - Add DelegationToken ResourceType - -# Fixes - -- #1962 - @hanxiaolin - fix(consumer): call interceptors when MaxProcessingTime expire -- #1971 - @KerryJava - fix kafka-producer-performance throughput panic -- #1968 - @dnwe - chore: bump golang.org/x versions -- #1956 - @joewreschnig - Allow checking the entire `ProducerMessage` in the mock producers -- #1963 - @dnwe - fix: ensure backoff timer is re-used -- #1949 - @dnwe - fix: explicitly use uint64 for payload length - -## Version 1.29.0 (2021-05-07) - -### New Features / Improvements - -- #1917 - @arkady-emelyanov - KIP-554: Add Broker-side SCRAM Config API -- #1869 - @wyndhblb - zstd: encode+decode performance improvements -- #1541 - @izolight - add String, (Un)MarshalText for acl types. -- #1921 - @bai - Add support for Kafka 2.8.0 - -### Fixes -- #1936 - @dnwe - fix(consumer): follow preferred broker -- #1933 - @ozzieba - Use gofork for encoding/asn1 to fix ASN errors during Kerberos authentication -- #1929 - @celrenheit - Handle isolation level in Offset(Request|Response) and require stable offset in FetchOffset(Request|Response) -- #1926 - @dnwe - fix: correct initial CodeQL findings -- #1925 - @bai - Test out CodeQL -- #1923 - @bestgopher - Remove redundant switch-case, fix doc typos -- #1922 - @bai - Update go dependencies -- #1898 - @mmaslankaprv - Parsing only known control batches value -- #1887 - @withshubh - Fix: issues affecting code quality - -## Version 1.28.0 (2021-02-15) - -**Note that with this release we change `RoundRobinBalancer` strategy to match Java client behavior. 
See #1788 for details.** - -- #1870 - @kvch - Update Kerberos library to latest major -- #1876 - @bai - Update docs, reference pkg.go.dev -- #1846 - @wclaeys - Do not ignore Consumer.Offsets.AutoCommit.Enable config on Close -- #1747 - @XSAM - fix: mock sync producer does not handle the offset while sending messages -- #1863 - @bai - Add support for Kafka 2.7.0 + update lz4 and klauspost/compress dependencies -- #1788 - @kzinglzy - feat[balance_strategy]: announcing a new round robin balance strategy -- #1862 - @bai - Fix CI setenv permissions issues -- #1832 - @ilyakaznacheev - Update Godoc link to pkg.go.dev -- #1822 - @danp - KIP-392: Allow consumers to fetch from closest replica - -## Version 1.27.2 (2020-10-21) - -### Improvements - -#1750 - @krantideep95 Adds missing mock responses for mocking consumer group - -## Fixes - -#1817 - reverts #1785 - Add private method to Client interface to prevent implementation - -## Version 1.27.1 (2020-10-07) - -### Improvements - -#1775 - @d1egoaz - Adds a Producer Interceptor example -#1781 - @justin-chen - Refresh brokers given list of seed brokers -#1784 - @justin-chen - Add randomize seed broker method -#1790 - @d1egoaz - remove example binary -#1798 - @bai - Test against Go 1.15 -#1785 - @justin-chen - Add private method to Client interface to prevent implementation -#1802 - @uvw - Support Go 1.13 error unwrapping - -## Fixes - -#1791 - @stanislavkozlovski - bump default version to 1.0.0 - -## Version 1.27.0 (2020-08-11) - -### Improvements - -#1466 - @rubenvp8510 - Expose kerberos fast negotiation configuration -#1695 - @KJTsanaktsidis - Use docker-compose to run the functional tests -#1699 - @wclaeys - Consumer group support for manually comitting offsets -#1714 - @bai - Bump Go to version 1.14.3, golangci-lint to 1.27.0 -#1726 - @d1egoaz - Include zstd on the functional tests -#1730 - @d1egoaz - KIP-42 Add producer and consumer interceptors -#1738 - @varun06 - fixed variable names that are named same as some std lib package names -#1741 - @varun06 - updated zstd dependency to latest v1.10.10 -#1743 - @varun06 - Fixed declaration dependencies and other lint issues in code base -#1763 - @alrs - remove deprecated tls options from test -#1769 - @bai - Add support for Kafka 2.6.0 - -## Fixes - -#1697 - @kvch - Use gofork for encoding/asn1 to fix ASN errors during Kerberos authentication -#1744 - @alrs - Fix isBalanced Function Signature - -## Version 1.26.4 (2020-05-19) - -## Fixes - -- #1701 - @d1egoaz - Set server name only for the current broker -- #1694 - @dnwe - testfix: set KAFKA_HEAP_OPTS for zk and kafka - -## Version 1.26.3 (2020-05-07) - -## Fixes - -- #1692 - @d1egoaz - Set tls ServerName to fix issue: either ServerName or InsecureSkipVerify must be specified in the tls.Config - -## Version 1.26.2 (2020-05-06) - -## ⚠️ Known Issues - -This release has been marked as not ready for production and may be unstable, please use v1.26.4. 
- -### Improvements - -- #1560 - @iyacontrol - add sync pool for gzip 1-9 -- #1605 - @dnwe - feat: protocol support for V11 fetch w/ rackID -- #1617 - @sladkoff / @dwi-di / @random-dwi - Add support for alter/list partition reassignements APIs -- #1632 - @bai - Add support for Go 1.14 -- #1640 - @random-dwi - Feature/fix list partition reassignments -- #1646 - @mimaison - Add DescribeLogDirs to admin client -- #1667 - @bai - Add support for kafka 2.5.0 - -## Fixes - -- #1594 - @sladkoff - Sets ConfigEntry.Default flag in addition to the ConfigEntry.Source for Kafka versions > V1_1_0_0 -- #1601 - @alrs - fix: remove use of testing.T.FailNow() inside goroutine -- #1602 - @d1egoaz - adds a note about consumer groups Consume method -- #1607 - @darklore - Fix memory leak when Broker.Open and Broker.Close called repeatedly -- #1613 - @wblakecaldwell - Updated "retrying" log message when BackoffFunc implemented -- #1614 - @alrs - produce_response.go: Remove Unused Functions -- #1619 - @alrs - tools/kafka-producer-performance: prune unused flag variables -- #1639 - @agriffaut - Handle errors with no message but error code -- #1643 - @kzinglzy - fix `config.net.keepalive` -- #1644 - @KJTsanaktsidis - Fix brokers continually allocating new Session IDs -- #1645 - @Stephan14 - Remove broker(s) which no longer exist in metadata -- #1650 - @lavoiesl - Return the response error in heartbeatLoop -- #1661 - @KJTsanaktsidis - Fix "broker received out of order sequence" when brokers die -- #1666 - @KevinJCross - Bugfix: Allow TLS connections to work over socks proxy. - -## Version 1.26.1 (2020-02-04) - -Improvements: -- Add requests-in-flight metric ([1539](https://github.com/Shopify/sarama/pull/1539)) -- Fix misleading example for cluster admin ([1595](https://github.com/Shopify/sarama/pull/1595)) -- Replace Travis with GitHub Actions, linters housekeeping ([1573](https://github.com/Shopify/sarama/pull/1573)) -- Allow BalanceStrategy to provide custom assignment data ([1592](https://github.com/Shopify/sarama/pull/1592)) - -Bug Fixes: -- Adds back Consumer.Offsets.CommitInterval to fix API ([1590](https://github.com/Shopify/sarama/pull/1590)) -- Fix error message s/CommitInterval/AutoCommit.Interval ([1589](https://github.com/Shopify/sarama/pull/1589)) - -## Version 1.26.0 (2020-01-24) - -New Features: -- Enable zstd compression - ([1574](https://github.com/Shopify/sarama/pull/1574), - [1582](https://github.com/Shopify/sarama/pull/1582)) -- Support headers in tools kafka-console-producer - ([1549](https://github.com/Shopify/sarama/pull/1549)) - -Improvements: -- Add SASL AuthIdentity to SASL frames (authzid) - ([1585](https://github.com/Shopify/sarama/pull/1585)). - -Bug Fixes: -- Sending messages with ZStd compression enabled fails in multiple ways - ([1252](https://github.com/Shopify/sarama/issues/1252)). -- Use the broker for any admin on BrokerConfig - ([1571](https://github.com/Shopify/sarama/pull/1571)). -- Set DescribeConfigRequest Version field - ([1576](https://github.com/Shopify/sarama/pull/1576)). -- ConsumerGroup flooding logs with client/metadata update req - ([1578](https://github.com/Shopify/sarama/pull/1578)). -- MetadataRequest version in DescribeCluster - ([1580](https://github.com/Shopify/sarama/pull/1580)). -- Fix deadlock in consumer group handleError - ([1581](https://github.com/Shopify/sarama/pull/1581)) -- Fill in the Fetch{Request,Response} protocol - ([1582](https://github.com/Shopify/sarama/pull/1582)). 
-- Retry topic request on ControllerNotAvailable - ([1586](https://github.com/Shopify/sarama/pull/1586)). - -## Version 1.25.0 (2020-01-13) - -New Features: -- Support TLS protocol in kafka-producer-performance - ([1538](https://github.com/Shopify/sarama/pull/1538)). -- Add support for kafka 2.4.0 - ([1552](https://github.com/Shopify/sarama/pull/1552)). - -Improvements: -- Allow the Consumer to disable auto-commit offsets - ([1164](https://github.com/Shopify/sarama/pull/1164)). -- Produce records with consistent timestamps - ([1455](https://github.com/Shopify/sarama/pull/1455)). - -Bug Fixes: -- Fix incorrect SetTopicMetadata name mentions - ([1534](https://github.com/Shopify/sarama/pull/1534)). -- Fix client.tryRefreshMetadata Println - ([1535](https://github.com/Shopify/sarama/pull/1535)). -- Fix panic on calling updateMetadata on closed client - ([1531](https://github.com/Shopify/sarama/pull/1531)). -- Fix possible faulty metrics in TestFuncProducing - ([1545](https://github.com/Shopify/sarama/pull/1545)). - -## Version 1.24.1 (2019-10-31) - -New Features: -- Add DescribeLogDirs Request/Response pair - ([1520](https://github.com/Shopify/sarama/pull/1520)). - -Bug Fixes: -- Fix ClusterAdmin returning invalid controller ID on DescribeCluster - ([1518](https://github.com/Shopify/sarama/pull/1518)). -- Fix issue with consumergroup not rebalancing when new partition is added - ([1525](https://github.com/Shopify/sarama/pull/1525)). -- Ensure consistent use of read/write deadlines - ([1529](https://github.com/Shopify/sarama/pull/1529)). - -## Version 1.24.0 (2019-10-09) - -New Features: -- Add sticky partition assignor - ([1416](https://github.com/Shopify/sarama/pull/1416)). -- Switch from cgo zstd package to pure Go implementation - ([1477](https://github.com/Shopify/sarama/pull/1477)). - -Improvements: -- Allow creating ClusterAdmin from client - ([1415](https://github.com/Shopify/sarama/pull/1415)). -- Set KafkaVersion in ListAcls method - ([1452](https://github.com/Shopify/sarama/pull/1452)). -- Set request version in CreateACL ClusterAdmin method - ([1458](https://github.com/Shopify/sarama/pull/1458)). -- Set request version in DeleteACL ClusterAdmin method - ([1461](https://github.com/Shopify/sarama/pull/1461)). -- Handle missed error codes on TopicMetaDataRequest and GroupCoordinatorRequest - ([1464](https://github.com/Shopify/sarama/pull/1464)). -- Remove direct usage of gofork - ([1465](https://github.com/Shopify/sarama/pull/1465)). -- Add support for Go 1.13 - ([1478](https://github.com/Shopify/sarama/pull/1478)). -- Improve behavior of NewMockListAclsResponse - ([1481](https://github.com/Shopify/sarama/pull/1481)). - -Bug Fixes: -- Fix race condition in consumergroup example - ([1434](https://github.com/Shopify/sarama/pull/1434)). -- Fix brokerProducer goroutine leak - ([1442](https://github.com/Shopify/sarama/pull/1442)). -- Use released version of lz4 library - ([1469](https://github.com/Shopify/sarama/pull/1469)). -- Set correct version in MockDeleteTopicsResponse - ([1484](https://github.com/Shopify/sarama/pull/1484)). -- Fix CLI help message typo - ([1494](https://github.com/Shopify/sarama/pull/1494)). - -Known Issues: -- Please **don't** use Zstd, as it doesn't work right now. - See https://github.com/Shopify/sarama/issues/1252 - -## Version 1.23.1 (2019-07-22) - -Bug Fixes: -- Fix fetch delete bug record - ([1425](https://github.com/Shopify/sarama/pull/1425)). -- Handle SASL/OAUTHBEARER token rejection - ([1428](https://github.com/Shopify/sarama/pull/1428)). 
- -## Version 1.23.0 (2019-07-02) - -New Features: -- Add support for Kafka 2.3.0 - ([1418](https://github.com/Shopify/sarama/pull/1418)). -- Add support for ListConsumerGroupOffsets v2 - ([1374](https://github.com/Shopify/sarama/pull/1374)). -- Add support for DeleteConsumerGroup - ([1417](https://github.com/Shopify/sarama/pull/1417)). -- Add support for SASLVersion configuration - ([1410](https://github.com/Shopify/sarama/pull/1410)). -- Add kerberos support - ([1366](https://github.com/Shopify/sarama/pull/1366)). - -Improvements: -- Improve sasl_scram_client example - ([1406](https://github.com/Shopify/sarama/pull/1406)). -- Fix shutdown and race-condition in consumer-group example - ([1404](https://github.com/Shopify/sarama/pull/1404)). -- Add support for error codes 77—81 - ([1397](https://github.com/Shopify/sarama/pull/1397)). -- Pool internal objects allocated per message - ([1385](https://github.com/Shopify/sarama/pull/1385)). -- Reduce packet decoder allocations - ([1373](https://github.com/Shopify/sarama/pull/1373)). -- Support timeout when fetching metadata - ([1359](https://github.com/Shopify/sarama/pull/1359)). - -Bug Fixes: -- Fix fetch size integer overflow - ([1376](https://github.com/Shopify/sarama/pull/1376)). -- Handle and log throttled FetchResponses - ([1383](https://github.com/Shopify/sarama/pull/1383)). -- Refactor misspelled word Resouce to Resource - ([1368](https://github.com/Shopify/sarama/pull/1368)). - -## Version 1.22.1 (2019-04-29) - -Improvements: -- Use zstd 1.3.8 - ([1350](https://github.com/Shopify/sarama/pull/1350)). -- Add support for SaslHandshakeRequest v1 - ([1354](https://github.com/Shopify/sarama/pull/1354)). - -Bug Fixes: -- Fix V5 MetadataRequest nullable topics array - ([1353](https://github.com/Shopify/sarama/pull/1353)). -- Use a different SCRAM client for each broker connection - ([1349](https://github.com/Shopify/sarama/pull/1349)). -- Fix AllowAutoTopicCreation for MetadataRequest greater than v3 - ([1344](https://github.com/Shopify/sarama/pull/1344)). - -## Version 1.22.0 (2019-04-09) - -New Features: -- Add Offline Replicas Operation to Client - ([1318](https://github.com/Shopify/sarama/pull/1318)). -- Allow using proxy when connecting to broker - ([1326](https://github.com/Shopify/sarama/pull/1326)). -- Implement ReadCommitted - ([1307](https://github.com/Shopify/sarama/pull/1307)). -- Add support for Kafka 2.2.0 - ([1331](https://github.com/Shopify/sarama/pull/1331)). -- Add SASL SCRAM-SHA-512 and SCRAM-SHA-256 mechanismes - ([1331](https://github.com/Shopify/sarama/pull/1295)). - -Improvements: -- Unregister all broker metrics on broker stop - ([1232](https://github.com/Shopify/sarama/pull/1232)). -- Add SCRAM authentication example - ([1303](https://github.com/Shopify/sarama/pull/1303)). -- Add consumergroup examples - ([1304](https://github.com/Shopify/sarama/pull/1304)). -- Expose consumer batch size metric - ([1296](https://github.com/Shopify/sarama/pull/1296)). -- Add TLS options to console producer and consumer - ([1300](https://github.com/Shopify/sarama/pull/1300)). -- Reduce client close bookkeeping - ([1297](https://github.com/Shopify/sarama/pull/1297)). -- Satisfy error interface in create responses - ([1154](https://github.com/Shopify/sarama/pull/1154)). -- Please lint gods - ([1346](https://github.com/Shopify/sarama/pull/1346)). - -Bug Fixes: -- Fix multi consumer group instance crash - ([1338](https://github.com/Shopify/sarama/pull/1338)). 
-- Update lz4 to latest version - ([1347](https://github.com/Shopify/sarama/pull/1347)). -- Retry ErrNotCoordinatorForConsumer in new consumergroup session - ([1231](https://github.com/Shopify/sarama/pull/1231)). -- Fix cleanup error handler - ([1332](https://github.com/Shopify/sarama/pull/1332)). -- Fix rate condition in PartitionConsumer - ([1156](https://github.com/Shopify/sarama/pull/1156)). - -## Version 1.21.0 (2019-02-24) - -New Features: -- Add CreateAclRequest, DescribeAclRequest, DeleteAclRequest - ([1236](https://github.com/Shopify/sarama/pull/1236)). -- Add DescribeTopic, DescribeConsumerGroup, ListConsumerGroups, ListConsumerGroupOffsets admin requests - ([1178](https://github.com/Shopify/sarama/pull/1178)). -- Implement SASL/OAUTHBEARER - ([1240](https://github.com/Shopify/sarama/pull/1240)). - -Improvements: -- Add Go mod support - ([1282](https://github.com/Shopify/sarama/pull/1282)). -- Add error codes 73—76 - ([1239](https://github.com/Shopify/sarama/pull/1239)). -- Add retry backoff function - ([1160](https://github.com/Shopify/sarama/pull/1160)). -- Maintain metadata in the producer even when retries are disabled - ([1189](https://github.com/Shopify/sarama/pull/1189)). -- Include ReplicaAssignment in ListTopics - ([1274](https://github.com/Shopify/sarama/pull/1274)). -- Add producer performance tool - ([1222](https://github.com/Shopify/sarama/pull/1222)). -- Add support LogAppend timestamps - ([1258](https://github.com/Shopify/sarama/pull/1258)). - -Bug Fixes: -- Fix potential deadlock when a heartbeat request fails - ([1286](https://github.com/Shopify/sarama/pull/1286)). -- Fix consuming compacted topic - ([1227](https://github.com/Shopify/sarama/pull/1227)). -- Set correct Kafka version for DescribeConfigsRequest v1 - ([1277](https://github.com/Shopify/sarama/pull/1277)). -- Update kafka test version - ([1273](https://github.com/Shopify/sarama/pull/1273)). - -## Version 1.20.1 (2019-01-10) - -New Features: -- Add optional replica id in offset request - ([1100](https://github.com/Shopify/sarama/pull/1100)). - -Improvements: -- Implement DescribeConfigs Request + Response v1 & v2 - ([1230](https://github.com/Shopify/sarama/pull/1230)). -- Reuse compression objects - ([1185](https://github.com/Shopify/sarama/pull/1185)). -- Switch from png to svg for GoDoc link in README - ([1243](https://github.com/Shopify/sarama/pull/1243)). -- Fix typo in deprecation notice for FetchResponseBlock.Records - ([1242](https://github.com/Shopify/sarama/pull/1242)). -- Fix typos in consumer metadata response file - ([1244](https://github.com/Shopify/sarama/pull/1244)). - -Bug Fixes: -- Revert to individual msg retries for non-idempotent - ([1203](https://github.com/Shopify/sarama/pull/1203)). -- Respect MaxMessageBytes limit for uncompressed messages - ([1141](https://github.com/Shopify/sarama/pull/1141)). - -## Version 1.20.0 (2018-12-10) - -New Features: - - Add support for zstd compression - ([#1170](https://github.com/Shopify/sarama/pull/1170)). - - Add support for Idempotent Producer - ([#1152](https://github.com/Shopify/sarama/pull/1152)). - - Add support support for Kafka 2.1.0 - ([#1229](https://github.com/Shopify/sarama/pull/1229)). - - Add support support for OffsetCommit request/response pairs versions v1 to v5 - ([#1201](https://github.com/Shopify/sarama/pull/1201)). - - Add support support for OffsetFetch request/response pair up to version v5 - ([#1198](https://github.com/Shopify/sarama/pull/1198)). 
- -Improvements: - - Export broker's Rack setting - ([#1173](https://github.com/Shopify/sarama/pull/1173)). - - Always use latest patch version of Go on CI - ([#1202](https://github.com/Shopify/sarama/pull/1202)). - - Add error codes 61 to 72 - ([#1195](https://github.com/Shopify/sarama/pull/1195)). - -Bug Fixes: - - Fix build without cgo - ([#1182](https://github.com/Shopify/sarama/pull/1182)). - - Fix go vet suggestion in consumer group file - ([#1209](https://github.com/Shopify/sarama/pull/1209)). - - Fix typos in code and comments - ([#1228](https://github.com/Shopify/sarama/pull/1228)). - -## Version 1.19.0 (2018-09-27) - -New Features: - - Implement a higher-level consumer group - ([#1099](https://github.com/Shopify/sarama/pull/1099)). - -Improvements: - - Add support for Go 1.11 - ([#1176](https://github.com/Shopify/sarama/pull/1176)). - -Bug Fixes: - - Fix encoding of `MetadataResponse` with version 2 and higher - ([#1174](https://github.com/Shopify/sarama/pull/1174)). - - Fix race condition in mock async producer - ([#1174](https://github.com/Shopify/sarama/pull/1174)). - -## Version 1.18.0 (2018-09-07) - -New Features: - - Make `Partitioner.RequiresConsistency` vary per-message - ([#1112](https://github.com/Shopify/sarama/pull/1112)). - - Add customizable partitioner - ([#1118](https://github.com/Shopify/sarama/pull/1118)). - - Add `ClusterAdmin` support for `CreateTopic`, `DeleteTopic`, `CreatePartitions`, - `DeleteRecords`, `DescribeConfig`, `AlterConfig`, `CreateACL`, `ListAcls`, `DeleteACL` - ([#1055](https://github.com/Shopify/sarama/pull/1055)). - -Improvements: - - Add support for Kafka 2.0.0 - ([#1149](https://github.com/Shopify/sarama/pull/1149)). - - Allow setting `LocalAddr` when dialing an address to support multi-homed hosts - ([#1123](https://github.com/Shopify/sarama/pull/1123)). - - Simpler offset management - ([#1127](https://github.com/Shopify/sarama/pull/1127)). - -Bug Fixes: - - Fix mutation of `ProducerMessage.MetaData` when producing to Kafka - ([#1110](https://github.com/Shopify/sarama/pull/1110)). - - Fix consumer block when response did not contain all the - expected topic/partition blocks - ([#1086](https://github.com/Shopify/sarama/pull/1086)). - - Fix consumer block when response contains only constrol messages - ([#1115](https://github.com/Shopify/sarama/pull/1115)). - - Add timeout config for ClusterAdmin requests - ([#1142](https://github.com/Shopify/sarama/pull/1142)). - - Add version check when producing message with headers - ([#1117](https://github.com/Shopify/sarama/pull/1117)). - - Fix `MetadataRequest` for empty list of topics - ([#1132](https://github.com/Shopify/sarama/pull/1132)). - - Fix producer topic metadata on-demand fetch when topic error happens in metadata response - ([#1125](https://github.com/Shopify/sarama/pull/1125)). - -## Version 1.17.0 (2018-05-30) - -New Features: - - Add support for gzip compression levels - ([#1044](https://github.com/Shopify/sarama/pull/1044)). - - Add support for Metadata request/response pairs versions v1 to v5 - ([#1047](https://github.com/Shopify/sarama/pull/1047), - [#1069](https://github.com/Shopify/sarama/pull/1069)). - - Add versioning to JoinGroup request/response pairs - ([#1098](https://github.com/Shopify/sarama/pull/1098)) - - Add support for CreatePartitions, DeleteGroups, DeleteRecords request/response pairs - ([#1065](https://github.com/Shopify/sarama/pull/1065), - [#1096](https://github.com/Shopify/sarama/pull/1096), - [#1027](https://github.com/Shopify/sarama/pull/1027)). 
- - Add `Controller()` method to Client interface - ([#1063](https://github.com/Shopify/sarama/pull/1063)). - -Improvements: - - ConsumerMetadataReq/Resp has been migrated to FindCoordinatorReq/Resp - ([#1010](https://github.com/Shopify/sarama/pull/1010)). - - Expose missing protocol parts: `msgSet` and `recordBatch` - ([#1049](https://github.com/Shopify/sarama/pull/1049)). - - Add support for v1 DeleteTopics Request - ([#1052](https://github.com/Shopify/sarama/pull/1052)). - - Add support for Go 1.10 - ([#1064](https://github.com/Shopify/sarama/pull/1064)). - - Claim support for Kafka 1.1.0 - ([#1073](https://github.com/Shopify/sarama/pull/1073)). - -Bug Fixes: - - Fix FindCoordinatorResponse.encode to allow nil Coordinator - ([#1050](https://github.com/Shopify/sarama/pull/1050), - [#1051](https://github.com/Shopify/sarama/pull/1051)). - - Clear all metadata when we have the latest topic info - ([#1033](https://github.com/Shopify/sarama/pull/1033)). - - Make `PartitionConsumer.Close` idempotent - ([#1092](https://github.com/Shopify/sarama/pull/1092)). - -## Version 1.16.0 (2018-02-12) - -New Features: - - Add support for the Create/Delete Topics request/response pairs - ([#1007](https://github.com/Shopify/sarama/pull/1007), - [#1008](https://github.com/Shopify/sarama/pull/1008)). - - Add support for the Describe/Create/Delete ACL request/response pairs - ([#1009](https://github.com/Shopify/sarama/pull/1009)). - - Add support for the five transaction-related request/response pairs - ([#1016](https://github.com/Shopify/sarama/pull/1016)). - -Improvements: - - Permit setting version on mock producer responses - ([#999](https://github.com/Shopify/sarama/pull/999)). - - Add `NewMockBrokerListener` helper for testing TLS connections - ([#1019](https://github.com/Shopify/sarama/pull/1019)). - - Changed the default value for `Consumer.Fetch.Default` from 32KiB to 1MiB - which results in much higher throughput in most cases - ([#1024](https://github.com/Shopify/sarama/pull/1024)). - - Reuse the `time.Ticker` across fetch requests in the PartitionConsumer to - reduce CPU and memory usage when processing many partitions - ([#1028](https://github.com/Shopify/sarama/pull/1028)). - - Assign relative offsets to messages in the producer to save the brokers a - recompression pass - ([#1002](https://github.com/Shopify/sarama/pull/1002), - [#1015](https://github.com/Shopify/sarama/pull/1015)). - -Bug Fixes: - - Fix producing uncompressed batches with the new protocol format - ([#1032](https://github.com/Shopify/sarama/issues/1032)). - - Fix consuming compacted topics with the new protocol format - ([#1005](https://github.com/Shopify/sarama/issues/1005)). - - Fix consuming topics with a mix of protocol formats - ([#1021](https://github.com/Shopify/sarama/issues/1021)). - - Fix consuming when the broker includes multiple batches in a single response - ([#1022](https://github.com/Shopify/sarama/issues/1022)). - - Fix detection of `PartialTrailingMessage` when the partial message was - truncated before the magic value indicating its version - ([#1030](https://github.com/Shopify/sarama/pull/1030)). - - Fix expectation-checking in the mock of `SyncProducer.SendMessages` - ([#1035](https://github.com/Shopify/sarama/pull/1035)). - -## Version 1.15.0 (2017-12-08) - -New Features: - - Claim official support for Kafka 1.0, though it did already work - ([#984](https://github.com/Shopify/sarama/pull/984)). 
- - Helper methods for Kafka version numbers to/from strings - ([#989](https://github.com/Shopify/sarama/pull/989)). - - Implement CreatePartitions request/response - ([#985](https://github.com/Shopify/sarama/pull/985)). - -Improvements: - - Add error codes 45-60 - ([#986](https://github.com/Shopify/sarama/issues/986)). - -Bug Fixes: - - Fix slow consuming for certain Kafka 0.11/1.0 configurations - ([#982](https://github.com/Shopify/sarama/pull/982)). - - Correctly determine when a FetchResponse contains the new message format - ([#990](https://github.com/Shopify/sarama/pull/990)). - - Fix producing with multiple headers - ([#996](https://github.com/Shopify/sarama/pull/996)). - - Fix handling of truncated record batches - ([#998](https://github.com/Shopify/sarama/pull/998)). - - Fix leaking metrics when closing brokers - ([#991](https://github.com/Shopify/sarama/pull/991)). - -## Version 1.14.0 (2017-11-13) - -New Features: - - Add support for the new Kafka 0.11 record-batch format, including the wire - protocol and the necessary behavioural changes in the producer and consumer. - Transactions and idempotency are not yet supported, but producing and - consuming should work with all the existing bells and whistles (batching, - compression, etc) as well as the new custom headers. Thanks to Vlad Hanciuta - of Arista Networks for this work. Part of - ([#901](https://github.com/Shopify/sarama/issues/901)). - -Bug Fixes: - - Fix encoding of ProduceResponse versions in test - ([#970](https://github.com/Shopify/sarama/pull/970)). - - Return partial replicas list when we have it - ([#975](https://github.com/Shopify/sarama/pull/975)). - -## Version 1.13.0 (2017-10-04) - -New Features: - - Support for FetchRequest version 3 - ([#905](https://github.com/Shopify/sarama/pull/905)). - - Permit setting version on mock FetchResponses - ([#939](https://github.com/Shopify/sarama/pull/939)). - - Add a configuration option to support storing only minimal metadata for - extremely large clusters - ([#937](https://github.com/Shopify/sarama/pull/937)). - - Add `PartitionOffsetManager.ResetOffset` for backtracking tracked offsets - ([#932](https://github.com/Shopify/sarama/pull/932)). - -Improvements: - - Provide the block-level timestamp when consuming compressed messages - ([#885](https://github.com/Shopify/sarama/issues/885)). - - `Client.Replicas` and `Client.InSyncReplicas` now respect the order returned - by the broker, which can be meaningful - ([#930](https://github.com/Shopify/sarama/pull/930)). - - Use a `Ticker` to reduce consumer timer overhead at the cost of higher - variance in the actual timeout - ([#933](https://github.com/Shopify/sarama/pull/933)). - -Bug Fixes: - - Gracefully handle messages with negative timestamps - ([#907](https://github.com/Shopify/sarama/pull/907)). - - Raise a proper error when encountering an unknown message version - ([#940](https://github.com/Shopify/sarama/pull/940)). - -## Version 1.12.0 (2017-05-08) - -New Features: - - Added support for the `ApiVersions` request and response pair, and Kafka - version 0.10.2 ([#867](https://github.com/Shopify/sarama/pull/867)). Note - that you still need to specify the Kafka version in the Sarama configuration - for the time being. - - Added a `Brokers` method to the Client which returns the complete set of - active brokers ([#813](https://github.com/Shopify/sarama/pull/813)). 
- - Added an `InSyncReplicas` method to the Client which returns the set of all - in-sync broker IDs for the given partition, now that the Kafka versions for - which this was misleading are no longer in our supported set - ([#872](https://github.com/Shopify/sarama/pull/872)). - - Added a `NewCustomHashPartitioner` method which allows constructing a hash - partitioner with a custom hash method in case the default (FNV-1a) is not - suitable - ([#837](https://github.com/Shopify/sarama/pull/837), - [#841](https://github.com/Shopify/sarama/pull/841)). - -Improvements: - - Recognize more Kafka error codes - ([#859](https://github.com/Shopify/sarama/pull/859)). - -Bug Fixes: - - Fix an issue where decoding a malformed FetchRequest would not return the - correct error ([#818](https://github.com/Shopify/sarama/pull/818)). - - Respect ordering of group protocols in JoinGroupRequests. This fix is - transparent if you're using the `AddGroupProtocol` or - `AddGroupProtocolMetadata` helpers; otherwise you will need to switch from - the `GroupProtocols` field (now deprecated) to use `OrderedGroupProtocols` - ([#812](https://github.com/Shopify/sarama/issues/812)). - - Fix an alignment-related issue with atomics on 32-bit architectures - ([#859](https://github.com/Shopify/sarama/pull/859)). - -## Version 1.11.0 (2016-12-20) - -_Important:_ As of Sarama 1.11 it is necessary to set the config value of -`Producer.Return.Successes` to true in order to use the SyncProducer. Previous -versions would silently override this value when instantiating a SyncProducer -which led to unexpected values and data races. - -New Features: - - Metrics! Thanks to Sébastien Launay for all his work on this feature - ([#701](https://github.com/Shopify/sarama/pull/701), - [#746](https://github.com/Shopify/sarama/pull/746), - [#766](https://github.com/Shopify/sarama/pull/766)). - - Add support for LZ4 compression - ([#786](https://github.com/Shopify/sarama/pull/786)). - - Add support for ListOffsetRequest v1 and Kafka 0.10.1 - ([#775](https://github.com/Shopify/sarama/pull/775)). - - Added a `HighWaterMarks` method to the Consumer which aggregates the - `HighWaterMarkOffset` values of its child topic/partitions - ([#769](https://github.com/Shopify/sarama/pull/769)). - -Bug Fixes: - - Fixed producing when using timestamps, compression and Kafka 0.10 - ([#759](https://github.com/Shopify/sarama/pull/759)). - - Added missing decoder methods to DescribeGroups response - ([#756](https://github.com/Shopify/sarama/pull/756)). - - Fix producer shutdown when `Return.Errors` is disabled - ([#787](https://github.com/Shopify/sarama/pull/787)). - - Don't mutate configuration in SyncProducer - ([#790](https://github.com/Shopify/sarama/pull/790)). - - Fix crash on SASL initialization failure - ([#795](https://github.com/Shopify/sarama/pull/795)). - -## Version 1.10.1 (2016-08-30) - -Bug Fixes: - - Fix the documentation for `HashPartitioner` which was incorrect - ([#717](https://github.com/Shopify/sarama/pull/717)). - - Permit client creation even when it is limited by ACLs - ([#722](https://github.com/Shopify/sarama/pull/722)). - - Several fixes to the consumer timer optimization code, regressions introduced - in v1.10.0. Go's timers are finicky - ([#730](https://github.com/Shopify/sarama/pull/730), - [#733](https://github.com/Shopify/sarama/pull/733), - [#734](https://github.com/Shopify/sarama/pull/734)). - - Handle consuming compressed relative offsets with Kafka 0.10 - ([#735](https://github.com/Shopify/sarama/pull/735)). 
- -## Version 1.10.0 (2016-08-02) - -_Important:_ As of Sarama 1.10 it is necessary to tell Sarama the version of -Kafka you are running against (via the `config.Version` value) in order to use -features that may not be compatible with old Kafka versions. If you don't -specify this value it will default to 0.8.2 (the minimum supported), and trying -to use more recent features (like the offset manager) will fail with an error. - -_Also:_ The offset-manager's behaviour has been changed to match the upstream -java consumer (see [#705](https://github.com/Shopify/sarama/pull/705) and -[#713](https://github.com/Shopify/sarama/pull/713)). If you use the -offset-manager, please ensure that you are committing one *greater* than the -last consumed message offset or else you may end up consuming duplicate -messages. - -New Features: - - Support for Kafka 0.10 - ([#672](https://github.com/Shopify/sarama/pull/672), - [#678](https://github.com/Shopify/sarama/pull/678), - [#681](https://github.com/Shopify/sarama/pull/681), and others). - - Support for configuring the target Kafka version - ([#676](https://github.com/Shopify/sarama/pull/676)). - - Batch producing support in the SyncProducer - ([#677](https://github.com/Shopify/sarama/pull/677)). - - Extend producer mock to allow setting expectations on message contents - ([#667](https://github.com/Shopify/sarama/pull/667)). - -Improvements: - - Support `nil` compressed messages for deleting in compacted topics - ([#634](https://github.com/Shopify/sarama/pull/634)). - - Pre-allocate decoding errors, greatly reducing heap usage and GC time against - misbehaving brokers ([#690](https://github.com/Shopify/sarama/pull/690)). - - Re-use consumer expiry timers, removing one allocation per consumed message - ([#707](https://github.com/Shopify/sarama/pull/707)). - -Bug Fixes: - - Actually default the client ID to "sarama" like we say we do - ([#664](https://github.com/Shopify/sarama/pull/664)). - - Fix a rare issue where `Client.Leader` could return the wrong error - ([#685](https://github.com/Shopify/sarama/pull/685)). - - Fix a possible tight loop in the consumer - ([#693](https://github.com/Shopify/sarama/pull/693)). - - Match upstream's offset-tracking behaviour - ([#705](https://github.com/Shopify/sarama/pull/705)). - - Report UnknownTopicOrPartition errors from the offset manager - ([#706](https://github.com/Shopify/sarama/pull/706)). - - Fix possible negative partition value from the HashPartitioner - ([#709](https://github.com/Shopify/sarama/pull/709)). - -## Version 1.9.0 (2016-05-16) - -New Features: - - Add support for custom offset manager retention durations - ([#602](https://github.com/Shopify/sarama/pull/602)). - - Publish low-level mocks to enable testing of third-party producer/consumer - implementations ([#570](https://github.com/Shopify/sarama/pull/570)). - - Declare support for Golang 1.6 - ([#611](https://github.com/Shopify/sarama/pull/611)). - - Support for SASL plain-text auth - ([#648](https://github.com/Shopify/sarama/pull/648)). - -Improvements: - - Simplified broker locking scheme slightly - ([#604](https://github.com/Shopify/sarama/pull/604)). - - Documentation cleanup - ([#605](https://github.com/Shopify/sarama/pull/605), - [#621](https://github.com/Shopify/sarama/pull/621), - [#654](https://github.com/Shopify/sarama/pull/654)). - -Bug Fixes: - - Fix race condition shutting down the OffsetManager - ([#658](https://github.com/Shopify/sarama/pull/658)). 
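The two v1.10 upgrade notes above, shown as a hedged sketch; `V0_10_0_0` is only an example target version:

```go
package main

import "github.com/Shopify/sarama"

// newV10Config declares the broker version so that version-gated features
// (such as the offset manager) work instead of failing with an error.
func newV10Config() *sarama.Config {
	config := sarama.NewConfig()
	config.Version = sarama.V0_10_0_0
	return config
}

// commitNext records one offset *greater* than the last consumed message,
// matching the upstream Java consumer behaviour described above.
func commitNext(pom sarama.PartitionOffsetManager, msg *sarama.ConsumerMessage) {
	pom.MarkOffset(msg.Offset+1, "")
}
```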
- -## Version 1.8.0 (2016-02-01) - -New Features: - - Full support for Kafka 0.9: - - All protocol messages and fields - ([#586](https://github.com/Shopify/sarama/pull/586), - [#588](https://github.com/Shopify/sarama/pull/588), - [#590](https://github.com/Shopify/sarama/pull/590)). - - Verified that TLS support works - ([#581](https://github.com/Shopify/sarama/pull/581)). - - Fixed the OffsetManager compatibility - ([#585](https://github.com/Shopify/sarama/pull/585)). - -Improvements: - - Optimize for fewer system calls when reading from the network - ([#584](https://github.com/Shopify/sarama/pull/584)). - - Automatically retry `InvalidMessage` errors to match upstream behaviour - ([#589](https://github.com/Shopify/sarama/pull/589)). - -## Version 1.7.0 (2015-12-11) - -New Features: - - Preliminary support for Kafka 0.9 - ([#572](https://github.com/Shopify/sarama/pull/572)). This comes with several - caveats: - - Protocol-layer support is mostly in place - ([#577](https://github.com/Shopify/sarama/pull/577)), however Kafka 0.9 - renamed some messages and fields, which we did not in order to preserve API - compatibility. - - The producer and consumer work against 0.9, but the offset manager does - not ([#573](https://github.com/Shopify/sarama/pull/573)). - - TLS support may or may not work - ([#581](https://github.com/Shopify/sarama/pull/581)). - -Improvements: - - Don't wait for request timeouts on dead brokers, greatly speeding recovery - when the TCP connection is left hanging - ([#548](https://github.com/Shopify/sarama/pull/548)). - - Refactored part of the producer. The new version provides a much more elegant - solution to [#449](https://github.com/Shopify/sarama/pull/449). It is also - slightly more efficient, and much more precise in calculating batch sizes - when compression is used - ([#549](https://github.com/Shopify/sarama/pull/549), - [#550](https://github.com/Shopify/sarama/pull/550), - [#551](https://github.com/Shopify/sarama/pull/551)). - -Bug Fixes: - - Fix race condition in consumer test mock - ([#553](https://github.com/Shopify/sarama/pull/553)). - -## Version 1.6.1 (2015-09-25) - -Bug Fixes: - - Fix panic that could occur if a user-supplied message value failed to encode - ([#449](https://github.com/Shopify/sarama/pull/449)). - -## Version 1.6.0 (2015-09-04) - -New Features: - - Implementation of a consumer offset manager using the APIs introduced in - Kafka 0.8.2. The API is designed mainly for integration into a future - high-level consumer, not for direct use, although it is *possible* to use it - directly. - ([#461](https://github.com/Shopify/sarama/pull/461)). - -Improvements: - - CRC32 calculation is much faster on machines with SSE4.2 instructions, - removing a major hotspot from most profiles - ([#255](https://github.com/Shopify/sarama/pull/255)). - -Bug Fixes: - - Make protocol decoding more robust against some malformed packets generated - by go-fuzz ([#523](https://github.com/Shopify/sarama/pull/523), - [#525](https://github.com/Shopify/sarama/pull/525)) or found in other ways - ([#528](https://github.com/Shopify/sarama/pull/528)). - - Fix a potential race condition panic in the consumer on shutdown - ([#529](https://github.com/Shopify/sarama/pull/529)). - -## Version 1.5.0 (2015-08-17) - -New Features: - - TLS-encrypted network connections are now supported. This feature is subject - to change when Kafka releases built-in TLS support, but for now this is - enough to work with TLS-terminating proxies - ([#154](https://github.com/Shopify/sarama/pull/154)). 
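A minimal sketch of enabling the TLS support described in the feature above (the empty `tls.Config` is a placeholder; a real deployment would carry certificates and roots):

```go
package main

import (
	"crypto/tls"

	"github.com/Shopify/sarama"
)

// tlsEnabledConfig turns on the encrypted connections added in v1.5.0,
// which is enough to sit behind a TLS-terminating proxy.
func tlsEnabledConfig() *sarama.Config {
	config := sarama.NewConfig()
	config.Net.TLS.Enable = true
	config.Net.TLS.Config = &tls.Config{}
	return config
}
```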
- -Improvements: - - The consumer will not block if a single partition is not drained by the user; - all other partitions will continue to consume normally - ([#485](https://github.com/Shopify/sarama/pull/485)). - - Formatting of error strings has been much improved - ([#495](https://github.com/Shopify/sarama/pull/495)). - - Internal refactoring of the producer for code cleanliness and to enable - future work ([#300](https://github.com/Shopify/sarama/pull/300)). - -Bug Fixes: - - Fix a potential deadlock in the consumer on shutdown - ([#475](https://github.com/Shopify/sarama/pull/475)). - -## Version 1.4.3 (2015-07-21) - -Bug Fixes: - - Don't include the partitioner in the producer's "fetch partitions" - circuit-breaker ([#466](https://github.com/Shopify/sarama/pull/466)). - - Don't retry messages until the broker is closed when abandoning a broker in - the producer ([#468](https://github.com/Shopify/sarama/pull/468)). - - Update the import path for snappy-go, it has moved again and the API has - changed slightly ([#486](https://github.com/Shopify/sarama/pull/486)). - -## Version 1.4.2 (2015-05-27) - -Bug Fixes: - - Update the import path for snappy-go, it has moved from google code to github - ([#456](https://github.com/Shopify/sarama/pull/456)). - -## Version 1.4.1 (2015-05-25) - -Improvements: - - Optimizations when decoding snappy messages, thanks to John Potocny - ([#446](https://github.com/Shopify/sarama/pull/446)). - -Bug Fixes: - - Fix hypothetical race conditions on producer shutdown - ([#450](https://github.com/Shopify/sarama/pull/450), - [#451](https://github.com/Shopify/sarama/pull/451)). - -## Version 1.4.0 (2015-05-01) - -New Features: - - The consumer now implements `Topics()` and `Partitions()` methods to enable - users to dynamically choose what topics/partitions to consume without - instantiating a full client - ([#431](https://github.com/Shopify/sarama/pull/431)). - - The partition-consumer now exposes the high water mark offset value returned - by the broker via the `HighWaterMarkOffset()` method ([#339](https://github.com/Shopify/sarama/pull/339)). - - Added a `kafka-console-consumer` tool capable of handling multiple - partitions, and deprecated the now-obsolete `kafka-console-partitionConsumer` - ([#439](https://github.com/Shopify/sarama/pull/439), - [#442](https://github.com/Shopify/sarama/pull/442)). - -Improvements: - - The producer's logging during retry scenarios is more consistent, more - useful, and slightly less verbose - ([#429](https://github.com/Shopify/sarama/pull/429)). - - The client now shuffles its initial list of seed brokers in order to prevent - thundering herd on the first broker in the list - ([#441](https://github.com/Shopify/sarama/pull/441)). - -Bug Fixes: - - The producer now correctly manages its state if retries occur when it is - shutting down, fixing several instances of confusing behaviour and at least - one potential deadlock ([#419](https://github.com/Shopify/sarama/pull/419)). - - The consumer now handles messages for different partitions asynchronously, - making it much more resilient to specific user code ordering - ([#325](https://github.com/Shopify/sarama/pull/325)). - -## Version 1.3.0 (2015-04-16) - -New Features: - - The client now tracks consumer group coordinators using - ConsumerMetadataRequests similar to how it tracks partition leadership using - regular MetadataRequests ([#411](https://github.com/Shopify/sarama/pull/411)). 
This adds two methods to the client API:
-   - `Coordinator(consumerGroup string) (*Broker, error)`
-   - `RefreshCoordinator(consumerGroup string) error`
-
-Improvements:
- - ConsumerMetadataResponses now automatically create a Broker object out of the
-   ID/address/port combination for the Coordinator; accessing the fields
-   individually has been deprecated
-   ([#413](https://github.com/Shopify/sarama/pull/413)).
- - Much improved handling of `OffsetOutOfRange` errors in the consumer.
-   Consumers will fail to start if the provided offset is out of range
-   ([#418](https://github.com/Shopify/sarama/pull/418))
-   and they will automatically shut down if the offset falls out of range
-   ([#424](https://github.com/Shopify/sarama/pull/424)).
- - Small performance improvement in encoding and decoding protocol messages
-   ([#427](https://github.com/Shopify/sarama/pull/427)).
-
-Bug Fixes:
- - Fix a rare race condition in the client's background metadata refresher if
-   it happens to be activated while the client is being closed
-   ([#422](https://github.com/Shopify/sarama/pull/422)).
-
-## Version 1.2.0 (2015-04-07)
-
-Improvements:
- - The producer's behaviour when `Flush.Frequency` is set is now more intuitive
-   ([#389](https://github.com/Shopify/sarama/pull/389)).
- - The producer is now somewhat more memory-efficient during and after retrying
-   messages due to an improved queue implementation
-   ([#396](https://github.com/Shopify/sarama/pull/396)).
- - The consumer produces much more useful logging output when leadership
-   changes ([#385](https://github.com/Shopify/sarama/pull/385)).
- - The client's `GetOffset` method will now automatically refresh metadata and
-   retry once in the event of stale information or similar
-   ([#394](https://github.com/Shopify/sarama/pull/394)).
- - Broker connections now have support for using TCP keepalives
-   ([#407](https://github.com/Shopify/sarama/issues/407)).
-
-Bug Fixes:
- - The OffsetCommitRequest message now correctly implements all three possible
-   API versions ([#390](https://github.com/Shopify/sarama/pull/390),
-   [#400](https://github.com/Shopify/sarama/pull/400)).
-
-## Version 1.1.0 (2015-03-20)
-
-Improvements:
- - Wrap the producer's partitioner call in a circuit-breaker so that repeatedly
-   broken topics don't choke throughput
-   ([#373](https://github.com/Shopify/sarama/pull/373)).
-
-Bug Fixes:
- - Fix the producer's internal reference counting in certain unusual scenarios
-   ([#367](https://github.com/Shopify/sarama/pull/367)).
- - Fix the consumer's internal reference counting in certain unusual scenarios
-   ([#369](https://github.com/Shopify/sarama/pull/369)).
- - Fix a condition where the producer's internal control messages could have
-   gotten stuck ([#368](https://github.com/Shopify/sarama/pull/368)).
- - Fix an issue where invalid partition lists would be cached when asking for
-   metadata for a non-existent topic ([#372](https://github.com/Shopify/sarama/pull/372)).
-
-
-## Version 1.0.0 (2015-03-17)
-
-Version 1.0.0 is the first tagged version, and is almost a complete rewrite. The primary differences with previous untagged versions are:
-
-- The producer has been rewritten; there is now a `SyncProducer` with a blocking API, and an `AsyncProducer` that is non-blocking.
-- The consumer has been rewritten to only open one connection per broker instead of one connection per partition.
-- The main types of Sarama are now interfaces to make dependency injection easy; mock implementations for `Consumer`, `SyncProducer` and `AsyncProducer` are provided in the `github.com/Shopify/sarama/mocks` package.
-- For most use cases, it is no longer necessary to open a `Client`; this will be done for you.
-- All the configuration values have been unified in the `Config` struct.
-- Much improved test suite.
diff --git a/vendor/github.com/Shopify/sarama/decompress.go b/vendor/github.com/Shopify/sarama/decompress.go
deleted file mode 100644
index aa7fb7498..000000000
--- a/vendor/github.com/Shopify/sarama/decompress.go
+++ /dev/null
@@ -1,61 +0,0 @@
-package sarama
-
-import (
-	"bytes"
-	"compress/gzip"
-	"fmt"
-	"io"
-	"sync"
-
-	snappy "github.com/eapache/go-xerial-snappy"
-	"github.com/pierrec/lz4/v4"
-)
-
-var (
-	lz4ReaderPool = sync.Pool{
-		New: func() interface{} {
-			return lz4.NewReader(nil)
-		},
-	}
-
-	gzipReaderPool sync.Pool
-)
-
-func decompress(cc CompressionCodec, data []byte) ([]byte, error) {
-	switch cc {
-	case CompressionNone:
-		return data, nil
-	case CompressionGZIP:
-		var err error
-		reader, ok := gzipReaderPool.Get().(*gzip.Reader)
-		if !ok {
-			reader, err = gzip.NewReader(bytes.NewReader(data))
-		} else {
-			err = reader.Reset(bytes.NewReader(data))
-		}
-
-		if err != nil {
-			return nil, err
-		}
-
-		defer gzipReaderPool.Put(reader)
-
-		return io.ReadAll(reader)
-	case CompressionSnappy:
-		return snappy.Decode(data)
-	case CompressionLZ4:
-		reader, ok := lz4ReaderPool.Get().(*lz4.Reader)
-		if !ok {
-			reader = lz4.NewReader(bytes.NewReader(data))
-		} else {
-			reader.Reset(bytes.NewReader(data))
-		}
-		defer lz4ReaderPool.Put(reader)
-
-		return io.ReadAll(reader)
-	case CompressionZSTD:
-		return zstdDecompress(ZstdDecoderParams{}, nil, data)
-	default:
-		return nil, PacketDecodingError{fmt.Sprintf("invalid compression specified (%d)", cc)}
-	}
-}
diff --git a/vendor/github.com/antonmedv/expr/ast/node.go b/vendor/github.com/antonmedv/expr/ast/node.go
index e85f853e9..5bdcb75ed 100644
--- a/vendor/github.com/antonmedv/expr/ast/node.go
+++ b/vendor/github.com/antonmedv/expr/ast/node.go
@@ -50,7 +50,6 @@ type NilNode struct {
 type IdentifierNode struct {
 	base
 	Value       string
-	Deref       bool
 	FieldIndex  []int
 	Method      bool // true if method, false if field
 	MethodIndex int  // index of method, set only if Method is true
@@ -106,10 +105,9 @@ type MemberNode struct {
 	Property Node
 	Name     string // Name of the field or method. Used for error reporting.
 	Optional bool
-	Deref    bool
 	FieldIndex []int
 
-	// TODO: Replace with a single MethodIndex field of &int type.
+	// TODO: Combine Method and MethodIndex into a single MethodIndex field of &int type.
 	Method      bool
 	MethodIndex int
 }
diff --git a/vendor/github.com/antonmedv/expr/checker/checker.go b/vendor/github.com/antonmedv/expr/checker/checker.go
index 00025a33c..86d6d6c6a 100644
--- a/vendor/github.com/antonmedv/expr/checker/checker.go
+++ b/vendor/github.com/antonmedv/expr/checker/checker.go
@@ -135,18 +135,14 @@ func (v *visitor) IdentifierNode(node *ast.IdentifierNode) (reflect.Type, info)
 		// when the arguments are known in CallNode.
return anyType, info{fn: fn} } - if v.config.Types == nil { - node.Deref = true - } else if t, ok := v.config.Types[node.Value]; ok { + if t, ok := v.config.Types[node.Value]; ok { if t.Ambiguous { return v.error(node, "ambiguous identifier %v", node.Value) } - d, c := deref(t.Type) - node.Deref = c node.Method = t.Method node.MethodIndex = t.MethodIndex node.FieldIndex = t.FieldIndex - return d, info{method: t.Method} + return t.Type, info{method: t.Method} } if v.config.Strict { return v.error(node, "unknown name %v", node.Value) @@ -180,6 +176,8 @@ func (v *visitor) ConstantNode(node *ast.ConstantNode) (reflect.Type, info) { func (v *visitor) UnaryNode(node *ast.UnaryNode) (reflect.Type, info) { t, _ := v.visit(node.Node) + t = deref(t) + switch node.Operator { case "!", "not": @@ -209,6 +207,9 @@ func (v *visitor) BinaryNode(node *ast.BinaryNode) (reflect.Type, info) { l, _ := v.visit(node.Left) r, _ := v.visit(node.Right) + l = deref(l) + r = deref(r) + // check operator overloading if fns, ok := v.config.Operators[node.Operator]; ok { t, _, ok := conf.FindSuitableOperatorOverload(fns, v.config.Types, l, r) @@ -383,8 +384,18 @@ func (v *visitor) ChainNode(node *ast.ChainNode) (reflect.Type, info) { } func (v *visitor) MemberNode(node *ast.MemberNode) (reflect.Type, info) { - base, _ := v.visit(node.Node) prop, _ := v.visit(node.Property) + if an, ok := node.Node.(*ast.IdentifierNode); ok && an.Value == "env" { + // If the index is a constant string, can save some + // cycles later by finding the type of its referent + if name, ok := node.Property.(*ast.StringNode); ok { + if t, ok := v.config.Types[name.Value]; ok { + return t.Type, info{method: t.Method} + } // No error if no type found; it may be added to env between compile and run + } + return anyType, info{} + } + base, _ := v.visit(node.Node) if name, ok := node.Property.(*ast.StringNode); ok { if base == nil { @@ -417,34 +428,27 @@ func (v *visitor) MemberNode(node *ast.MemberNode) (reflect.Type, info) { switch base.Kind() { case reflect.Interface: - node.Deref = true return anyType, info{} case reflect.Map: if prop != nil && !prop.AssignableTo(base.Key()) && !isAny(prop) { return v.error(node.Property, "cannot use %v to get an element from %v", prop, base) } - t, c := deref(base.Elem()) - node.Deref = c - return t, info{} + return base.Elem(), info{} case reflect.Array, reflect.Slice: if !isInteger(prop) && !isAny(prop) { return v.error(node.Property, "array elements can only be selected using an integer (got %v)", prop) } - t, c := deref(base.Elem()) - node.Deref = c - return t, info{} + return base.Elem(), info{} case reflect.Struct: if name, ok := node.Property.(*ast.StringNode); ok { propertyName := name.Value if field, ok := fetchField(base, propertyName); ok { - t, c := deref(field.Type) - node.Deref = c node.FieldIndex = field.Index node.Name = propertyName - return t, info{} + return field.Type, info{} } if len(v.parents) > 1 { if _, ok := v.parents[len(v.parents)-2].(*ast.CallNode); ok { diff --git a/vendor/github.com/antonmedv/expr/checker/types.go b/vendor/github.com/antonmedv/expr/checker/types.go index 7ccd89480..e8f53372c 100644 --- a/vendor/github.com/antonmedv/expr/checker/types.go +++ b/vendor/github.com/antonmedv/expr/checker/types.go @@ -204,25 +204,23 @@ func fetchField(t reflect.Type, name string) (reflect.StructField, bool) { return reflect.StructField{}, false } -func deref(t reflect.Type) (reflect.Type, bool) { +func deref(t reflect.Type) reflect.Type { if t == nil { - return nil, false + return nil } 
 	if t.Kind() == reflect.Interface {
-		return t, true
+		return t
 	}
 
-	found := false
 	for t != nil && t.Kind() == reflect.Ptr {
 		e := t.Elem()
 		switch e.Kind() {
 		case reflect.Struct, reflect.Map, reflect.Array, reflect.Slice:
-			return t, false
+			return t
 		default:
-			found = true
 			t = e
 		}
 	}
-	return t, found
+	return t
 }
 
 func isIntegerOrArithmeticOperation(node ast.Node) bool {
diff --git a/vendor/github.com/antonmedv/expr/compiler/compiler.go b/vendor/github.com/antonmedv/expr/compiler/compiler.go
index 3cd32af0f..4b2f94603 100644
--- a/vendor/github.com/antonmedv/expr/compiler/compiler.go
+++ b/vendor/github.com/antonmedv/expr/compiler/compiler.go
@@ -205,6 +205,10 @@ func (c *compiler) NilNode(_ *ast.NilNode) {
 }
 
 func (c *compiler) IdentifierNode(node *ast.IdentifierNode) {
+	if node.Value == "env" {
+		c.emit(OpLoadEnv)
+		return
+	}
 	if c.mapEnv {
 		c.emit(OpLoadFast, c.addConstant(node.Value))
 	} else if len(node.FieldIndex) > 0 {
@@ -220,11 +224,6 @@ func (c *compiler) IdentifierNode(node *ast.IdentifierNode) {
 	} else {
 		c.emit(OpLoadConst, c.addConstant(node.Value))
 	}
-	if node.Deref {
-		c.emit(OpDeref)
-	} else if node.Type() == nil {
-		c.emit(OpDeref)
-	}
 }
 
 func (c *compiler) IntegerNode(node *ast.IntegerNode) {
@@ -285,6 +284,7 @@ func (c *compiler) ConstantNode(node *ast.ConstantNode) {
 
 func (c *compiler) UnaryNode(node *ast.UnaryNode) {
 	c.compile(node.Node)
+	c.derefInNeeded(node.Node)
 
 	switch node.Operator {
 
@@ -309,7 +309,9 @@ func (c *compiler) BinaryNode(node *ast.BinaryNode) {
 	switch node.Operator {
 	case "==":
 		c.compile(node.Left)
+		c.derefInNeeded(node.Left)
 		c.compile(node.Right)
+		c.derefInNeeded(node.Right)
 
 		if l == r && l == reflect.Int {
 			c.emit(OpEqualInt)
@@ -321,114 +323,155 @@
 	case "!=":
 		c.compile(node.Left)
+		c.derefInNeeded(node.Left)
 		c.compile(node.Right)
+		c.derefInNeeded(node.Right)
 		c.emit(OpEqual)
 		c.emit(OpNot)
 
 	case "or", "||":
 		c.compile(node.Left)
+		c.derefInNeeded(node.Left)
 		end := c.emit(OpJumpIfTrue, placeholder)
 		c.emit(OpPop)
 		c.compile(node.Right)
+		c.derefInNeeded(node.Right)
 		c.patchJump(end)
 
 	case "and", "&&":
 		c.compile(node.Left)
+		c.derefInNeeded(node.Left)
 		end := c.emit(OpJumpIfFalse, placeholder)
 		c.emit(OpPop)
 		c.compile(node.Right)
+		c.derefInNeeded(node.Right)
 		c.patchJump(end)
 
 	case "<":
 		c.compile(node.Left)
+		c.derefInNeeded(node.Left)
 		c.compile(node.Right)
+		c.derefInNeeded(node.Right)
 		c.emit(OpLess)
 
 	case ">":
 		c.compile(node.Left)
+		c.derefInNeeded(node.Left)
 		c.compile(node.Right)
+		c.derefInNeeded(node.Right)
 		c.emit(OpMore)
 
 	case "<=":
 		c.compile(node.Left)
+		c.derefInNeeded(node.Left)
 		c.compile(node.Right)
+		c.derefInNeeded(node.Right)
 		c.emit(OpLessOrEqual)
 
 	case ">=":
 		c.compile(node.Left)
+		c.derefInNeeded(node.Left)
 		c.compile(node.Right)
+		c.derefInNeeded(node.Right)
 		c.emit(OpMoreOrEqual)
 
 	case "+":
 		c.compile(node.Left)
+		c.derefInNeeded(node.Left)
 		c.compile(node.Right)
+		c.derefInNeeded(node.Right)
 		c.emit(OpAdd)
 
 	case "-":
 		c.compile(node.Left)
+		c.derefInNeeded(node.Left)
 		c.compile(node.Right)
+		c.derefInNeeded(node.Right)
 		c.emit(OpSubtract)
 
 	case "*":
 		c.compile(node.Left)
+		c.derefInNeeded(node.Left)
 		c.compile(node.Right)
+		c.derefInNeeded(node.Right)
 		c.emit(OpMultiply)
 
 	case "/":
 		c.compile(node.Left)
+		c.derefInNeeded(node.Left)
 		c.compile(node.Right)
+		c.derefInNeeded(node.Right)
 		c.emit(OpDivide)
 
 	case "%":
 		c.compile(node.Left)
+		c.derefInNeeded(node.Left)
 		c.compile(node.Right)
+		c.derefInNeeded(node.Right)
 		c.emit(OpModulo)
 
 	case "**", "^":
 		c.compile(node.Left)
+		c.derefInNeeded(node.Left)
 		c.compile(node.Right)
+
c.derefInNeeded(node.Right) c.emit(OpExponent) case "in": c.compile(node.Left) + c.derefInNeeded(node.Left) c.compile(node.Right) + c.derefInNeeded(node.Right) c.emit(OpIn) case "matches": if node.Regexp != nil { c.compile(node.Left) + c.derefInNeeded(node.Left) c.emit(OpMatchesConst, c.addConstant(node.Regexp)) } else { c.compile(node.Left) + c.derefInNeeded(node.Left) c.compile(node.Right) + c.derefInNeeded(node.Right) c.emit(OpMatches) } case "contains": c.compile(node.Left) + c.derefInNeeded(node.Left) c.compile(node.Right) + c.derefInNeeded(node.Right) c.emit(OpContains) case "startsWith": c.compile(node.Left) + c.derefInNeeded(node.Left) c.compile(node.Right) + c.derefInNeeded(node.Right) c.emit(OpStartsWith) case "endsWith": c.compile(node.Left) + c.derefInNeeded(node.Left) c.compile(node.Right) + c.derefInNeeded(node.Right) c.emit(OpEndsWith) case "..": c.compile(node.Left) + c.derefInNeeded(node.Left) c.compile(node.Right) + c.derefInNeeded(node.Right) c.emit(OpRange) case "??": c.compile(node.Left) + c.derefInNeeded(node.Left) end := c.emit(OpJumpIfNotNil, placeholder) c.emit(OpPop) c.compile(node.Right) + c.derefInNeeded(node.Right) c.patchJump(end) default: @@ -457,7 +500,6 @@ func (c *compiler) MemberNode(node *ast.MemberNode) { return } op := OpFetch - original := node index := node.FieldIndex path := []string{node.Name} base := node.Node @@ -466,21 +508,15 @@ func (c *compiler) MemberNode(node *ast.MemberNode) { for !node.Optional { ident, ok := base.(*ast.IdentifierNode) if ok && len(ident.FieldIndex) > 0 { - if ident.Deref { - panic("IdentifierNode should not be dereferenced") - } index = append(ident.FieldIndex, index...) path = append([]string{ident.Value}, path...) c.emitLocation(ident.Location(), OpLoadField, c.addConstant( &runtime.Field{Index: index, Path: path}, )) - goto deref + return } member, ok := base.(*ast.MemberNode) if ok && len(member.FieldIndex) > 0 { - if member.Deref { - panic("MemberNode should not be dereferenced") - } index = append(member.FieldIndex, index...) path = append([]string{member.Name}, path...) 
node = member @@ -505,13 +541,6 @@ func (c *compiler) MemberNode(node *ast.MemberNode) { &runtime.Field{Index: index, Path: path}, )) } - -deref: - if original.Deref { - c.emit(OpDeref) - } else if original.Type() == nil { - c.emit(OpDeref) - } } func (c *compiler) SliceNode(node *ast.SliceNode) { @@ -730,6 +759,13 @@ func (c *compiler) PairNode(node *ast.PairNode) { c.compile(node.Value) } +func (c *compiler) derefInNeeded(node ast.Node) { + switch kind(node) { + case reflect.Ptr, reflect.Interface: + c.emit(OpDeref) + } +} + func kind(node ast.Node) reflect.Kind { t := node.Type() if t == nil { diff --git a/vendor/github.com/antonmedv/expr/conf/types_table.go b/vendor/github.com/antonmedv/expr/conf/types_table.go index e917f5fa8..f4d401c9c 100644 --- a/vendor/github.com/antonmedv/expr/conf/types_table.go +++ b/vendor/github.com/antonmedv/expr/conf/types_table.go @@ -54,6 +54,9 @@ func CreateTypesTable(i interface{}) TypesTable { for _, key := range v.MapKeys() { value := v.MapIndex(key) if key.Kind() == reflect.String && value.IsValid() && value.CanInterface() { + if key.String() == "env" { // Could check for all keywords here + panic("attempt to misuse env keyword as env map key") + } types[key.String()] = Tag{Type: reflect.TypeOf(value.Interface())} } } @@ -94,10 +97,13 @@ func FieldsFromStruct(t reflect.Type) TypesTable { } } } - - types[FieldName(f)] = Tag{ - Type: f.Type, - FieldIndex: f.Index, + if fn := FieldName(f); fn == "env" { // Could check for all keywords here + panic("attempt to misuse env keyword as env struct field tag") + } else { + types[FieldName(f)] = Tag{ + Type: f.Type, + FieldIndex: f.Index, + } } } } diff --git a/vendor/github.com/antonmedv/expr/expr.go b/vendor/github.com/antonmedv/expr/expr.go index 14f6af285..6aa674fb5 100644 --- a/vendor/github.com/antonmedv/expr/expr.go +++ b/vendor/github.com/antonmedv/expr/expr.go @@ -151,15 +151,10 @@ func Compile(input string, ops ...Option) (*vm.Program, error) { _, _ = checker.Check(tree, config) ast.Walk(&tree.Node, v) } - _, err = checker.Check(tree, config) - if err != nil { - return nil, err - } - } else { - _, err = checker.Check(tree, config) - if err != nil { - return nil, err - } + } + _, err = checker.Check(tree, config) + if err != nil { + return nil, err } if config.Optimize { diff --git a/vendor/github.com/antonmedv/expr/file/error.go b/vendor/github.com/antonmedv/expr/file/error.go index 1e7e81b94..edf202b04 100644 --- a/vendor/github.com/antonmedv/expr/file/error.go +++ b/vendor/github.com/antonmedv/expr/file/error.go @@ -10,7 +10,7 @@ type Error struct { Location Message string Snippet string - Prev error + Prev error } func (e *Error) Error() string { @@ -45,7 +45,6 @@ func (e *Error) Bind(source *Source) *Error { return e } - func (e *Error) Unwrap() error { return e.Prev } @@ -54,7 +53,6 @@ func (e *Error) Wrap(err error) { e.Prev = err } - func (e *Error) format() string { if e.Location.Empty() { return e.Message diff --git a/vendor/github.com/antonmedv/expr/vm/opcodes.go b/vendor/github.com/antonmedv/expr/vm/opcodes.go index b3117e73c..4bc9abf4d 100644 --- a/vendor/github.com/antonmedv/expr/vm/opcodes.go +++ b/vendor/github.com/antonmedv/expr/vm/opcodes.go @@ -3,7 +3,8 @@ package vm type Opcode byte const ( - OpPush Opcode = iota + OpInvalid Opcode = iota + OpPush OpPushInt OpPop OpLoadConst @@ -11,6 +12,7 @@ const ( OpLoadFast OpLoadMethod OpLoadFunc + OpLoadEnv OpFetch OpFetchField OpMethod diff --git a/vendor/github.com/antonmedv/expr/vm/program.go b/vendor/github.com/antonmedv/expr/vm/program.go 
index d424df14f..936eca9a2 100644 --- a/vendor/github.com/antonmedv/expr/vm/program.go +++ b/vendor/github.com/antonmedv/expr/vm/program.go @@ -73,6 +73,9 @@ func (program *Program) Disassemble() string { } switch op { + case OpInvalid: + code("OpInvalid") + case OpPush: constant("OpPush") @@ -97,6 +100,9 @@ func (program *Program) Disassemble() string { case OpLoadFunc: argument("OpLoadFunc") + case OpLoadEnv: + code("OpLoadEnv") + case OpFetch: code("OpFetch") @@ -270,7 +276,7 @@ func (program *Program) Disassemble() string { code("OpEnd") default: - _, _ = fmt.Fprintf(w, "%v\t%#x\n", ip, op) + _, _ = fmt.Fprintf(w, "%v\t%#x (unknown)\n", ip, op) } } _ = w.Flush() diff --git a/vendor/github.com/antonmedv/expr/vm/runtime/runtime.go b/vendor/github.com/antonmedv/expr/vm/runtime/runtime.go index b2eeb65d8..75ac94bf8 100644 --- a/vendor/github.com/antonmedv/expr/vm/runtime/runtime.go +++ b/vendor/github.com/antonmedv/expr/vm/runtime/runtime.go @@ -153,13 +153,15 @@ func Deref(i interface{}) interface{} { v = v.Elem() } - if v.Kind() == reflect.Ptr { +loop: + for v.Kind() == reflect.Ptr { if v.IsNil() { return i } indirect := reflect.Indirect(v) switch indirect.Kind() { case reflect.Struct, reflect.Map, reflect.Array, reflect.Slice: + break loop default: v = v.Elem() } diff --git a/vendor/github.com/antonmedv/expr/vm/vm.go b/vendor/github.com/antonmedv/expr/vm/vm.go index af4fc5bf7..ec22c251d 100644 --- a/vendor/github.com/antonmedv/expr/vm/vm.go +++ b/vendor/github.com/antonmedv/expr/vm/vm.go @@ -93,6 +93,9 @@ func (vm *VM) Run(program *Program, env interface{}) (_ interface{}, err error) switch op { + case OpInvalid: + panic("invalid opcode") + case OpPush: vm.push(program.Constants[arg]) @@ -123,6 +126,9 @@ func (vm *VM) Run(program *Program, env interface{}) (_ interface{}, err error) a := vm.pop() vm.push(runtime.FetchField(a, program.Constants[arg].(*runtime.Field))) + case OpLoadEnv: + vm.push(env) + case OpMethod: a := vm.pop() vm.push(runtime.FetchMethod(a, program.Constants[arg].(*runtime.Method))) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go index ff1f64435..d7ca9fbf5 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go @@ -705,6 +705,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -13724,6 +13727,9 @@ var awsPartition = partition{ endpointKey{ Region: "af-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, endpointKey{ Region: "ap-northeast-1", }: endpoint{}, @@ -13766,6 +13772,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, endpointKey{ Region: "sa-east-1", }: endpoint{}, @@ -13775,6 +13784,9 @@ var awsPartition = partition{ endpointKey{ Region: "us-east-2", }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, endpointKey{ Region: "us-west-2", }: endpoint{}, @@ -19765,6 +19777,24 @@ var awsPartition = partition{ Region: "us-west-2", }, }, + endpointKey{ + Region: "aws-global", + Variant: fipsVariant, + }: endpoint{ + Hostname: "networkmanager-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + endpointKey{ + Region: "fips-aws-global", + }: endpoint{ + Hostname: 
"networkmanager-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, }, }, "nimble": service{ @@ -24680,6 +24710,9 @@ var awsPartition = partition{ }, "schemas": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, endpointKey{ Region: "ap-east-1", }: endpoint{}, @@ -24689,6 +24722,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-northeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, endpointKey{ Region: "ap-south-1", }: endpoint{}, @@ -24698,15 +24734,27 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -24716,6 +24764,12 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, endpointKey{ Region: "sa-east-1", }: endpoint{}, @@ -25030,6 +25084,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -29513,9 +29570,21 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, endpointKey{ Region: "us-east-1", }: endpoint{}, @@ -33008,6 +33077,16 @@ var awscnPartition = partition{ }, }, }, + "schemas": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, "secretsmanager": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -37353,6 +37432,24 @@ var awsusgovPartition = partition{ Region: "us-gov-west-1", }, }, + endpointKey{ + Region: "aws-us-gov-global", + Variant: fipsVariant, + }: endpoint{ + Hostname: "networkmanager.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "fips-aws-us-gov-global", + }: endpoint{ + Hostname: "networkmanager.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, }, }, "oidc": service{ diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go index 9d9173626..a0203e758 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/version.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.44.320" +const SDKVersion = "1.44.323" diff --git a/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go b/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go index f9de38875..c1ddd39b4 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go +++ 
b/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go @@ -187004,6 +187004,51 @@ const ( // InstanceTypeC7gn16xlarge is a InstanceType enum value InstanceTypeC7gn16xlarge = "c7gn.16xlarge" + + // InstanceTypeP548xlarge is a InstanceType enum value + InstanceTypeP548xlarge = "p5.48xlarge" + + // InstanceTypeM7iLarge is a InstanceType enum value + InstanceTypeM7iLarge = "m7i.large" + + // InstanceTypeM7iXlarge is a InstanceType enum value + InstanceTypeM7iXlarge = "m7i.xlarge" + + // InstanceTypeM7i2xlarge is a InstanceType enum value + InstanceTypeM7i2xlarge = "m7i.2xlarge" + + // InstanceTypeM7i4xlarge is a InstanceType enum value + InstanceTypeM7i4xlarge = "m7i.4xlarge" + + // InstanceTypeM7i8xlarge is a InstanceType enum value + InstanceTypeM7i8xlarge = "m7i.8xlarge" + + // InstanceTypeM7i12xlarge is a InstanceType enum value + InstanceTypeM7i12xlarge = "m7i.12xlarge" + + // InstanceTypeM7i16xlarge is a InstanceType enum value + InstanceTypeM7i16xlarge = "m7i.16xlarge" + + // InstanceTypeM7i24xlarge is a InstanceType enum value + InstanceTypeM7i24xlarge = "m7i.24xlarge" + + // InstanceTypeM7i48xlarge is a InstanceType enum value + InstanceTypeM7i48xlarge = "m7i.48xlarge" + + // InstanceTypeM7iFlexLarge is a InstanceType enum value + InstanceTypeM7iFlexLarge = "m7i-flex.large" + + // InstanceTypeM7iFlexXlarge is a InstanceType enum value + InstanceTypeM7iFlexXlarge = "m7i-flex.xlarge" + + // InstanceTypeM7iFlex2xlarge is a InstanceType enum value + InstanceTypeM7iFlex2xlarge = "m7i-flex.2xlarge" + + // InstanceTypeM7iFlex4xlarge is a InstanceType enum value + InstanceTypeM7iFlex4xlarge = "m7i-flex.4xlarge" + + // InstanceTypeM7iFlex8xlarge is a InstanceType enum value + InstanceTypeM7iFlex8xlarge = "m7i-flex.8xlarge" ) // InstanceType_Values returns all elements of the InstanceType enum @@ -187674,6 +187719,21 @@ func InstanceType_Values() []string { InstanceTypeC7gn8xlarge, InstanceTypeC7gn12xlarge, InstanceTypeC7gn16xlarge, + InstanceTypeP548xlarge, + InstanceTypeM7iLarge, + InstanceTypeM7iXlarge, + InstanceTypeM7i2xlarge, + InstanceTypeM7i4xlarge, + InstanceTypeM7i8xlarge, + InstanceTypeM7i12xlarge, + InstanceTypeM7i16xlarge, + InstanceTypeM7i24xlarge, + InstanceTypeM7i48xlarge, + InstanceTypeM7iFlexLarge, + InstanceTypeM7iFlexXlarge, + InstanceTypeM7iFlex2xlarge, + InstanceTypeM7iFlex4xlarge, + InstanceTypeM7iFlex8xlarge, } } diff --git a/vendor/github.com/digitalocean/godo/CHANGELOG.md b/vendor/github.com/digitalocean/godo/CHANGELOG.md index 23407fe80..7233c389b 100644 --- a/vendor/github.com/digitalocean/godo/CHANGELOG.md +++ b/vendor/github.com/digitalocean/godo/CHANGELOG.md @@ -1,5 +1,10 @@ # Change Log +## [v1.98.0] - 2023-03-09 + +- #608 - @anitgandhi - client: don't process body upon 204 response +- #607 - @gregmankes - add apps rewrites/redirects to app spec + ## [v1.97.0] - 2023-02-10 - #601 - @jcodybaker - APPS-6813: update app platform - pending_deployment + timing diff --git a/vendor/github.com/digitalocean/godo/apps.gen.go b/vendor/github.com/digitalocean/godo/apps.gen.go index 221332c72..8bb88858d 100644 --- a/vendor/github.com/digitalocean/godo/apps.gen.go +++ b/vendor/github.com/digitalocean/godo/apps.gen.go @@ -162,6 +162,8 @@ type AppBuildConfig struct { type AppBuildConfigCNBVersioning struct { // List of versioned buildpacks used for the application. Buildpacks are only versioned based on the major semver version, therefore exact versions will not be available at the app build config. 
Buildpacks []*Buildpack `json:"buildpacks,omitempty"` + // A version id that represents the underlying CNB stack. The version of the stack indicates what buildpacks are supported. + StackID string `json:"stack_id,omitempty"` } // AppDatabaseSpec struct for AppDatabaseSpec @@ -239,10 +241,12 @@ type AppFunctionsSpec struct { CORS *AppCORSPolicy `json:"cors,omitempty"` } -// AppIngressSpec struct for AppIngressSpec +// AppIngressSpec Specification for app ingress configurations. type AppIngressSpec struct { LoadBalancer AppIngressSpecLoadBalancer `json:"load_balancer,omitempty"` LoadBalancerSize int64 `json:"load_balancer_size,omitempty"` + // Rules for configuring HTTP ingress for component routes, CORS, rewrites, and redirects. + Rules []*AppIngressSpecRule `json:"rules,omitempty"` } // AppIngressSpecLoadBalancer the model 'AppIngressSpecLoadBalancer' @@ -254,6 +258,49 @@ const ( AppIngressSpecLoadBalancer_DigitalOcean AppIngressSpecLoadBalancer = "DIGITALOCEAN" ) +// AppIngressSpecRule A rule that configures component routes, rewrites, redirects and cors. +type AppIngressSpecRule struct { + Match *AppIngressSpecRuleMatch `json:"match,omitempty"` + Component *AppIngressSpecRuleRoutingComponent `json:"component,omitempty"` + Redirect *AppIngressSpecRuleRoutingRedirect `json:"redirect,omitempty"` + CORS *AppCORSPolicy `json:"cors,omitempty"` +} + +// AppIngressSpecRuleMatch The match configuration for a rule. +type AppIngressSpecRuleMatch struct { + Path *AppIngressSpecRuleStringMatch `json:"path,omitempty"` +} + +// AppIngressSpecRuleRoutingComponent The component routing configuration. +type AppIngressSpecRuleRoutingComponent struct { + // The name of the component to route to. + Name string `json:"name,omitempty"` + // An optional flag to preserve the path that is forwarded to the backend service. By default, the HTTP request path will be trimmed from the left when forwarded to the component. For example, a component with `path=/api` will have requests to `/api/list` trimmed to `/list`. If this value is `true`, the path will remain `/api/list`. Note: this is not applicable for Functions Components and is mutually exclusive with `rewrite`. + PreservePathPrefix bool `json:"preserve_path_prefix,omitempty"` + // An optional field that will rewrite the path of the component to be what is specified here. By default, the HTTP request path will be trimmed from the left when forwarded to the component. For example, a component with `path=/api` will have requests to `/api/list` trimmed to `/list`. If you specified the rewrite to be `/v1/`, requests to `/api/list` would be rewritten to `/v1/list`. Note: this is mutually exclusive with `preserve_path_prefix`. + Rewrite string `json:"rewrite,omitempty"` +} + +// AppIngressSpecRuleRoutingRedirect The redirect routing configuration. +type AppIngressSpecRuleRoutingRedirect struct { + // An optional URI path to redirect to. Note: if this is specified the whole URI of the original request will be overwritten to this value, irrespective of the original request URI being matched. + Uri string `json:"uri,omitempty"` + // The authority/host to redirect to. This can be a hostname or IP address. Note: use `port` to set the port. + Authority string `json:"authority,omitempty"` + // The port to redirect to. + Port int64 `json:"port,omitempty"` + // The scheme to redirect to. Supported values are `http` or `https`. Default: `https`. + Scheme string `json:"scheme,omitempty"` + // The redirect code to use. Defaults to `302`. 
Supported values are 300, 301, 302, 303, 304, 305, 307, 308.
+	RedirectCode int64 `json:"redirect_code,omitempty"`
+}
+
+// AppIngressSpecRuleStringMatch The string match configuration.
+type AppIngressSpecRuleStringMatch struct {
+	// Prefix-based match. For example, `/api` will match `/api`, `/api/`, and any nested paths such as `/api/v1/endpoint`.
+	Prefix string `json:"prefix,omitempty"`
+}
+
 // AppJobSpec struct for AppJobSpec
 type AppJobSpec struct {
 	// The name. Must be unique across all components within the same app.
@@ -973,6 +1020,12 @@ const (
 	AppInstanceSizeCPUType_Dedicated AppInstanceSizeCPUType = "DEDICATED"
 )
 
+// ListBuildpacksResponse struct for ListBuildpacksResponse
+type ListBuildpacksResponse struct {
+	// List of the available buildpacks on App Platform.
+	Buildpacks []*Buildpack `json:"buildpacks,omitempty"`
+}
+
 // AppProposeRequest struct for AppProposeRequest
 type AppProposeRequest struct {
 	Spec *AppSpec `json:"spec"`
@@ -982,22 +1035,22 @@ type AppProposeRequest struct {
 
 // AppProposeResponse struct for AppProposeResponse
 type AppProposeResponse struct {
-	// Deprecated. Please use AppIsStarter instead.
+	// Deprecated. Please use app_is_starter instead.
 	AppIsStatic bool `json:"app_is_static,omitempty"`
 	// Indicates whether the app name is available.
 	AppNameAvailable bool `json:"app_name_available,omitempty"`
 	// If the app name is unavailable, this will be set to a suggested available name.
 	AppNameSuggestion string `json:"app_name_suggestion,omitempty"`
-	// Deprecated. Please use ExistingStarterApps instead.
+	// Deprecated. Please use existing_starter_apps instead.
 	ExistingStaticApps string `json:"existing_static_apps,omitempty"`
-	// Deprecated. Please use MaxFreeStarterApps instead.
+	// Deprecated. Please use max_free_starter_apps instead.
 	MaxFreeStaticApps string `json:"max_free_static_apps,omitempty"`
 	Spec *AppSpec `json:"spec,omitempty"`
 	// The monthly cost of the proposed app in USD.
 	AppCost float32 `json:"app_cost,omitempty"`
-	// The monthly cost of the proposed app in USD using the next pricing plan tier. For example, if you propose an app that uses the Basic tier, the `AppTierUpgradeCost` field displays the monthly cost of the app if it were to use the Professional tier. If the proposed app already uses the most expensive tier, the field is empty.
+	// The monthly cost of the proposed app in USD using the next pricing plan tier. For example, if you propose an app that uses the Basic tier, the `app_tier_upgrade_cost` field displays the monthly cost of the app if it were to use the Professional tier. If the proposed app already uses the most expensive tier, the field is empty.
 	AppTierUpgradeCost float32 `json:"app_tier_upgrade_cost,omitempty"`
-	// The monthly cost of the proposed app in USD using the previous pricing plan tier. For example, if you propose an app that uses the Professional tier, the `AppTierDowngradeCost` field displays the monthly cost of the app if it were to use the Basic tier. If the proposed app already uses the lest expensive tier, the field is empty.
+	// The monthly cost of the proposed app in USD using the previous pricing plan tier. For example, if you propose an app that uses the Professional tier, the `app_tier_downgrade_cost` field displays the monthly cost of the app if it were to use the Basic tier. If the proposed app already uses the least expensive tier, the field is empty.
 	AppTierDowngradeCost float32 `json:"app_tier_downgrade_cost,omitempty"`
 	// The number of existing starter tier apps the account has.
ExistingStarterApps string `json:"existing_starter_apps,omitempty"` diff --git a/vendor/github.com/digitalocean/godo/apps_accessors.go b/vendor/github.com/digitalocean/godo/apps_accessors.go index d3692a189..82bba4347 100644 --- a/vendor/github.com/digitalocean/godo/apps_accessors.go +++ b/vendor/github.com/digitalocean/godo/apps_accessors.go @@ -366,6 +366,14 @@ func (a *AppBuildConfigCNBVersioning) GetBuildpacks() []*Buildpack { return a.Buildpacks } +// GetStackID returns the StackID field. +func (a *AppBuildConfigCNBVersioning) GetStackID() string { + if a == nil { + return "" + } + return a.StackID +} + // GetAllowCredentials returns the AllowCredentials field. func (a *AppCORSPolicy) GetAllowCredentials() bool { if a == nil { @@ -798,6 +806,126 @@ func (a *AppIngressSpec) GetLoadBalancerSize() int64 { return a.LoadBalancerSize } +// GetRules returns the Rules field. +func (a *AppIngressSpec) GetRules() []*AppIngressSpecRule { + if a == nil { + return nil + } + return a.Rules +} + +// GetComponent returns the Component field. +func (a *AppIngressSpecRule) GetComponent() *AppIngressSpecRuleRoutingComponent { + if a == nil { + return nil + } + return a.Component +} + +// GetCORS returns the CORS field. +func (a *AppIngressSpecRule) GetCORS() *AppCORSPolicy { + if a == nil { + return nil + } + return a.CORS +} + +// GetMatch returns the Match field. +func (a *AppIngressSpecRule) GetMatch() *AppIngressSpecRuleMatch { + if a == nil { + return nil + } + return a.Match +} + +// GetRedirect returns the Redirect field. +func (a *AppIngressSpecRule) GetRedirect() *AppIngressSpecRuleRoutingRedirect { + if a == nil { + return nil + } + return a.Redirect +} + +// GetPath returns the Path field. +func (a *AppIngressSpecRuleMatch) GetPath() *AppIngressSpecRuleStringMatch { + if a == nil { + return nil + } + return a.Path +} + +// GetName returns the Name field. +func (a *AppIngressSpecRuleRoutingComponent) GetName() string { + if a == nil { + return "" + } + return a.Name +} + +// GetPreservePathPrefix returns the PreservePathPrefix field. +func (a *AppIngressSpecRuleRoutingComponent) GetPreservePathPrefix() bool { + if a == nil { + return false + } + return a.PreservePathPrefix +} + +// GetRewrite returns the Rewrite field. +func (a *AppIngressSpecRuleRoutingComponent) GetRewrite() string { + if a == nil { + return "" + } + return a.Rewrite +} + +// GetAuthority returns the Authority field. +func (a *AppIngressSpecRuleRoutingRedirect) GetAuthority() string { + if a == nil { + return "" + } + return a.Authority +} + +// GetPort returns the Port field. +func (a *AppIngressSpecRuleRoutingRedirect) GetPort() int64 { + if a == nil { + return 0 + } + return a.Port +} + +// GetRedirectCode returns the RedirectCode field. +func (a *AppIngressSpecRuleRoutingRedirect) GetRedirectCode() int64 { + if a == nil { + return 0 + } + return a.RedirectCode +} + +// GetScheme returns the Scheme field. +func (a *AppIngressSpecRuleRoutingRedirect) GetScheme() string { + if a == nil { + return "" + } + return a.Scheme +} + +// GetUri returns the Uri field. +func (a *AppIngressSpecRuleRoutingRedirect) GetUri() string { + if a == nil { + return "" + } + return a.Uri +} + +// GetPrefix returns the Prefix field. +func (a *AppIngressSpecRuleStringMatch) GetPrefix() string { + if a == nil { + return "" + } + return a.Prefix +} + // GetCPUs returns the CPUs field. 
func (a *AppInstanceSize) GetCPUs() string { if a == nil { @@ -902,6 +1030,14 @@ func (a *AppJobSpec) GetDockerfilePath() string { return a.DockerfilePath } +// GetEnvironmentSlug returns the EnvironmentSlug field. +func (a *AppJobSpec) GetEnvironmentSlug() string { + if a == nil { + return "" + } + return a.EnvironmentSlug +} + // GetEnvs returns the Envs field. func (a *AppJobSpec) GetEnvs() []*AppVariableDefinition { if a == nil { @@ -1326,6 +1462,14 @@ func (a *AppServiceSpec) GetDockerfilePath() string { return a.DockerfilePath } +// GetEnvironmentSlug returns the EnvironmentSlug field. +func (a *AppServiceSpec) GetEnvironmentSlug() string { + if a == nil { + return "" + } + return a.EnvironmentSlug +} + // GetEnvs returns the Envs field. func (a *AppServiceSpec) GetEnvs() []*AppVariableDefinition { if a == nil { @@ -1646,6 +1790,14 @@ func (a *AppStaticSiteSpec) GetDockerfilePath() string { return a.DockerfilePath } +// GetEnvironmentSlug returns the EnvironmentSlug field. +func (a *AppStaticSiteSpec) GetEnvironmentSlug() string { + if a == nil { + return "" + } + return a.EnvironmentSlug +} + // GetEnvs returns the Envs field. func (a *AppStaticSiteSpec) GetEnvs() []*AppVariableDefinition { if a == nil { @@ -1838,6 +1990,14 @@ func (a *AppWorkerSpec) GetDockerfilePath() string { return a.DockerfilePath } +// GetEnvironmentSlug returns the EnvironmentSlug field. +func (a *AppWorkerSpec) GetEnvironmentSlug() string { + if a == nil { + return "" + } + return a.EnvironmentSlug +} + // GetEnvs returns the Envs field. func (a *AppWorkerSpec) GetEnvs() []*AppVariableDefinition { if a == nil { @@ -2726,6 +2886,14 @@ func (d *DetectResponseComponent) GetDockerfiles() []string { return d.Dockerfiles } +// GetEnvironmentSlug returns the EnvironmentSlug field. +func (d *DetectResponseComponent) GetEnvironmentSlug() string { + if d == nil { + return "" + } + return d.EnvironmentSlug +} + // GetEnvVars returns the EnvVars field. func (d *DetectResponseComponent) GetEnvVars() []*AppVariableDefinition { if d == nil { @@ -2966,6 +3134,14 @@ func (i *ImageSourceSpecDeployOnPush) GetEnabled() bool { return i.Enabled } +// GetBuildpacks returns the Buildpacks field. +func (l *ListBuildpacksResponse) GetBuildpacks() []*Buildpack { + if l == nil { + return nil + } + return l.Buildpacks +} + // GetAffectedComponents returns the AffectedComponents field. 
func (u *UpgradeBuildpackResponse) GetAffectedComponents() []string { if u == nil { diff --git a/vendor/github.com/digitalocean/godo/godo.go b/vendor/github.com/digitalocean/godo/godo.go index 2523f6617..14fac268c 100644 --- a/vendor/github.com/digitalocean/godo/godo.go +++ b/vendor/github.com/digitalocean/godo/godo.go @@ -21,7 +21,7 @@ import ( ) const ( - libraryVersion = "1.97.0" + libraryVersion = "1.98.0" defaultBaseURL = "https://api.digitalocean.com/" userAgent = "godo/" + libraryVersion mediaType = "application/json" @@ -442,7 +442,7 @@ func (c *Client) Do(ctx context.Context, req *http.Request, v interface{}) (*Res return response, err } - if v != nil { + if resp.StatusCode != http.StatusNoContent && v != nil { if w, ok := v.(io.Writer); ok { _, err = io.Copy(w, resp.Body) if err != nil { diff --git a/vendor/github.com/gophercloud/gophercloud/CHANGELOG.md b/vendor/github.com/gophercloud/gophercloud/CHANGELOG.md index 63a956bf1..9e2567b98 100644 --- a/vendor/github.com/gophercloud/gophercloud/CHANGELOG.md +++ b/vendor/github.com/gophercloud/gophercloud/CHANGELOG.md @@ -1,3 +1,34 @@ +## v1.3.0 (2023-03-28) + +* [GH-2464](https://github.com/gophercloud/gophercloud/pull/2464) keystone: add v3 limits create operation +* [GH-2512](https://github.com/gophercloud/gophercloud/pull/2512) Manila: add List for share-access-rules API +* [GH-2529](https://github.com/gophercloud/gophercloud/pull/2529) Added target state "rebuild" for Ironic nodes +* [GH-2539](https://github.com/gophercloud/gophercloud/pull/2539) Add release instructions +* [GH-2540](https://github.com/gophercloud/gophercloud/pull/2540) [all] IsEmpty to check for HTTP status 204 +* [GH-2543](https://github.com/gophercloud/gophercloud/pull/2543) keystone: add v3 OS-FEDERATION mappings get operation +* [GH-2545](https://github.com/gophercloud/gophercloud/pull/2545) baremetal: add inspection_{started,finished}_at to Node +* [GH-2546](https://github.com/gophercloud/gophercloud/pull/2546) Drop train job for baremetal +* [GH-2549](https://github.com/gophercloud/gophercloud/pull/2549) objects: Clarify ExtractContent usage +* [GH-2550](https://github.com/gophercloud/gophercloud/pull/2550) keystone: add v3 OS-FEDERATION mappings update operation +* [GH-2552](https://github.com/gophercloud/gophercloud/pull/2552) objectstorage: Reject container names with a slash +* [GH-2555](https://github.com/gophercloud/gophercloud/pull/2555) nova: introduce servers.ListSimple along with the more detailed servers.List +* [GH-2558](https://github.com/gophercloud/gophercloud/pull/2558) Expand docs on 'clientconfig' usage +* [GH-2563](https://github.com/gophercloud/gophercloud/pull/2563) Support propagate_uplink_status for Ports +* [GH-2567](https://github.com/gophercloud/gophercloud/pull/2567) Fix invalid baremetal-introspection service type +* [GH-2568](https://github.com/gophercloud/gophercloud/pull/2568) Prefer github mirrors over opendev repos +* [GH-2571](https://github.com/gophercloud/gophercloud/pull/2571) Swift V1: support object versioning +* [GH-2572](https://github.com/gophercloud/gophercloud/pull/2572) networking v2: add extraroutes Add and Remove methods +* [GH-2573](https://github.com/gophercloud/gophercloud/pull/2573) Enable tests for object versioning +* [GH-2576](https://github.com/gophercloud/gophercloud/pull/2576) keystone: add v3 OS-FEDERATION mappings delete operation +* [GH-2578](https://github.com/gophercloud/gophercloud/pull/2578) Add periodic jobs for OpenStack zed release and reduce periodic jobs frequency +* 
[GH-2580](https://github.com/gophercloud/gophercloud/pull/2580) [neutron v2]: Add support for network segments update +* [GH-2583](https://github.com/gophercloud/gophercloud/pull/2583) Add missing rule protocol constants for IPIP +* [GH-2584](https://github.com/gophercloud/gophercloud/pull/2584) CI: workaround mongodb dependency for messaging and clustering master jobs +* [GH-2587](https://github.com/gophercloud/gophercloud/pull/2587) fix: Incorrect Documentation +* [GH-2593](https://github.com/gophercloud/gophercloud/pull/2593) Make TestMTUNetworkCRUDL deterministic +* [GH-2594](https://github.com/gophercloud/gophercloud/pull/2594) Bump actions/setup-go from 3 to 4 + + ## v1.2.0 (2023-01-27) Starting with this version, Gophercloud sends its actual version in the diff --git a/vendor/github.com/gophercloud/gophercloud/README.md b/vendor/github.com/gophercloud/gophercloud/README.md index 696c2b4fd..89b08156f 100644 --- a/vendor/github.com/gophercloud/gophercloud/README.md +++ b/vendor/github.com/gophercloud/gophercloud/README.md @@ -13,7 +13,7 @@ Gophercloud is an OpenStack Go SDK. Reference a Gophercloud package in your code: -```Go +```go import "github.com/gophercloud/gophercloud" ``` @@ -28,43 +28,79 @@ go mod tidy ### Credentials Because you'll be hitting an API, you will need to retrieve your OpenStack -credentials and either store them as environment variables or in your local Go -files. The first method is recommended because it decouples credential -information from source code, allowing you to push the latter to your version -control system without any security risk. +credentials and either store them in a `clouds.yaml` file, as environment +variables, or in your local Go files. The first method is recommended because +it decouples credential information from source code, allowing you to push the +latter to your version control system without any security risk. You will need to retrieve the following: -* username -* password -* a valid Keystone identity URL +* A valid Keystone identity URL +* Credentials. These can be a username/password combo, a set of Application + Credentials, a pre-generated token, or any other supported authentication + mechanism. For users that have the OpenStack dashboard installed, there's a shortcut. If -you visit the `project/access_and_security` path in Horizon and click on the -"Download OpenStack RC File" button at the top right hand corner, you will -download a bash file that exports all of your access details to environment -variables. To execute the file, run `source admin-openrc.sh` and you will be -prompted for your password. +you visit the `project/api_access` path in Horizon and click on the +"Download OpenStack RC File" button at the top right hand corner, you can +download either a `clouds.yaml` file or an `openrc` bash file that exports all +of your access details to environment variables. To use the `clouds.yaml` file, +place it at `~/.config/openstack/clouds.yaml`. To use the `openrc` file, run +`source openrc` and you will be prompted for your password. ### Authentication -> NOTE: It is now recommended to use the `clientconfig` package found at -> https://github.com/gophercloud/utils/tree/master/openstack/clientconfig -> for all authentication purposes. -> -> The below documentation is still relevant. clientconfig simply implements -> the below and presents it in an easier and more flexible way. - Once you have access to your credentials, you can begin plugging them into -Gophercloud. 
The next step is authentication, and this is handled by a base
-"Provider" struct. To get one, you can either pass in your credentials
-explicitly, or tell Gophercloud to use environment variables:
+Gophercloud. The next step is authentication, which is handled by a base
+"Provider" struct. There are a number of ways to construct such a struct.
+
+**With `gophercloud/utils`**
+
+The [github.com/gophercloud/utils](https://github.com/gophercloud/utils)
+library provides the `clientconfig` package to simplify authentication. It
+provides additional functionality, such as the ability to read `clouds.yaml`
+files. To generate a "Provider" struct using the `clientconfig` package:

```go
import (
-	"github.com/gophercloud/gophercloud"
-	"github.com/gophercloud/gophercloud/openstack"
-	"github.com/gophercloud/gophercloud/openstack/utils"
+	"github.com/gophercloud/utils/openstack/clientconfig"
+)
+
+// You can also skip configuring this and instead set 'OS_CLOUD' in your
+// environment
+opts := new(clientconfig.ClientOpts)
+opts.Cloud = "devstack-admin"
+
+provider, err := clientconfig.AuthenticatedClient(opts)
+```
+
+A provider client is a top-level client that all of your OpenStack service
+clients derive from. The provider contains all of the authentication details
+that allow your Go code to access the API - such as the base URL and token ID.
+
+Once we have a base Provider, we inject it as a dependency into each OpenStack
+service. For example, in order to work with the Compute API, we need a Compute
+service client. This can be created like so:
+
+```go
+client, err := clientconfig.NewServiceClient("compute", opts)
+```
+
+**Without `gophercloud/utils`**
+
+> *Note*
+> gophercloud doesn't provide support for `clouds.yaml` files, so you need to
+> implement this functionality yourself if you don't wish to use
+> `gophercloud/utils`.
+
+You can also generate a "Provider" struct without using the `clientconfig`
+package from `gophercloud/utils`. To do this, you can either pass in your
+credentials explicitly or tell Gophercloud to use environment variables:
+
+```go
+import (
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/openstack"
)

// Option 1: Pass in the values yourself
@@ -85,34 +121,29 @@ Once you have the `opts` variable, you can pass it in and get back a
provider, err := openstack.AuthenticatedClient(opts)
```

-The `ProviderClient` is the top-level client that all of your OpenStack services
-derive from. The provider contains all of the authentication details that allow
-your Go code to access the API - such as the base URL and token ID.
-
-### Provision a server
-
-Once we have a base Provider, we inject it as a dependency into each OpenStack
-service. In order to work with the Compute API, we need a Compute service
-client; which can be created like so:
+As above, you can then use this provider client to generate a service client
+for a particular OpenStack service:

```go
client, err := openstack.NewComputeV2(provider, gophercloud.EndpointOpts{
-	Region: os.Getenv("OS_REGION_NAME"),
+	Region: os.Getenv("OS_REGION_NAME"),
})
```

-We then use this `client` for any Compute API operation we want. In our case,
-we want to provision a new server - so we invoke the `Create` method and pass
-in the flavor ID (hardware specification) and image ID (operating system) we're
-interested in:
+### Provision a server
+
+We can use the Compute service client generated above for any Compute API
+operation we want. In our case, we want to provision a new server.
To do this, +we invoke the `Create` method and pass in the flavor ID (hardware +specification) and image ID (operating system) we're interested in: ```go import "github.com/gophercloud/gophercloud/openstack/compute/v2/servers" server, err := servers.Create(client, servers.CreateOpts{ - Name: "My new server!", - FlavorRef: "flavor_id", - ImageRef: "image_id", + Name: "My new server!", + FlavorRef: "flavor_id", + ImageRef: "image_id", }).Extract() ``` @@ -130,6 +161,8 @@ Gophercloud versioning follows [semver](https://semver.org/spec/v2.0.0.html). Before `v1.0.0`, there were no guarantees. Starting with v1, there will be no breaking changes within a major release. +See the [Release instructions](./RELEASE.md). + ## Contributing See the [contributing guide](./.github/CONTRIBUTING.md). diff --git a/vendor/github.com/gophercloud/gophercloud/RELEASE.md b/vendor/github.com/gophercloud/gophercloud/RELEASE.md new file mode 100644 index 000000000..6490ed887 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/RELEASE.md @@ -0,0 +1,79 @@ +# Gophercloud release + +## Contributions + +### The semver label + +Gophercloud follows [semver](https://semver.org/). + +Each Pull request must have a label indicating its impact on the API: +* `semver:patch` for changes that don't impact the API +* `semver:minor` for changes that impact the API in a backwards-compatible fashion +* `semver:major` for changes that introduce a breaking change in the API + +Automation prevents merges if the label is not present. + +### Metadata + +The release notes for a given release are generated based on the PR title: make +sure that the PR title is descriptive. + +## Release of a new version + +Requirements: +* [`gh`](https://github.com/cli/cli) +* [`jq`](https://stedolan.github.io/jq/) + +### Step 1: Collect all PRs since the last release + +Supposing that the base release is `v1.2.0`: + +``` +for commit_sha in $(git log --pretty=format:"%h" v1.2.0..HEAD); do + gh pr list --search "$commit_sha" --state merged --json number,title,labels,url +done | jq '.[]' | jq --slurp 'unique_by(.number)' > prs.json +``` + +This JSON file will be useful later. + +### Step 2: Determine the version + +In order to determine the version of the next release, we first check that no incompatible change is detected in the code that has been merged since the last release. This step can be automated with the `gorelease` tool: + +```shell +gorelease | grep -B2 -A0 '^## incompatible changes' +``` + +If the tool detects incompatible changes outside a `testing` package, then the bump is major. + +Next, we check all PRs merged since the last release using the file `prs.json` that we generated above. + +* Find PRs labeled with `semver:major`: `jq 'map(select(contains({labels: [{name: "semver:major"}]}) ))' prs.json` +* Find PRs labeled with `semver:minor`: `jq 'map(select(contains({labels: [{name: "semver:minor"}]}) ))' prs.json` + +The highest semver descriptor determines the release bump. + +### Step 3: Release notes and version string + +Once all PRs have a sensible title, generate the release notes: + +```shell +jq -r '.[] | "* [GH-\(.number)](\(.url)) \(.title)"' prs.json +``` + +Add that to the top of `CHANGELOG.md`. Also add any information that could be useful to consumers willing to upgrade. + +**Set the new version string in the `DefaultUserAgent` constant in `provider_client.go`.** + +Create a PR with these two changes. The new PR should be labeled with the semver label corresponding to the type of bump. 
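+
+For illustration, the version bump is a one-line change to the constant in
+`provider_client.go` (a sketch; the actual version string depends on the
+release being cut):
+
+```go
+const (
+	// DefaultUserAgent is the default User-Agent string set in the request header.
+	DefaultUserAgent = "gophercloud/v1.3.0"
+)
+```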
+
+### Step 4: Git tag and Github release
+
+The Go mod system relies on Git tags. In order to simulate a review mechanism, we rely on Github to create the tag through the Release mechanism.
+
+* [Prepare a new release](https://github.com/gophercloud/gophercloud/releases/new)
+* Let Github generate the release notes by clicking on Generate release notes
+* Click on **Save draft**
+* Ask another Gophercloud maintainer to review and publish the release
+
+_Note: never change a release or force-push a tag. Tags are almost immediately picked up by the Go proxy and changing the commit it points to will be detected as tampering._
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/auth_env.go b/vendor/github.com/gophercloud/gophercloud/openstack/auth_env.go
index c801de555..7c6d06f0c 100644
--- a/vendor/github.com/gophercloud/gophercloud/openstack/auth_env.go
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/auth_env.go
@@ -46,6 +46,7 @@ func AuthOptionsFromEnv() (gophercloud.AuthOptions, error) {
	applicationCredentialID := os.Getenv("OS_APPLICATION_CREDENTIAL_ID")
	applicationCredentialName := os.Getenv("OS_APPLICATION_CREDENTIAL_NAME")
	applicationCredentialSecret := os.Getenv("OS_APPLICATION_CREDENTIAL_SECRET")
+	systemScope := os.Getenv("OS_SYSTEM_SCOPE")

	// If OS_PROJECT_ID is set, overwrite tenantID with the value.
	if v := os.Getenv("OS_PROJECT_ID"); v != "" {
@@ -109,6 +110,13 @@ func AuthOptionsFromEnv() (gophercloud.AuthOptions, error) {
		}
	}

+	var scope *gophercloud.AuthScope
+	if systemScope == "all" {
+		scope = &gophercloud.AuthScope{
+			System: true,
+		}
+	}
+
	ao := gophercloud.AuthOptions{
		IdentityEndpoint: authURL,
		UserID: userID,
@@ -122,6 +130,7 @@ func AuthOptionsFromEnv() (gophercloud.AuthOptions, error) {
		ApplicationCredentialID: applicationCredentialID,
		ApplicationCredentialName: applicationCredentialName,
		ApplicationCredentialSecret: applicationCredentialSecret,
+		Scope: scope,
	}

	return ao, nil
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/client.go b/vendor/github.com/gophercloud/gophercloud/openstack/client.go
index 655a9f6b9..81c907c35 100644
--- a/vendor/github.com/gophercloud/gophercloud/openstack/client.go
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/client.go
@@ -369,7 +369,7 @@ func NewBareMetalV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointO
// NewBareMetalIntrospectionV1 creates a ServiceClient that may be used with the v1
// bare metal introspection package.
func NewBareMetalIntrospectionV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) {
-	return initClientOpts(client, eo, "baremetal-inspector")
+	return initClientOpts(client, eo, "baremetal-introspection")
}

// NewObjectStorageV1 creates a ServiceClient that may be used with the v1
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/floatingips/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/floatingips/results.go
index da4e9da0e..4009b8336 100644
--- a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/floatingips/results.go
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/floatingips/results.go
@@ -56,6 +56,10 @@ type FloatingIPPage struct {
// IsEmpty determines whether or not a FloatingIPPage is empty.
func (page FloatingIPPage) IsEmpty() (bool, error) {
+	if page.StatusCode == 204 {
+		return true, nil
+	}
+
	va, err := ExtractFloatingIPs(page)
	return len(va) == 0, err
}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/hypervisors/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/hypervisors/results.go
index 6c172b4ca..26113f348 100644
--- a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/hypervisors/results.go
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/hypervisors/results.go
@@ -235,6 +235,10 @@ type HypervisorPage struct {
// IsEmpty determines whether or not a HypervisorPage is empty.
func (page HypervisorPage) IsEmpty() (bool, error) {
+	if page.StatusCode == 204 {
+		return true, nil
+	}
+
	va, err := ExtractHypervisors(page)
	return len(va) == 0, err
}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/doc.go
index 3b0ab7836..bab72c152 100644
--- a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/doc.go
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/doc.go
@@ -11,6 +11,26 @@ Example to List Servers
		AllTenants: true,
	}

+	allPages, err := servers.ListSimple(computeClient, listOpts).AllPages()
+	if err != nil {
+		panic(err)
+	}
+
+	allServers, err := servers.ExtractServers(allPages)
+	if err != nil {
+		panic(err)
+	}
+
+	for _, server := range allServers {
+		fmt.Printf("%+v\n", server)
+	}
+
+Example to List Detail Servers
+
+	listOpts := servers.ListOpts{
+		AllTenants: true,
+	}
+
	allPages, err := servers.List(computeClient, listOpts).AllPages()
	if err != nil {
		panic(err)
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/requests.go
index 656e2de4d..d6a903aab 100644
--- a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/requests.go
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/requests.go
@@ -94,7 +94,22 @@ func (opts ListOpts) ToServerListQuery() (string, error) {
	return q.String(), err
}

-// List makes a request against the API to list servers accessible to you.
+// ListSimple makes a request against the API to list servers accessible to you.
+func ListSimple(client *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager {
+	url := listURL(client)
+	if opts != nil {
+		query, err := opts.ToServerListQuery()
+		if err != nil {
+			return pagination.Pager{Err: err}
+		}
+		url += query
+	}
+	return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page {
+		return ServerPage{pagination.LinkedPageBase{PageResult: r}}
+	})
+}
+
+// List makes a request against the API to list server details accessible to you.
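+// A minimal sketch contrasting the two (assuming an authenticated
+// computeClient, as in the doc.go examples above):
+//
+//	summary := servers.ListSimple(computeClient, servers.ListOpts{})  // summary listing
+//	detailed := servers.List(computeClient, servers.ListOpts{})       // detailed listing
+//	allPages, err := detailed.AllPages()
+//	if err == nil {
+//		allServers, _ := servers.ExtractServers(allPages)
+//		fmt.Printf("listed %d servers in detail\n", len(allServers))
+//	}
+//	_ = summary // page through the summary listing the same way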
func List(client *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager {
	url := listDetailURL(client)
	if opts != nil {
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/results.go
index b92c66678..2c22a3c4d 100644
--- a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/results.go
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/results.go
@@ -277,6 +277,10 @@ type ServerPage struct {
// IsEmpty returns true if a page contains no Server results.
func (r ServerPage) IsEmpty() (bool, error) {
+	if r.StatusCode == 204 {
+		return true, nil
+	}
+
	s, err := ExtractServers(r)
	return len(s) == 0, err
}
@@ -385,6 +389,10 @@ type AddressPage struct {
// IsEmpty returns true if an AddressPage contains no networks.
func (r AddressPage) IsEmpty() (bool, error) {
+	if r.StatusCode == 204 {
+		return true, nil
+	}
+
	addresses, err := ExtractAddresses(r)
	return len(addresses) == 0, err
}
@@ -410,6 +418,10 @@ type NetworkAddressPage struct {
// IsEmpty returns true if a NetworkAddressPage contains no addresses.
func (r NetworkAddressPage) IsEmpty() (bool, error) {
+	if r.StatusCode == 204 {
+		return true, nil
+	}
+
	addresses, err := ExtractNetworkAddresses(r)
	return len(addresses) == 0, err
}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/results.go
index bb6c2c6b0..2daff9840 100644
--- a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/results.go
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/results.go
@@ -27,6 +27,10 @@ type TenantPage struct {
// IsEmpty determines whether or not a page of Tenants contains any results.
func (r TenantPage) IsEmpty() (bool, error) {
+	if r.StatusCode == 204 {
+		return true, nil
+	}
+
	tenants, err := ExtractTenants(r)
	return len(tenants) == 0, err
}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/extensions/oauth1/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/extensions/oauth1/results.go
index a67f9381d..2a3706162 100644
--- a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/extensions/oauth1/results.go
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/extensions/oauth1/results.go
@@ -52,6 +52,10 @@ type GetConsumerResult struct {
// IsEmpty determines whether or not a page of Consumers contains any results.
func (c ConsumersPage) IsEmpty() (bool, error) {
+	if c.StatusCode == 204 {
+		return true, nil
+	}
+
	consumers, err := ExtractConsumers(c)
	return len(consumers) == 0, err
}
@@ -208,6 +212,10 @@ type AccessTokensPage struct {
// IsEmpty determines whether or not an AccessTokensPage contains any results.
func (r AccessTokensPage) IsEmpty() (bool, error) {
+	if r.StatusCode == 204 {
+		return true, nil
+	}
+
	accessTokens, err := ExtractAccessTokens(r)
	return len(accessTokens) == 0, err
}
@@ -251,6 +259,10 @@ type AccessTokenRolesPage struct {
// IsEmpty determines whether or not an AccessTokenRolesPage contains any results.
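+// The StatusCode checks added throughout this change implement GH-2540: a 204
+// No Content reply has no body to extract, so the page is reported empty
+// immediately. A sketch of the resulting behavior:
+//
+//	empty, err := page.IsEmpty() // a 204 response yields (true, nil)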
func (r AccessTokenRolesPage) IsEmpty() (bool, error) { + if r.StatusCode == 204 { + return true, nil + } + accessTokenRoles, err := ExtractAccessTokenRoles(r) return len(accessTokenRoles) == 0, err } diff --git a/vendor/github.com/gophercloud/gophercloud/pagination/pager.go b/vendor/github.com/gophercloud/gophercloud/pagination/pager.go index 42c0b2dbe..1dec2703e 100644 --- a/vendor/github.com/gophercloud/gophercloud/pagination/pager.go +++ b/vendor/github.com/gophercloud/gophercloud/pagination/pager.go @@ -134,6 +134,9 @@ func (p Pager) EachPage(handler func(Page) (bool, error)) error { // AllPages returns all the pages from a `List` operation in a single page, // allowing the user to retrieve all the pages at once. func (p Pager) AllPages() (Page, error) { + if p.Err != nil { + return nil, p.Err + } // pagesSlice holds all the pages until they get converted into as Page Body. var pagesSlice []interface{} // body will contain the final concatenated Page body. diff --git a/vendor/github.com/gophercloud/gophercloud/provider_client.go b/vendor/github.com/gophercloud/gophercloud/provider_client.go index e6e80258e..c603d6dbe 100644 --- a/vendor/github.com/gophercloud/gophercloud/provider_client.go +++ b/vendor/github.com/gophercloud/gophercloud/provider_client.go @@ -14,7 +14,7 @@ import ( // DefaultUserAgent is the default User-Agent string set in the request header. const ( - DefaultUserAgent = "gophercloud/v1.2.0" + DefaultUserAgent = "gophercloud/v1.3.0" DefaultMaxBackoffRetries = 60 ) diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/LICENSE.txt b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/LICENSE similarity index 100% rename from vendor/github.com/grpc-ecosystem/grpc-gateway/v2/LICENSE.txt rename to vendor/github.com/grpc-ecosystem/grpc-gateway/v2/LICENSE diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/BUILD.bazel b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/BUILD.bazel index a8789f170..78d7c9f5c 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/BUILD.bazel +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/BUILD.bazel @@ -26,7 +26,7 @@ go_library( deps = [ "//internal/httprule", "//utilities", - "@go_googleapis//google/api:httpbody_go_proto", + "@org_golang_google_genproto_googleapis_api//httpbody", "@org_golang_google_grpc//codes", "@org_golang_google_grpc//grpclog", "@org_golang_google_grpc//health/grpc_health_v1", @@ -70,9 +70,9 @@ go_test( "//utilities", "@com_github_google_go_cmp//cmp", "@com_github_google_go_cmp//cmp/cmpopts", - "@go_googleapis//google/api:httpbody_go_proto", - "@go_googleapis//google/rpc:errdetails_go_proto", - "@go_googleapis//google/rpc:status_go_proto", + "@org_golang_google_genproto_googleapis_api//httpbody", + "@org_golang_google_genproto_googleapis_rpc//errdetails", + "@org_golang_google_genproto_googleapis_rpc//status", "@org_golang_google_grpc//:go_default_library", "@org_golang_google_grpc//codes", "@org_golang_google_grpc//health/grpc_health_v1", diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_httpbodyproto.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_httpbodyproto.go index b86135c88..6de2e220c 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_httpbodyproto.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_httpbodyproto.go @@ -26,7 +26,7 @@ func (h *HTTPBodyMarshaler) ContentType(v interface{}) string { // google.api.HttpBody message, otherwise it falls back to 
the default Marshaler.
func (h *HTTPBodyMarshaler) Marshal(v interface{}) ([]byte, error) {
	if httpBody, ok := v.(*httpbody.HttpBody); ok {
-		return httpBody.Data, nil
+		return httpBody.GetData(), nil
	}
	return h.Marshaler.Marshal(v)
}
diff --git a/vendor/github.com/hashicorp/consul/api/agent.go b/vendor/github.com/hashicorp/consul/api/agent.go
index f45929cb5..b09ed1c1c 100644
--- a/vendor/github.com/hashicorp/consul/api/agent.go
+++ b/vendor/github.com/hashicorp/consul/api/agent.go
@@ -274,6 +274,8 @@ type MembersOpts struct {
	// Segment is the LAN segment to show members for. Setting this to the
	// AllSegments value above will show members in all segments.
	Segment string
+
+	Filter string
}

// AgentServiceRegistration is used to register a new service
@@ -790,6 +792,10 @@ func (a *Agent) MembersOpts(opts MembersOpts) ([]*AgentMember, error) {
		r.params.Set("wan", "1")
	}

+	if opts.Filter != "" {
+		r.params.Set("filter", opts.Filter)
+	}
+
	_, resp, err := a.c.doRequest(r)
	if err != nil {
		return nil, err
diff --git a/vendor/github.com/hashicorp/consul/api/api.go b/vendor/github.com/hashicorp/consul/api/api.go
index 18bb3479c..f62c0c5a1 100644
--- a/vendor/github.com/hashicorp/consul/api/api.go
+++ b/vendor/github.com/hashicorp/consul/api/api.go
@@ -1005,8 +1005,10 @@ func (r *request) toHTTP() (*http.Request, error) {
	// this is required since go started validating req.host in 1.20.6 and 1.19.11.
	// prior to that they would strip out the slashes for you. They removed that
	// behavior and added more strict validation as part of a CVE.
-	// https://github.com/golang/go/issues/60374
-	// the hope is that
+	// This issue is being tracked by the Go team:
+	// https://github.com/golang/go/issues/61431
+	// If there is a resolution in this issue, we will remove this code.
+	// In the meantime, this is the accepted workaround.
	if strings.HasPrefix(r.url.Host, "/") {
		r.url.Host = "localhost"
	}
diff --git a/vendor/github.com/hashicorp/nomad/api/acl.go b/vendor/github.com/hashicorp/nomad/api/acl.go
index fc4a5c0c3..78d47895e 100644
--- a/vendor/github.com/hashicorp/nomad/api/acl.go
+++ b/vendor/github.com/hashicorp/nomad/api/acl.go
@@ -1,8 +1,12 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
package api

import (
	"encoding/json"
	"errors"
+	"fmt"
	"time"
)

@@ -443,18 +447,33 @@ func (a *ACLBindingRules) Get(bindingRuleID string, q *QueryOptions) (*ACLBindin
}

// ACLOIDC is used to query the ACL OIDC endpoints.
+//
+// Deprecated: ACLOIDC is deprecated, use ACLAuth instead.
type ACLOIDC struct {
	client *Client
+	ACLAuth
}

// ACLOIDC returns a new handle on the ACL auth-methods API client.
+//
+// Deprecated: c.ACLOIDC() is deprecated, use c.ACLAuth() instead.
func (c *Client) ACLOIDC() *ACLOIDC {
	return &ACLOIDC{client: c}
}

+// ACLAuth is used to query the ACL auth endpoints.
+type ACLAuth struct {
+	client *Client
+}
+
+// ACLAuth returns a new handle on the ACL auth-methods API client.
+func (c *Client) ACLAuth() *ACLAuth {
+	return &ACLAuth{client: c}
+}
+
// GetAuthURL generates the OIDC provider authentication URL. This URL should
// be visited in order to sign in to the provider.
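+// A hedged migration sketch for the deprecations above (client is an assumed
+// *api.Client and req an existing *api.ACLOIDCAuthURLRequest):
+//
+//	// before: urlResp, _, err := client.ACLOIDC().GetAuthURL(req, nil)
+//	urlResp, _, err := client.ACLAuth().GetAuthURL(req, nil)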
-func (a *ACLOIDC) GetAuthURL(req *ACLOIDCAuthURLRequest, q *WriteOptions) (*ACLOIDCAuthURLResponse, *WriteMeta, error) { +func (a *ACLAuth) GetAuthURL(req *ACLOIDCAuthURLRequest, q *WriteOptions) (*ACLOIDCAuthURLResponse, *WriteMeta, error) { var resp ACLOIDCAuthURLResponse wm, err := a.client.put("/v1/acl/oidc/auth-url", req, &resp, q) if err != nil { @@ -465,7 +484,7 @@ func (a *ACLOIDC) GetAuthURL(req *ACLOIDCAuthURLRequest, q *WriteOptions) (*ACLO // CompleteAuth exchanges the OIDC provider token for a Nomad token with the // appropriate claims attached. -func (a *ACLOIDC) CompleteAuth(req *ACLOIDCCompleteAuthRequest, q *WriteOptions) (*ACLToken, *WriteMeta, error) { +func (a *ACLAuth) CompleteAuth(req *ACLOIDCCompleteAuthRequest, q *WriteOptions) (*ACLToken, *WriteMeta, error) { var resp ACLToken wm, err := a.client.put("/v1/acl/oidc/complete-auth", req, &resp, q) if err != nil { @@ -474,6 +493,17 @@ func (a *ACLOIDC) CompleteAuth(req *ACLOIDCCompleteAuthRequest, q *WriteOptions) return &resp, wm, nil } +// Login exchanges the third party token for a Nomad token with the appropriate +// claims attached. +func (a *ACLAuth) Login(req *ACLLoginRequest, q *WriteOptions) (*ACLToken, *WriteMeta, error) { + var resp ACLToken + wm, err := a.client.put("/v1/acl/login", req, &resp, q) + if err != nil { + return nil, nil, err + } + return &resp, wm, nil +} + // ACLPolicyListStub is used to for listing ACL policies type ACLPolicyListStub struct { Name string @@ -740,20 +770,6 @@ type ACLAuthMethod struct { ModifyIndex uint64 } -// ACLAuthMethodConfig is used to store configuration of an auth method. -type ACLAuthMethodConfig struct { - OIDCDiscoveryURL string - OIDCClientID string - OIDCClientSecret string - OIDCScopes []string - BoundAudiences []string - AllowedRedirectURIs []string - DiscoveryCaPem []string - SigningAlgs []string - ClaimMappings map[string]string - ListClaimMappings map[string]string -} - // MarshalJSON implements the json.Marshaler interface and allows // ACLAuthMethod.MaxTokenTTL to be marshaled correctly. func (m *ACLAuthMethod) MarshalJSON() ([]byte, error) { @@ -793,6 +809,138 @@ func (m *ACLAuthMethod) UnmarshalJSON(data []byte) error { return nil } +// ACLAuthMethodConfig is used to store configuration of an auth method. +type ACLAuthMethodConfig struct { + // A list of PEM-encoded public keys to use to authenticate signatures + // locally + JWTValidationPubKeys []string + // JSON Web Key Sets url for authenticating signatures + JWKSURL string + // The OIDC Discovery URL, without any .well-known component (base path) + OIDCDiscoveryURL string + // The OAuth Client ID configured with the OIDC provider + OIDCClientID string + // The OAuth Client Secret configured with the OIDC provider + OIDCClientSecret string + // List of OIDC scopes + OIDCScopes []string + // List of auth claims that are valid for login + BoundAudiences []string + // The value against which to match the iss claim in a JWT + BoundIssuer []string + // A list of allowed values for redirect_uri + AllowedRedirectURIs []string + // PEM encoded CA certs for use by the TLS client used to talk with the + // OIDC Discovery URL. 
+ DiscoveryCaPem []string + // PEM encoded CA cert for use by the TLS client used to talk with the JWKS + // URL + JWKSCACert string + // A list of supported signing algorithms + SigningAlgs []string + // Duration in seconds of leeway when validating expiration of a token to + // account for clock skew + ExpirationLeeway time.Duration + // Duration in seconds of leeway when validating not before values of a + // token to account for clock skew. + NotBeforeLeeway time.Duration + // Duration in seconds of leeway when validating all claims to account for + // clock skew. + ClockSkewLeeway time.Duration + // Mappings of claims (key) that will be copied to a metadata field + // (value). + ClaimMappings map[string]string + ListClaimMappings map[string]string +} + +// MarshalJSON implements the json.Marshaler interface and allows +// time.Duration fields to be marshaled correctly. +func (c *ACLAuthMethodConfig) MarshalJSON() ([]byte, error) { + type Alias ACLAuthMethodConfig + exported := &struct { + ExpirationLeeway string + NotBeforeLeeway string + ClockSkewLeeway string + *Alias + }{ + ExpirationLeeway: c.ExpirationLeeway.String(), + NotBeforeLeeway: c.NotBeforeLeeway.String(), + ClockSkewLeeway: c.ClockSkewLeeway.String(), + Alias: (*Alias)(c), + } + if c.ExpirationLeeway == 0 { + exported.ExpirationLeeway = "" + } + if c.NotBeforeLeeway == 0 { + exported.NotBeforeLeeway = "" + } + if c.ClockSkewLeeway == 0 { + exported.ClockSkewLeeway = "" + } + return json.Marshal(exported) +} + +// UnmarshalJSON implements the json.Unmarshaler interface and allows +// time.Duration fields to be unmarshalled correctly. +func (c *ACLAuthMethodConfig) UnmarshalJSON(data []byte) error { + type Alias ACLAuthMethodConfig + aux := &struct { + ExpirationLeeway any + NotBeforeLeeway any + ClockSkewLeeway any + *Alias + }{ + Alias: (*Alias)(c), + } + if err := json.Unmarshal(data, &aux); err != nil { + return err + } + var err error + if aux.ExpirationLeeway != nil { + switch v := aux.ExpirationLeeway.(type) { + case string: + if v != "" { + if c.ExpirationLeeway, err = time.ParseDuration(v); err != nil { + return err + } + } + case float64: + c.ExpirationLeeway = time.Duration(v) + default: + return fmt.Errorf("unexpected ExpirationLeeway type: %v", v) + } + } + if aux.NotBeforeLeeway != nil { + switch v := aux.NotBeforeLeeway.(type) { + case string: + if v != "" { + if c.NotBeforeLeeway, err = time.ParseDuration(v); err != nil { + return err + } + } + case float64: + c.NotBeforeLeeway = time.Duration(v) + default: + return fmt.Errorf("unexpected NotBeforeLeeway type: %v", v) + } + } + if aux.ClockSkewLeeway != nil { + switch v := aux.ClockSkewLeeway.(type) { + case string: + if v != "" { + if c.ClockSkewLeeway, err = time.ParseDuration(v); err != nil { + return err + } + } + case float64: + c.ClockSkewLeeway = time.Duration(v) + default: + return fmt.Errorf("unexpected ClockSkewLeeway type: %v", v) + } + } + return nil +} + // ACLAuthMethodListStub is the stub object returned when performing a listing // of ACL auth-methods. It is intentionally minimal due to the unauthenticated // nature of the list endpoint. @@ -818,6 +966,10 @@ const ( // ACLAuthMethodTypeOIDC the ACLAuthMethod.Type and represents an // auth-method which uses the OIDC protocol. ACLAuthMethodTypeOIDC = "OIDC" + + // ACLAuthMethodTypeJWT the ACLAuthMethod.Type and represents an auth-method + // which uses the JWT type. 
+ ACLAuthMethodTypeJWT = "JWT" ) // ACLBindingRule contains a direct relation to an ACLAuthMethod and represents @@ -947,3 +1099,13 @@ type ACLOIDCCompleteAuthRequest struct { // required parameter. RedirectURI string } + +// ACLLoginRequest is the request object to begin auth with an external bearer +// token provider. +type ACLLoginRequest struct { + // AuthMethodName is the name of the auth method being used to login. This + // is a required parameter. + AuthMethodName string + // LoginToken is the token used to login. This is a required parameter. + LoginToken string +} diff --git a/vendor/github.com/hashicorp/nomad/api/agent.go b/vendor/github.com/hashicorp/nomad/api/agent.go index 61102e563..521215803 100644 --- a/vendor/github.com/hashicorp/nomad/api/agent.go +++ b/vendor/github.com/hashicorp/nomad/api/agent.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package api import ( diff --git a/vendor/github.com/hashicorp/nomad/api/allocations.go b/vendor/github.com/hashicorp/nomad/api/allocations.go index 9bd2d7aa6..0159a9e12 100644 --- a/vendor/github.com/hashicorp/nomad/api/allocations.go +++ b/vendor/github.com/hashicorp/nomad/api/allocations.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package api import ( @@ -148,9 +151,6 @@ func (a *Allocations) GC(alloc *Allocation, q *QueryOptions) error { // Note: for cluster topologies where API consumers don't have network access to // Nomad clients, set api.ClientConnTimeout to a small value (ex 1ms) to avoid // long pauses on this API call. -// -// BREAKING: This method will have the following signature in 1.6.0 -// func (a *Allocations) Restart(allocID string, taskName string, allTasks bool, w *WriteOptions) (*WriteMeta, error) { func (a *Allocations) Restart(alloc *Allocation, taskName string, q *QueryOptions) error { req := AllocationRestartRequest{ TaskName: taskName, @@ -223,9 +223,6 @@ type AllocStopResponse struct { // Note: for cluster topologies where API consumers don't have network access to // Nomad clients, set api.ClientConnTimeout to a small value (ex 1ms) to avoid // long pauses on this API call. -// -// BREAKING: This method will have the following signature in 1.6.0 -// func (a *Allocations) Signal(allocID string, task string, signal string, w *WriteOptions) (*WriteMeta, error) { func (a *Allocations) Signal(alloc *Allocation, q *QueryOptions, task, signal string) error { req := AllocSignalRequest{ Signal: signal, diff --git a/vendor/github.com/hashicorp/nomad/api/allocations_exec.go b/vendor/github.com/hashicorp/nomad/api/allocations_exec.go index 25f7955de..5300f5f60 100644 --- a/vendor/github.com/hashicorp/nomad/api/allocations_exec.go +++ b/vendor/github.com/hashicorp/nomad/api/allocations_exec.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package api import ( diff --git a/vendor/github.com/hashicorp/nomad/api/api.go b/vendor/github.com/hashicorp/nomad/api/api.go index af960745d..1bf97f05f 100644 --- a/vendor/github.com/hashicorp/nomad/api/api.go +++ b/vendor/github.com/hashicorp/nomad/api/api.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package api import ( @@ -937,9 +940,8 @@ func (c *Client) query(endpoint string, out any, q *QueryOptions) (*QueryMeta, e return qm, nil } -// putQuery is used to do a PUT request when doing a read against an endpoint -// and deserialize the response into an interface using standard Nomad -// conventions. 
+// putQuery is used to do a PUT request when doing a "write" to a Client RPC. +// Client RPCs must use QueryOptions to allow setting AllowStale=true. func (c *Client) putQuery(endpoint string, in, out any, q *QueryOptions) (*QueryMeta, error) { r, err := c.newRequest("PUT", endpoint) if err != nil { @@ -969,6 +971,31 @@ func (c *Client) put(endpoint string, in, out any, q *WriteOptions) (*WriteMeta, return c.write(http.MethodPut, endpoint, in, out, q) } +// postQuery is used to do a POST request when doing a "write" to a Client RPC. +// Client RPCs must use QueryOptions to allow setting AllowStale=true. +func (c *Client) postQuery(endpoint string, in, out any, q *QueryOptions) (*QueryMeta, error) { + r, err := c.newRequest("POST", endpoint) + if err != nil { + return nil, err + } + r.setQueryOptions(q) + r.obj = in + rtt, resp, err := requireOK(c.doRequest(r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + if err := decodeBody(resp, out); err != nil { + return nil, err + } + return qm, nil +} + // post is used to do a POST request against an endpoint and // serialize/deserialized using the standard Nomad conventions. func (c *Client) post(endpoint string, in, out any, q *WriteOptions) (*WriteMeta, error) { diff --git a/vendor/github.com/hashicorp/nomad/api/constraint.go b/vendor/github.com/hashicorp/nomad/api/constraint.go index 7213270e5..9628c7cbc 100644 --- a/vendor/github.com/hashicorp/nomad/api/constraint.go +++ b/vendor/github.com/hashicorp/nomad/api/constraint.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package api const ( diff --git a/vendor/github.com/hashicorp/nomad/api/consul.go b/vendor/github.com/hashicorp/nomad/api/consul.go index a70a80984..23451e7e0 100644 --- a/vendor/github.com/hashicorp/nomad/api/consul.go +++ b/vendor/github.com/hashicorp/nomad/api/consul.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package api import ( @@ -61,10 +64,11 @@ func (cc *ConsulConnect) Canonicalize() { // ConsulSidecarService represents a Consul Connect SidecarService jobspec // block. type ConsulSidecarService struct { - Tags []string `hcl:"tags,optional"` - Port string `hcl:"port,optional"` - Proxy *ConsulProxy `hcl:"proxy,block"` - DisableDefaultTCPCheck bool `mapstructure:"disable_default_tcp_check" hcl:"disable_default_tcp_check,optional"` + Tags []string `hcl:"tags,optional"` + Port string `hcl:"port,optional"` + Proxy *ConsulProxy `hcl:"proxy,block"` + DisableDefaultTCPCheck bool `mapstructure:"disable_default_tcp_check" hcl:"disable_default_tcp_check,optional"` + Meta map[string]string `hcl:"meta,block"` } func (css *ConsulSidecarService) Canonicalize() { @@ -76,6 +80,10 @@ func (css *ConsulSidecarService) Canonicalize() { css.Tags = nil } + if len(css.Meta) == 0 { + css.Meta = nil + } + css.Proxy.Canonicalize() } diff --git a/vendor/github.com/hashicorp/nomad/api/contexts/contexts.go b/vendor/github.com/hashicorp/nomad/api/contexts/contexts.go index f189c0de1..2ce523a72 100644 --- a/vendor/github.com/hashicorp/nomad/api/contexts/contexts.go +++ b/vendor/github.com/hashicorp/nomad/api/contexts/contexts.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + // Package contexts provides constants used with the Nomad Search API. 
package contexts diff --git a/vendor/github.com/hashicorp/nomad/api/csi.go b/vendor/github.com/hashicorp/nomad/api/csi.go index 557e4c5c0..8a7a63dca 100644 --- a/vendor/github.com/hashicorp/nomad/api/csi.go +++ b/vendor/github.com/hashicorp/nomad/api/csi.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package api import ( diff --git a/vendor/github.com/hashicorp/nomad/api/deployments.go b/vendor/github.com/hashicorp/nomad/api/deployments.go index 6ca065379..665d38834 100644 --- a/vendor/github.com/hashicorp/nomad/api/deployments.go +++ b/vendor/github.com/hashicorp/nomad/api/deployments.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package api import ( diff --git a/vendor/github.com/hashicorp/nomad/api/evaluations.go b/vendor/github.com/hashicorp/nomad/api/evaluations.go index 4206ffde6..feac278ca 100644 --- a/vendor/github.com/hashicorp/nomad/api/evaluations.go +++ b/vendor/github.com/hashicorp/nomad/api/evaluations.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package api import ( diff --git a/vendor/github.com/hashicorp/nomad/api/event_stream.go b/vendor/github.com/hashicorp/nomad/api/event_stream.go index e81d8ef52..60e08fbe1 100644 --- a/vendor/github.com/hashicorp/nomad/api/event_stream.go +++ b/vendor/github.com/hashicorp/nomad/api/event_stream.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package api import ( diff --git a/vendor/github.com/hashicorp/nomad/api/fs.go b/vendor/github.com/hashicorp/nomad/api/fs.go index 0622aa708..f6b831c30 100644 --- a/vendor/github.com/hashicorp/nomad/api/fs.go +++ b/vendor/github.com/hashicorp/nomad/api/fs.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package api import ( @@ -18,6 +21,14 @@ const ( // and end of a file. OriginStart = "start" OriginEnd = "end" + + // FSLogNameStdout is the name given to the stdout log stream of a task. It + // can be used when calling AllocFS.Logs as the logType parameter. + FSLogNameStdout = "stdout" + + // FSLogNameStderr is the name given to the stderr log stream of a task. It + // can be used when calling AllocFS.Logs as the logType parameter. + FSLogNameStderr = "stderr" ) // AllocFileInfo holds information about a file inside the AllocDir diff --git a/vendor/github.com/hashicorp/nomad/api/ioutil.go b/vendor/github.com/hashicorp/nomad/api/ioutil.go index fe3cce5ac..6e7598153 100644 --- a/vendor/github.com/hashicorp/nomad/api/ioutil.go +++ b/vendor/github.com/hashicorp/nomad/api/ioutil.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package api import ( diff --git a/vendor/github.com/hashicorp/nomad/api/jobs.go b/vendor/github.com/hashicorp/nomad/api/jobs.go index c1490dc4e..64b25c710 100644 --- a/vendor/github.com/hashicorp/nomad/api/jobs.go +++ b/vendor/github.com/hashicorp/nomad/api/jobs.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package api import ( @@ -9,6 +12,7 @@ import ( "time" "github.com/hashicorp/cronexpr" + "golang.org/x/exp/maps" ) const ( @@ -68,6 +72,11 @@ type JobsParseRequest struct { // HCLv1 indicates whether the JobHCL should be parsed with the hcl v1 parser HCLv1 bool `json:"hclv1,omitempty"` + // Variables are HCL2 variables associated with the job. Only works with hcl2. + // + // Interpreted as if it were the content of a variables file. 
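+	//
+	// For example, hypothetical var-file content in HCL2 syntax:
+	//
+	//	datacenter = "dc2"
+	//	replicas   = 3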
+	Variables string
+
	// Canonicalize is a flag as to if the server should return default values
	// for unset fields
	Canonicalize bool
@@ -78,7 +87,7 @@ func (c *Client) Jobs() *Jobs {
	return &Jobs{client: c}
}

-// ParseHCL is used to convert the HCL repesentation of a Job to JSON server side.
+// ParseHCL is used to convert the HCL representation of a Job to JSON server side.
// To parse the HCL client side see package github.com/hashicorp/nomad/jobspec
// Use ParseHCLOpts if you need to customize JobsParseRequest.
func (j *Jobs) ParseHCL(jobHCL string, canonicalize bool) (*Job, error) {
@@ -89,10 +98,8 @@ func (j *Jobs) ParseHCL(jobHCL string, canonicalize bool) (*Job, error) {
	return j.ParseHCLOpts(req)
}

-// ParseHCLOpts is used to convert the HCL representation of a Job to JSON
-// server side. To parse the HCL client side see package
-// github.com/hashicorp/nomad/jobspec.
-// ParseHCL is an alternative convenience API for HCLv2 users.
+// ParseHCLOpts is used to request the server convert the HCL representation of a
+// Job to JSON on our behalf. Accepts HCL1 or HCL2 jobs as input.
func (j *Jobs) ParseHCLOpts(req *JobsParseRequest) (*Job, error) {
	var job Job
	_, err := j.client.put("/v1/jobs/parse", req, &job, nil)
@@ -116,6 +123,7 @@ type RegisterOptions struct {
	PolicyOverride bool
	PreserveCounts bool
	EvalPriority int
+	Submission *JobSubmission
}

// Register is used to register a new job. It returns the ID
@@ -134,9 +142,7 @@ func (j *Jobs) EnforceRegister(job *Job, modifyIndex uint64, q *WriteOptions) (*
// returns the ID of the evaluation, along with any errors encountered.
func (j *Jobs) RegisterOpts(job *Job, opts *RegisterOptions, q *WriteOptions) (*JobRegisterResponse, *WriteMeta, error) {
	// Format the request
-	req := &JobRegisterRequest{
-		Job: job,
-	}
+	req := &JobRegisterRequest{Job: job}
	if opts != nil {
		if opts.EnforceIndex {
			req.EnforceIndex = true
@@ -145,6 +151,7 @@ func (j *Jobs) RegisterOpts(job *Job, opts *RegisterOptions, q *WriteOptions) (*
		req.PolicyOverride = opts.PolicyOverride
		req.PreserveCounts = opts.PreserveCounts
		req.EvalPriority = opts.EvalPriority
+		req.Submission = opts.Submission
	}

	var resp JobRegisterResponse
@@ -252,6 +259,19 @@ func (j *Jobs) Versions(jobID string, diffs bool, q *QueryOptions) ([]*Job, []*J
	return resp.Versions, resp.Diffs, qm, nil
}

+// Submission is used to retrieve the original submitted source of a job given its
+// namespace, jobID, and version number. The original source might not be available,
+// in which case nil is returned with no error.
+func (j *Jobs) Submission(jobID string, version int, q *QueryOptions) (*JobSubmission, *QueryMeta, error) {
+	var sub JobSubmission
+	s := fmt.Sprintf("/v1/job/%s/submission?version=%d", url.PathEscape(jobID), version)
+	qm, err := j.client.query(s, &sub, q)
+	if err != nil {
+		return nil, nil, err
+	}
+	return &sub, qm, nil
+}
+
// Allocations is used to return the allocs for a given job ID.
func (j *Jobs) Allocations(jobID string, allAllocs bool, q *QueryOptions) ([]*AllocationListStub, *QueryMeta, error) {
	var resp []*AllocationListStub
@@ -863,6 +883,51 @@ type ParameterizedJobConfig struct {
	MetaOptional []string `mapstructure:"meta_optional" hcl:"meta_optional,optional"`
}

+// JobSubmission is used to hold information about the original content of a job
+// specification being submitted to Nomad.
+//
+// At any time a JobSubmission may be nil, indicating no information is known about
+// the job submission.
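+//
+// A minimal retrieval sketch using the Submission method above (the job ID and
+// version are hypothetical):
+//
+//	sub, _, err := client.Jobs().Submission("example", 0, nil)
+//	if err == nil && sub != nil {
+//		fmt.Println(sub.Format, sub.Source)
+//	}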
+type JobSubmission struct { + // Source contains the original job definition (may be in the format of + // hcl1, hcl2, or json). + Source string + + // Format indicates what the Source content was (hcl1, hcl2, or json). + Format string + + // VariableFlags contains the CLI "-var" flag arguments as submitted with the + // job (hcl2 only). + VariableFlags map[string]string + + // Variables contains the opaque variables configuration as coming from + // a var-file or the WebUI variables input (hcl2 only). + Variables string +} + +func (js *JobSubmission) Canonicalize() { + if js == nil { + return + } + + if len(js.VariableFlags) == 0 { + js.VariableFlags = nil + } +} + +func (js *JobSubmission) Copy() *JobSubmission { + if js == nil { + return nil + } + + return &JobSubmission{ + Source: js.Source, + Format: js.Format, + VariableFlags: maps.Clone(js.VariableFlags), + Variables: js.Variables, + } +} + // Job is used to serialize a job. type Job struct { /* Fields parsed from HCL config */ @@ -1248,7 +1313,9 @@ type JobRevertRequest struct { // JobRegisterRequest is used to update a job type JobRegisterRequest struct { - Job *Job + Submission *JobSubmission + Job *Job + // If EnforceIndex is set then the job will only be registered if the passed // JobModifyIndex matches the current Jobs index. If the index is zero, the // register only occurs if the job is new. @@ -1386,6 +1453,12 @@ type JobVersionsResponse struct { QueryMeta } +// JobSubmissionResponse is used for a job get submission request +type JobSubmissionResponse struct { + Submission *JobSubmission + QueryMeta +} + // JobStabilityRequest is used to marked a job as stable. type JobStabilityRequest struct { // Job to set the stability on diff --git a/vendor/github.com/hashicorp/nomad/api/keyring.go b/vendor/github.com/hashicorp/nomad/api/keyring.go index 40814c2f9..d87d8b720 100644 --- a/vendor/github.com/hashicorp/nomad/api/keyring.go +++ b/vendor/github.com/hashicorp/nomad/api/keyring.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package api import ( diff --git a/vendor/github.com/hashicorp/nomad/api/namespace.go b/vendor/github.com/hashicorp/nomad/api/namespace.go index 1c6cf02eb..80f9fe88e 100644 --- a/vendor/github.com/hashicorp/nomad/api/namespace.go +++ b/vendor/github.com/hashicorp/nomad/api/namespace.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package api import ( diff --git a/vendor/github.com/hashicorp/nomad/api/node_meta.go b/vendor/github.com/hashicorp/nomad/api/node_meta.go index be851206b..fac17f090 100644 --- a/vendor/github.com/hashicorp/nomad/api/node_meta.go +++ b/vendor/github.com/hashicorp/nomad/api/node_meta.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package api // NodeMetaApplyRequest contains the Node meta update. @@ -30,9 +33,9 @@ func (n *Nodes) Meta() *NodeMeta { // Apply dynamic Node metadata updates to a Node. If NodeID is unset then Node // receiving the request is modified. 
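+// A usage sketch of the updated signature below (the request value is elided;
+// AllowStale is a standard QueryOptions field):
+//
+//	resp, err := client.Nodes().Meta().Apply(req, &api.QueryOptions{AllowStale: true})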
-func (n *NodeMeta) Apply(meta *NodeMetaApplyRequest, qo *WriteOptions) (*NodeMetaResponse, error) { +func (n *NodeMeta) Apply(meta *NodeMetaApplyRequest, qo *QueryOptions) (*NodeMetaResponse, error) { var out NodeMetaResponse - _, err := n.client.post("/v1/client/metadata", meta, &out, qo) + _, err := n.client.postQuery("/v1/client/metadata", meta, &out, qo) if err != nil { return nil, err } diff --git a/vendor/github.com/hashicorp/nomad/api/nodes.go b/vendor/github.com/hashicorp/nomad/api/nodes.go index a99143841..dfc5646be 100644 --- a/vendor/github.com/hashicorp/nomad/api/nodes.go +++ b/vendor/github.com/hashicorp/nomad/api/nodes.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package api import ( diff --git a/vendor/github.com/hashicorp/nomad/api/operator.go b/vendor/github.com/hashicorp/nomad/api/operator.go index 579afc3f5..ba8d41cec 100644 --- a/vendor/github.com/hashicorp/nomad/api/operator.go +++ b/vendor/github.com/hashicorp/nomad/api/operator.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package api import ( diff --git a/vendor/github.com/hashicorp/nomad/api/operator_autopilot.go b/vendor/github.com/hashicorp/nomad/api/operator_autopilot.go index ddf4efe67..ddc5de74e 100644 --- a/vendor/github.com/hashicorp/nomad/api/operator_autopilot.go +++ b/vendor/github.com/hashicorp/nomad/api/operator_autopilot.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package api import ( diff --git a/vendor/github.com/hashicorp/nomad/api/operator_metrics.go b/vendor/github.com/hashicorp/nomad/api/operator_metrics.go index b5577a547..ba0de567a 100644 --- a/vendor/github.com/hashicorp/nomad/api/operator_metrics.go +++ b/vendor/github.com/hashicorp/nomad/api/operator_metrics.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package api import ( diff --git a/vendor/github.com/hashicorp/nomad/api/quota.go b/vendor/github.com/hashicorp/nomad/api/quota.go index f19e4c0c9..261d3d1d1 100644 --- a/vendor/github.com/hashicorp/nomad/api/quota.go +++ b/vendor/github.com/hashicorp/nomad/api/quota.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package api import ( diff --git a/vendor/github.com/hashicorp/nomad/api/raw.go b/vendor/github.com/hashicorp/nomad/api/raw.go index df4ca397d..87f8a9c5e 100644 --- a/vendor/github.com/hashicorp/nomad/api/raw.go +++ b/vendor/github.com/hashicorp/nomad/api/raw.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package api import "io" diff --git a/vendor/github.com/hashicorp/nomad/api/recommendations.go b/vendor/github.com/hashicorp/nomad/api/recommendations.go index 901dd8b2d..065fc9f95 100644 --- a/vendor/github.com/hashicorp/nomad/api/recommendations.go +++ b/vendor/github.com/hashicorp/nomad/api/recommendations.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package api // Recommendations is used to query the recommendations endpoints. diff --git a/vendor/github.com/hashicorp/nomad/api/regions.go b/vendor/github.com/hashicorp/nomad/api/regions.go index 98df011d0..76b73c3d2 100644 --- a/vendor/github.com/hashicorp/nomad/api/regions.go +++ b/vendor/github.com/hashicorp/nomad/api/regions.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + package api import "sort" diff --git a/vendor/github.com/hashicorp/nomad/api/resources.go b/vendor/github.com/hashicorp/nomad/api/resources.go index 84d9d1905..d41a46293 100644 --- a/vendor/github.com/hashicorp/nomad/api/resources.go +++ b/vendor/github.com/hashicorp/nomad/api/resources.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package api import ( diff --git a/vendor/github.com/hashicorp/nomad/api/scaling.go b/vendor/github.com/hashicorp/nomad/api/scaling.go index 32259c9f4..e3f49d036 100644 --- a/vendor/github.com/hashicorp/nomad/api/scaling.go +++ b/vendor/github.com/hashicorp/nomad/api/scaling.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package api const ( diff --git a/vendor/github.com/hashicorp/nomad/api/search.go b/vendor/github.com/hashicorp/nomad/api/search.go index 3b020827a..a06ee1646 100644 --- a/vendor/github.com/hashicorp/nomad/api/search.go +++ b/vendor/github.com/hashicorp/nomad/api/search.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package api import ( diff --git a/vendor/github.com/hashicorp/nomad/api/sentinel.go b/vendor/github.com/hashicorp/nomad/api/sentinel.go index 5039abd59..e8a0644ae 100644 --- a/vendor/github.com/hashicorp/nomad/api/sentinel.go +++ b/vendor/github.com/hashicorp/nomad/api/sentinel.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package api import ( diff --git a/vendor/github.com/hashicorp/nomad/api/services.go b/vendor/github.com/hashicorp/nomad/api/services.go index 8d9b4157b..450236547 100644 --- a/vendor/github.com/hashicorp/nomad/api/services.go +++ b/vendor/github.com/hashicorp/nomad/api/services.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package api import ( diff --git a/vendor/github.com/hashicorp/nomad/api/status.go b/vendor/github.com/hashicorp/nomad/api/status.go index da1cb4c02..7a04715b9 100644 --- a/vendor/github.com/hashicorp/nomad/api/status.go +++ b/vendor/github.com/hashicorp/nomad/api/status.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package api // Status is used to query the status-related endpoints. diff --git a/vendor/github.com/hashicorp/nomad/api/system.go b/vendor/github.com/hashicorp/nomad/api/system.go index 3443cff24..b19eaf8b1 100644 --- a/vendor/github.com/hashicorp/nomad/api/system.go +++ b/vendor/github.com/hashicorp/nomad/api/system.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package api // Status is used to query the status-related endpoints. diff --git a/vendor/github.com/hashicorp/nomad/api/tasks.go b/vendor/github.com/hashicorp/nomad/api/tasks.go index ecf357271..e928b3a04 100644 --- a/vendor/github.com/hashicorp/nomad/api/tasks.go +++ b/vendor/github.com/hashicorp/nomad/api/tasks.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package api import ( @@ -976,6 +979,12 @@ func (t *Task) SetLogConfig(l *LogConfig) *Task { return t } +// SetLifecycle is used to set lifecycle config to a task. +func (t *Task) SetLifecycle(l *TaskLifecycle) *Task { + t.Lifecycle = l + return t +} + // TaskState tracks the current state of a task and events that caused state // transitions. 
type TaskState struct { diff --git a/vendor/github.com/hashicorp/nomad/api/utils.go b/vendor/github.com/hashicorp/nomad/api/utils.go index a8e1c02e4..a1cad14cc 100644 --- a/vendor/github.com/hashicorp/nomad/api/utils.go +++ b/vendor/github.com/hashicorp/nomad/api/utils.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package api import ( diff --git a/vendor/github.com/hashicorp/nomad/api/variables.go b/vendor/github.com/hashicorp/nomad/api/variables.go index 4d1c8f392..91dc13cf4 100644 --- a/vendor/github.com/hashicorp/nomad/api/variables.go +++ b/vendor/github.com/hashicorp/nomad/api/variables.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package api import ( diff --git a/vendor/github.com/hetznercloud/hcloud-go/hcloud/architecture.go b/vendor/github.com/hetznercloud/hcloud-go/hcloud/architecture.go new file mode 100644 index 000000000..8e0c1df55 --- /dev/null +++ b/vendor/github.com/hetznercloud/hcloud-go/hcloud/architecture.go @@ -0,0 +1,12 @@ +package hcloud + +// Architecture specifies the architecture of the CPU. +type Architecture string + +const ( + // ArchitectureX86 is the architecture for Intel/AMD x86 CPUs. + ArchitectureX86 Architecture = "x86" + + // ArchitectureARM is the architecture for ARM CPUs. + ArchitectureARM Architecture = "arm" +) diff --git a/vendor/github.com/hetznercloud/hcloud-go/hcloud/hcloud.go b/vendor/github.com/hetznercloud/hcloud-go/hcloud/hcloud.go index ea0820739..7adc8745a 100644 --- a/vendor/github.com/hetznercloud/hcloud-go/hcloud/hcloud.go +++ b/vendor/github.com/hetznercloud/hcloud-go/hcloud/hcloud.go @@ -2,4 +2,4 @@ package hcloud // Version is the library's version following Semantic Versioning. -const Version = "1.41.0" // x-release-please-version +const Version = "1.42.0" // x-release-please-version diff --git a/vendor/github.com/hetznercloud/hcloud-go/hcloud/image.go b/vendor/github.com/hetznercloud/hcloud-go/hcloud/image.go index 6a7189825..04d61b50c 100644 --- a/vendor/github.com/hetznercloud/hcloud-go/hcloud/image.go +++ b/vendor/github.com/hetznercloud/hcloud-go/hcloud/image.go @@ -26,8 +26,9 @@ type Image struct { BoundTo *Server RapidDeploy bool - OSFlavor string - OSVersion string + OSFlavor string + OSVersion string + Architecture Architecture Protection ImageProtection Deprecated time.Time // The zero value denotes the image is not deprecated. @@ -98,6 +99,8 @@ func (c *ImageClient) GetByID(ctx context.Context, id int) (*Image, *Response, e } // GetByName retrieves an image by its name. If the image does not exist, nil is returned. +// +// Deprecated: Use [ImageClient.GetByNameAndArchitecture] instead. func (c *ImageClient) GetByName(ctx context.Context, name string) (*Image, *Response, error) { if name == "" { return nil, nil, nil @@ -109,15 +112,44 @@ func (c *ImageClient) GetByName(ctx context.Context, name string) (*Image, *Resp return images[0], response, err } +// GetByNameAndArchitecture retrieves an image by its name and architecture. If the image does not exist, +// nil is returned. +// In contrast to [ImageClient.Get], this method also returns deprecated images. Depending on your needs you should +// check for this in your calling method. 
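+// A usage sketch (the image name is hypothetical; a zero Deprecated time on
+// the returned Image means it is not deprecated):
+//
+//	img, _, err := c.GetByNameAndArchitecture(ctx, "ubuntu-22.04", hcloud.ArchitectureX86)
+//	if err == nil && img != nil && !img.Deprecated.IsZero() {
+//		// decide here whether a deprecated image is acceptable
+//	}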
+func (c *ImageClient) GetByNameAndArchitecture(ctx context.Context, name string, architecture Architecture) (*Image, *Response, error) { + if name == "" { + return nil, nil, nil + } + images, response, err := c.List(ctx, ImageListOpts{Name: name, Architecture: []Architecture{architecture}, IncludeDeprecated: true}) + if len(images) == 0 { + return nil, response, err + } + return images[0], response, err +} + // Get retrieves an image by its ID if the input can be parsed as an integer, otherwise it // retrieves an image by its name. If the image does not exist, nil is returned. +// +// Deprecated: Use [ImageClient.GetForArchitecture] instead. func (c *ImageClient) Get(ctx context.Context, idOrName string) (*Image, *Response, error) { if id, err := strconv.Atoi(idOrName); err == nil { - return c.GetByID(ctx, int(id)) + return c.GetByID(ctx, id) } return c.GetByName(ctx, idOrName) } +// GetForArchitecture retrieves an image by its ID if the input can be parsed as an integer, otherwise it +// retrieves an image by its name and architecture. If the image does not exist, nil is returned. +// +// In contrast to [ImageClient.Get], this method also returns deprecated images. Depending on your needs you should +// check for this in your calling method. +func (c *ImageClient) GetForArchitecture(ctx context.Context, idOrName string, architecture Architecture) (*Image, *Response, error) { + if id, err := strconv.Atoi(idOrName); err == nil { + return c.GetByID(ctx, id) + } + return c.GetByNameAndArchitecture(ctx, idOrName, architecture) +} + // ImageListOpts specifies options for listing images. type ImageListOpts struct { ListOpts @@ -127,6 +159,7 @@ type ImageListOpts struct { Sort []string Status []ImageStatus IncludeDeprecated bool + Architecture []Architecture } func (l ImageListOpts) values() url.Values { @@ -149,6 +182,9 @@ func (l ImageListOpts) values() url.Values { for _, status := range l.Status { vals.Add("status", string(status)) } + for _, arch := range l.Architecture { + vals.Add("architecture", string(arch)) + } return vals } diff --git a/vendor/github.com/hetznercloud/hcloud-go/hcloud/iso.go b/vendor/github.com/hetznercloud/hcloud-go/hcloud/iso.go index ed2825ba8..d5814cb80 100644 --- a/vendor/github.com/hetznercloud/hcloud-go/hcloud/iso.go +++ b/vendor/github.com/hetznercloud/hcloud-go/hcloud/iso.go @@ -12,11 +12,12 @@ import ( // ISO represents an ISO image in the Hetzner Cloud. type ISO struct { - ID int - Name string - Description string - Type ISOType - Deprecated time.Time + ID int + Name string + Description string + Type ISOType + Architecture *Architecture + Deprecated time.Time } // IsDeprecated returns true if the ISO is deprecated. @@ -83,6 +84,12 @@ type ISOListOpts struct { ListOpts Name string Sort []string + // Architecture filters the ISOs by Architecture. Note that custom ISOs do not have any architecture set, and you + // must use IncludeWildcardArchitecture to include them. + Architecture []Architecture + // IncludeWildcardArchitecture must be set to also return custom ISOs that have no architecture set, if you are + // also setting the Architecture field. 
+ IncludeWildcardArchitecture bool } func (l ISOListOpts) values() url.Values { @@ -93,6 +100,12 @@ func (l ISOListOpts) values() url.Values { for _, sort := range l.Sort { vals.Add("sort", sort) } + for _, arch := range l.Architecture { + vals.Add("architecture", string(arch)) + } + if l.IncludeWildcardArchitecture { + vals.Add("include_architecture_wildcard", "true") + } return vals } diff --git a/vendor/github.com/hetznercloud/hcloud-go/hcloud/schema.go b/vendor/github.com/hetznercloud/hcloud-go/hcloud/schema.go index 091426bc9..b72db88d2 100644 --- a/vendor/github.com/hetznercloud/hcloud-go/hcloud/schema.go +++ b/vendor/github.com/hetznercloud/hcloud-go/hcloud/schema.go @@ -118,13 +118,17 @@ func PrimaryIPFromSchema(s schema.PrimaryIP) *PrimaryIP { // ISOFromSchema converts a schema.ISO to an ISO. func ISOFromSchema(s schema.ISO) *ISO { - return &ISO{ + iso := &ISO{ ID: s.ID, Name: s.Name, Description: s.Description, Type: ISOType(s.Type), Deprecated: s.Deprecated, } + if s.Architecture != nil { + iso.Architecture = Ptr(Architecture(*s.Architecture)) + } + return iso } // LocationFromSchema converts a schema.Location to a Location. @@ -274,14 +278,15 @@ func ServerPrivateNetFromSchema(s schema.ServerPrivateNet) ServerPrivateNet { // ServerTypeFromSchema converts a schema.ServerType to a ServerType. func ServerTypeFromSchema(s schema.ServerType) *ServerType { st := &ServerType{ - ID: s.ID, - Name: s.Name, - Description: s.Description, - Cores: s.Cores, - Memory: s.Memory, - Disk: s.Disk, - StorageType: StorageType(s.StorageType), - CPUType: CPUType(s.CPUType), + ID: s.ID, + Name: s.Name, + Description: s.Description, + Cores: s.Cores, + Memory: s.Memory, + Disk: s.Disk, + StorageType: StorageType(s.StorageType), + CPUType: CPUType(s.CPUType), + Architecture: Architecture(s.Architecture), } for _, price := range s.Prices { st.Pricings = append(st.Pricings, ServerTypeLocationPricing{ @@ -318,14 +323,15 @@ func SSHKeyFromSchema(s schema.SSHKey) *SSHKey { // ImageFromSchema converts a schema.Image to an Image. func ImageFromSchema(s schema.Image) *Image { i := &Image{ - ID: s.ID, - Type: ImageType(s.Type), - Status: ImageStatus(s.Status), - Description: s.Description, - DiskSize: s.DiskSize, - Created: s.Created, - RapidDeploy: s.RapidDeploy, - OSFlavor: s.OSFlavor, + ID: s.ID, + Type: ImageType(s.Type), + Status: ImageStatus(s.Status), + Description: s.Description, + DiskSize: s.DiskSize, + Created: s.Created, + RapidDeploy: s.RapidDeploy, + OSFlavor: s.OSFlavor, + Architecture: Architecture(s.Architecture), Protection: ImageProtection{ Delete: s.Protection.Delete, }, diff --git a/vendor/github.com/hetznercloud/hcloud-go/hcloud/schema/image.go b/vendor/github.com/hetznercloud/hcloud-go/hcloud/schema/image.go index 7a3be8875..76775b131 100644 --- a/vendor/github.com/hetznercloud/hcloud-go/hcloud/schema/image.go +++ b/vendor/github.com/hetznercloud/hcloud-go/hcloud/schema/image.go @@ -4,23 +4,24 @@ import "time" // Image defines the schema of an image. 
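Since `Get`/`GetByName` are now deprecated in favor of the architecture-aware lookups above, a minimal migration sketch follows (the token and image name are hypothetical). Note that, per the new doc comments, these methods also return deprecated images, so the caller must check for deprecation; the zero `Deprecated` time means the image is not deprecated:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/hetznercloud/hcloud-go/hcloud"
)

func main() {
	client := hcloud.NewClient(hcloud.WithToken("my-token")) // hypothetical token

	// GetForArchitecture replaces the deprecated Get: same ID-or-name
	// resolution, but name lookups are scoped to one architecture and
	// deprecated images are included in the result.
	image, _, err := client.Image.GetForArchitecture(
		context.Background(), "ubuntu-22.04", hcloud.ArchitectureX86)
	if err != nil {
		log.Fatal(err)
	}
	if image == nil {
		log.Fatal("image not found")
	}
	// Deprecation is now the caller's responsibility to handle.
	if !image.Deprecated.IsZero() {
		fmt.Println("warning: image deprecated since", image.Deprecated)
	}
	fmt.Println(image.ID, image.Architecture)
}
```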
type Image struct { - ID int `json:"id"` - Status string `json:"status"` - Type string `json:"type"` - Name *string `json:"name"` - Description string `json:"description"` - ImageSize *float32 `json:"image_size"` - DiskSize float32 `json:"disk_size"` - Created time.Time `json:"created"` - CreatedFrom *ImageCreatedFrom `json:"created_from"` - BoundTo *int `json:"bound_to"` - OSFlavor string `json:"os_flavor"` - OSVersion *string `json:"os_version"` - RapidDeploy bool `json:"rapid_deploy"` - Protection ImageProtection `json:"protection"` - Deprecated time.Time `json:"deprecated"` - Deleted time.Time `json:"deleted"` - Labels map[string]string `json:"labels"` + ID int `json:"id"` + Status string `json:"status"` + Type string `json:"type"` + Name *string `json:"name"` + Description string `json:"description"` + ImageSize *float32 `json:"image_size"` + DiskSize float32 `json:"disk_size"` + Created time.Time `json:"created"` + CreatedFrom *ImageCreatedFrom `json:"created_from"` + BoundTo *int `json:"bound_to"` + OSFlavor string `json:"os_flavor"` + OSVersion *string `json:"os_version"` + Architecture string `json:"architecture"` + RapidDeploy bool `json:"rapid_deploy"` + Protection ImageProtection `json:"protection"` + Deprecated time.Time `json:"deprecated"` + Deleted time.Time `json:"deleted"` + Labels map[string]string `json:"labels"` } // ImageProtection represents the protection level of a image. diff --git a/vendor/github.com/hetznercloud/hcloud-go/hcloud/schema/iso.go b/vendor/github.com/hetznercloud/hcloud-go/hcloud/schema/iso.go index e41046896..dfcc4e347 100644 --- a/vendor/github.com/hetznercloud/hcloud-go/hcloud/schema/iso.go +++ b/vendor/github.com/hetznercloud/hcloud-go/hcloud/schema/iso.go @@ -4,11 +4,12 @@ import "time" // ISO defines the schema of an ISO image. type ISO struct { - ID int `json:"id"` - Name string `json:"name"` - Description string `json:"description"` - Type string `json:"type"` - Deprecated time.Time `json:"deprecated"` + ID int `json:"id"` + Name string `json:"name"` + Description string `json:"description"` + Type string `json:"type"` + Architecture *string `json:"architecture"` + Deprecated time.Time `json:"deprecated"` } // ISOGetResponse defines the schema of the response when retrieving a single ISO. diff --git a/vendor/github.com/hetznercloud/hcloud-go/hcloud/schema/server_type.go b/vendor/github.com/hetznercloud/hcloud-go/hcloud/schema/server_type.go index 5d4f10b03..e2fe2f726 100644 --- a/vendor/github.com/hetznercloud/hcloud-go/hcloud/schema/server_type.go +++ b/vendor/github.com/hetznercloud/hcloud-go/hcloud/schema/server_type.go @@ -2,15 +2,16 @@ package schema // ServerType defines the schema of a server type. 
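The ISO list options gain the same architecture filter. A short sketch of listing ARM ISOs while still including custom ISOs, which carry no architecture and are only returned when the wildcard flag is set (the token is hypothetical):

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/hetznercloud/hcloud-go/hcloud"
)

func main() {
	client := hcloud.NewClient(hcloud.WithToken("my-token")) // hypothetical token

	// Filter by architecture, and opt in to custom ISOs via the
	// wildcard flag since they have no architecture set.
	isos, _, err := client.ISO.List(context.Background(), hcloud.ISOListOpts{
		Architecture:                []hcloud.Architecture{hcloud.ArchitectureARM},
		IncludeWildcardArchitecture: true,
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, iso := range isos {
		arch := "wildcard (custom ISO)"
		if iso.Architecture != nil {
			arch = string(*iso.Architecture)
		}
		fmt.Println(iso.Name, arch)
	}
}
```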
type ServerType struct { - ID int `json:"id"` - Name string `json:"name"` - Description string `json:"description"` - Cores int `json:"cores"` - Memory float32 `json:"memory"` - Disk int `json:"disk"` - StorageType string `json:"storage_type"` - CPUType string `json:"cpu_type"` - Prices []PricingServerTypePrice `json:"prices"` + ID int `json:"id"` + Name string `json:"name"` + Description string `json:"description"` + Cores int `json:"cores"` + Memory float32 `json:"memory"` + Disk int `json:"disk"` + StorageType string `json:"storage_type"` + CPUType string `json:"cpu_type"` + Architecture string `json:"architecture"` + Prices []PricingServerTypePrice `json:"prices"` } // ServerTypeListResponse defines the schema of the response when diff --git a/vendor/github.com/hetznercloud/hcloud-go/hcloud/server_type.go b/vendor/github.com/hetznercloud/hcloud-go/hcloud/server_type.go index cf712eb80..37ebb7f09 100644 --- a/vendor/github.com/hetznercloud/hcloud-go/hcloud/server_type.go +++ b/vendor/github.com/hetznercloud/hcloud-go/hcloud/server_type.go @@ -11,15 +11,16 @@ import ( // ServerType represents a server type in the Hetzner Cloud. type ServerType struct { - ID int - Name string - Description string - Cores int - Memory float32 - Disk int - StorageType StorageType - CPUType CPUType - Pricings []ServerTypeLocationPricing + ID int + Name string + Description string + Cores int + Memory float32 + Disk int + StorageType StorageType + CPUType CPUType + Architecture Architecture + Pricings []ServerTypeLocationPricing } // StorageType specifies the type of storage. diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/README.md b/vendor/github.com/ionos-cloud/sdk-go/v6/README.md index df5137dab..f020bdf43 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/README.md +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/README.md @@ -53,7 +53,7 @@ go get github.com/ionos-cloud/sdk-go/v6@latest | `IONOS_PASSWORD` | Specify the password used to login, to authenticate against the IONOS Cloud API | | `IONOS_TOKEN` | Specify the token used to login, if a token is being used instead of username and password | | `IONOS_API_URL` | Specify the API URL. It will overwrite the API endpoint default value `api.ionos.com`. Note: the host URL does not contain the `/cloudapi/v6` path, so it should _not_ be included in the `IONOS_API_URL` environment variable | -| `IONOS_LOGLEVEL` | Specify the Log Level used to log messages. Possible values: Off, Debug, Trace | +| `IONOS_LOG_LEVEL` | Specify the Log Level used to log messages. 
Possible values: Off, Debug, Trace | | `IONOS_PINNED_CERT` | Specify the SHA-256 public fingerprint here, enables certificate pinning | ⚠️ **_Note: To overwrite the api endpoint - `api.ionos.com`, the environment variable `$IONOS_API_URL` can be set, and used with `NewConfigurationFromEnv()` function._** @@ -278,25 +278,25 @@ All URIs are relative to *https://api.ionos.com/cloudapi/v6* Class | Method | HTTP request | Description ------------- | ------------- | ------------- | ------------- -DefaultApi | [**ApiInfoGet**](docs/api/DefaultApi.md#apiinfoget) | **Get** / | Display API information -ApplicationLoadBalancersApi | [**DatacentersApplicationloadbalancersDelete**](docs/api/ApplicationLoadBalancersApi.md#datacentersapplicationloadbalancersdelete) | **Delete** /datacenters/{datacenterId}/applicationloadbalancers/{applicationLoadBalancerId} | Delete Application Load Balancers -ApplicationLoadBalancersApi | [**DatacentersApplicationloadbalancersFindByApplicationLoadBalancerId**](docs/api/ApplicationLoadBalancersApi.md#datacentersapplicationloadbalancersfindbyapplicationloadbalancerid) | **Get** /datacenters/{datacenterId}/applicationloadbalancers/{applicationLoadBalancerId} | Retrieve Application Load Balancers -ApplicationLoadBalancersApi | [**DatacentersApplicationloadbalancersFlowlogsDelete**](docs/api/ApplicationLoadBalancersApi.md#datacentersapplicationloadbalancersflowlogsdelete) | **Delete** /datacenters/{datacenterId}/applicationloadbalancers/{applicationLoadBalancerId}/flowlogs/{flowLogId} | Delete ALB Flow Logs -ApplicationLoadBalancersApi | [**DatacentersApplicationloadbalancersFlowlogsFindByFlowLogId**](docs/api/ApplicationLoadBalancersApi.md#datacentersapplicationloadbalancersflowlogsfindbyflowlogid) | **Get** /datacenters/{datacenterId}/applicationloadbalancers/{applicationLoadBalancerId}/flowlogs/{flowLogId} | Retrieve ALB Flow Logs -ApplicationLoadBalancersApi | [**DatacentersApplicationloadbalancersFlowlogsGet**](docs/api/ApplicationLoadBalancersApi.md#datacentersapplicationloadbalancersflowlogsget) | **Get** /datacenters/{datacenterId}/applicationloadbalancers/{applicationLoadBalancerId}/flowlogs | List ALB Flow Logs -ApplicationLoadBalancersApi | [**DatacentersApplicationloadbalancersFlowlogsPatch**](docs/api/ApplicationLoadBalancersApi.md#datacentersapplicationloadbalancersflowlogspatch) | **Patch** /datacenters/{datacenterId}/applicationloadbalancers/{applicationLoadBalancerId}/flowlogs/{flowLogId} | Partially modify ALB Flow Logs -ApplicationLoadBalancersApi | [**DatacentersApplicationloadbalancersFlowlogsPost**](docs/api/ApplicationLoadBalancersApi.md#datacentersapplicationloadbalancersflowlogspost) | **Post** /datacenters/{datacenterId}/applicationloadbalancers/{applicationLoadBalancerId}/flowlogs | Create ALB Flow Logs -ApplicationLoadBalancersApi | [**DatacentersApplicationloadbalancersFlowlogsPut**](docs/api/ApplicationLoadBalancersApi.md#datacentersapplicationloadbalancersflowlogsput) | **Put** /datacenters/{datacenterId}/applicationloadbalancers/{applicationLoadBalancerId}/flowlogs/{flowLogId} | Modify ALB Flow Logs -ApplicationLoadBalancersApi | [**DatacentersApplicationloadbalancersForwardingrulesDelete**](docs/api/ApplicationLoadBalancersApi.md#datacentersapplicationloadbalancersforwardingrulesdelete) | **Delete** /datacenters/{datacenterId}/applicationloadbalancers/{applicationLoadBalancerId}/forwardingrules/{forwardingRuleId} | Delete ALB forwarding rules -ApplicationLoadBalancersApi | 
[**DatacentersApplicationloadbalancersForwardingrulesFindByForwardingRuleId**](docs/api/ApplicationLoadBalancersApi.md#datacentersapplicationloadbalancersforwardingrulesfindbyforwardingruleid) | **Get** /datacenters/{datacenterId}/applicationloadbalancers/{applicationLoadBalancerId}/forwardingrules/{forwardingRuleId} | Retrieve ALB forwarding rules -ApplicationLoadBalancersApi | [**DatacentersApplicationloadbalancersForwardingrulesGet**](docs/api/ApplicationLoadBalancersApi.md#datacentersapplicationloadbalancersforwardingrulesget) | **Get** /datacenters/{datacenterId}/applicationloadbalancers/{applicationLoadBalancerId}/forwardingrules | List ALB forwarding rules -ApplicationLoadBalancersApi | [**DatacentersApplicationloadbalancersForwardingrulesPatch**](docs/api/ApplicationLoadBalancersApi.md#datacentersapplicationloadbalancersforwardingrulespatch) | **Patch** /datacenters/{datacenterId}/applicationloadbalancers/{applicationLoadBalancerId}/forwardingrules/{forwardingRuleId} | Partially modify ALB forwarding rules -ApplicationLoadBalancersApi | [**DatacentersApplicationloadbalancersForwardingrulesPost**](docs/api/ApplicationLoadBalancersApi.md#datacentersapplicationloadbalancersforwardingrulespost) | **Post** /datacenters/{datacenterId}/applicationloadbalancers/{applicationLoadBalancerId}/forwardingrules | Create ALB forwarding rules -ApplicationLoadBalancersApi | [**DatacentersApplicationloadbalancersForwardingrulesPut**](docs/api/ApplicationLoadBalancersApi.md#datacentersapplicationloadbalancersforwardingrulesput) | **Put** /datacenters/{datacenterId}/applicationloadbalancers/{applicationLoadBalancerId}/forwardingrules/{forwardingRuleId} | Modify ALB forwarding rules -ApplicationLoadBalancersApi | [**DatacentersApplicationloadbalancersGet**](docs/api/ApplicationLoadBalancersApi.md#datacentersapplicationloadbalancersget) | **Get** /datacenters/{datacenterId}/applicationloadbalancers | List Application Load Balancers -ApplicationLoadBalancersApi | [**DatacentersApplicationloadbalancersPatch**](docs/api/ApplicationLoadBalancersApi.md#datacentersapplicationloadbalancerspatch) | **Patch** /datacenters/{datacenterId}/applicationloadbalancers/{applicationLoadBalancerId} | Partially modify Application Load Balancers -ApplicationLoadBalancersApi | [**DatacentersApplicationloadbalancersPost**](docs/api/ApplicationLoadBalancersApi.md#datacentersapplicationloadbalancerspost) | **Post** /datacenters/{datacenterId}/applicationloadbalancers | Create Application Load Balancers -ApplicationLoadBalancersApi | [**DatacentersApplicationloadbalancersPut**](docs/api/ApplicationLoadBalancersApi.md#datacentersapplicationloadbalancersput) | **Put** /datacenters/{datacenterId}/applicationloadbalancers/{applicationLoadBalancerId} | Modify Application Load Balancers +DefaultApi | [**ApiInfoGet**](docs/api/DefaultApi.md#apiinfoget) | **Get** / | Get API information +ApplicationLoadBalancersApi | [**DatacentersApplicationloadbalancersDelete**](docs/api/ApplicationLoadBalancersApi.md#datacentersapplicationloadbalancersdelete) | **Delete** /datacenters/{datacenterId}/applicationloadbalancers/{applicationLoadBalancerId} | Delete an Application Load Balancer by ID +ApplicationLoadBalancersApi | [**DatacentersApplicationloadbalancersFindByApplicationLoadBalancerId**](docs/api/ApplicationLoadBalancersApi.md#datacentersapplicationloadbalancersfindbyapplicationloadbalancerid) | **Get** /datacenters/{datacenterId}/applicationloadbalancers/{applicationLoadBalancerId} | Get an Application Load Balancer by ID 
+ApplicationLoadBalancersApi | [**DatacentersApplicationloadbalancersFlowlogsDelete**](docs/api/ApplicationLoadBalancersApi.md#datacentersapplicationloadbalancersflowlogsdelete) | **Delete** /datacenters/{datacenterId}/applicationloadbalancers/{applicationLoadBalancerId}/flowlogs/{flowLogId} | Delete an ALB Flow Log by ID +ApplicationLoadBalancersApi | [**DatacentersApplicationloadbalancersFlowlogsFindByFlowLogId**](docs/api/ApplicationLoadBalancersApi.md#datacentersapplicationloadbalancersflowlogsfindbyflowlogid) | **Get** /datacenters/{datacenterId}/applicationloadbalancers/{applicationLoadBalancerId}/flowlogs/{flowLogId} | Get an ALB Flow Log by ID +ApplicationLoadBalancersApi | [**DatacentersApplicationloadbalancersFlowlogsGet**](docs/api/ApplicationLoadBalancersApi.md#datacentersapplicationloadbalancersflowlogsget) | **Get** /datacenters/{datacenterId}/applicationloadbalancers/{applicationLoadBalancerId}/flowlogs | Get ALB Flow Logs +ApplicationLoadBalancersApi | [**DatacentersApplicationloadbalancersFlowlogsPatch**](docs/api/ApplicationLoadBalancersApi.md#datacentersapplicationloadbalancersflowlogspatch) | **Patch** /datacenters/{datacenterId}/applicationloadbalancers/{applicationLoadBalancerId}/flowlogs/{flowLogId} | Partially Modify an ALB Flow Log by ID +ApplicationLoadBalancersApi | [**DatacentersApplicationloadbalancersFlowlogsPost**](docs/api/ApplicationLoadBalancersApi.md#datacentersapplicationloadbalancersflowlogspost) | **Post** /datacenters/{datacenterId}/applicationloadbalancers/{applicationLoadBalancerId}/flowlogs | Create an ALB Flow Log +ApplicationLoadBalancersApi | [**DatacentersApplicationloadbalancersFlowlogsPut**](docs/api/ApplicationLoadBalancersApi.md#datacentersapplicationloadbalancersflowlogsput) | **Put** /datacenters/{datacenterId}/applicationloadbalancers/{applicationLoadBalancerId}/flowlogs/{flowLogId} | Modify an ALB Flow Log by ID +ApplicationLoadBalancersApi | [**DatacentersApplicationloadbalancersForwardingrulesDelete**](docs/api/ApplicationLoadBalancersApi.md#datacentersapplicationloadbalancersforwardingrulesdelete) | **Delete** /datacenters/{datacenterId}/applicationloadbalancers/{applicationLoadBalancerId}/forwardingrules/{forwardingRuleId} | Delete an ALB Forwarding Rule by ID +ApplicationLoadBalancersApi | [**DatacentersApplicationloadbalancersForwardingrulesFindByForwardingRuleId**](docs/api/ApplicationLoadBalancersApi.md#datacentersapplicationloadbalancersforwardingrulesfindbyforwardingruleid) | **Get** /datacenters/{datacenterId}/applicationloadbalancers/{applicationLoadBalancerId}/forwardingrules/{forwardingRuleId} | Get an ALB Forwarding Rule by ID +ApplicationLoadBalancersApi | [**DatacentersApplicationloadbalancersForwardingrulesGet**](docs/api/ApplicationLoadBalancersApi.md#datacentersapplicationloadbalancersforwardingrulesget) | **Get** /datacenters/{datacenterId}/applicationloadbalancers/{applicationLoadBalancerId}/forwardingrules | Get ALB Forwarding Rules +ApplicationLoadBalancersApi | [**DatacentersApplicationloadbalancersForwardingrulesPatch**](docs/api/ApplicationLoadBalancersApi.md#datacentersapplicationloadbalancersforwardingrulespatch) | **Patch** /datacenters/{datacenterId}/applicationloadbalancers/{applicationLoadBalancerId}/forwardingrules/{forwardingRuleId} | Partially modify an ALB Forwarding Rule by ID +ApplicationLoadBalancersApi | [**DatacentersApplicationloadbalancersForwardingrulesPost**](docs/api/ApplicationLoadBalancersApi.md#datacentersapplicationloadbalancersforwardingrulespost) | **Post** 
/datacenters/{datacenterId}/applicationloadbalancers/{applicationLoadBalancerId}/forwardingrules | Create an ALB Forwarding Rule +ApplicationLoadBalancersApi | [**DatacentersApplicationloadbalancersForwardingrulesPut**](docs/api/ApplicationLoadBalancersApi.md#datacentersapplicationloadbalancersforwardingrulesput) | **Put** /datacenters/{datacenterId}/applicationloadbalancers/{applicationLoadBalancerId}/forwardingrules/{forwardingRuleId} | Modify an ALB Forwarding Rule by ID +ApplicationLoadBalancersApi | [**DatacentersApplicationloadbalancersGet**](docs/api/ApplicationLoadBalancersApi.md#datacentersapplicationloadbalancersget) | **Get** /datacenters/{datacenterId}/applicationloadbalancers | Get Application Load Balancers +ApplicationLoadBalancersApi | [**DatacentersApplicationloadbalancersPatch**](docs/api/ApplicationLoadBalancersApi.md#datacentersapplicationloadbalancerspatch) | **Patch** /datacenters/{datacenterId}/applicationloadbalancers/{applicationLoadBalancerId} | Partially Modify an Application Load Balancer by ID +ApplicationLoadBalancersApi | [**DatacentersApplicationloadbalancersPost**](docs/api/ApplicationLoadBalancersApi.md#datacentersapplicationloadbalancerspost) | **Post** /datacenters/{datacenterId}/applicationloadbalancers | Create an Application Load Balancer +ApplicationLoadBalancersApi | [**DatacentersApplicationloadbalancersPut**](docs/api/ApplicationLoadBalancersApi.md#datacentersapplicationloadbalancersput) | **Put** /datacenters/{datacenterId}/applicationloadbalancers/{applicationLoadBalancerId} | Modify an Application Load Balancer by ID BackupUnitsApi | [**BackupunitsDelete**](docs/api/BackupUnitsApi.md#backupunitsdelete) | **Delete** /backupunits/{backupunitId} | Delete backup units BackupUnitsApi | [**BackupunitsFindById**](docs/api/BackupUnitsApi.md#backupunitsfindbyid) | **Get** /backupunits/{backupunitId} | Retrieve backup units BackupUnitsApi | [**BackupunitsGet**](docs/api/BackupUnitsApi.md#backupunitsget) | **Get** /backupunits | List backup units @@ -304,54 +304,55 @@ BackupUnitsApi | [**BackupunitsPatch**](docs/api/BackupUnitsApi.md#backupunitspa BackupUnitsApi | [**BackupunitsPost**](docs/api/BackupUnitsApi.md#backupunitspost) | **Post** /backupunits | Create backup units BackupUnitsApi | [**BackupunitsPut**](docs/api/BackupUnitsApi.md#backupunitsput) | **Put** /backupunits/{backupunitId} | Modify backup units BackupUnitsApi | [**BackupunitsSsourlGet**](docs/api/BackupUnitsApi.md#backupunitsssourlget) | **Get** /backupunits/{backupunitId}/ssourl | Retrieve BU single sign-on URLs -ContractResourcesApi | [**ContractsGet**](docs/api/ContractResourcesApi.md#contractsget) | **Get** /contracts | Retrieve contracts +ContractResourcesApi | [**ContractsGet**](docs/api/ContractResourcesApi.md#contractsget) | **Get** /contracts | Get Contract Information DataCentersApi | [**DatacentersDelete**](docs/api/DataCentersApi.md#datacentersdelete) | **Delete** /datacenters/{datacenterId} | Delete data centers DataCentersApi | [**DatacentersFindById**](docs/api/DataCentersApi.md#datacentersfindbyid) | **Get** /datacenters/{datacenterId} | Retrieve data centers DataCentersApi | [**DatacentersGet**](docs/api/DataCentersApi.md#datacentersget) | **Get** /datacenters | List your data centers -DataCentersApi | [**DatacentersPatch**](docs/api/DataCentersApi.md#datacenterspatch) | **Patch** /datacenters/{datacenterId} | Partially modify data centers -DataCentersApi | [**DatacentersPost**](docs/api/DataCentersApi.md#datacenterspost) | **Post** /datacenters | Create data centers 
-DataCentersApi | [**DatacentersPut**](docs/api/DataCentersApi.md#datacentersput) | **Put** /datacenters/{datacenterId} | Modify data centers +DataCentersApi | [**DatacentersPatch**](docs/api/DataCentersApi.md#datacenterspatch) | **Patch** /datacenters/{datacenterId} | Partially modify a Data Center by ID +DataCentersApi | [**DatacentersPost**](docs/api/DataCentersApi.md#datacenterspost) | **Post** /datacenters | Create a Data Center +DataCentersApi | [**DatacentersPut**](docs/api/DataCentersApi.md#datacentersput) | **Put** /datacenters/{datacenterId} | Modify a Data Center by ID FirewallRulesApi | [**DatacentersServersNicsFirewallrulesDelete**](docs/api/FirewallRulesApi.md#datacentersserversnicsfirewallrulesdelete) | **Delete** /datacenters/{datacenterId}/servers/{serverId}/nics/{nicId}/firewallrules/{firewallruleId} | Delete firewall rules FirewallRulesApi | [**DatacentersServersNicsFirewallrulesFindById**](docs/api/FirewallRulesApi.md#datacentersserversnicsfirewallrulesfindbyid) | **Get** /datacenters/{datacenterId}/servers/{serverId}/nics/{nicId}/firewallrules/{firewallruleId} | Retrieve firewall rules FirewallRulesApi | [**DatacentersServersNicsFirewallrulesGet**](docs/api/FirewallRulesApi.md#datacentersserversnicsfirewallrulesget) | **Get** /datacenters/{datacenterId}/servers/{serverId}/nics/{nicId}/firewallrules | List firewall rules FirewallRulesApi | [**DatacentersServersNicsFirewallrulesPatch**](docs/api/FirewallRulesApi.md#datacentersserversnicsfirewallrulespatch) | **Patch** /datacenters/{datacenterId}/servers/{serverId}/nics/{nicId}/firewallrules/{firewallruleId} | Partially modify firewall rules -FirewallRulesApi | [**DatacentersServersNicsFirewallrulesPost**](docs/api/FirewallRulesApi.md#datacentersserversnicsfirewallrulespost) | **Post** /datacenters/{datacenterId}/servers/{serverId}/nics/{nicId}/firewallrules | Create firewall rules -FirewallRulesApi | [**DatacentersServersNicsFirewallrulesPut**](docs/api/FirewallRulesApi.md#datacentersserversnicsfirewallrulesput) | **Put** /datacenters/{datacenterId}/servers/{serverId}/nics/{nicId}/firewallrules/{firewallruleId} | Modify firewall rules +FirewallRulesApi | [**DatacentersServersNicsFirewallrulesPost**](docs/api/FirewallRulesApi.md#datacentersserversnicsfirewallrulespost) | **Post** /datacenters/{datacenterId}/servers/{serverId}/nics/{nicId}/firewallrules | Create a Firewall Rule +FirewallRulesApi | [**DatacentersServersNicsFirewallrulesPut**](docs/api/FirewallRulesApi.md#datacentersserversnicsfirewallrulesput) | **Put** /datacenters/{datacenterId}/servers/{serverId}/nics/{nicId}/firewallrules/{firewallruleId} | Modify a Firewall Rule FlowLogsApi | [**DatacentersServersNicsFlowlogsDelete**](docs/api/FlowLogsApi.md#datacentersserversnicsflowlogsdelete) | **Delete** /datacenters/{datacenterId}/servers/{serverId}/nics/{nicId}/flowlogs/{flowlogId} | Delete Flow Logs FlowLogsApi | [**DatacentersServersNicsFlowlogsFindById**](docs/api/FlowLogsApi.md#datacentersserversnicsflowlogsfindbyid) | **Get** /datacenters/{datacenterId}/servers/{serverId}/nics/{nicId}/flowlogs/{flowlogId} | Retrieve Flow Logs FlowLogsApi | [**DatacentersServersNicsFlowlogsGet**](docs/api/FlowLogsApi.md#datacentersserversnicsflowlogsget) | **Get** /datacenters/{datacenterId}/servers/{serverId}/nics/{nicId}/flowlogs | List Flow Logs FlowLogsApi | [**DatacentersServersNicsFlowlogsPatch**](docs/api/FlowLogsApi.md#datacentersserversnicsflowlogspatch) | **Patch** /datacenters/{datacenterId}/servers/{serverId}/nics/{nicId}/flowlogs/{flowlogId} | Partially modify 
Flow Logs -FlowLogsApi | [**DatacentersServersNicsFlowlogsPost**](docs/api/FlowLogsApi.md#datacentersserversnicsflowlogspost) | **Post** /datacenters/{datacenterId}/servers/{serverId}/nics/{nicId}/flowlogs | Create Flow Logs +FlowLogsApi | [**DatacentersServersNicsFlowlogsPost**](docs/api/FlowLogsApi.md#datacentersserversnicsflowlogspost) | **Post** /datacenters/{datacenterId}/servers/{serverId}/nics/{nicId}/flowlogs | Create a Flow Log FlowLogsApi | [**DatacentersServersNicsFlowlogsPut**](docs/api/FlowLogsApi.md#datacentersserversnicsflowlogsput) | **Put** /datacenters/{datacenterId}/servers/{serverId}/nics/{nicId}/flowlogs/{flowlogId} | Modify Flow Logs IPBlocksApi | [**IpblocksDelete**](docs/api/IPBlocksApi.md#ipblocksdelete) | **Delete** /ipblocks/{ipblockId} | Delete IP blocks IPBlocksApi | [**IpblocksFindById**](docs/api/IPBlocksApi.md#ipblocksfindbyid) | **Get** /ipblocks/{ipblockId} | Retrieve IP blocks IPBlocksApi | [**IpblocksGet**](docs/api/IPBlocksApi.md#ipblocksget) | **Get** /ipblocks | List IP blocks IPBlocksApi | [**IpblocksPatch**](docs/api/IPBlocksApi.md#ipblockspatch) | **Patch** /ipblocks/{ipblockId} | Partially modify IP blocks -IPBlocksApi | [**IpblocksPost**](docs/api/IPBlocksApi.md#ipblockspost) | **Post** /ipblocks | Reserve IP blocks -IPBlocksApi | [**IpblocksPut**](docs/api/IPBlocksApi.md#ipblocksput) | **Put** /ipblocks/{ipblockId} | Modify IP blocks +IPBlocksApi | [**IpblocksPost**](docs/api/IPBlocksApi.md#ipblockspost) | **Post** /ipblocks | Reserve a IP Block +IPBlocksApi | [**IpblocksPut**](docs/api/IPBlocksApi.md#ipblocksput) | **Put** /ipblocks/{ipblockId} | Modify a IP Block by ID ImagesApi | [**ImagesDelete**](docs/api/ImagesApi.md#imagesdelete) | **Delete** /images/{imageId} | Delete images ImagesApi | [**ImagesFindById**](docs/api/ImagesApi.md#imagesfindbyid) | **Get** /images/{imageId} | Retrieve images ImagesApi | [**ImagesGet**](docs/api/ImagesApi.md#imagesget) | **Get** /images | List images ImagesApi | [**ImagesPatch**](docs/api/ImagesApi.md#imagespatch) | **Patch** /images/{imageId} | Partially modify images -ImagesApi | [**ImagesPut**](docs/api/ImagesApi.md#imagesput) | **Put** /images/{imageId} | Modify images -KubernetesApi | [**K8sDelete**](docs/api/KubernetesApi.md#k8sdelete) | **Delete** /k8s/{k8sClusterId} | Delete Kubernetes clusters -KubernetesApi | [**K8sFindByClusterId**](docs/api/KubernetesApi.md#k8sfindbyclusterid) | **Get** /k8s/{k8sClusterId} | Retrieve Kubernetes clusters -KubernetesApi | [**K8sGet**](docs/api/KubernetesApi.md#k8sget) | **Get** /k8s | List Kubernetes clusters -KubernetesApi | [**K8sKubeconfigGet**](docs/api/KubernetesApi.md#k8skubeconfigget) | **Get** /k8s/{k8sClusterId}/kubeconfig | Retrieve Kubernetes configuration files -KubernetesApi | [**K8sNodepoolsDelete**](docs/api/KubernetesApi.md#k8snodepoolsdelete) | **Delete** /k8s/{k8sClusterId}/nodepools/{nodepoolId} | Delete Kubernetes node pools -KubernetesApi | [**K8sNodepoolsFindById**](docs/api/KubernetesApi.md#k8snodepoolsfindbyid) | **Get** /k8s/{k8sClusterId}/nodepools/{nodepoolId} | Retrieve Kubernetes node pools -KubernetesApi | [**K8sNodepoolsGet**](docs/api/KubernetesApi.md#k8snodepoolsget) | **Get** /k8s/{k8sClusterId}/nodepools | List Kubernetes node pools -KubernetesApi | [**K8sNodepoolsNodesDelete**](docs/api/KubernetesApi.md#k8snodepoolsnodesdelete) | **Delete** /k8s/{k8sClusterId}/nodepools/{nodepoolId}/nodes/{nodeId} | Delete Kubernetes nodes -KubernetesApi | [**K8sNodepoolsNodesFindById**](docs/api/KubernetesApi.md#k8snodepoolsnodesfindbyid) | 
**Get** /k8s/{k8sClusterId}/nodepools/{nodepoolId}/nodes/{nodeId} | Retrieve Kubernetes nodes -KubernetesApi | [**K8sNodepoolsNodesGet**](docs/api/KubernetesApi.md#k8snodepoolsnodesget) | **Get** /k8s/{k8sClusterId}/nodepools/{nodepoolId}/nodes | List Kubernetes nodes -KubernetesApi | [**K8sNodepoolsNodesReplacePost**](docs/api/KubernetesApi.md#k8snodepoolsnodesreplacepost) | **Post** /k8s/{k8sClusterId}/nodepools/{nodepoolId}/nodes/{nodeId}/replace | Recreate Kubernetes nodes -KubernetesApi | [**K8sNodepoolsPost**](docs/api/KubernetesApi.md#k8snodepoolspost) | **Post** /k8s/{k8sClusterId}/nodepools | Create Kubernetes node pools -KubernetesApi | [**K8sNodepoolsPut**](docs/api/KubernetesApi.md#k8snodepoolsput) | **Put** /k8s/{k8sClusterId}/nodepools/{nodepoolId} | Modify Kubernetes node pools -KubernetesApi | [**K8sPost**](docs/api/KubernetesApi.md#k8spost) | **Post** /k8s | Create Kubernetes clusters -KubernetesApi | [**K8sPut**](docs/api/KubernetesApi.md#k8sput) | **Put** /k8s/{k8sClusterId} | Modify Kubernetes clusters -KubernetesApi | [**K8sVersionsDefaultGet**](docs/api/KubernetesApi.md#k8sversionsdefaultget) | **Get** /k8s/versions/default | Retrieve current default Kubernetes version -KubernetesApi | [**K8sVersionsGet**](docs/api/KubernetesApi.md#k8sversionsget) | **Get** /k8s/versions | List Kubernetes versions +ImagesApi | [**ImagesPut**](docs/api/ImagesApi.md#imagesput) | **Put** /images/{imageId} | Modify an Image by ID +KubernetesApi | [**K8sDelete**](docs/api/KubernetesApi.md#k8sdelete) | **Delete** /k8s/{k8sClusterId} | Delete a Kubernetes Cluster by ID +KubernetesApi | [**K8sFindByClusterId**](docs/api/KubernetesApi.md#k8sfindbyclusterid) | **Get** /k8s/{k8sClusterId} | Get a Kubernetes Cluster by ID +KubernetesApi | [**K8sGet**](docs/api/KubernetesApi.md#k8sget) | **Get** /k8s | Get Kubernetes Clusters +KubernetesApi | [**K8sKubeconfigGet**](docs/api/KubernetesApi.md#k8skubeconfigget) | **Get** /k8s/{k8sClusterId}/kubeconfig | Get Kubernetes Configuration File +KubernetesApi | [**K8sNodepoolsDelete**](docs/api/KubernetesApi.md#k8snodepoolsdelete) | **Delete** /k8s/{k8sClusterId}/nodepools/{nodepoolId} | Delete a Kubernetes Node Pool by ID +KubernetesApi | [**K8sNodepoolsFindById**](docs/api/KubernetesApi.md#k8snodepoolsfindbyid) | **Get** /k8s/{k8sClusterId}/nodepools/{nodepoolId} | Get a Kubernetes Node Pool by ID +KubernetesApi | [**K8sNodepoolsGet**](docs/api/KubernetesApi.md#k8snodepoolsget) | **Get** /k8s/{k8sClusterId}/nodepools | Get Kubernetes Node Pools +KubernetesApi | [**K8sNodepoolsNodesDelete**](docs/api/KubernetesApi.md#k8snodepoolsnodesdelete) | **Delete** /k8s/{k8sClusterId}/nodepools/{nodepoolId}/nodes/{nodeId} | Delete a Kubernetes Node by ID +KubernetesApi | [**K8sNodepoolsNodesFindById**](docs/api/KubernetesApi.md#k8snodepoolsnodesfindbyid) | **Get** /k8s/{k8sClusterId}/nodepools/{nodepoolId}/nodes/{nodeId} | Get Kubernetes Node by ID +KubernetesApi | [**K8sNodepoolsNodesGet**](docs/api/KubernetesApi.md#k8snodepoolsnodesget) | **Get** /k8s/{k8sClusterId}/nodepools/{nodepoolId}/nodes | Get Kubernetes Nodes +KubernetesApi | [**K8sNodepoolsNodesReplacePost**](docs/api/KubernetesApi.md#k8snodepoolsnodesreplacepost) | **Post** /k8s/{k8sClusterId}/nodepools/{nodepoolId}/nodes/{nodeId}/replace | Recreate a Kubernetes Node by ID +KubernetesApi | [**K8sNodepoolsPost**](docs/api/KubernetesApi.md#k8snodepoolspost) | **Post** /k8s/{k8sClusterId}/nodepools | Create a Kubernetes Node Pool +KubernetesApi | 
[**K8sNodepoolsPut**](docs/api/KubernetesApi.md#k8snodepoolsput) | **Put** /k8s/{k8sClusterId}/nodepools/{nodepoolId} | Modify a Kubernetes Node Pool by ID +KubernetesApi | [**K8sPost**](docs/api/KubernetesApi.md#k8spost) | **Post** /k8s | Create a Kubernetes Cluster +KubernetesApi | [**K8sPut**](docs/api/KubernetesApi.md#k8sput) | **Put** /k8s/{k8sClusterId} | Modify a Kubernetes Cluster by ID +KubernetesApi | [**K8sVersionsDefaultGet**](docs/api/KubernetesApi.md#k8sversionsdefaultget) | **Get** /k8s/versions/default | Get Default Kubernetes Version +KubernetesApi | [**K8sVersionsGet**](docs/api/KubernetesApi.md#k8sversionsget) | **Get** /k8s/versions | Get Kubernetes Versions LANsApi | [**DatacentersLansDelete**](docs/api/LANsApi.md#datacenterslansdelete) | **Delete** /datacenters/{datacenterId}/lans/{lanId} | Delete LANs +LANsApi | [**DatacentersLansEnableIpv6**](docs/api/LANsApi.md#datacenterslansenableipv6) | **Post** /datacenters/{datacenterId}/lans/enable-ipv6 | Enable IPv6 in the current Virtual Datacenter LANsApi | [**DatacentersLansFindById**](docs/api/LANsApi.md#datacenterslansfindbyid) | **Get** /datacenters/{datacenterId}/lans/{lanId} | Retrieve LANs LANsApi | [**DatacentersLansGet**](docs/api/LANsApi.md#datacenterslansget) | **Get** /datacenters/{datacenterId}/lans | List LANs LANsApi | [**DatacentersLansNicsFindById**](docs/api/LANsApi.md#datacenterslansnicsfindbyid) | **Get** /datacenters/{datacenterId}/lans/{lanId}/nics/{nicId} | Retrieve attached NICs @@ -363,30 +364,30 @@ LANsApi | [**DatacentersLansPut**](docs/api/LANsApi.md#datacenterslansput) | **P LabelsApi | [**DatacentersLabelsDelete**](docs/api/LabelsApi.md#datacenterslabelsdelete) | **Delete** /datacenters/{datacenterId}/labels/{key} | Delete data center labels LabelsApi | [**DatacentersLabelsFindByKey**](docs/api/LabelsApi.md#datacenterslabelsfindbykey) | **Get** /datacenters/{datacenterId}/labels/{key} | Retrieve data center labels LabelsApi | [**DatacentersLabelsGet**](docs/api/LabelsApi.md#datacenterslabelsget) | **Get** /datacenters/{datacenterId}/labels | List data center labels -LabelsApi | [**DatacentersLabelsPost**](docs/api/LabelsApi.md#datacenterslabelspost) | **Post** /datacenters/{datacenterId}/labels | Create data center labels -LabelsApi | [**DatacentersLabelsPut**](docs/api/LabelsApi.md#datacenterslabelsput) | **Put** /datacenters/{datacenterId}/labels/{key} | Modify data center labels +LabelsApi | [**DatacentersLabelsPost**](docs/api/LabelsApi.md#datacenterslabelspost) | **Post** /datacenters/{datacenterId}/labels | Create a Data Center Label +LabelsApi | [**DatacentersLabelsPut**](docs/api/LabelsApi.md#datacenterslabelsput) | **Put** /datacenters/{datacenterId}/labels/{key} | Modify a Data Center Label by Key LabelsApi | [**DatacentersServersLabelsDelete**](docs/api/LabelsApi.md#datacentersserverslabelsdelete) | **Delete** /datacenters/{datacenterId}/servers/{serverId}/labels/{key} | Delete server labels LabelsApi | [**DatacentersServersLabelsFindByKey**](docs/api/LabelsApi.md#datacentersserverslabelsfindbykey) | **Get** /datacenters/{datacenterId}/servers/{serverId}/labels/{key} | Retrieve server labels LabelsApi | [**DatacentersServersLabelsGet**](docs/api/LabelsApi.md#datacentersserverslabelsget) | **Get** /datacenters/{datacenterId}/servers/{serverId}/labels | List server labels -LabelsApi | [**DatacentersServersLabelsPost**](docs/api/LabelsApi.md#datacentersserverslabelspost) | **Post** /datacenters/{datacenterId}/servers/{serverId}/labels | Create server labels -LabelsApi | 
[**DatacentersServersLabelsPut**](docs/api/LabelsApi.md#datacentersserverslabelsput) | **Put** /datacenters/{datacenterId}/servers/{serverId}/labels/{key} | Modify server labels +LabelsApi | [**DatacentersServersLabelsPost**](docs/api/LabelsApi.md#datacentersserverslabelspost) | **Post** /datacenters/{datacenterId}/servers/{serverId}/labels | Create a Server Label +LabelsApi | [**DatacentersServersLabelsPut**](docs/api/LabelsApi.md#datacentersserverslabelsput) | **Put** /datacenters/{datacenterId}/servers/{serverId}/labels/{key} | Modify a Server Label LabelsApi | [**DatacentersVolumesLabelsDelete**](docs/api/LabelsApi.md#datacentersvolumeslabelsdelete) | **Delete** /datacenters/{datacenterId}/volumes/{volumeId}/labels/{key} | Delete volume labels LabelsApi | [**DatacentersVolumesLabelsFindByKey**](docs/api/LabelsApi.md#datacentersvolumeslabelsfindbykey) | **Get** /datacenters/{datacenterId}/volumes/{volumeId}/labels/{key} | Retrieve volume labels LabelsApi | [**DatacentersVolumesLabelsGet**](docs/api/LabelsApi.md#datacentersvolumeslabelsget) | **Get** /datacenters/{datacenterId}/volumes/{volumeId}/labels | List volume labels -LabelsApi | [**DatacentersVolumesLabelsPost**](docs/api/LabelsApi.md#datacentersvolumeslabelspost) | **Post** /datacenters/{datacenterId}/volumes/{volumeId}/labels | Create volume labels -LabelsApi | [**DatacentersVolumesLabelsPut**](docs/api/LabelsApi.md#datacentersvolumeslabelsput) | **Put** /datacenters/{datacenterId}/volumes/{volumeId}/labels/{key} | Modify volume labels +LabelsApi | [**DatacentersVolumesLabelsPost**](docs/api/LabelsApi.md#datacentersvolumeslabelspost) | **Post** /datacenters/{datacenterId}/volumes/{volumeId}/labels | Create a Volume Label +LabelsApi | [**DatacentersVolumesLabelsPut**](docs/api/LabelsApi.md#datacentersvolumeslabelsput) | **Put** /datacenters/{datacenterId}/volumes/{volumeId}/labels/{key} | Modify a Volume Label LabelsApi | [**IpblocksLabelsDelete**](docs/api/LabelsApi.md#ipblockslabelsdelete) | **Delete** /ipblocks/{ipblockId}/labels/{key} | Delete IP block labels LabelsApi | [**IpblocksLabelsFindByKey**](docs/api/LabelsApi.md#ipblockslabelsfindbykey) | **Get** /ipblocks/{ipblockId}/labels/{key} | Retrieve IP block labels LabelsApi | [**IpblocksLabelsGet**](docs/api/LabelsApi.md#ipblockslabelsget) | **Get** /ipblocks/{ipblockId}/labels | List IP block labels LabelsApi | [**IpblocksLabelsPost**](docs/api/LabelsApi.md#ipblockslabelspost) | **Post** /ipblocks/{ipblockId}/labels | Create IP block labels -LabelsApi | [**IpblocksLabelsPut**](docs/api/LabelsApi.md#ipblockslabelsput) | **Put** /ipblocks/{ipblockId}/labels/{key} | Modify IP block labels +LabelsApi | [**IpblocksLabelsPut**](docs/api/LabelsApi.md#ipblockslabelsput) | **Put** /ipblocks/{ipblockId}/labels/{key} | Modify a IP Block Label by ID LabelsApi | [**LabelsFindByUrn**](docs/api/LabelsApi.md#labelsfindbyurn) | **Get** /labels/{labelurn} | Retrieve labels by URN LabelsApi | [**LabelsGet**](docs/api/LabelsApi.md#labelsget) | **Get** /labels | List labels LabelsApi | [**SnapshotsLabelsDelete**](docs/api/LabelsApi.md#snapshotslabelsdelete) | **Delete** /snapshots/{snapshotId}/labels/{key} | Delete snapshot labels LabelsApi | [**SnapshotsLabelsFindByKey**](docs/api/LabelsApi.md#snapshotslabelsfindbykey) | **Get** /snapshots/{snapshotId}/labels/{key} | Retrieve snapshot labels LabelsApi | [**SnapshotsLabelsGet**](docs/api/LabelsApi.md#snapshotslabelsget) | **Get** /snapshots/{snapshotId}/labels | List snapshot labels -LabelsApi | 
[**SnapshotsLabelsPost**](docs/api/LabelsApi.md#snapshotslabelspost) | **Post** /snapshots/{snapshotId}/labels | Create snapshot labels -LabelsApi | [**SnapshotsLabelsPut**](docs/api/LabelsApi.md#snapshotslabelsput) | **Put** /snapshots/{snapshotId}/labels/{key} | Modify snapshot labels +LabelsApi | [**SnapshotsLabelsPost**](docs/api/LabelsApi.md#snapshotslabelspost) | **Post** /snapshots/{snapshotId}/labels | Create a Snapshot Label +LabelsApi | [**SnapshotsLabelsPut**](docs/api/LabelsApi.md#snapshotslabelsput) | **Put** /snapshots/{snapshotId}/labels/{key} | Modify a Snapshot Label by ID LoadBalancersApi | [**DatacentersLoadbalancersBalancednicsDelete**](docs/api/LoadBalancersApi.md#datacentersloadbalancersbalancednicsdelete) | **Delete** /datacenters/{datacenterId}/loadbalancers/{loadbalancerId}/balancednics/{nicId} | Detach balanced NICs LoadBalancersApi | [**DatacentersLoadbalancersBalancednicsFindByNicId**](docs/api/LoadBalancersApi.md#datacentersloadbalancersbalancednicsfindbynicid) | **Get** /datacenters/{datacenterId}/loadbalancers/{loadbalancerId}/balancednics/{nicId} | Retrieve balanced NICs LoadBalancersApi | [**DatacentersLoadbalancersBalancednicsGet**](docs/api/LoadBalancersApi.md#datacentersloadbalancersbalancednicsget) | **Get** /datacenters/{datacenterId}/loadbalancers/{loadbalancerId}/balancednics | List balanced NICs @@ -395,34 +396,34 @@ LoadBalancersApi | [**DatacentersLoadbalancersDelete**](docs/api/LoadBalancersAp LoadBalancersApi | [**DatacentersLoadbalancersFindById**](docs/api/LoadBalancersApi.md#datacentersloadbalancersfindbyid) | **Get** /datacenters/{datacenterId}/loadbalancers/{loadbalancerId} | Retrieve Load Balancers LoadBalancersApi | [**DatacentersLoadbalancersGet**](docs/api/LoadBalancersApi.md#datacentersloadbalancersget) | **Get** /datacenters/{datacenterId}/loadbalancers | List Load Balancers LoadBalancersApi | [**DatacentersLoadbalancersPatch**](docs/api/LoadBalancersApi.md#datacentersloadbalancerspatch) | **Patch** /datacenters/{datacenterId}/loadbalancers/{loadbalancerId} | Partially modify Load Balancers -LoadBalancersApi | [**DatacentersLoadbalancersPost**](docs/api/LoadBalancersApi.md#datacentersloadbalancerspost) | **Post** /datacenters/{datacenterId}/loadbalancers | Create Load Balancers -LoadBalancersApi | [**DatacentersLoadbalancersPut**](docs/api/LoadBalancersApi.md#datacentersloadbalancersput) | **Put** /datacenters/{datacenterId}/loadbalancers/{loadbalancerId} | Modify Load Balancers -LocationsApi | [**LocationsFindByRegionId**](docs/api/LocationsApi.md#locationsfindbyregionid) | **Get** /locations/{regionId} | List locations within regions -LocationsApi | [**LocationsFindByRegionIdAndId**](docs/api/LocationsApi.md#locationsfindbyregionidandid) | **Get** /locations/{regionId}/{locationId} | Retrieve specified locations -LocationsApi | [**LocationsGet**](docs/api/LocationsApi.md#locationsget) | **Get** /locations | List locations +LoadBalancersApi | [**DatacentersLoadbalancersPost**](docs/api/LoadBalancersApi.md#datacentersloadbalancerspost) | **Post** /datacenters/{datacenterId}/loadbalancers | Create a Load Balancer +LoadBalancersApi | [**DatacentersLoadbalancersPut**](docs/api/LoadBalancersApi.md#datacentersloadbalancersput) | **Put** /datacenters/{datacenterId}/loadbalancers/{loadbalancerId} | Modify a Load Balancer by ID +LocationsApi | [**LocationsFindByRegionId**](docs/api/LocationsApi.md#locationsfindbyregionid) | **Get** /locations/{regionId} | Get Locations within a Region +LocationsApi | 
[**LocationsFindByRegionIdAndId**](docs/api/LocationsApi.md#locationsfindbyregionidandid) | **Get** /locations/{regionId}/{locationId} | Get Location by ID +LocationsApi | [**LocationsGet**](docs/api/LocationsApi.md#locationsget) | **Get** /locations | Get Locations NATGatewaysApi | [**DatacentersNatgatewaysDelete**](docs/api/NATGatewaysApi.md#datacentersnatgatewaysdelete) | **Delete** /datacenters/{datacenterId}/natgateways/{natGatewayId} | Delete NAT Gateways NATGatewaysApi | [**DatacentersNatgatewaysFindByNatGatewayId**](docs/api/NATGatewaysApi.md#datacentersnatgatewaysfindbynatgatewayid) | **Get** /datacenters/{datacenterId}/natgateways/{natGatewayId} | Retrieve NAT Gateways NATGatewaysApi | [**DatacentersNatgatewaysFlowlogsDelete**](docs/api/NATGatewaysApi.md#datacentersnatgatewaysflowlogsdelete) | **Delete** /datacenters/{datacenterId}/natgateways/{natGatewayId}/flowlogs/{flowLogId} | Delete NAT Gateway Flow Logs NATGatewaysApi | [**DatacentersNatgatewaysFlowlogsFindByFlowLogId**](docs/api/NATGatewaysApi.md#datacentersnatgatewaysflowlogsfindbyflowlogid) | **Get** /datacenters/{datacenterId}/natgateways/{natGatewayId}/flowlogs/{flowLogId} | Retrieve NAT Gateway Flow Logs NATGatewaysApi | [**DatacentersNatgatewaysFlowlogsGet**](docs/api/NATGatewaysApi.md#datacentersnatgatewaysflowlogsget) | **Get** /datacenters/{datacenterId}/natgateways/{natGatewayId}/flowlogs | List NAT Gateway Flow Logs NATGatewaysApi | [**DatacentersNatgatewaysFlowlogsPatch**](docs/api/NATGatewaysApi.md#datacentersnatgatewaysflowlogspatch) | **Patch** /datacenters/{datacenterId}/natgateways/{natGatewayId}/flowlogs/{flowLogId} | Partially modify NAT Gateway Flow Logs -NATGatewaysApi | [**DatacentersNatgatewaysFlowlogsPost**](docs/api/NATGatewaysApi.md#datacentersnatgatewaysflowlogspost) | **Post** /datacenters/{datacenterId}/natgateways/{natGatewayId}/flowlogs | Create NAT Gateway Flow Logs +NATGatewaysApi | [**DatacentersNatgatewaysFlowlogsPost**](docs/api/NATGatewaysApi.md#datacentersnatgatewaysflowlogspost) | **Post** /datacenters/{datacenterId}/natgateways/{natGatewayId}/flowlogs | Create a NAT Gateway Flow Log NATGatewaysApi | [**DatacentersNatgatewaysFlowlogsPut**](docs/api/NATGatewaysApi.md#datacentersnatgatewaysflowlogsput) | **Put** /datacenters/{datacenterId}/natgateways/{natGatewayId}/flowlogs/{flowLogId} | Modify NAT Gateway Flow Logs NATGatewaysApi | [**DatacentersNatgatewaysGet**](docs/api/NATGatewaysApi.md#datacentersnatgatewaysget) | **Get** /datacenters/{datacenterId}/natgateways | List NAT Gateways NATGatewaysApi | [**DatacentersNatgatewaysPatch**](docs/api/NATGatewaysApi.md#datacentersnatgatewayspatch) | **Patch** /datacenters/{datacenterId}/natgateways/{natGatewayId} | Partially modify NAT Gateways -NATGatewaysApi | [**DatacentersNatgatewaysPost**](docs/api/NATGatewaysApi.md#datacentersnatgatewayspost) | **Post** /datacenters/{datacenterId}/natgateways | Create NAT Gateways +NATGatewaysApi | [**DatacentersNatgatewaysPost**](docs/api/NATGatewaysApi.md#datacentersnatgatewayspost) | **Post** /datacenters/{datacenterId}/natgateways | Create a NAT Gateway NATGatewaysApi | [**DatacentersNatgatewaysPut**](docs/api/NATGatewaysApi.md#datacentersnatgatewaysput) | **Put** /datacenters/{datacenterId}/natgateways/{natGatewayId} | Modify NAT Gateways NATGatewaysApi | [**DatacentersNatgatewaysRulesDelete**](docs/api/NATGatewaysApi.md#datacentersnatgatewaysrulesdelete) | **Delete** /datacenters/{datacenterId}/natgateways/{natGatewayId}/rules/{natGatewayRuleId} | Delete NAT Gateway rules NATGatewaysApi | 
[**DatacentersNatgatewaysRulesFindByNatGatewayRuleId**](docs/api/NATGatewaysApi.md#datacentersnatgatewaysrulesfindbynatgatewayruleid) | **Get** /datacenters/{datacenterId}/natgateways/{natGatewayId}/rules/{natGatewayRuleId} | Retrieve NAT Gateway rules NATGatewaysApi | [**DatacentersNatgatewaysRulesGet**](docs/api/NATGatewaysApi.md#datacentersnatgatewaysrulesget) | **Get** /datacenters/{datacenterId}/natgateways/{natGatewayId}/rules | List NAT Gateway rules -NATGatewaysApi | [**DatacentersNatgatewaysRulesPatch**](docs/api/NATGatewaysApi.md#datacentersnatgatewaysrulespatch) | **Patch** /datacenters/{datacenterId}/natgateways/{natGatewayId}/rules/{natGatewayRuleId} | Partially modify NAT Gateway rules -NATGatewaysApi | [**DatacentersNatgatewaysRulesPost**](docs/api/NATGatewaysApi.md#datacentersnatgatewaysrulespost) | **Post** /datacenters/{datacenterId}/natgateways/{natGatewayId}/rules | Create NAT Gateway rules -NATGatewaysApi | [**DatacentersNatgatewaysRulesPut**](docs/api/NATGatewaysApi.md#datacentersnatgatewaysrulesput) | **Put** /datacenters/{datacenterId}/natgateways/{natGatewayId}/rules/{natGatewayRuleId} | Modify NAT Gateway rules +NATGatewaysApi | [**DatacentersNatgatewaysRulesPatch**](docs/api/NATGatewaysApi.md#datacentersnatgatewaysrulespatch) | **Patch** /datacenters/{datacenterId}/natgateways/{natGatewayId}/rules/{natGatewayRuleId} | Partially Modify a NAT Gateway Rule by ID +NATGatewaysApi | [**DatacentersNatgatewaysRulesPost**](docs/api/NATGatewaysApi.md#datacentersnatgatewaysrulespost) | **Post** /datacenters/{datacenterId}/natgateways/{natGatewayId}/rules | Create a NAT Gateway Rule +NATGatewaysApi | [**DatacentersNatgatewaysRulesPut**](docs/api/NATGatewaysApi.md#datacentersnatgatewaysrulesput) | **Put** /datacenters/{datacenterId}/natgateways/{natGatewayId}/rules/{natGatewayRuleId} | Modify a NAT Gateway Rule by ID NetworkInterfacesApi | [**DatacentersServersNicsDelete**](docs/api/NetworkInterfacesApi.md#datacentersserversnicsdelete) | **Delete** /datacenters/{datacenterId}/servers/{serverId}/nics/{nicId} | Delete NICs NetworkInterfacesApi | [**DatacentersServersNicsFindById**](docs/api/NetworkInterfacesApi.md#datacentersserversnicsfindbyid) | **Get** /datacenters/{datacenterId}/servers/{serverId}/nics/{nicId} | Retrieve NICs NetworkInterfacesApi | [**DatacentersServersNicsGet**](docs/api/NetworkInterfacesApi.md#datacentersserversnicsget) | **Get** /datacenters/{datacenterId}/servers/{serverId}/nics | List NICs NetworkInterfacesApi | [**DatacentersServersNicsPatch**](docs/api/NetworkInterfacesApi.md#datacentersserversnicspatch) | **Patch** /datacenters/{datacenterId}/servers/{serverId}/nics/{nicId} | Partially modify NICs -NetworkInterfacesApi | [**DatacentersServersNicsPost**](docs/api/NetworkInterfacesApi.md#datacentersserversnicspost) | **Post** /datacenters/{datacenterId}/servers/{serverId}/nics | Create NICs +NetworkInterfacesApi | [**DatacentersServersNicsPost**](docs/api/NetworkInterfacesApi.md#datacentersserversnicspost) | **Post** /datacenters/{datacenterId}/servers/{serverId}/nics | Create a NIC NetworkInterfacesApi | [**DatacentersServersNicsPut**](docs/api/NetworkInterfacesApi.md#datacentersserversnicsput) | **Put** /datacenters/{datacenterId}/servers/{serverId}/nics/{nicId} | Modify NICs NetworkLoadBalancersApi | [**DatacentersNetworkloadbalancersDelete**](docs/api/NetworkLoadBalancersApi.md#datacentersnetworkloadbalancersdelete) | **Delete** /datacenters/{datacenterId}/networkloadbalancers/{networkLoadBalancerId} | Delete Network Load Balancers 
NetworkLoadBalancersApi | [**DatacentersNetworkloadbalancersFindByNetworkLoadBalancerId**](docs/api/NetworkLoadBalancersApi.md#datacentersnetworkloadbalancersfindbynetworkloadbalancerid) | **Get** /datacenters/{datacenterId}/networkloadbalancers/{networkLoadBalancerId} | Retrieve Network Load Balancers
@@ -430,61 +431,61 @@ NetworkLoadBalancersApi | [**DatacentersNetworkloadbalancersFlowlogsDelete**](do
NetworkLoadBalancersApi | [**DatacentersNetworkloadbalancersFlowlogsFindByFlowLogId**](docs/api/NetworkLoadBalancersApi.md#datacentersnetworkloadbalancersflowlogsfindbyflowlogid) | **Get** /datacenters/{datacenterId}/networkloadbalancers/{networkLoadBalancerId}/flowlogs/{flowLogId} | Retrieve NLB Flow Logs
NetworkLoadBalancersApi | [**DatacentersNetworkloadbalancersFlowlogsGet**](docs/api/NetworkLoadBalancersApi.md#datacentersnetworkloadbalancersflowlogsget) | **Get** /datacenters/{datacenterId}/networkloadbalancers/{networkLoadBalancerId}/flowlogs | List NLB Flow Logs
NetworkLoadBalancersApi | [**DatacentersNetworkloadbalancersFlowlogsPatch**](docs/api/NetworkLoadBalancersApi.md#datacentersnetworkloadbalancersflowlogspatch) | **Patch** /datacenters/{datacenterId}/networkloadbalancers/{networkLoadBalancerId}/flowlogs/{flowLogId} | Partially modify NLB Flow Logs
-NetworkLoadBalancersApi | [**DatacentersNetworkloadbalancersFlowlogsPost**](docs/api/NetworkLoadBalancersApi.md#datacentersnetworkloadbalancersflowlogspost) | **Post** /datacenters/{datacenterId}/networkloadbalancers/{networkLoadBalancerId}/flowlogs | Create NLB Flow Logs
+NetworkLoadBalancersApi | [**DatacentersNetworkloadbalancersFlowlogsPost**](docs/api/NetworkLoadBalancersApi.md#datacentersnetworkloadbalancersflowlogspost) | **Post** /datacenters/{datacenterId}/networkloadbalancers/{networkLoadBalancerId}/flowlogs | Create an NLB Flow Log
NetworkLoadBalancersApi | [**DatacentersNetworkloadbalancersFlowlogsPut**](docs/api/NetworkLoadBalancersApi.md#datacentersnetworkloadbalancersflowlogsput) | **Put** /datacenters/{datacenterId}/networkloadbalancers/{networkLoadBalancerId}/flowlogs/{flowLogId} | Modify NLB Flow Logs
NetworkLoadBalancersApi | [**DatacentersNetworkloadbalancersForwardingrulesDelete**](docs/api/NetworkLoadBalancersApi.md#datacentersnetworkloadbalancersforwardingrulesdelete) | **Delete** /datacenters/{datacenterId}/networkloadbalancers/{networkLoadBalancerId}/forwardingrules/{forwardingRuleId} | Delete NLB forwarding rules
NetworkLoadBalancersApi | [**DatacentersNetworkloadbalancersForwardingrulesFindByForwardingRuleId**](docs/api/NetworkLoadBalancersApi.md#datacentersnetworkloadbalancersforwardingrulesfindbyforwardingruleid) | **Get** /datacenters/{datacenterId}/networkloadbalancers/{networkLoadBalancerId}/forwardingrules/{forwardingRuleId} | Retrieve NLB forwarding rules
NetworkLoadBalancersApi | [**DatacentersNetworkloadbalancersForwardingrulesGet**](docs/api/NetworkLoadBalancersApi.md#datacentersnetworkloadbalancersforwardingrulesget) | **Get** /datacenters/{datacenterId}/networkloadbalancers/{networkLoadBalancerId}/forwardingrules | List NLB forwarding rules
NetworkLoadBalancersApi | [**DatacentersNetworkloadbalancersForwardingrulesPatch**](docs/api/NetworkLoadBalancersApi.md#datacentersnetworkloadbalancersforwardingrulespatch) | **Patch** /datacenters/{datacenterId}/networkloadbalancers/{networkLoadBalancerId}/forwardingrules/{forwardingRuleId} | Partially modify NLB forwarding rules
-NetworkLoadBalancersApi | [**DatacentersNetworkloadbalancersForwardingrulesPost**](docs/api/NetworkLoadBalancersApi.md#datacentersnetworkloadbalancersforwardingrulespost) | **Post** /datacenters/{datacenterId}/networkloadbalancers/{networkLoadBalancerId}/forwardingrules | Create NLB forwarding rules
+NetworkLoadBalancersApi | [**DatacentersNetworkloadbalancersForwardingrulesPost**](docs/api/NetworkLoadBalancersApi.md#datacentersnetworkloadbalancersforwardingrulespost) | **Post** /datacenters/{datacenterId}/networkloadbalancers/{networkLoadBalancerId}/forwardingrules | Create an NLB Forwarding Rule
NetworkLoadBalancersApi | [**DatacentersNetworkloadbalancersForwardingrulesPut**](docs/api/NetworkLoadBalancersApi.md#datacentersnetworkloadbalancersforwardingrulesput) | **Put** /datacenters/{datacenterId}/networkloadbalancers/{networkLoadBalancerId}/forwardingrules/{forwardingRuleId} | Modify NLB forwarding rules
NetworkLoadBalancersApi | [**DatacentersNetworkloadbalancersGet**](docs/api/NetworkLoadBalancersApi.md#datacentersnetworkloadbalancersget) | **Get** /datacenters/{datacenterId}/networkloadbalancers | List Network Load Balancers
NetworkLoadBalancersApi | [**DatacentersNetworkloadbalancersPatch**](docs/api/NetworkLoadBalancersApi.md#datacentersnetworkloadbalancerspatch) | **Patch** /datacenters/{datacenterId}/networkloadbalancers/{networkLoadBalancerId} | Partially modify Network Load Balancers
-NetworkLoadBalancersApi | [**DatacentersNetworkloadbalancersPost**](docs/api/NetworkLoadBalancersApi.md#datacentersnetworkloadbalancerspost) | **Post** /datacenters/{datacenterId}/networkloadbalancers | Create Network Load Balancers
+NetworkLoadBalancersApi | [**DatacentersNetworkloadbalancersPost**](docs/api/NetworkLoadBalancersApi.md#datacentersnetworkloadbalancerspost) | **Post** /datacenters/{datacenterId}/networkloadbalancers | Create a Network Load Balancer
NetworkLoadBalancersApi | [**DatacentersNetworkloadbalancersPut**](docs/api/NetworkLoadBalancersApi.md#datacentersnetworkloadbalancersput) | **Put** /datacenters/{datacenterId}/networkloadbalancers/{networkLoadBalancerId} | Modify Network Load Balancers
PrivateCrossConnectsApi | [**PccsDelete**](docs/api/PrivateCrossConnectsApi.md#pccsdelete) | **Delete** /pccs/{pccId} | Delete private Cross-Connects
PrivateCrossConnectsApi | [**PccsFindById**](docs/api/PrivateCrossConnectsApi.md#pccsfindbyid) | **Get** /pccs/{pccId} | Retrieve private Cross-Connects
PrivateCrossConnectsApi | [**PccsGet**](docs/api/PrivateCrossConnectsApi.md#pccsget) | **Get** /pccs | List private Cross-Connects
PrivateCrossConnectsApi | [**PccsPatch**](docs/api/PrivateCrossConnectsApi.md#pccspatch) | **Patch** /pccs/{pccId} | Partially modify private Cross-Connects
-PrivateCrossConnectsApi | [**PccsPost**](docs/api/PrivateCrossConnectsApi.md#pccspost) | **Post** /pccs | Create private Cross-Connects
+PrivateCrossConnectsApi | [**PccsPost**](docs/api/PrivateCrossConnectsApi.md#pccspost) | **Post** /pccs | Create a Private Cross-Connect
RequestsApi | [**RequestsFindById**](docs/api/RequestsApi.md#requestsfindbyid) | **Get** /requests/{requestId} | Retrieve requests
RequestsApi | [**RequestsGet**](docs/api/RequestsApi.md#requestsget) | **Get** /requests | List requests
RequestsApi | [**RequestsStatusGet**](docs/api/RequestsApi.md#requestsstatusget) | **Get** /requests/{requestId}/status | Retrieve request status
-ServersApi | [**DatacentersServersCdromsDelete**](docs/api/ServersApi.md#datacentersserverscdromsdelete) | **Delete** /datacenters/{datacenterId}/servers/{serverId}/cdroms/{cdromId} | 
Detach CD-ROMs -ServersApi | [**DatacentersServersCdromsFindById**](docs/api/ServersApi.md#datacentersserverscdromsfindbyid) | **Get** /datacenters/{datacenterId}/servers/{serverId}/cdroms/{cdromId} | Retrieve attached CD-ROMs -ServersApi | [**DatacentersServersCdromsGet**](docs/api/ServersApi.md#datacentersserverscdromsget) | **Get** /datacenters/{datacenterId}/servers/{serverId}/cdroms | List attached CD-ROMs -ServersApi | [**DatacentersServersCdromsPost**](docs/api/ServersApi.md#datacentersserverscdromspost) | **Post** /datacenters/{datacenterId}/servers/{serverId}/cdroms | Attach CD-ROMs +ServersApi | [**DatacentersServersCdromsDelete**](docs/api/ServersApi.md#datacentersserverscdromsdelete) | **Delete** /datacenters/{datacenterId}/servers/{serverId}/cdroms/{cdromId} | Detach a CD-ROM by ID +ServersApi | [**DatacentersServersCdromsFindById**](docs/api/ServersApi.md#datacentersserverscdromsfindbyid) | **Get** /datacenters/{datacenterId}/servers/{serverId}/cdroms/{cdromId} | Get Attached CD-ROM by ID +ServersApi | [**DatacentersServersCdromsGet**](docs/api/ServersApi.md#datacentersserverscdromsget) | **Get** /datacenters/{datacenterId}/servers/{serverId}/cdroms | Get Attached CD-ROMs +ServersApi | [**DatacentersServersCdromsPost**](docs/api/ServersApi.md#datacentersserverscdromspost) | **Post** /datacenters/{datacenterId}/servers/{serverId}/cdroms | Attach a CD-ROM ServersApi | [**DatacentersServersDelete**](docs/api/ServersApi.md#datacentersserversdelete) | **Delete** /datacenters/{datacenterId}/servers/{serverId} | Delete servers ServersApi | [**DatacentersServersFindById**](docs/api/ServersApi.md#datacentersserversfindbyid) | **Get** /datacenters/{datacenterId}/servers/{serverId} | Retrieve servers by ID ServersApi | [**DatacentersServersGet**](docs/api/ServersApi.md#datacentersserversget) | **Get** /datacenters/{datacenterId}/servers | List servers ServersApi | [**DatacentersServersPatch**](docs/api/ServersApi.md#datacentersserverspatch) | **Patch** /datacenters/{datacenterId}/servers/{serverId} | Partially modify servers -ServersApi | [**DatacentersServersPost**](docs/api/ServersApi.md#datacentersserverspost) | **Post** /datacenters/{datacenterId}/servers | Create servers -ServersApi | [**DatacentersServersPut**](docs/api/ServersApi.md#datacentersserversput) | **Put** /datacenters/{datacenterId}/servers/{serverId} | Modify servers +ServersApi | [**DatacentersServersPost**](docs/api/ServersApi.md#datacentersserverspost) | **Post** /datacenters/{datacenterId}/servers | Create a Server +ServersApi | [**DatacentersServersPut**](docs/api/ServersApi.md#datacentersserversput) | **Put** /datacenters/{datacenterId}/servers/{serverId} | Modify a Server by ID ServersApi | [**DatacentersServersRebootPost**](docs/api/ServersApi.md#datacentersserversrebootpost) | **Post** /datacenters/{datacenterId}/servers/{serverId}/reboot | Reboot servers ServersApi | [**DatacentersServersRemoteConsoleGet**](docs/api/ServersApi.md#datacentersserversremoteconsoleget) | **Get** /datacenters/{datacenterId}/servers/{serverId}/remoteconsole | Get Remote Console link -ServersApi | [**DatacentersServersResumePost**](docs/api/ServersApi.md#datacentersserversresumepost) | **Post** /datacenters/{datacenterId}/servers/{serverId}/resume | Resume Cubes instances -ServersApi | [**DatacentersServersStartPost**](docs/api/ServersApi.md#datacentersserversstartpost) | **Post** /datacenters/{datacenterId}/servers/{serverId}/start | Start servers -ServersApi | 
[**DatacentersServersStopPost**](docs/api/ServersApi.md#datacentersserversstoppost) | **Post** /datacenters/{datacenterId}/servers/{serverId}/stop | Stop VMs
-ServersApi | [**DatacentersServersSuspendPost**](docs/api/ServersApi.md#datacentersserverssuspendpost) | **Post** /datacenters/{datacenterId}/servers/{serverId}/suspend | Suspend Cubes instances
+ServersApi | [**DatacentersServersResumePost**](docs/api/ServersApi.md#datacentersserversresumepost) | **Post** /datacenters/{datacenterId}/servers/{serverId}/resume | Resume a Cube Server by ID
+ServersApi | [**DatacentersServersStartPost**](docs/api/ServersApi.md#datacentersserversstartpost) | **Post** /datacenters/{datacenterId}/servers/{serverId}/start | Start an Enterprise Server by ID
+ServersApi | [**DatacentersServersStopPost**](docs/api/ServersApi.md#datacentersserversstoppost) | **Post** /datacenters/{datacenterId}/servers/{serverId}/stop | Stop an Enterprise Server by ID
+ServersApi | [**DatacentersServersSuspendPost**](docs/api/ServersApi.md#datacentersserverssuspendpost) | **Post** /datacenters/{datacenterId}/servers/{serverId}/suspend | Suspend a Cube Server by ID
ServersApi | [**DatacentersServersTokenGet**](docs/api/ServersApi.md#datacentersserverstokenget) | **Get** /datacenters/{datacenterId}/servers/{serverId}/token | Get JSON Web Token
-ServersApi | [**DatacentersServersUpgradePost**](docs/api/ServersApi.md#datacentersserversupgradepost) | **Post** /datacenters/{datacenterId}/servers/{serverId}/upgrade | Upgrade servers
-ServersApi | [**DatacentersServersVolumesDelete**](docs/api/ServersApi.md#datacentersserversvolumesdelete) | **Delete** /datacenters/{datacenterId}/servers/{serverId}/volumes/{volumeId} | Detach volumes
-ServersApi | [**DatacentersServersVolumesFindById**](docs/api/ServersApi.md#datacentersserversvolumesfindbyid) | **Get** /datacenters/{datacenterId}/servers/{serverId}/volumes/{volumeId} | Retrieve attached volumes
-ServersApi | [**DatacentersServersVolumesGet**](docs/api/ServersApi.md#datacentersserversvolumesget) | **Get** /datacenters/{datacenterId}/servers/{serverId}/volumes | List attached volumes
-ServersApi | [**DatacentersServersVolumesPost**](docs/api/ServersApi.md#datacentersserversvolumespost) | **Post** /datacenters/{datacenterId}/servers/{serverId}/volumes | Attach volumes
+ServersApi | [**DatacentersServersUpgradePost**](docs/api/ServersApi.md#datacentersserversupgradepost) | **Post** /datacenters/{datacenterId}/servers/{serverId}/upgrade | Upgrade a Server by ID
+ServersApi | [**DatacentersServersVolumesDelete**](docs/api/ServersApi.md#datacentersserversvolumesdelete) | **Delete** /datacenters/{datacenterId}/servers/{serverId}/volumes/{volumeId} | Detach a Volume by ID
+ServersApi | [**DatacentersServersVolumesFindById**](docs/api/ServersApi.md#datacentersserversvolumesfindbyid) | **Get** /datacenters/{datacenterId}/servers/{serverId}/volumes/{volumeId} | Get Attached Volume by ID
+ServersApi | [**DatacentersServersVolumesGet**](docs/api/ServersApi.md#datacentersserversvolumesget) | **Get** /datacenters/{datacenterId}/servers/{serverId}/volumes | Get Attached Volumes
+ServersApi | [**DatacentersServersVolumesPost**](docs/api/ServersApi.md#datacentersserversvolumespost) | **Post** /datacenters/{datacenterId}/servers/{serverId}/volumes | Attach a Volume to a Server
SnapshotsApi | [**SnapshotsDelete**](docs/api/SnapshotsApi.md#snapshotsdelete) | **Delete** /snapshots/{snapshotId} | Delete snapshots
SnapshotsApi | [**SnapshotsFindById**](docs/api/SnapshotsApi.md#snapshotsfindbyid) | **Get** 
/snapshots/{snapshotId} | Retrieve snapshots by ID SnapshotsApi | [**SnapshotsGet**](docs/api/SnapshotsApi.md#snapshotsget) | **Get** /snapshots | List snapshots SnapshotsApi | [**SnapshotsPatch**](docs/api/SnapshotsApi.md#snapshotspatch) | **Patch** /snapshots/{snapshotId} | Partially modify snapshots -SnapshotsApi | [**SnapshotsPut**](docs/api/SnapshotsApi.md#snapshotsput) | **Put** /snapshots/{snapshotId} | Modify snapshots -TargetGroupsApi | [**TargetGroupsDelete**](docs/api/TargetGroupsApi.md#targetgroupsdelete) | **Delete** /targetgroups/{targetGroupId} | Remove target groups -TargetGroupsApi | [**TargetgroupsFindByTargetGroupId**](docs/api/TargetGroupsApi.md#targetgroupsfindbytargetgroupid) | **Get** /targetgroups/{targetGroupId} | Retrieve target groups -TargetGroupsApi | [**TargetgroupsGet**](docs/api/TargetGroupsApi.md#targetgroupsget) | **Get** /targetgroups | List target groups -TargetGroupsApi | [**TargetgroupsPatch**](docs/api/TargetGroupsApi.md#targetgroupspatch) | **Patch** /targetgroups/{targetGroupId} | Partially modify target groups -TargetGroupsApi | [**TargetgroupsPost**](docs/api/TargetGroupsApi.md#targetgroupspost) | **Post** /targetgroups | Create target groups -TargetGroupsApi | [**TargetgroupsPut**](docs/api/TargetGroupsApi.md#targetgroupsput) | **Put** /targetgroups/{targetGroupId} | Modify target groups -TemplatesApi | [**TemplatesFindById**](docs/api/TemplatesApi.md#templatesfindbyid) | **Get** /templates/{templateId} | Retrieve Cubes Templates -TemplatesApi | [**TemplatesGet**](docs/api/TemplatesApi.md#templatesget) | **Get** /templates | List Cubes Templates +SnapshotsApi | [**SnapshotsPut**](docs/api/SnapshotsApi.md#snapshotsput) | **Put** /snapshots/{snapshotId} | Modify a Snapshot by ID +TargetGroupsApi | [**TargetGroupsDelete**](docs/api/TargetGroupsApi.md#targetgroupsdelete) | **Delete** /targetgroups/{targetGroupId} | Delete a Target Group by ID +TargetGroupsApi | [**TargetgroupsFindByTargetGroupId**](docs/api/TargetGroupsApi.md#targetgroupsfindbytargetgroupid) | **Get** /targetgroups/{targetGroupId} | Get a Target Group by ID +TargetGroupsApi | [**TargetgroupsGet**](docs/api/TargetGroupsApi.md#targetgroupsget) | **Get** /targetgroups | Get Target Groups +TargetGroupsApi | [**TargetgroupsPatch**](docs/api/TargetGroupsApi.md#targetgroupspatch) | **Patch** /targetgroups/{targetGroupId} | Partially Modify a Target Group by ID +TargetGroupsApi | [**TargetgroupsPost**](docs/api/TargetGroupsApi.md#targetgroupspost) | **Post** /targetgroups | Create a Target Group +TargetGroupsApi | [**TargetgroupsPut**](docs/api/TargetGroupsApi.md#targetgroupsput) | **Put** /targetgroups/{targetGroupId} | Modify a Target Group by ID +TemplatesApi | [**TemplatesFindById**](docs/api/TemplatesApi.md#templatesfindbyid) | **Get** /templates/{templateId} | Get Cubes Template by ID +TemplatesApi | [**TemplatesGet**](docs/api/TemplatesApi.md#templatesget) | **Get** /templates | Get Cubes Templates UserManagementApi | [**UmGroupsDelete**](docs/api/UserManagementApi.md#umgroupsdelete) | **Delete** /um/groups/{groupId} | Delete groups UserManagementApi | [**UmGroupsFindById**](docs/api/UserManagementApi.md#umgroupsfindbyid) | **Get** /um/groups/{groupId} | Retrieve groups UserManagementApi | [**UmGroupsGet**](docs/api/UserManagementApi.md#umgroupsget) | **Get** /um/groups | List all groups @@ -498,7 +499,7 @@ UserManagementApi | [**UmGroupsSharesPost**](docs/api/UserManagementApi.md#umgro UserManagementApi | [**UmGroupsSharesPut**](docs/api/UserManagementApi.md#umgroupssharesput) | 
**Put** /um/groups/{groupId}/shares/{resourceId} | Modify group share privileges
UserManagementApi | [**UmGroupsUsersDelete**](docs/api/UserManagementApi.md#umgroupsusersdelete) | **Delete** /um/groups/{groupId}/users/{userId} | Remove users from groups
UserManagementApi | [**UmGroupsUsersGet**](docs/api/UserManagementApi.md#umgroupsusersget) | **Get** /um/groups/{groupId}/users | List group members
-UserManagementApi | [**UmGroupsUsersPost**](docs/api/UserManagementApi.md#umgroupsuserspost) | **Post** /um/groups/{groupId}/users | Add group members
+UserManagementApi | [**UmGroupsUsersPost**](docs/api/UserManagementApi.md#umgroupsuserspost) | **Post** /um/groups/{groupId}/users | Add a Group Member
UserManagementApi | [**UmResourcesFindByType**](docs/api/UserManagementApi.md#umresourcesfindbytype) | **Get** /um/resources/{resourceType} | List resources by type
UserManagementApi | [**UmResourcesFindByTypeAndId**](docs/api/UserManagementApi.md#umresourcesfindbytypeandid) | **Get** /um/resources/{resourceType}/{resourceId} | Retrieve resources by type
UserManagementApi | [**UmResourcesGet**](docs/api/UserManagementApi.md#umresourcesget) | **Get** /um/resources | List all resources
@@ -513,15 +514,15 @@ UserS3KeysApi | [**UmUsersS3keysDelete**](docs/api/UserS3KeysApi.md#umuserss3key
UserS3KeysApi | [**UmUsersS3keysFindByKeyId**](docs/api/UserS3KeysApi.md#umuserss3keysfindbykeyid) | **Get** /um/users/{userId}/s3keys/{keyId} | Retrieve user S3 keys by key ID
UserS3KeysApi | [**UmUsersS3keysGet**](docs/api/UserS3KeysApi.md#umuserss3keysget) | **Get** /um/users/{userId}/s3keys | List user S3 keys
UserS3KeysApi | [**UmUsersS3keysPost**](docs/api/UserS3KeysApi.md#umuserss3keyspost) | **Post** /um/users/{userId}/s3keys | Create user S3 keys
-UserS3KeysApi | [**UmUsersS3keysPut**](docs/api/UserS3KeysApi.md#umuserss3keysput) | **Put** /um/users/{userId}/s3keys/{keyId} | Modify S3 keys by key ID
+UserS3KeysApi | [**UmUsersS3keysPut**](docs/api/UserS3KeysApi.md#umuserss3keysput) | **Put** /um/users/{userId}/s3keys/{keyId} | Modify an S3 Key by Key ID
UserS3KeysApi | [**UmUsersS3ssourlGet**](docs/api/UserS3KeysApi.md#umuserss3ssourlget) | **Get** /um/users/{userId}/s3ssourl | Retrieve S3 single sign-on URLs
VolumesApi | [**DatacentersVolumesCreateSnapshotPost**](docs/api/VolumesApi.md#datacentersvolumescreatesnapshotpost) | **Post** /datacenters/{datacenterId}/volumes/{volumeId}/create-snapshot | Create volume snapshots
VolumesApi | [**DatacentersVolumesDelete**](docs/api/VolumesApi.md#datacentersvolumesdelete) | **Delete** /datacenters/{datacenterId}/volumes/{volumeId} | Delete volumes
VolumesApi | [**DatacentersVolumesFindById**](docs/api/VolumesApi.md#datacentersvolumesfindbyid) | **Get** /datacenters/{datacenterId}/volumes/{volumeId} | Retrieve volumes
VolumesApi | [**DatacentersVolumesGet**](docs/api/VolumesApi.md#datacentersvolumesget) | **Get** /datacenters/{datacenterId}/volumes | List volumes
VolumesApi | [**DatacentersVolumesPatch**](docs/api/VolumesApi.md#datacentersvolumespatch) | **Patch** /datacenters/{datacenterId}/volumes/{volumeId} | Partially modify volumes
-VolumesApi | [**DatacentersVolumesPost**](docs/api/VolumesApi.md#datacentersvolumespost) | **Post** /datacenters/{datacenterId}/volumes | Create volumes
-VolumesApi | [**DatacentersVolumesPut**](docs/api/VolumesApi.md#datacentersvolumesput) | **Put** /datacenters/{datacenterId}/volumes/{volumeId} | Modify volumes
+VolumesApi | [**DatacentersVolumesPost**](docs/api/VolumesApi.md#datacentersvolumespost) | **Post** /datacenters/{datacenterId}/volumes | Create a Volume
+VolumesApi | [**DatacentersVolumesPut**](docs/api/VolumesApi.md#datacentersvolumesput) | **Put** /datacenters/{datacenterId}/volumes/{volumeId} | Modify a Volume by ID
VolumesApi | [**DatacentersVolumesRestoreSnapshotPost**](docs/api/VolumesApi.md#datacentersvolumesrestoresnapshotpost) | **Post** /datacenters/{datacenterId}/volumes/{volumeId}/restore-snapshot | Restore volume snapshots
@@ -619,9 +620,7 @@ All URIs are relative to *https://api.ionos.com/cloudapi/v6*
- [Lan](docs/models/Lan)
- [LanEntities](docs/models/LanEntities)
- [LanNics](docs/models/LanNics)
- - [LanPost](docs/models/LanPost)
- [LanProperties](docs/models/LanProperties)
- - [LanPropertiesPost](docs/models/LanPropertiesPost)
- [Lans](docs/models/Lans)
- [Loadbalancer](docs/models/Loadbalancer)
- [LoadbalancerEntities](docs/models/LoadbalancerEntities)
@@ -733,6 +732,8 @@ Due to the fact that model structure members are all pointers, this package cont
a number of utility functions to easily obtain pointers to values of basic types. Each of these functions takes a value of the given basic type and returns a pointer to it:
+Deprecated in favor of `ToPtr`, which uses generics.
+
* `PtrBool`
* `PtrInt`
* `PtrInt32`
diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/api_.go b/vendor/github.com/ionos-cloud/sdk-go/v6/api_.go
index 162acb0e3..3db9ea5cc 100644
--- a/vendor/github.com/ionos-cloud/sdk-go/v6/api_.go
+++ b/vendor/github.com/ionos-cloud/sdk-go/v6/api_.go
@@ -74,8 +74,8 @@ func (r ApiApiInfoGetRequest) Execute() (Info, *APIResponse, error) {
 }

 /*
- * ApiInfoGet Display API information
- * Display API information
+ * ApiInfoGet Get API information
+ * Retrieves the API information such as API version.
 * @param ctx _context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background().
 * @return ApiApiInfoGetRequest
 */
diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/api_application_load_balancers.go b/vendor/github.com/ionos-cloud/sdk-go/v6/api_application_load_balancers.go
index 0fec0d48a..e651c73e9 100644
--- a/vendor/github.com/ionos-cloud/sdk-go/v6/api_application_load_balancers.go
+++ b/vendor/github.com/ionos-cloud/sdk-go/v6/api_application_load_balancers.go
@@ -55,8 +55,8 @@ func (r ApiDatacentersApplicationloadbalancersDeleteRequest) Execute() (*APIResp
 }

 /*
- * DatacentersApplicationloadbalancersDelete Delete Application Load Balancers
- * Remove the specified Application Load Balancer from the data center..
+ * DatacentersApplicationloadbalancersDelete Delete an Application Load Balancer by ID
+ * Removes the specified Application Load Balancer from the data center.
 * @param ctx _context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background().
 * @param datacenterId The unique ID of the data center.
 * @param applicationLoadBalancerId The unique ID of the Application Load Balancer.
@@ -220,8 +220,8 @@ func (r ApiDatacentersApplicationloadbalancersFindByApplicationLoadBalancerIdReq
 }

 /*
- * DatacentersApplicationloadbalancersFindByApplicationLoadBalancerId Retrieve Application Load Balancers
- * Retrieve the properties of the specified Application Load Balancer within the data center.
+ * DatacentersApplicationloadbalancersFindByApplicationLoadBalancerId Get an Application Load Balancer by ID
+ * Retrieves the properties of the specified Application Load Balancer within the data center.
* @param ctx _context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background(). * @param datacenterId The unique ID of the data center. * @param applicationLoadBalancerId The unique ID of the Application Load Balancer. @@ -398,12 +398,12 @@ func (r ApiDatacentersApplicationloadbalancersFlowlogsDeleteRequest) Execute() ( } /* - * DatacentersApplicationloadbalancersFlowlogsDelete Delete ALB Flow Logs - * Delete the specified Application Load Balancer Flow Log. + * DatacentersApplicationloadbalancersFlowlogsDelete Delete an ALB Flow Log by ID + * Deletes the Application Load Balancer flow log specified by its ID. * @param ctx _context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background(). * @param datacenterId The unique ID of the data center. * @param applicationLoadBalancerId The unique ID of the Application Load Balancer. - * @param flowLogId The unique ID of the Flow Log. + * @param flowLogId The unique ID of the flow log. * @return ApiDatacentersApplicationloadbalancersFlowlogsDeleteRequest */ func (a *ApplicationLoadBalancersApiService) DatacentersApplicationloadbalancersFlowlogsDelete(ctx _context.Context, datacenterId string, applicationLoadBalancerId string, flowLogId string) ApiDatacentersApplicationloadbalancersFlowlogsDeleteRequest { @@ -567,12 +567,12 @@ func (r ApiDatacentersApplicationloadbalancersFlowlogsFindByFlowLogIdRequest) Ex } /* - * DatacentersApplicationloadbalancersFlowlogsFindByFlowLogId Retrieve ALB Flow Logs - * Retrieve the specified Application Load Balancer Flow Log. + * DatacentersApplicationloadbalancersFlowlogsFindByFlowLogId Get an ALB Flow Log by ID + * Retrieves the Application Load Balancer flow log specified by its ID. * @param ctx _context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background(). * @param datacenterId The unique ID of the data center. * @param applicationLoadBalancerId The unique ID of the Application Load Balancer. - * @param flowLogId The unique ID of the Flow Log. + * @param flowLogId The unique ID of the flow log. * @return ApiDatacentersApplicationloadbalancersFlowlogsFindByFlowLogIdRequest */ func (a *ApplicationLoadBalancersApiService) DatacentersApplicationloadbalancersFlowlogsFindByFlowLogId(ctx _context.Context, datacenterId string, applicationLoadBalancerId string, flowLogId string) ApiDatacentersApplicationloadbalancersFlowlogsFindByFlowLogIdRequest { @@ -769,8 +769,8 @@ func (r ApiDatacentersApplicationloadbalancersFlowlogsGetRequest) Execute() (Flo } /* - * DatacentersApplicationloadbalancersFlowlogsGet List ALB Flow Logs - * List the Flow Logs for the specified Application Load Balancer. + * DatacentersApplicationloadbalancersFlowlogsGet Get ALB Flow Logs + * Retrieves the flow logs for the specified Application Load Balancer. * @param ctx _context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background(). * @param datacenterId The unique ID of the data center. * @param applicationLoadBalancerId The unique ID of the Application Load Balancer. @@ -966,12 +966,12 @@ func (r ApiDatacentersApplicationloadbalancersFlowlogsPatchRequest) Execute() (F } /* - * DatacentersApplicationloadbalancersFlowlogsPatch Partially modify ALB Flow Logs - * Update the properties of the specified Application Load Balancer Flow Log. 
+ * DatacentersApplicationloadbalancersFlowlogsPatch Partially Modify an ALB Flow Log by ID + * Updates the properties of the Application Load Balancer flow log specified by its ID. * @param ctx _context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background(). * @param datacenterId The unique ID of the data center. * @param applicationLoadBalancerId The unique ID of the Application Load Balancer. - * @param flowLogId The unique ID of the Flow Log. + * @param flowLogId The unique ID of the flow log. * @return ApiDatacentersApplicationloadbalancersFlowlogsPatchRequest */ func (a *ApplicationLoadBalancersApiService) DatacentersApplicationloadbalancersFlowlogsPatch(ctx _context.Context, datacenterId string, applicationLoadBalancerId string, flowLogId string) ApiDatacentersApplicationloadbalancersFlowlogsPatchRequest { @@ -1156,8 +1156,8 @@ func (r ApiDatacentersApplicationloadbalancersFlowlogsPostRequest) Execute() (Fl } /* - * DatacentersApplicationloadbalancersFlowlogsPost Create ALB Flow Logs - * Add a new Flow Log for the Application Load Balancer. + * DatacentersApplicationloadbalancersFlowlogsPost Create an ALB Flow Log + * Creates a flow log for the Application Load Balancer specified by ID. * @param ctx _context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background(). * @param datacenterId The unique ID of the data center. * @param applicationLoadBalancerId The unique ID of the Application Load Balancer. @@ -1344,12 +1344,12 @@ func (r ApiDatacentersApplicationloadbalancersFlowlogsPutRequest) Execute() (Flo } /* - * DatacentersApplicationloadbalancersFlowlogsPut Modify ALB Flow Logs - * Modify the specified Application Load Balancer Flow Log. + * DatacentersApplicationloadbalancersFlowlogsPut Modify an ALB Flow Log by ID + * Modifies the Application Load Balancer flow log specified by its ID. * @param ctx _context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background(). * @param datacenterId The unique ID of the data center. * @param applicationLoadBalancerId The unique ID of the Application Load Balancer. - * @param flowLogId The unique ID of the Flow Log. + * @param flowLogId The unique ID of the flow log. * @return ApiDatacentersApplicationloadbalancersFlowlogsPutRequest */ func (a *ApplicationLoadBalancersApiService) DatacentersApplicationloadbalancersFlowlogsPut(ctx _context.Context, datacenterId string, applicationLoadBalancerId string, flowLogId string) ApiDatacentersApplicationloadbalancersFlowlogsPutRequest { @@ -1530,8 +1530,8 @@ func (r ApiDatacentersApplicationloadbalancersForwardingrulesDeleteRequest) Exec } /* - * DatacentersApplicationloadbalancersForwardingrulesDelete Delete ALB forwarding rules - * Delete the specified Application Load Balancer forwarding rule. + * DatacentersApplicationloadbalancersForwardingrulesDelete Delete an ALB Forwarding Rule by ID + * Deletes the Application Load Balancer forwarding rule specified by its ID. * @param ctx _context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background(). * @param datacenterId The unique ID of the data center. * @param applicationLoadBalancerId The unique ID of the Application Load Balancer. 
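The renamed ALB flow-log operations above all go through the same generated fluent request builder: path parameters go to the service method, optional query parameters are chained, and `Execute()` sends the request. A minimal caller sketch for the "Get an ALB Flow Log by ID" operation; the `NewConfiguration(username, password, token, hostUrl)` signature, the `ApplicationLoadBalancersApi` field name on the client, and all IDs are assumptions for illustration, not taken from this diff:

```go
package main

import (
	"context"
	"fmt"
	"log"

	ionoscloud "github.com/ionos-cloud/sdk-go/v6"
)

func main() {
	// Token authentication; constructor signature assumed from the vendored SDK.
	cfg := ionoscloud.NewConfiguration("", "", "my-token", "")
	client := ionoscloud.NewAPIClient(cfg)

	// Placeholder IDs, for illustration only.
	dcID, albID, flowLogID := "dc-uuid", "alb-uuid", "flowlog-uuid"

	// Path parameters on the service method, optional query parameters
	// chained on the request builder, Execute() performs the call.
	flowLog, _, err := client.ApplicationLoadBalancersApi.
		DatacentersApplicationloadbalancersFlowlogsFindByFlowLogId(context.Background(), dcID, albID, flowLogID).
		Depth(1).
		Execute()
	if err != nil {
		log.Fatal(err)
	}
	if flowLog.Id != nil {
		fmt.Println("flow log:", *flowLog.Id)
	}
}
```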
@@ -1699,8 +1699,8 @@ func (r ApiDatacentersApplicationloadbalancersForwardingrulesFindByForwardingRul } /* - * DatacentersApplicationloadbalancersForwardingrulesFindByForwardingRuleId Retrieve ALB forwarding rules - * Retrieve the specified Application Load Balancer forwarding rule. + * DatacentersApplicationloadbalancersForwardingrulesFindByForwardingRuleId Get an ALB Forwarding Rule by ID + * Retrieves the Application Load Balancer forwarding rule specified by its ID. * @param ctx _context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background(). * @param datacenterId The unique ID of the data center. * @param applicationLoadBalancerId The unique ID of the Application Load Balancer. @@ -1901,8 +1901,8 @@ func (r ApiDatacentersApplicationloadbalancersForwardingrulesGetRequest) Execute } /* - * DatacentersApplicationloadbalancersForwardingrulesGet List ALB forwarding rules - * List the forwarding rules for the specified Application Load Balancer. + * DatacentersApplicationloadbalancersForwardingrulesGet Get ALB Forwarding Rules + * Lists the forwarding rules of the specified Application Load Balancer. * @param ctx _context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background(). * @param datacenterId The unique ID of the data center. * @param applicationLoadBalancerId The unique ID of the Application Load Balancer. @@ -2098,8 +2098,8 @@ func (r ApiDatacentersApplicationloadbalancersForwardingrulesPatchRequest) Execu } /* - * DatacentersApplicationloadbalancersForwardingrulesPatch Partially modify ALB forwarding rules - * Update the properties of the specified Application Load Balancer forwarding rule. + * DatacentersApplicationloadbalancersForwardingrulesPatch Partially modify an ALB Forwarding Rule by ID + * Updates the properties of the Application Load Balancer forwarding rule specified by its ID. * @param ctx _context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background(). * @param datacenterId The unique ID of the data center. * @param applicationLoadBalancerId The unique ID of the Application Load Balancer. @@ -2288,8 +2288,8 @@ func (r ApiDatacentersApplicationloadbalancersForwardingrulesPostRequest) Execut } /* - * DatacentersApplicationloadbalancersForwardingrulesPost Create ALB forwarding rules - * Create a forwarding rule for the Application Load Balancer. + * DatacentersApplicationloadbalancersForwardingrulesPost Create an ALB Forwarding Rule + * Creates a forwarding rule for the specified Application Load Balancer. * @param ctx _context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background(). * @param datacenterId The unique ID of the data center. * @param applicationLoadBalancerId The unique ID of the Application Load Balancer. @@ -2476,8 +2476,8 @@ func (r ApiDatacentersApplicationloadbalancersForwardingrulesPutRequest) Execute } /* - * DatacentersApplicationloadbalancersForwardingrulesPut Modify ALB forwarding rules - * Modify the specified Application Load Balancer forwarding rule. + * DatacentersApplicationloadbalancersForwardingrulesPut Modify an ALB Forwarding Rule by ID + * Modifies the Application Load Balancer forwarding rule specified by its ID. * @param ctx _context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. 
Passed from http.Request or context.Background(). * @param datacenterId The unique ID of the data center. * @param applicationLoadBalancerId The unique ID of the Application Load Balancer. @@ -2692,8 +2692,8 @@ func (r ApiDatacentersApplicationloadbalancersGetRequest) Execute() (Application } /* - * DatacentersApplicationloadbalancersGet List Application Load Balancers - * List all Application Load Balancers within the data center. + * DatacentersApplicationloadbalancersGet Get Application Load Balancers + * Lists all Application Load Balancers within a data center. * @param ctx _context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background(). * @param datacenterId The unique ID of the data center. * @return ApiDatacentersApplicationloadbalancersGetRequest @@ -2901,8 +2901,8 @@ func (r ApiDatacentersApplicationloadbalancersPatchRequest) Execute() (Applicati } /* - * DatacentersApplicationloadbalancersPatch Partially modify Application Load Balancers - * Update the properties of the specified Application Load Balancer within the data center. + * DatacentersApplicationloadbalancersPatch Partially Modify an Application Load Balancer by ID + * Updates the properties of the specified Application Load Balancer within the data center. * @param ctx _context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background(). * @param datacenterId The unique ID of the data center. * @param applicationLoadBalancerId The unique ID of the Application Load Balancer. @@ -3087,8 +3087,8 @@ func (r ApiDatacentersApplicationloadbalancersPostRequest) Execute() (Applicatio } /* - * DatacentersApplicationloadbalancersPost Create Application Load Balancers - * Create an Application Load Balancer within the datacenter. + * DatacentersApplicationloadbalancersPost Create an Application Load Balancer + * Creates an Application Load Balancer within the data center. * @param ctx _context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background(). * @param datacenterId The unique ID of the data center. * @return ApiDatacentersApplicationloadbalancersPostRequest @@ -3271,8 +3271,8 @@ func (r ApiDatacentersApplicationloadbalancersPutRequest) Execute() (Application } /* - * DatacentersApplicationloadbalancersPut Modify Application Load Balancers - * Modify the properties of the specified Application Load Balancer within the data center. + * DatacentersApplicationloadbalancersPut Modify an Application Load Balancer by ID + * Modifies the properties of the specified Application Load Balancer within the data center. * @param ctx _context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background(). * @param datacenterId The unique ID of the data center. * @param applicationLoadBalancerId The unique ID of the Application Load Balancer. diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/api_contract_resources.go b/vendor/github.com/ionos-cloud/sdk-go/v6/api_contract_resources.go index 4b4d4d9eb..7285d83ba 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/api_contract_resources.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/api_contract_resources.go @@ -74,8 +74,8 @@ func (r ApiContractsGetRequest) Execute() (Contracts, *APIResponse, error) { } /* - * ContractsGet Retrieve contracts - * Retrieve the properties of the user's contract. 
In this version, the resource became a collection.
+ * ContractsGet Get Contract Information
+ * Retrieves the properties of the user's contract. This operation allows you to obtain the resource limits and the general contract information.
 * @param ctx _context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background().
 * @return ApiContractsGetRequest
 */
diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/api_data_centers.go b/vendor/github.com/ionos-cloud/sdk-go/v6/api_data_centers.go
index fabe7f01f..20a650699 100644
--- a/vendor/github.com/ionos-cloud/sdk-go/v6/api_data_centers.go
+++ b/vendor/github.com/ionos-cloud/sdk-go/v6/api_data_centers.go
@@ -624,8 +624,8 @@ func (r ApiDatacentersPatchRequest) Execute() (Datacenter, *APIResponse, error)
 }

 /*
- * DatacentersPatch Partially modify data centers
- * Update the properties of the specified data center, rename it, or change the description.
+ * DatacentersPatch Partially modify a Data Center by ID
+ * Updates the properties of the specified data center, renames it, or changes the description.
 * @param ctx _context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background().
 * @param datacenterId The unique ID of the data center.
 * @return ApiDatacentersPatchRequest
@@ -806,8 +806,8 @@ func (r ApiDatacentersPostRequest) Execute() (Datacenter, *APIResponse, error) {
 }

 /*
- - DatacentersPost Create data centers
- - Create new data centers, and data centers that already contain elements, such as servers and storage volumes.
+ - DatacentersPost Create a Data Center
+ - Creates new data centers, and data centers that already contain elements, such as servers and storage volumes.

Virtual data centers are the foundation of the platform; they act as logical containers for all other objects you create, such as servers and storage volumes. You can provision as many data centers as needed. Data centers have their own private networks and are logically segmented from each other to create isolation.
 - @param ctx _context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background().
@@ -988,8 +988,8 @@ func (r ApiDatacentersPutRequest) Execute() (Datacenter, *APIResponse, error) {
 }

 /*
- * DatacentersPut Modify data centers
- * Modify the properties of the specified data center, rename it, or change the description.
+ * DatacentersPut Modify a Data Center by ID
+ * Modifies the properties of the specified data center, renames it, or changes the description.
 * @param ctx _context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background().
 * @param datacenterId The unique ID of the data center.
 * @return ApiDatacentersPutRequest
diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/api_firewall_rules.go b/vendor/github.com/ionos-cloud/sdk-go/v6/api_firewall_rules.go
index 8c8ac95c9..bd3c896dd 100644
--- a/vendor/github.com/ionos-cloud/sdk-go/v6/api_firewall_rules.go
+++ b/vendor/github.com/ionos-cloud/sdk-go/v6/api_firewall_rules.go
@@ -857,8 +857,8 @@ func (r ApiDatacentersServersNicsFirewallrulesPostRequest) Execute() (FirewallRu
 }

 /*
- * DatacentersServersNicsFirewallrulesPost Create firewall rules
- * Create a firewall rule for the specified NIC.
+ * DatacentersServersNicsFirewallrulesPost Create a Firewall Rule
+ * Creates a firewall rule for the specified NIC.
* @param ctx _context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background().
 * @param datacenterId The unique ID of the data center.
 * @param serverId The unique ID of the server.
@@ -1049,8 +1049,8 @@ func (r ApiDatacentersServersNicsFirewallrulesPutRequest) Execute() (FirewallRul
 }

 /*
- * DatacentersServersNicsFirewallrulesPut Modify firewall rules
- * Modify the properties of the specified firewall rule.
+ * DatacentersServersNicsFirewallrulesPut Modify a Firewall Rule
+ * Modifies the properties of the specified firewall rule.
 * @param ctx _context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background().
 * @param datacenterId The unique ID of the data center.
 * @param serverId The unique ID of the server.
diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/api_flow_logs.go b/vendor/github.com/ionos-cloud/sdk-go/v6/api_flow_logs.go
index 299413a08..1bdd3107b 100644
--- a/vendor/github.com/ionos-cloud/sdk-go/v6/api_flow_logs.go
+++ b/vendor/github.com/ionos-cloud/sdk-go/v6/api_flow_logs.go
@@ -820,8 +820,8 @@ func (r ApiDatacentersServersNicsFlowlogsPostRequest) Execute() (FlowLog, *APIRe
 }

 /*
- * DatacentersServersNicsFlowlogsPost Create Flow Logs
- * Add a new Flow Log for the specified NIC.
+ * DatacentersServersNicsFlowlogsPost Create a Flow Log
+ * Adds a new Flow Log for the specified NIC.
 * @param ctx _context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background().
 * @param datacenterId The unique ID of the data center.
 * @param serverId The unique ID of the server.
diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/api_images.go b/vendor/github.com/ionos-cloud/sdk-go/v6/api_images.go
index 83561c024..25fd2e838 100644
--- a/vendor/github.com/ionos-cloud/sdk-go/v6/api_images.go
+++ b/vendor/github.com/ionos-cloud/sdk-go/v6/api_images.go
@@ -781,8 +781,8 @@ func (r ApiImagesPutRequest) Execute() (Image, *APIResponse, error) {
 }

 /*
- * ImagesPut Modify images
- * Modify the properties of the specified image.
+ * ImagesPut Modify an Image by ID
+ * Modifies the properties of the specified image.
 * @param ctx _context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background().
 * @param imageId The unique ID of the image.
 * @return ApiImagesPutRequest
diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/api_ip_blocks.go b/vendor/github.com/ionos-cloud/sdk-go/v6/api_ip_blocks.go
index 53e8eb6d3..820c73d16 100644
--- a/vendor/github.com/ionos-cloud/sdk-go/v6/api_ip_blocks.go
+++ b/vendor/github.com/ionos-cloud/sdk-go/v6/api_ip_blocks.go
@@ -806,8 +806,8 @@ func (r ApiIpblocksPostRequest) Execute() (IpBlock, *APIResponse, error) {
 }

 /*
- * IpblocksPost Reserve IP blocks
- * Reserve a new IP block.
+ * IpblocksPost Reserve an IP Block
+ * Reserves a new IP block.
 * @param ctx _context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background().
 * @return ApiIpblocksPostRequest
 */
@@ -986,8 +986,8 @@ func (r ApiIpblocksPutRequest) Execute() (IpBlock, *APIResponse, error) {
 }

 /*
- * IpblocksPut Modify IP blocks
- * Modify the properties of the specified IP block.
+ * IpblocksPut Modify an IP Block by ID
+ * Modifies the properties of the specified IP block.
* @param ctx _context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background(). * @param ipblockId The unique ID of the IP block. * @return ApiIpblocksPutRequest diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/api_kubernetes.go b/vendor/github.com/ionos-cloud/sdk-go/v6/api_kubernetes.go index e87c40d8e..b5a54c544 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/api_kubernetes.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/api_kubernetes.go @@ -54,8 +54,8 @@ func (r ApiK8sDeleteRequest) Execute() (*APIResponse, error) { } /* - * K8sDelete Delete Kubernetes clusters - * Delete the specified Kubernetes cluster. + * K8sDelete Delete a Kubernetes Cluster by ID + * Deletes the K8s cluster specified by its ID. * @param ctx _context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background(). * @param k8sClusterId The unique ID of the Kubernetes cluster. * @return ApiK8sDeleteRequest @@ -215,10 +215,10 @@ func (r ApiK8sFindByClusterIdRequest) Execute() (KubernetesCluster, *APIResponse } /* - * K8sFindByClusterId Retrieve Kubernetes clusters - * Retrieve the specified Kubernetes cluster. + * K8sFindByClusterId Get a Kubernetes Cluster by ID + * Retrieves the K8s cluster specified by its ID. * @param ctx _context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background(). - * @param k8sClusterId The unique ID of the Kubernetes cluster. + * @param k8sClusterId The unique ID of the K8s cluster to be retrieved. * @return ApiK8sFindByClusterIdRequest */ func (a *KubernetesApiService) K8sFindByClusterId(ctx _context.Context, k8sClusterId string) ApiK8sFindByClusterIdRequest { @@ -409,8 +409,8 @@ func (r ApiK8sGetRequest) Execute() (KubernetesClusters, *APIResponse, error) { } /* - * K8sGet List Kubernetes clusters - * List all available Kubernetes clusters. + * K8sGet Get Kubernetes Clusters + * Retrieves a list of all K8s clusters provisioned under your account. * @param ctx _context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background(). * @return ApiK8sGetRequest */ @@ -615,8 +615,8 @@ func (r ApiK8sKubeconfigGetRequest) Execute() (string, *APIResponse, error) { } /* - * K8sKubeconfigGet Retrieve Kubernetes configuration files - * Retrieve a configuration file for the specified Kubernetes cluster, in YAML or JSON format as defined in the Accept header; the default Accept header is application/yaml. + * K8sKubeconfigGet Get Kubernetes Configuration File + * Retrieves the configuration file for the specified K8s cluster. You can define the format (YAML or JSON) of the returned file in the Accept header. By default, 'application/yaml' is specified. * @param ctx _context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background(). * @param k8sClusterId The unique ID of the Kubernetes cluster. * @return ApiK8sKubeconfigGetRequest @@ -803,8 +803,8 @@ func (r ApiK8sNodepoolsDeleteRequest) Execute() (*APIResponse, error) { } /* - * K8sNodepoolsDelete Delete Kubernetes node pools - * Delete the specified Kubernetes node pool. + * K8sNodepoolsDelete Delete a Kubernetes Node Pool by ID + * Deletes the K8s node pool specified by its ID. * @param ctx _context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. 
Passed from http.Request or context.Background(). * @param k8sClusterId The unique ID of the Kubernetes cluster. * @param nodepoolId The unique ID of the Kubernetes node pool. @@ -968,8 +968,8 @@ func (r ApiK8sNodepoolsFindByIdRequest) Execute() (KubernetesNodePool, *APIRespo } /* - * K8sNodepoolsFindById Retrieve Kubernetes node pools - * Retrieve the specified Kubernetes node pool. + * K8sNodepoolsFindById Get a Kubernetes Node Pool by ID + * Retrieves the K8s node pool specified by its ID. * @param ctx _context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background(). * @param k8sClusterId The unique ID of the Kubernetes cluster. * @param nodepoolId The unique ID of the Kubernetes node pool. @@ -1166,8 +1166,8 @@ func (r ApiK8sNodepoolsGetRequest) Execute() (KubernetesNodePools, *APIResponse, } /* - * K8sNodepoolsGet List Kubernetes node pools - * List all Kubernetes node pools, included the specified Kubernetes cluster. + * K8sNodepoolsGet Get Kubernetes Node Pools + * Retrieves a list of K8s node pools of a cluster specified by its ID. * @param ctx _context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background(). * @param k8sClusterId The unique ID of the Kubernetes cluster. * @return ApiK8sNodepoolsGetRequest @@ -1355,8 +1355,8 @@ func (r ApiK8sNodepoolsNodesDeleteRequest) Execute() (*APIResponse, error) { } /* - * K8sNodepoolsNodesDelete Delete Kubernetes nodes - * Delete the specified Kubernetes node. + * K8sNodepoolsNodesDelete Delete a Kubernetes Node by ID + * Deletes the K8s node specified by its ID. * @param ctx _context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background(). * @param k8sClusterId The unique ID of the Kubernetes cluster. * @param nodepoolId The unique ID of the Kubernetes node pool. @@ -1524,8 +1524,8 @@ func (r ApiK8sNodepoolsNodesFindByIdRequest) Execute() (KubernetesNode, *APIResp } /* - * K8sNodepoolsNodesFindById Retrieve Kubernetes nodes - * Retrieve the specified Kubernetes node. + * K8sNodepoolsNodesFindById Get Kubernetes Node by ID + * Retrieves the K8s node specified by its ID. * @param ctx _context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background(). * @param k8sClusterId The unique ID of the Kubernetes cluster. * @param nodepoolId The unique ID of the Kubernetes node pool. @@ -1726,8 +1726,8 @@ func (r ApiK8sNodepoolsNodesGetRequest) Execute() (KubernetesNodes, *APIResponse } /* - * K8sNodepoolsNodesGet List Kubernetes nodes - * List all the nodes, included in the specified Kubernetes node pool. + * K8sNodepoolsNodesGet Get Kubernetes Nodes + * Retrieves the list of all K8s nodes of the specified node pool. * @param ctx _context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background(). * @param k8sClusterId The unique ID of the Kubernetes cluster. * @param nodepoolId The unique ID of the Kubernetes node pool. @@ -1918,10 +1918,10 @@ func (r ApiK8sNodepoolsNodesReplacePostRequest) Execute() (*APIResponse, error) } /* - - K8sNodepoolsNodesReplacePost Recreate Kubernetes nodes - - Recreate the specified Kubernetes node. + - K8sNodepoolsNodesReplacePost Recreate a Kubernetes Node by ID + - Recreates the K8s node specified by its ID. 
-A new node is created and configured by Managed Kubernetes, based on the node pool template. Once the status is "Active", all the pods are migrated from the faulty node, which is then deleted once empty. During this operation, the node pool will have an additional billable "Active" node. +If a node becomes unusable, Managed Kubernetes allows you to recreate it with a configuration based on the node pool template. Once the status is 'Active,' all the pods from the failed node will be migrated to the new node. The node pool has an additional billable 'active' node during this process. - @param ctx _context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background(). - @param k8sClusterId The unique ID of the Kubernetes cluster. - @param nodepoolId The unique ID of the Kubernetes node pool. @@ -2092,8 +2092,8 @@ func (r ApiK8sNodepoolsPostRequest) Execute() (KubernetesNodePool, *APIResponse, } /* - * K8sNodepoolsPost Create Kubernetes node pools - * Create a Kubernetes node pool inside the specified Kubernetes cluster. + * K8sNodepoolsPost Create a Kubernetes Node Pool + * Creates a node pool inside the specified K8s cluster. * @param ctx _context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background(). * @param k8sClusterId The unique ID of the Kubernetes cluster. * @return ApiK8sNodepoolsPostRequest @@ -2276,8 +2276,8 @@ func (r ApiK8sNodepoolsPutRequest) Execute() (KubernetesNodePool, *APIResponse, } /* - * K8sNodepoolsPut Modify Kubernetes node pools - * Modify the specified Kubernetes node pool. + * K8sNodepoolsPut Modify a Kubernetes Node Pool by ID + * Modifies the K8s node pool specified by its ID. * @param ctx _context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background(). * @param k8sClusterId The unique ID of the Kubernetes cluster. * @param nodepoolId The unique ID of the Kubernetes node pool. @@ -2461,8 +2461,8 @@ func (r ApiK8sPostRequest) Execute() (KubernetesCluster, *APIResponse, error) { } /* - * K8sPost Create Kubernetes clusters - * Create a Kubernetes cluster. + * K8sPost Create a Kubernetes Cluster + * Creates a K8s cluster provisioned under your account. * @param ctx _context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background(). * @return ApiK8sPostRequest */ @@ -2641,8 +2641,8 @@ func (r ApiK8sPutRequest) Execute() (KubernetesCluster, *APIResponse, error) { } /* - * K8sPut Modify Kubernetes clusters - * Modify the specified Kubernetes cluster. + * K8sPut Modify a Kubernetes Cluster by ID + * Modifies the K8s cluster specified by its ID. * @param ctx _context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background(). * @param k8sClusterId The unique ID of the Kubernetes cluster. * @return ApiK8sPutRequest @@ -2824,8 +2824,8 @@ func (r ApiK8sVersionsDefaultGetRequest) Execute() (string, *APIResponse, error) } /* - * K8sVersionsDefaultGet Retrieve current default Kubernetes version - * Retrieve current default Kubernetes version for clusters and nodepools. + * K8sVersionsDefaultGet Get Default Kubernetes Version + * Retrieves the current default K8s version to be used by the clusters and node pools. * @param ctx _context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. 
Passed from http.Request or context.Background(). * @return ApiK8sVersionsDefaultGetRequest */ @@ -2994,8 +2994,8 @@ func (r ApiK8sVersionsGetRequest) Execute() ([]string, *APIResponse, error) { } /* - * K8sVersionsGet List Kubernetes versions - * List available Kubernetes versions. + * K8sVersionsGet Get Kubernetes Versions + * Lists available K8s versions. * @param ctx _context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background(). * @return ApiK8sVersionsGetRequest */ diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/api_labels.go b/vendor/github.com/ionos-cloud/sdk-go/v6/api_labels.go index 5c9ebea31..bfeb50565 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/api_labels.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/api_labels.go @@ -610,8 +610,8 @@ func (r ApiDatacentersLabelsPostRequest) Execute() (LabelResource, *APIResponse, } /* - * DatacentersLabelsPost Create data center labels - * Add a new label to the specified data center. + * DatacentersLabelsPost Create a Data Center Label + * Adds a new label to the specified data center. * @param ctx _context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background(). * @param datacenterId The unique ID of the data center. * @return ApiDatacentersLabelsPostRequest @@ -794,8 +794,8 @@ func (r ApiDatacentersLabelsPutRequest) Execute() (LabelResource, *APIResponse, } /* - * DatacentersLabelsPut Modify data center labels - * Modify the specified data center label. + * DatacentersLabelsPut Modify a Data Center Label by Key + * Modifies the specified data center label. * @param ctx _context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background(). * @param datacenterId The unique ID of the data center. * @param key The label key @@ -1544,8 +1544,8 @@ func (r ApiDatacentersServersLabelsPostRequest) Execute() (LabelResource, *APIRe } /* - * DatacentersServersLabelsPost Create server labels - * Add a new label to the specified server. + * DatacentersServersLabelsPost Create a Server Label + * Adds a new label to the specified server. * @param ctx _context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background(). * @param datacenterId The unique ID of the data center. * @param serverId The unique ID of the server. @@ -1732,8 +1732,8 @@ func (r ApiDatacentersServersLabelsPutRequest) Execute() (LabelResource, *APIRes } /* - * DatacentersServersLabelsPut Modify server labels - * Modify the specified server label. + * DatacentersServersLabelsPut Modify a Server Label + * Modifies the specified server label. * @param ctx _context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background(). * @param datacenterId The unique ID of the data center. * @param serverId The unique ID of the server. @@ -2485,8 +2485,8 @@ func (r ApiDatacentersVolumesLabelsPostRequest) Execute() (LabelResource, *APIRe } /* - * DatacentersVolumesLabelsPost Create volume labels - * Add a new label to the specified volume. + * DatacentersVolumesLabelsPost Create a Volume Label + * Adds a new label to the specified volume. * @param ctx _context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background(). * @param datacenterId The unique ID of the data center. 
* @param volumeId The unique ID of the volume.
@@ -2673,8 +2673,8 @@ func (r ApiDatacentersVolumesLabelsPutRequest) Execute() (LabelResource, *APIRes
 }

 /*
- * DatacentersVolumesLabelsPut Modify volume labels
- * Modify the specified volume label.
+ * DatacentersVolumesLabelsPut Modify a Volume Label
+ * Modifies the specified volume label.
 * @param ctx _context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background().
 * @param datacenterId The unique ID of the data center.
 * @param volumeId The unique ID of the volume.
@@ -3597,8 +3597,8 @@ func (r ApiIpblocksLabelsPutRequest) Execute() (LabelResource, *APIResponse, err
 }

 /*
- * IpblocksLabelsPut Modify IP block labels
- * Modify the specified IP block label.
+ * IpblocksLabelsPut Modify an IP Block Label by ID
+ * Modifies the specified IP block label.
 * @param ctx _context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background().
 * @param ipblockId The unique ID of the IP block.
 * @param key The label key
@@ -4716,8 +4716,8 @@ func (r ApiSnapshotsLabelsPostRequest) Execute() (LabelResource, *APIResponse, e
 }

 /*
- * SnapshotsLabelsPost Create snapshot labels
- * Add a new label to the specified snapshot.
+ * SnapshotsLabelsPost Create a Snapshot Label
+ * Adds a new label to the specified snapshot.
 * @param ctx _context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background().
 * @param snapshotId The unique ID of the snapshot.
 * @return ApiSnapshotsLabelsPostRequest
@@ -4900,8 +4900,8 @@ func (r ApiSnapshotsLabelsPutRequest) Execute() (LabelResource, *APIResponse, er
 }

 /*
- * SnapshotsLabelsPut Modify snapshot labels
- * Modify the specified snapshot label.
+ * SnapshotsLabelsPut Modify a Snapshot Label by ID
+ * Modifies the specified snapshot label.
 * @param ctx _context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background().
 * @param snapshotId The unique ID of the snapshot.
 * @param key The label key
diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/api_lans.go b/vendor/github.com/ionos-cloud/sdk-go/v6/api_lans.go
index ddc74e2af..07560c528 100644
--- a/vendor/github.com/ionos-cloud/sdk-go/v6/api_lans.go
+++ b/vendor/github.com/ionos-cloud/sdk-go/v6/api_lans.go
@@ -192,6 +192,167 @@ func (a *LANsApiService) DatacentersLansDeleteExecute(r ApiDatacentersLansDelete
 return localVarAPIResponse, nil
 }

+type ApiDatacentersLansEnableIpv6Request struct {
+	ctx _context.Context
+	ApiService *LANsApiService
+	datacenterId string
+	pretty *bool
+	depth *int32
+	xContractNumber *int32
+}
+
+func (r ApiDatacentersLansEnableIpv6Request) Pretty(pretty bool) ApiDatacentersLansEnableIpv6Request {
+	r.pretty = &pretty
+	return r
+}
+func (r ApiDatacentersLansEnableIpv6Request) Depth(depth int32) ApiDatacentersLansEnableIpv6Request {
+	r.depth = &depth
+	return r
+}
+func (r ApiDatacentersLansEnableIpv6Request) XContractNumber(xContractNumber int32) ApiDatacentersLansEnableIpv6Request {
+	r.xContractNumber = &xContractNumber
+	return r
+}
+
+func (r ApiDatacentersLansEnableIpv6Request) Execute() (*APIResponse, error) {
+	return r.ApiService.DatacentersLansEnableIpv6Execute(r)
+}
+
+/*
+ * DatacentersLansEnableIpv6 Enable IPv6 in the current Virtual Datacenter
+ * Enables IPv6 for all NICs in the current Virtual Datacenter.
+ * @param ctx _context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background(). + * @param datacenterId The unique ID of the data center. + * @return ApiDatacentersLansEnableIpv6Request + */ +func (a *LANsApiService) DatacentersLansEnableIpv6(ctx _context.Context, datacenterId string) ApiDatacentersLansEnableIpv6Request { + return ApiDatacentersLansEnableIpv6Request{ + ApiService: a, + ctx: ctx, + datacenterId: datacenterId, + } +} + +/* + * Execute executes the request + */ +func (a *LANsApiService) DatacentersLansEnableIpv6Execute(r ApiDatacentersLansEnableIpv6Request) (*APIResponse, error) { + var ( + localVarHTTPMethod = _nethttp.MethodPost + localVarPostBody interface{} + localVarFormFileName string + localVarFileName string + localVarFileBytes []byte + ) + + localBasePath, err := a.client.cfg.ServerURLWithContext(r.ctx, "LANsApiService.DatacentersLansEnableIpv6") + if err != nil { + return nil, GenericOpenAPIError{error: err.Error()} + } + + localVarPath := localBasePath + "/datacenters/{datacenterId}/lans/enable-ipv6" + localVarPath = strings.Replace(localVarPath, "{"+"datacenterId"+"}", _neturl.PathEscape(parameterToString(r.datacenterId, "")), -1) + + localVarHeaderParams := make(map[string]string) + localVarQueryParams := _neturl.Values{} + localVarFormParams := _neturl.Values{} + + if r.pretty != nil { + localVarQueryParams.Add("pretty", parameterToString(*r.pretty, "")) + } else { + defaultQueryParam := a.client.cfg.DefaultQueryParams.Get("pretty") + if defaultQueryParam == "" { + localVarQueryParams.Add("pretty", parameterToString(true, "")) + } + } + if r.depth != nil { + localVarQueryParams.Add("depth", parameterToString(*r.depth, "")) + } else { + defaultQueryParam := a.client.cfg.DefaultQueryParams.Get("depth") + if defaultQueryParam == "" { + localVarQueryParams.Add("depth", parameterToString(0, "")) + } + } + + // to determine the Content-Type header + localVarHTTPContentTypes := []string{} + + // set Content-Type header + localVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes) + if localVarHTTPContentType != "" { + localVarHeaderParams["Content-Type"] = localVarHTTPContentType + } + + // to determine the Accept header + localVarHTTPHeaderAccepts := []string{"*/*"} + + // set Accept header + localVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts) + if localVarHTTPHeaderAccept != "" { + localVarHeaderParams["Accept"] = localVarHTTPHeaderAccept + } + if r.xContractNumber != nil { + localVarHeaderParams["X-Contract-Number"] = parameterToString(*r.xContractNumber, "") + } + if r.ctx != nil { + // API Key Authentication + if auth, ok := r.ctx.Value(ContextAPIKeys).(map[string]APIKey); ok { + if apiKey, ok := auth["Token Authentication"]; ok { + var key string + if apiKey.Prefix != "" { + key = apiKey.Prefix + " " + apiKey.Key + } else { + key = apiKey.Key + } + localVarHeaderParams["Authorization"] = key + } + } + } + req, err := a.client.prepareRequest(r.ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes) + if err != nil { + return nil, err + } + + localVarHTTPResponse, httpRequestTime, err := a.client.callAPI(req) + + localVarAPIResponse := &APIResponse{ + Response: localVarHTTPResponse, + Method: localVarHTTPMethod, + RequestURL: localVarPath, + RequestTime: httpRequestTime, + Operation: "DatacentersLansEnableIpv6", + } + + if err != nil 
|| localVarHTTPResponse == nil { + return localVarAPIResponse, err + } + + localVarBody, err := _ioutil.ReadAll(localVarHTTPResponse.Body) + localVarHTTPResponse.Body.Close() + localVarAPIResponse.Payload = localVarBody + if err != nil { + return localVarAPIResponse, err + } + + if localVarHTTPResponse.StatusCode >= 300 { + newErr := GenericOpenAPIError{ + statusCode: localVarHTTPResponse.StatusCode, + body: localVarBody, + error: fmt.Sprintf(FormatStringErr, localVarHTTPResponse.Status, string(localVarBody)), + } + var v Error + err = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get("Content-Type")) + if err != nil { + newErr.error = fmt.Sprintf(FormatStringErr, localVarHTTPResponse.Status, err.Error()) + return localVarAPIResponse, newErr + } + newErr.model = v + return localVarAPIResponse, newErr + } + + return localVarAPIResponse, nil +} + type ApiDatacentersLansFindByIdRequest struct { ctx _context.Context ApiService *LANsApiService @@ -1402,13 +1563,13 @@ type ApiDatacentersLansPostRequest struct { ctx _context.Context ApiService *LANsApiService datacenterId string - lan *LanPost + lan *Lan pretty *bool depth *int32 xContractNumber *int32 } -func (r ApiDatacentersLansPostRequest) Lan(lan LanPost) ApiDatacentersLansPostRequest { +func (r ApiDatacentersLansPostRequest) Lan(lan Lan) ApiDatacentersLansPostRequest { r.lan = &lan return r } @@ -1425,13 +1586,13 @@ func (r ApiDatacentersLansPostRequest) XContractNumber(xContractNumber int32) Ap return r } -func (r ApiDatacentersLansPostRequest) Execute() (LanPost, *APIResponse, error) { +func (r ApiDatacentersLansPostRequest) Execute() (Lan, *APIResponse, error) { return r.ApiService.DatacentersLansPostExecute(r) } /* * DatacentersLansPost Create LANs - * Create a LAN within the data center. + * Creates a LAN within the data center. * @param ctx _context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background(). * @param datacenterId The unique ID of the data center. * @return ApiDatacentersLansPostRequest @@ -1446,16 +1607,16 @@ func (a *LANsApiService) DatacentersLansPost(ctx _context.Context, datacenterId /* * Execute executes the request - * @return LanPost + * @return Lan */ -func (a *LANsApiService) DatacentersLansPostExecute(r ApiDatacentersLansPostRequest) (LanPost, *APIResponse, error) { +func (a *LANsApiService) DatacentersLansPostExecute(r ApiDatacentersLansPostRequest) (Lan, *APIResponse, error) { var ( localVarHTTPMethod = _nethttp.MethodPost localVarPostBody interface{} localVarFormFileName string localVarFileName string localVarFileBytes []byte - localVarReturnValue LanPost + localVarReturnValue Lan ) localBasePath, err := a.client.cfg.ServerURLWithContext(r.ctx, "LANsApiService.DatacentersLansPost") diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/api_load_balancers.go b/vendor/github.com/ionos-cloud/sdk-go/v6/api_load_balancers.go index 90a055de0..3dbc558d6 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/api_load_balancers.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/api_load_balancers.go @@ -624,7 +624,7 @@ func (r ApiDatacentersLoadbalancersBalancednicsPostRequest) Execute() (Nic, *API /* * DatacentersLoadbalancersBalancednicsPost Attach balanced NICs - * Attach an existing NIC to the specified Load Balancer. + * Attaches an existing NIC to the specified Load Balancer. * @param ctx _context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background().
* @param datacenterId The unique ID of the data center. * @param loadbalancerId The unique ID of the Load Balancer. @@ -1573,8 +1573,8 @@ func (r ApiDatacentersLoadbalancersPostRequest) Execute() (Loadbalancer, *APIRes } /* - * DatacentersLoadbalancersPost Create Load Balancers - * Create a Load Balancer within the data center. + * DatacentersLoadbalancersPost Create a Load Balancer + * Creates a Load Balancer within the data center. * @param ctx _context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background(). * @param datacenterId The unique ID of the data center. * @return ApiDatacentersLoadbalancersPostRequest @@ -1757,8 +1757,8 @@ func (r ApiDatacentersLoadbalancersPutRequest) Execute() (Loadbalancer, *APIResp } /* - * DatacentersLoadbalancersPut Modify Load Balancers - * Modify the properties of the specified Load Balancer within the data center. + * DatacentersLoadbalancersPut Modify a Load Balancer by ID + * Modifies the properties of the specified Load Balancer within the data center. * @param ctx _context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background(). * @param datacenterId The unique ID of the data center. * @param loadbalancerId The unique ID of the Load Balancer. diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/api_locations.go b/vendor/github.com/ionos-cloud/sdk-go/v6/api_locations.go index ed344bfcf..687d5b515 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/api_locations.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/api_locations.go @@ -54,8 +54,8 @@ func (r ApiLocationsFindByRegionIdRequest) Execute() (Locations, *APIResponse, e } /* - * LocationsFindByRegionId List locations within regions - * List locations by the region ID. + * LocationsFindByRegionId Get Locations within a Region + * Retrieves the available locations in a region specified by its ID. The 'regionId' consists of the two character identifier of the region (country), e.g., 'de'. * @param ctx _context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background(). * @param regionId The unique ID of the region. * @return ApiLocationsFindByRegionIdRequest @@ -228,8 +228,8 @@ func (r ApiLocationsFindByRegionIdAndIdRequest) Execute() (Location, *APIRespons } /* - * LocationsFindByRegionIdAndId Retrieve specified locations - * Retrieve the properties of the specified location + * LocationsFindByRegionIdAndId Get Location by ID + * Retrieves the information about the location specified by its ID. The 'locationId' consists of the three-digit identifier of the city according to the IATA code. * @param ctx _context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background(). * @param regionId The unique ID of the region. * @param locationId The unique ID of the location. @@ -425,11 +425,19 @@ func (r ApiLocationsGetRequest) Execute() (Locations, *APIResponse, error) { } /* - * LocationsGet List locations - * List the available locations for provisioning your virtual data centers. - * @param ctx _context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background(). - * @return ApiLocationsGetRequest - */ +* LocationsGet Get Locations +* Retrieves the available physical locations where you can deploy cloud resources in a VDC. 
+ +A location is identified by a combination of the following characters: + +* a two-character **regionId**, which represents a country (example: 'de') + +* a three-character **locationId**, which represents a city. The 'locationId' is typically based on the IATA code of the city's airport (example: 'txl'). + +>Note that 'locations' are read-only and cannot be changed. +* @param ctx _context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background(). +* @return ApiLocationsGetRequest +*/ func (a *LocationsApiService) LocationsGet(ctx _context.Context) ApiLocationsGetRequest { return ApiLocationsGetRequest{ ApiService: a, diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/api_nat_gateways.go b/vendor/github.com/ionos-cloud/sdk-go/v6/api_nat_gateways.go index 6495ae4cd..28677e79d 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/api_nat_gateways.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/api_nat_gateways.go @@ -1145,8 +1145,8 @@ func (r ApiDatacentersNatgatewaysFlowlogsPostRequest) Execute() (FlowLog, *APIRe } /* - * DatacentersNatgatewaysFlowlogsPost Create NAT Gateway Flow Logs - * Add a new Flow Log for the specified NAT Gateway. + * DatacentersNatgatewaysFlowlogsPost Create a NAT Gateway Flow Log + * Adds a new Flow Log to the specified NAT Gateway. * @param ctx _context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background(). * @param datacenterId The unique ID of the data center. * @param natGatewayId The unique ID of the NAT Gateway. @@ -1907,8 +1907,8 @@ func (r ApiDatacentersNatgatewaysPostRequest) Execute() (NatGateway, *APIRespons } /* - - DatacentersNatgatewaysPost Create NAT Gateways - - Create a NAT Gateway within the data center. + - DatacentersNatgatewaysPost Create a NAT Gateway + - Creates a NAT Gateway within the data center. This operation is restricted to contract owner, admin, and users with 'createInternetAccess' privileges. - @param ctx _context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background(). @@ -2844,8 +2844,8 @@ func (r ApiDatacentersNatgatewaysRulesPatchRequest) Execute() (NatGatewayRule, * } /* - * DatacentersNatgatewaysRulesPatch Partially modify NAT Gateway rules - * Update the properties of the specified NAT Gateway rule. + * DatacentersNatgatewaysRulesPatch Partially Modify a NAT Gateway Rule by ID + * Updates the properties of the specified NAT Gateway rule. * @param ctx _context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background(). * @param datacenterId The unique ID of the data center. * @param natGatewayId The unique ID of the NAT Gateway. @@ -3034,8 +3034,8 @@ func (r ApiDatacentersNatgatewaysRulesPostRequest) Execute() (NatGatewayRule, *A } /* - * DatacentersNatgatewaysRulesPost Create NAT Gateway rules - * Create a rule for the specified NAT Gateway. + * DatacentersNatgatewaysRulesPost Create a NAT Gateway Rule + * Creates a rule for the specified NAT Gateway. * @param ctx _context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background(). * @param datacenterId The unique ID of the data center. * @param natGatewayId The unique ID of the NAT Gateway. 
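[Reviewer note] The api_lans.go hunks earlier in this patch add a new DatacentersLansEnableIpv6 endpoint and change the LAN create call to take and return Lan instead of LanPost. A minimal usage sketch of both, not part of the vendored code: it assumes the usual client wiring from this SDK (NewConfiguration/NewAPIClient) and the generated LANsApi accessor name; all IDs are placeholders.

package main

import (
	"context"
	"fmt"

	ionoscloud "github.com/ionos-cloud/sdk-go/v6"
)

func main() {
	// Token auth; username/password may be left empty when a token is set.
	cfg := ionoscloud.NewConfiguration("", "", "my-token", "")
	client := ionoscloud.NewAPIClient(cfg)
	ctx := context.Background()

	// New in this version: enable IPv6 for all NICs in a VDC. Optional
	// query parameters are set via the chained builder methods.
	if _, err := client.LANsApi.
		DatacentersLansEnableIpv6(ctx, "datacenter-id").
		Depth(0).
		Execute(); err != nil {
		fmt.Println("enable-ipv6 failed:", err)
	}

	// Breaking change: the create request now uses Lan, not LanPost.
	name, public := "backend-lan", false
	lan, _, err := client.LANsApi.
		DatacentersLansPost(ctx, "datacenter-id").
		Lan(ionoscloud.Lan{Properties: &ionoscloud.LanProperties{Name: &name, Public: &public}}).
		Execute()
	if err != nil {
		fmt.Println("lan create failed:", err)
		return
	}
	if lan.Id != nil {
		fmt.Println("created LAN:", *lan.Id)
	}
}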
@@ -3222,7 +3222,7 @@ func (r ApiDatacentersNatgatewaysRulesPutRequest) Execute() (NatGatewayRule, *AP } /* - * DatacentersNatgatewaysRulesPut Modify NAT Gateway rules + * DatacentersNatgatewaysRulesPut Modify a NAT Gateway Rule by ID * Modify the specified NAT Gateway rule. * @param ctx _context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background(). * @param datacenterId The unique ID of the data center. diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/api_network_interfaces.go b/vendor/github.com/ionos-cloud/sdk-go/v6/api_network_interfaces.go index e77269bc3..af7adb740 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/api_network_interfaces.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/api_network_interfaces.go @@ -840,8 +840,8 @@ func (r ApiDatacentersServersNicsPostRequest) Execute() (Nic, *APIResponse, erro } /* - * DatacentersServersNicsPost Create NICs - * Add a NIC to the specified server. The combined total of NICs and attached volumes cannot exceed 24 per server. + * DatacentersServersNicsPost Create a NIC + * Adds a NIC to the specified server. The combined total of NICs and attached volumes cannot exceed 24 per server. * @param ctx _context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background(). * @param datacenterId The unique ID of the data center. * @param serverId The unique ID of the server. diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/api_network_load_balancers.go b/vendor/github.com/ionos-cloud/sdk-go/v6/api_network_load_balancers.go index 3636840ca..5fc162e1d 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/api_network_load_balancers.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/api_network_load_balancers.go @@ -1156,8 +1156,8 @@ func (r ApiDatacentersNetworkloadbalancersFlowlogsPostRequest) Execute() (FlowLo } /* - * DatacentersNetworkloadbalancersFlowlogsPost Create NLB Flow Logs - * Add a new Flow Log for the Network Load Balancer. + * DatacentersNetworkloadbalancersFlowlogsPost Create an NLB Flow Log + * Adds a new Flow Log for the Network Load Balancer. * @param ctx _context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background(). * @param datacenterId The unique ID of the data center. * @param networkLoadBalancerId The unique ID of the Network Load Balancer. @@ -2288,8 +2288,8 @@ func (r ApiDatacentersNetworkloadbalancersForwardingrulesPostRequest) Execute() } /* - * DatacentersNetworkloadbalancersForwardingrulesPost Create NLB forwarding rules - * Create a forwarding rule for the specified Network Load Balancer. + * DatacentersNetworkloadbalancersForwardingrulesPost Create an NLB Forwarding Rule + * Creates a forwarding rule for the specified Network Load Balancer. * @param ctx _context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background(). * @param datacenterId The unique ID of the data center. * @param networkLoadBalancerId The unique ID of the Network Load Balancer. @@ -3087,8 +3087,8 @@ func (r ApiDatacentersNetworkloadbalancersPostRequest) Execute() (NetworkLoadBal } /* - * DatacentersNetworkloadbalancersPost Create Network Load Balancers - * Create a Network Load Balancer within the data center. + * DatacentersNetworkloadbalancersPost Create a Network Load Balancer + * Creates a Network Load Balancer within the data center.
* @param ctx _context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background(). * @param datacenterId The unique ID of the data center. * @return ApiDatacentersNetworkloadbalancersPostRequest diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/api_private_cross_connects.go b/vendor/github.com/ionos-cloud/sdk-go/v6/api_private_cross_connects.go index 2b325cafa..d3d382f9c 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/api_private_cross_connects.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/api_private_cross_connects.go @@ -780,8 +780,8 @@ func (r ApiPccsPostRequest) Execute() (PrivateCrossConnect, *APIResponse, error) } /* - * PccsPost Create private Cross-Connects - * Create a private Cross-Connect. + * PccsPost Create a Private Cross-Connect + * Creates a private Cross-Connect. * @param ctx _context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background(). * @return ApiPccsPostRequest */ diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/api_servers.go b/vendor/github.com/ionos-cloud/sdk-go/v6/api_servers.go index f06898593..75ae6b688 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/api_servers.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/api_servers.go @@ -56,14 +56,18 @@ func (r ApiDatacentersServersCdromsDeleteRequest) Execute() (*APIResponse, error } /* - * DatacentersServersCdromsDelete Detach CD-ROMs - * Detach the specified CD-ROM from the server. - * @param ctx _context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background(). - * @param datacenterId The unique ID of the data center. - * @param serverId The unique ID of the server. - * @param cdromId The unique ID of the CD-ROM. - * @return ApiDatacentersServersCdromsDeleteRequest - */ + - DatacentersServersCdromsDelete Detach a CD-ROM by ID + - Detaches the specified CD-ROM from the server. + +Detaching a CD-ROM deletes the CD-ROM. The image will not be deleted. + +Note that detaching a CD-ROM leads to a reset of the server. + - @param ctx _context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background(). + - @param datacenterId The unique ID of the data center. + - @param serverId The unique ID of the server. + - @param cdromId The unique ID of the CD-ROM. + - @return ApiDatacentersServersCdromsDeleteRequest +*/ func (a *ServersApiService) DatacentersServersCdromsDelete(ctx _context.Context, datacenterId string, serverId string, cdromId string) ApiDatacentersServersCdromsDeleteRequest { return ApiDatacentersServersCdromsDeleteRequest{ ApiService: a, @@ -225,8 +229,8 @@ func (r ApiDatacentersServersCdromsFindByIdRequest) Execute() (Image, *APIRespon } /* - * DatacentersServersCdromsFindById Retrieve attached CD-ROMs - * Retrieve the properties of the CD-ROM, attached to the specified server. + * DatacentersServersCdromsFindById Get Attached CD-ROM by ID + * Retrieves the properties of the CD-ROM attached to the specified server. * @param ctx _context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background(). * @param datacenterId The unique ID of the data center. * @param serverId The unique ID of the server.
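[Reviewer note] The reworked CD-ROM documentation above is worth illustrating, since the detach call both deletes the CD-ROM resource and resets the server. A minimal sketch under the same assumptions as the earlier example (client wiring, the generated ServersApi accessor name, placeholder IDs):

// Detach (and thereby delete) a CD-ROM from a server. Per the doc
// comment in this hunk, the underlying image is kept, but the server
// is reset as a side effect.
apiResp, err := client.ServersApi.
	DatacentersServersCdromsDelete(ctx, "datacenter-id", "server-id", "cdrom-id").
	Execute()
if err != nil {
	fmt.Println("detach failed:", err)
} else if apiResp != nil {
	// APIResponse embeds *http.Response and carries request metadata.
	fmt.Println("detach accepted, status:", apiResp.StatusCode)
}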
@@ -437,8 +441,8 @@ func (r ApiDatacentersServersCdromsGetRequest) Execute() (Cdroms, *APIResponse, } /* - * DatacentersServersCdromsGet List attached CD-ROMs - * List all CD-ROMs, attached to the specified server. + * DatacentersServersCdromsGet Get Attached CD-ROMs + * Lists all CD-ROMs attached to the specified server. * @param ctx _context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background(). * @param datacenterId The unique ID of the data center. * @param serverId The unique ID of the server. @@ -649,13 +653,17 @@ func (r ApiDatacentersServersCdromsPostRequest) Execute() (Image, *APIResponse, } /* - * DatacentersServersCdromsPost Attach CD-ROMs - * Attach a CD-ROM to an existing server. Up to two CD-ROMs can be attached to the same server. - * @param ctx _context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background(). - * @param datacenterId The unique ID of the data center. - * @param serverId The unique ID of the server. - * @return ApiDatacentersServersCdromsPostRequest - */ + - DatacentersServersCdromsPost Attach a CD-ROM + - Attaches a CD-ROM to an existing server specified by its ID. + +CD-ROMs cannot be created stand-alone like volumes. They are either attached to a server or do not exist. They always have an ISO-Image associated; empty CD-ROMs cannot be provisioned. It is possible to attach up to two CD-ROMs to the same server. + +Note that attaching a CD-ROM leads to a reset of the server. + - @param ctx _context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background(). + - @param datacenterId The unique ID of the data center. + - @param serverId The unique ID of the server. + - @return ApiDatacentersServersCdromsPostRequest +*/ func (a *ServersApiService) DatacentersServersCdromsPost(ctx _context.Context, datacenterId string, serverId string) ApiDatacentersServersCdromsPostRequest { return ApiDatacentersServersCdromsPostRequest{ ApiService: a, @@ -837,7 +845,7 @@ func (r ApiDatacentersServersDeleteRequest) Execute() (*APIResponse, error) { /* * DatacentersServersDelete Delete servers - * Delete the specified server in your data center. The attached storage volumes will not be removed — a separate API call must be made for these actions. + * Delete the specified server in your data center. The attached storage volumes will also be removed if the query parameter is set to 'true'; otherwise, a separate API call must be made for these actions. * @param ctx _context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background(). * @param datacenterId The unique ID of the data center. * @param serverId The unique ID of the server. @@ -1615,8 +1623,8 @@ func (r ApiDatacentersServersPostRequest) Execute() (Server, *APIResponse, error } /* - * DatacentersServersPost Create servers - * Create a server within the specified data center. You can also use this request to configure the boot volumes and connect to existing LANs at the same time. + * DatacentersServersPost Create a Server + * Creates a server within the specified data center. You can also use this request to configure the boot volumes and connect to existing LANs at the same time. * @param ctx _context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background().
* @param datacenterId The unique ID of the data center. * @return ApiDatacentersServersPostRequest @@ -1799,8 +1807,8 @@ func (r ApiDatacentersServersPutRequest) Execute() (Server, *APIResponse, error) } /* - - DatacentersServersPut Modify servers - - Modify the properties of the specified server within the data center. + - DatacentersServersPut Modify a Server by ID + - Modifies the properties of the specified server within the data center. Starting with v5, the 'allowReboot' attribute is retired; while previously required for changing certain server properties, this behavior is now implicit, and the backend will perform this automatically. For example, in earlier versions, when the CPU family is changed, 'allowReboot' had to be set to 'true'; this is no longer required; the reboot will be performed automatically. - @param ctx _context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background(). @@ -2361,10 +2369,12 @@ func (r ApiDatacentersServersResumePostRequest) Execute() (*APIResponse, error) } /* - - DatacentersServersResumePost Resume Cubes instances - - Resume a suspended Cube instance; no billing event will be generated. + - DatacentersServersResumePost Resume a Cube Server by ID + - Resumes a suspended Cube Server specified by its ID. + +Since the suspended instance was not deleted, the allocated resources continue to be billed. You can perform this operation only for Cube Servers. -This operation is only supported for the Cubes. +To check the status of the request, you can use the 'Location' HTTP header in the response (see 'Requests' for more information). - @param ctx _context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background(). - @param datacenterId The unique ID of the data center. - @param serverId The unique ID of the server. @@ -2528,13 +2538,21 @@ func (r ApiDatacentersServersStartPostRequest) Execute() (*APIResponse, error) { } /* - * DatacentersServersStartPost Start servers - * Start the specified server within the data center; if the server's public IP address has been deallocated, a new IP address will be assigned. - * @param ctx _context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background(). - * @param datacenterId The unique ID of the data center. - * @param serverId The unique ID of the server. - * @return ApiDatacentersServersStartPostRequest - */ + - DatacentersServersStartPost Start an Enterprise Server by ID + - Starts the Enterprise Server specified by its ID. + +>Note that you cannot use this method to start a Cube Server. + +By starting the Enterprise Server, cores and RAM are provisioned, and the billing continues. + +If the server's public IPv4 address has been deallocated, a new IPv4 address will be assigned. IPv6 blocks and addresses will remain unchanged when stopping and starting a server. + +To check the status of the request, you can use the 'Location' HTTP header in the response (see 'Requests' for more information). + - @param ctx _context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background(). + - @param datacenterId The unique ID of the data center. + - @param serverId The unique ID of the server.
+ - @return ApiDatacentersServersStartPostRequest +*/ func (a *ServersApiService) DatacentersServersStartPost(ctx _context.Context, datacenterId string, serverId string) ApiDatacentersServersStartPostRequest { return ApiDatacentersServersStartPostRequest{ ApiService: a, @@ -2693,10 +2711,16 @@ func (r ApiDatacentersServersStopPostRequest) Execute() (*APIResponse, error) { } /* - - DatacentersServersStopPost Stop VMs - - Stop the specified server within the data center: the VM will be forcefully shut down, the billing will cease, and any allocated public IPs will be deallocated. + - DatacentersServersStopPost Stop an Enterprise Server by ID + - Stops the Enterprise Server specified by its ID. + +>Note that you cannot use this method to stop a Cube Server. + + By stopping the Enterprise Server, cores and RAM are freed and no longer charged. + +Public IPv4 IPs that are not reserved are returned to the IPv4 pool. IPv6 blocks and addresses will remain unchanged when stopping and starting a server. -This operation is not supported for the Cubes. +To check the status of the request, you can use the 'Location' HTTP header in the response (see 'Requests' for more information). - @param ctx _context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background(). - @param datacenterId The unique ID of the data center. - @param serverId The unique ID of the server. @@ -2860,10 +2884,12 @@ func (r ApiDatacentersServersSuspendPostRequest) Execute() (*APIResponse, error) } /* - - DatacentersServersSuspendPost Suspend Cubes instances - - Suspend the specified Cubes instance within the data center. The instance will not be deleted, and allocated resources will continue to be billed. + - DatacentersServersSuspendPost Suspend a Cube Server by ID + - Suspends the specified Cubes instance within the data center. -This operation is only supported for the Cubes. +The instance is not deleted and allocated resources continue to be billed. You can perform this operation only for Cube Servers. + +To check the status of the request, you can use the 'Location' HTTP header in the response (see 'Requests' for more information). - @param ctx _context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background(). - @param datacenterId The unique ID of the data center. - @param serverId The unique ID of the server. @@ -3240,15 +3266,13 @@ func (r ApiDatacentersServersUpgradePostRequest) Execute() (*APIResponse, error) } /* - - DatacentersServersUpgradePost Upgrade servers - - Upgrade the server version, if needed. To determine if an upgrade is available, execute the following call: - -'/datacenters/{datacenterId}/servers?upgradeNeeded=true' - - @param ctx _context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background(). - - @param datacenterId The unique ID of the data center. - - @param serverId The unique ID of the server. - - @return ApiDatacentersServersUpgradePostRequest -*/ + * DatacentersServersUpgradePost Upgrade a Server by ID + * Upgrades the server version. + * @param ctx _context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background(). + * @param datacenterId The unique ID of the data center. + * @param serverId The unique ID of the server. 
+ * @return ApiDatacentersServersUpgradePostRequest + */ func (a *ServersApiService) DatacentersServersUpgradePost(ctx _context.Context, datacenterId string, serverId string) ApiDatacentersServersUpgradePostRequest { return ApiDatacentersServersUpgradePostRequest{ ApiService: a, @@ -3408,14 +3432,16 @@ func (r ApiDatacentersServersVolumesDeleteRequest) Execute() (*APIResponse, erro } /* - * DatacentersServersVolumesDelete Detach volumes - * Detach the specified volume from the server without deleting it from the data center. A separate request must be made to perform the deletion. - * @param ctx _context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background(). - * @param datacenterId The unique ID of the data center. - * @param serverId The unique ID of the server. - * @param volumeId The unique ID of the volume. - * @return ApiDatacentersServersVolumesDeleteRequest - */ + - DatacentersServersVolumesDelete Detach a Volume by ID + - Detaches the specified volume from the server. + +Note that only the volume's connection to the specified server is disconnected. If you want to delete the volume, you must submit a separate request to perform the deletion. + - @param ctx _context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background(). + - @param datacenterId The unique ID of the data center. + - @param serverId The unique ID of the server. + - @param volumeId The unique ID of the volume. + - @return ApiDatacentersServersVolumesDeleteRequest +*/ func (a *ServersApiService) DatacentersServersVolumesDelete(ctx _context.Context, datacenterId string, serverId string, volumeId string) ApiDatacentersServersVolumesDeleteRequest { return ApiDatacentersServersVolumesDeleteRequest{ ApiService: a, @@ -3577,8 +3603,8 @@ func (r ApiDatacentersServersVolumesFindByIdRequest) Execute() (Volume, *APIResp } /* - * DatacentersServersVolumesFindById Retrieve attached volumes - * Retrieve the properties of the volume, attached to the specified server. + * DatacentersServersVolumesFindById Get Attached Volume by ID + * Retrieves the properties of the volume attached to the specified server. * @param ctx _context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background(). * @param datacenterId The unique ID of the data center. * @param serverId The unique ID of the server. @@ -3789,8 +3815,8 @@ func (r ApiDatacentersServersVolumesGetRequest) Execute() (AttachedVolumes, *API } /* - * DatacentersServersVolumesGet List attached volumes - * List all volumes, attached to the specified server. + * DatacentersServersVolumesGet Get Attached Volumes + * Lists all volumes attached to the specified server. * @param ctx _context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background(). * @param datacenterId The unique ID of the data center. * @param serverId The unique ID of the server. @@ -4001,12 +4027,14 @@ func (r ApiDatacentersServersVolumesPostRequest) Execute() (Volume, *APIResponse } /* - - DatacentersServersVolumesPost Attach volumes - - Attach an existing storage volume to the specified server. + - DatacentersServersVolumesPost Attach a Volume to a Server + - Attaches an existing storage volume to the specified server. + +You can attach an existing volume in the VDC to a server.
To move a volume from one server to another, you must first detach the volume from the first server and attach it to the second server. -A volume scan also be created and attached in one step by providing the new volume description as payload. +It is also possible to create and attach a volume in one step by simply providing a new volume description as a payload. The only difference is the URL; see 'Creating a Volume' for details about volumes. -The combined total of attached volumes and NICs cannot exceed 24 per server. +Note that the combined total of attached volumes and NICs cannot exceed 24 per server. - @param ctx _context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background(). - @param datacenterId The unique ID of the data center. - @param serverId The unique ID of the server. diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/api_snapshots.go b/vendor/github.com/ionos-cloud/sdk-go/v6/api_snapshots.go index bc68062bf..2a6302b8a 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/api_snapshots.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/api_snapshots.go @@ -781,8 +781,8 @@ func (r ApiSnapshotsPutRequest) Execute() (Snapshot, *APIResponse, error) { } /* - * SnapshotsPut Modify snapshots - * Modify the properties of the specified snapshot. + * SnapshotsPut Modify a Snapshot by ID + * Modifies the properties of the specified snapshot. * @param ctx _context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background(). * @param snapshotId The unique ID of the snapshot. * @return ApiSnapshotsPutRequest diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/api_target_groups.go b/vendor/github.com/ionos-cloud/sdk-go/v6/api_target_groups.go index 8a3f9fca3..0e111462f 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/api_target_groups.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/api_target_groups.go @@ -54,8 +54,8 @@ func (r ApiTargetGroupsDeleteRequest) Execute() (*APIResponse, error) { } /* - * TargetGroupsDelete Remove target groups - * Remove the specified target group. + * TargetGroupsDelete Delete a Target Group by ID + * Deletes the target group specified by its ID. * @param ctx _context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background(). * @param targetGroupId The unique ID of the target group. * @return ApiTargetGroupsDeleteRequest @@ -215,8 +215,8 @@ func (r ApiTargetgroupsFindByTargetGroupIdRequest) Execute() (TargetGroup, *APIR } /* - * TargetgroupsFindByTargetGroupId Retrieve target groups - * Retrieve the properties of the specified target group. + * TargetgroupsFindByTargetGroupId Get a Target Group by ID + * Retrieves the properties of the target group specified by its ID. * @param ctx _context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background(). * @param targetGroupId The unique ID of the target group. * @return ApiTargetgroupsFindByTargetGroupIdRequest @@ -419,11 +419,13 @@ func (r ApiTargetgroupsGetRequest) Execute() (TargetGroups, *APIResponse, error) } /* - * TargetgroupsGet List target groups - * List all target groups. - * @param ctx _context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background(). - * @return ApiTargetgroupsGetRequest - */ + - TargetgroupsGet Get Target Groups + - Lists target groups. 
+ +A target group is a set of one or more registered targets. You must specify an IP address, a port number, and a weight for each target. Any object with an IP address in your VDC can be a target, for example, a VM, another load balancer, etc. You can register a target with multiple target groups. + - @param ctx _context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background(). + - @return ApiTargetgroupsGetRequest +*/ func (a *TargetGroupsApiService) TargetgroupsGet(ctx _context.Context) ApiTargetgroupsGetRequest { return ApiTargetgroupsGetRequest{ ApiService: a, @@ -624,8 +626,8 @@ func (r ApiTargetgroupsPatchRequest) Execute() (TargetGroup, *APIResponse, error } /* - * TargetgroupsPatch Partially modify target groups - * Update the properties of the specified target group. + * TargetgroupsPatch Partially Modify a Target Group by ID + * Updates the properties of the target group specified by its ID. * @param ctx _context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background(). * @param targetGroupId The unique ID of the target group. * @return ApiTargetgroupsPatchRequest @@ -806,8 +808,8 @@ func (r ApiTargetgroupsPostRequest) Execute() (TargetGroup, *APIResponse, error) } /* - * TargetgroupsPost Create target groups - * Create a target group. + * TargetgroupsPost Create a Target Group + * Creates a target group. * @param ctx _context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background(). * @return ApiTargetgroupsPostRequest */ @@ -986,8 +988,8 @@ func (r ApiTargetgroupsPutRequest) Execute() (TargetGroup, *APIResponse, error) } /* - * TargetgroupsPut Modify target groups - * Modify the properties of the specified target group. + * TargetgroupsPut Modify a Target Group by ID + * Modifies the properties of the target group specified by its ID. * @param ctx _context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background(). * @param targetGroupId The unique ID of the target group. * @return ApiTargetgroupsPutRequest diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/api_templates.go b/vendor/github.com/ionos-cloud/sdk-go/v6/api_templates.go index 0fc0f6832..31823f0e5 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/api_templates.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/api_templates.go @@ -44,14 +44,12 @@ func (r ApiTemplatesFindByIdRequest) Execute() (Template, *APIResponse, error) { } /* - - TemplatesFindById Retrieve Cubes Templates - - Retrieve the properties of the specified Cubes Template. - -This operation is only supported for the Cubes. - - @param ctx _context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background(). - - @param templateId The unique Template ID. - - @return ApiTemplatesFindByIdRequest -*/ + * TemplatesFindById Get Cubes Template by ID + * Retrieves the properties of the Cubes template specified by its ID. + * @param ctx _context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background(). + * @param templateId The unique template ID. 
+ * @return ApiTemplatesFindByIdRequest + */ func (a *TemplatesApiService) TemplatesFindById(ctx _context.Context, templateId string) ApiTemplatesFindByIdRequest { return ApiTemplatesFindByIdRequest{ ApiService: a, @@ -219,12 +217,14 @@ func (r ApiTemplatesGetRequest) Execute() (Templates, *APIResponse, error) { } /* - - TemplatesGet List Cubes Templates - - List all of the available Cubes Templates. + - TemplatesGet Get Cubes Templates + - Retrieves all available templates. + +Templates provide a pre-defined configuration for Cube servers. -This operation is only supported for the Cubes. - - @param ctx _context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background(). - - @return ApiTemplatesGetRequest + >Templates are read-only and cannot be created, modified, or deleted by users. + * @param ctx _context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background(). + * @return ApiTemplatesGetRequest */ func (a *TemplatesApiService) TemplatesGet(ctx _context.Context) ApiTemplatesGetRequest { return ApiTemplatesGetRequest{ diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/api_user_management.go b/vendor/github.com/ionos-cloud/sdk-go/v6/api_user_management.go index e67741ca1..e159bd5ad 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/api_user_management.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/api_user_management.go @@ -2468,8 +2468,8 @@ func (r ApiUmGroupsUsersPostRequest) Execute() (User, *APIResponse, error) { } /* - * UmGroupsUsersPost Add group members - * Add an existing user to the specified group. + * UmGroupsUsersPost Add a Group Member + * Adds an existing user to the specified group. * @param ctx _context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background(). * @param groupId The unique ID of the group. * @return ApiUmGroupsUsersPostRequest diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/api_user_s3_keys.go b/vendor/github.com/ionos-cloud/sdk-go/v6/api_user_s3_keys.go index 1e7ca3bd8..e46bbd737 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/api_user_s3_keys.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/api_user_s3_keys.go @@ -784,8 +784,8 @@ func (r ApiUmUsersS3keysPutRequest) Execute() (S3Key, *APIResponse, error) { } /* - * UmUsersS3keysPut Modify S3 keys by key ID - * Enable or disable the specified user S3 key. + * UmUsersS3keysPut Modify an S3 Key by Key ID + * Enables or disables the specified user S3 key. * @param ctx _context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background(). * @param userId The unique ID of the user. * @param keyId The unique ID of the S3 key. diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/api_volumes.go b/vendor/github.com/ionos-cloud/sdk-go/v6/api_volumes.go index 91b766237..b869d7fff 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/api_volumes.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/api_volumes.go @@ -1032,8 +1032,8 @@ func (r ApiDatacentersVolumesPostRequest) Execute() (Volume, *APIResponse, error } /* - * DatacentersVolumesPost Create volumes - * Create a storage volume within the specified data center. The volume will not be attached! Attaching volumes is described in the Servers section. + * DatacentersVolumesPost Create a Volume + * Creates a storage volume within the specified data center.
The volume will not be attached! Attaching volumes is described in the Servers section. * @param ctx _context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background(). * @param datacenterId The unique ID of the data center. * @return ApiDatacentersVolumesPostRequest @@ -1216,8 +1216,8 @@ func (r ApiDatacentersVolumesPutRequest) Execute() (Volume, *APIResponse, error) } /* - * DatacentersVolumesPut Modify volumes - * Modify the properties of the specified volume within the data center. + * DatacentersVolumesPut Modify a Volume by ID + * Modifies the properties of the specified volume within the data center. * @param ctx _context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background(). * @param datacenterId The unique ID of the data center. * @param volumeId The unique ID of the volume. diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/client.go b/vendor/github.com/ionos-cloud/sdk-go/v6/client.go index 6f22ebf11..0bbdf2a6d 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/client.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/client.go @@ -53,7 +53,7 @@ const ( RequestStatusFailed = "FAILED" RequestStatusDone = "DONE" - Version = "6.1.4" + Version = "6.1.6" ) // Constants for APIs diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/configuration.go b/vendor/github.com/ionos-cloud/sdk-go/v6/configuration.go index 85916a372..d6ef150c2 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/configuration.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/configuration.go @@ -130,7 +130,7 @@ func NewConfiguration(username, password, token, hostUrl string) *Configuration cfg := &Configuration{ DefaultHeader: make(map[string]string), DefaultQueryParams: url.Values{}, - UserAgent: "ionos-cloud-sdk-go/v6.1.4", + UserAgent: "ionos-cloud-sdk-go/v6.1.6", Debug: false, Username: username, Password: password, diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_application_load_balancer.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_application_load_balancer.go index 99b2a8f76..a63fe3431 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_application_load_balancer.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_application_load_balancer.go @@ -20,7 +20,7 @@ type ApplicationLoadBalancer struct { Id *string `json:"id,omitempty"` // The type of object that has been created. Type *Type `json:"type,omitempty"` - // URL to the object representation (absolute path). + // The URL to the object representation (absolute path). Href *string `json:"href,omitempty"` Metadata *DatacenterElementMetadata `json:"metadata,omitempty"` Properties *ApplicationLoadBalancerProperties `json:"properties"` diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_application_load_balancer_forwarding_rule.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_application_load_balancer_forwarding_rule.go index 24fe200e1..34c30dfc6 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_application_load_balancer_forwarding_rule.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_application_load_balancer_forwarding_rule.go @@ -20,7 +20,7 @@ type ApplicationLoadBalancerForwardingRule struct { Id *string `json:"id,omitempty"` // The type of object that has been created. Type *Type `json:"type,omitempty"` - // URL to the object representation (absolute path). + // The URL to the object representation (absolute path). 
Href *string `json:"href,omitempty"` Metadata *DatacenterElementMetadata `json:"metadata,omitempty"` Properties *ApplicationLoadBalancerForwardingRuleProperties `json:"properties"` diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_application_load_balancer_forwarding_rule_properties.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_application_load_balancer_forwarding_rule_properties.go index 1a0ebe9fe..f73da694f 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_application_load_balancer_forwarding_rule_properties.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_application_load_balancer_forwarding_rule_properties.go @@ -18,17 +18,17 @@ import ( type ApplicationLoadBalancerForwardingRuleProperties struct { // The name of the Application Load Balancer forwarding rule. Name *string `json:"name"` - // Balancing protocol + // The balancing protocol. Protocol *string `json:"protocol"` - // Listening (inbound) IP + // The listening (inbound) IP. ListenerIp *string `json:"listenerIp"` - // Listening (inbound) port number; valid range is 1 to 65535. + // The listening (inbound) port number; the valid range is 1 to 65535. ListenerPort *int32 `json:"listenerPort"` // The maximum time in milliseconds to wait for the client to acknowledge or send data; default is 50,000 (50 seconds). ClientTimeout *int32 `json:"clientTimeout,omitempty"` // Array of items in the collection. ServerCertificates *[]string `json:"serverCertificates,omitempty"` - // An array of items in the collection. The original order of rules is perserved during processing, except for Forward-type rules are processed after the rules with other action defined. The relative order of Forward-type rules is also preserved during the processing. + // An array of items in the collection. The original order of rules is preserved during processing, except that rules of the 'FORWARD' type are processed after the rules with other defined actions. The relative order of the 'FORWARD' type rules is also preserved during the processing. HttpRules *[]ApplicationLoadBalancerHttpRule `json:"httpRules,omitempty"` } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_application_load_balancer_forwarding_rule_put.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_application_load_balancer_forwarding_rule_put.go index fead7a0e4..0328b229c 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_application_load_balancer_forwarding_rule_put.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_application_load_balancer_forwarding_rule_put.go @@ -20,7 +20,7 @@ type ApplicationLoadBalancerForwardingRulePut struct { Id *string `json:"id,omitempty"` // The type of object that has been created. Type *Type `json:"type,omitempty"` - // URL to the object representation (absolute path). + // The URL to the object representation (absolute path). Href *string `json:"href,omitempty"` Properties *ApplicationLoadBalancerForwardingRuleProperties `json:"properties"` } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_application_load_balancer_forwarding_rules.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_application_load_balancer_forwarding_rules.go index c5fe19840..14b1f86e5 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_application_load_balancer_forwarding_rules.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_application_load_balancer_forwarding_rules.go @@ -20,7 +20,7 @@ type ApplicationLoadBalancerForwardingRules struct { Id *string `json:"id,omitempty"` // The type of object that has been created. 
Type *Type `json:"type,omitempty"` - // URL to the object representation (absolute path). + // The URL to the object representation (absolute path). Href *string `json:"href,omitempty"` // Array of items in the collection. Items *[]ApplicationLoadBalancerForwardingRule `json:"items,omitempty"` diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_application_load_balancer_http_rule.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_application_load_balancer_http_rule.go index 44af9bda0..9c117202e 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_application_load_balancer_http_rule.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_application_load_balancer_http_rule.go @@ -18,21 +18,21 @@ import ( type ApplicationLoadBalancerHttpRule struct { // The unique name of the Application Load Balancer HTTP rule. Name *string `json:"name"` - // Type of the HTTP rule. + // The HTTP rule type. Type *string `json:"type"` - // The ID of the target group; mandatory and only valid for FORWARD actions. + // The ID of the target group; this parameter is mandatory and is valid only for 'FORWARD' actions. TargetGroup *string `json:"targetGroup,omitempty"` - // Default is false; valid only for REDIRECT actions. + // Indicates whether the query part of the URI should be dropped and is valid only for 'REDIRECT' actions. Default value is 'FALSE'; if set to 'TRUE', the redirect URI will not contain any query parameters. DropQuery *bool `json:"dropQuery,omitempty"` - // The location for redirecting; mandatory and valid only for REDIRECT actions. + // The location for the redirection; this parameter is mandatory and valid only for 'REDIRECT' actions. Location *string `json:"location,omitempty"` - // Valid only for REDIRECT and STATIC actions. For REDIRECT actions, default is 301 and possible values are 301, 302, 303, 307, and 308. For STATIC actions, default is 503 and valid range is 200 to 599. + // The status code is for 'REDIRECT' and 'STATIC' actions only. If the HTTP rule is 'REDIRECT' the valid values are: 301, 302, 303, 307, 308; default value is '301'. If the HTTP rule is 'STATIC' the valid values are from the range 200-599; default value is '503'. StatusCode *int32 `json:"statusCode,omitempty"` - // The response message of the request; mandatory for STATIC actions. + // The response message of the request; this parameter is mandatory for 'STATIC' actions. ResponseMessage *string `json:"responseMessage,omitempty"` - // Valid only for STATIC actions. + // Specifies the content type and is valid only for 'STATIC' actions. ContentType *string `json:"contentType,omitempty"` - // An array of items in the collection.The action is only performed if each and every condition is met; if no conditions are set, the rule will always be performed. + // An array of items in the collection. The action will be executed only if each condition is met; the rule will always be applied if no conditions are set.
Conditions *[]ApplicationLoadBalancerHttpRuleCondition `json:"conditions,omitempty"` } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_application_load_balancer_http_rule_condition.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_application_load_balancer_http_rule_condition.go index a53b76793..a61e46532 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_application_load_balancer_http_rule_condition.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_application_load_balancer_http_rule_condition.go @@ -16,15 +16,15 @@ import ( // ApplicationLoadBalancerHttpRuleCondition struct for ApplicationLoadBalancerHttpRuleCondition type ApplicationLoadBalancerHttpRuleCondition struct { - // Type of the HTTP rule condition. + // The HTTP rule condition type. Type *string `json:"type"` - // Matching rule for the HTTP rule condition attribute; mandatory for HEADER, PATH, QUERY, METHOD, HOST, and COOKIE types; must be null when type is SOURCE_IP. + // The matching rule for the HTTP rule condition attribute; this parameter is mandatory for 'HEADER', 'PATH', 'QUERY', 'METHOD', 'HOST', and 'COOKIE' types. It must be 'null' if the type is 'SOURCE_IP'. Condition *string `json:"condition"` - // Specifies whether the condition is negated or not; the default is False. + // Specifies whether the condition should be negated; the default value is 'FALSE'. Negate *bool `json:"negate,omitempty"` - // Must be null when type is PATH, METHOD, HOST, or SOURCE_IP. Key can only be set when type is COOKIES, HEADER, or QUERY. + // The key can only be set when the HTTP rule condition type is 'COOKIES', 'HEADER', or 'QUERY'. For the type 'PATH', 'METHOD', 'HOST', or 'SOURCE_IP' the value must be 'null'. Key *string `json:"key,omitempty"` - // Mandatory for conditions CONTAINS, EQUALS, MATCHES, STARTS_WITH, ENDS_WITH; must be null when condition is EXISTS; should be a valid CIDR if provided and if type is SOURCE_IP. + // This parameter is mandatory for the conditions 'CONTAINS', 'EQUALS', 'MATCHES', 'STARTS_WITH', 'ENDS_WITH', or if the type is 'SOURCE_IP'. Specify a valid CIDR. If the condition is 'EXISTS', the value must be 'null'. Value *string `json:"value,omitempty"` } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_application_load_balancer_properties.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_application_load_balancer_properties.go index 81a42ed99..5055570bd 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_application_load_balancer_properties.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_application_load_balancer_properties.go @@ -16,13 +16,13 @@ import ( // ApplicationLoadBalancerProperties struct for ApplicationLoadBalancerProperties type ApplicationLoadBalancerProperties struct { - // The name of the Application Load Balancer. + // The Application Load Balancer name. Name *string `json:"name"` - // ID of the listening (inbound) LAN. + // The ID of the listening (inbound) LAN. ListenerLan *int32 `json:"listenerLan"` - // Collection of the Application Load Balancer IP addresses. (Inbound and outbound) IPs of the listenerLan are customer-reserved public IPs for the public Load Balancers, and private IPs for the private Load Balancers. + // Collection of the Application Load Balancer IP addresses. (Inbound and outbound) IPs of the 'listenerLan' are customer-reserved public IPs for the public load balancers, and private IPs for the private load balancers. Ips *[]string `json:"ips,omitempty"` - // ID of the balanced private target LAN (outbound). 
+ // The ID of the balanced private target LAN (outbound). TargetLan *int32 `json:"targetLan"` // Collection of private IP addresses with the subnet mask of the Application Load Balancer. IPs must contain valid a subnet mask. If no IP is provided, the system will generate an IP with /24 subnet. LbPrivateIps *[]string `json:"lbPrivateIps,omitempty"` diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_application_load_balancer_put.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_application_load_balancer_put.go index 5889ad962..ce188b2d8 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_application_load_balancer_put.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_application_load_balancer_put.go @@ -20,7 +20,7 @@ type ApplicationLoadBalancerPut struct { Id *string `json:"id,omitempty"` // The type of object that has been created. Type *Type `json:"type,omitempty"` - // URL to the object representation (absolute path). + // The URL to the object representation (absolute path). Href *string `json:"href,omitempty"` Properties *ApplicationLoadBalancerProperties `json:"properties"` } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_application_load_balancers.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_application_load_balancers.go index 41219e162..a3eff3d6a 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_application_load_balancers.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_application_load_balancers.go @@ -20,7 +20,7 @@ type ApplicationLoadBalancers struct { Id *string `json:"id,omitempty"` // The type of object that has been created. Type *Type `json:"type,omitempty"` - // URL to the object representation (absolute path). + // The URL to the object representation (absolute path). Href *string `json:"href,omitempty"` // Array of items in the collection. Items *[]ApplicationLoadBalancer `json:"items,omitempty"` diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_attached_volumes.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_attached_volumes.go index ed03aea69..3791f488e 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_attached_volumes.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_attached_volumes.go @@ -20,7 +20,7 @@ type AttachedVolumes struct { Id *string `json:"id,omitempty"` // The type of object that has been created. Type *Type `json:"type,omitempty"` - // URL to the object representation (absolute path). + // The URL to the object representation (absolute path). Href *string `json:"href,omitempty"` // Array of items in the collection. Items *[]Volume `json:"items,omitempty"` diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_contract_properties.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_contract_properties.go index a888e074f..522f61829 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_contract_properties.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_contract_properties.go @@ -18,9 +18,9 @@ import ( type ContractProperties struct { // The contract number. ContractNumber *int64 `json:"contractNumber,omitempty"` - // The owner of the contract. + // The contract owner's user name. Owner *string `json:"owner,omitempty"` - // The status of the contract. + // The contract status. Status *string `json:"status,omitempty"` // The registration domain of the contract. 
RegDomain *string `json:"regDomain,omitempty"` diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_contracts.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_contracts.go index 5695f7227..c6fc46e31 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_contracts.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_contracts.go @@ -20,7 +20,7 @@ type Contracts struct { Id *string `json:"id,omitempty"` // The type of object that has been created. Type *Type `json:"type,omitempty"` - // URL to the object representation (absolute path). + // The URL to the object representation (absolute path). Href *string `json:"href,omitempty"` // Array of items in the collection. Items *[]Contract `json:"items,omitempty"` diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_datacenter_element_metadata.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_datacenter_element_metadata.go index 6f504314d..3034ac67d 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_datacenter_element_metadata.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_datacenter_element_metadata.go @@ -31,7 +31,7 @@ type DatacenterElementMetadata struct { LastModifiedBy *string `json:"lastModifiedBy,omitempty"` // The unique ID of the user who last modified the resource. LastModifiedByUserId *string `json:"lastModifiedByUserId,omitempty"` - // State of the resource. *AVAILABLE* There are no pending modification requests for this item; *BUSY* There is at least one modification request pending and all following requests will be queued; *INACTIVE* Resource has been de-provisioned; *DEPLOYING* Resource state DEPLOYING - relevant for Kubernetes cluster/nodepool; *ACTIVE* Resource state ACTIVE - relevant for Kubernetes cluster/nodepool; *FAILED* Resource state FAILED - relevant for Kubernetes cluster/nodepool; *SUSPENDED* Resource state SUSPENDED - relevant for Kubernetes cluster/nodepool; *FAILED_SUSPENDED* Resource state FAILED_SUSPENDED - relevant for Kubernetes cluster; *UPDATING* Resource state UPDATING - relevant for Kubernetes cluster/nodepool; *FAILED_UPDATING* Resource state FAILED_UPDATING - relevant for Kubernetes cluster/nodepool; *DESTROYING* Resource state DESTROYING - relevant for Kubernetes cluster; *FAILED_DESTROYING* Resource state FAILED_DESTROYING - relevant for Kubernetes cluster/nodepool; *TERMINATED* Resource state TERMINATED - relevant for Kubernetes cluster/nodepool. + // State of the resource. 
*AVAILABLE* There are no pending modification requests for this item; *BUSY* There is at least one modification request pending and all following requests will be queued; *INACTIVE* Resource has been de-provisioned; *DEPLOYING* Resource state DEPLOYING - relevant for Kubernetes cluster/nodepool; *ACTIVE* Resource state ACTIVE - relevant for Kubernetes cluster/nodepool; *FAILED* Resource state FAILED - relevant for Kubernetes cluster/nodepool; *SUSPENDED* Resource state SUSPENDED - relevant for Kubernetes cluster/nodepool; *FAILED_SUSPENDED* Resource state FAILED_SUSPENDED - relevant for Kubernetes cluster; *UPDATING* Resource state UPDATING - relevant for Kubernetes cluster/nodepool; *FAILED_UPDATING* Resource state FAILED_UPDATING - relevant for Kubernetes cluster/nodepool; *DESTROYING* Resource state DESTROYING - relevant for Kubernetes cluster; *FAILED_DESTROYING* Resource state FAILED_DESTROYING - relevant for Kubernetes cluster/nodepool; *TERMINATED* Resource state TERMINATED - relevant for Kubernetes cluster/nodepool; *HIBERNATING* Resource state HIBERNATING - relevant for Kubernetes cluster/nodepool; *FAILED_HIBERNATING* Resource state FAILED_HIBERNATING - relevant for Kubernetes cluster/nodepool; *MAINTENANCE* Resource state MAINTENANCE - relevant for Kubernetes cluster/nodepool.
 State *string `json:"state,omitempty"`
 }
diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_firewallrule_properties.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_firewallrule_properties.go
index 188c3e7f1..640cd23fb 100644
--- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_firewallrule_properties.go
+++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_firewallrule_properties.go
@@ -22,13 +22,15 @@ type FirewallruleProperties struct {
 Protocol *string `json:"protocol"`
 // Only traffic originating from the respective MAC address is allowed. Valid format: aa:bb:cc:dd:ee:ff. Value null allows traffic from any MAC address.
 SourceMac *string `json:"sourceMac,omitempty"`
- // Only traffic originating from the respective IPv4 address is allowed. Value null allows traffic from any IP address.
+ // The IP version for this rule. If sourceIp or targetIp are specified, you can omit this value - the IP version will then be deduced from the IP address(es) used; if you specify it anyway, it must match the specified IP address(es). If neither sourceIp nor targetIp are specified, this rule allows traffic only for the specified IP version. If neither sourceIp, targetIp nor ipVersion are specified, this rule will only allow IPv4 traffic.
+ IpVersion *string `json:"ipVersion,omitempty"`
+ // Only traffic originating from the respective IP address (or CIDR block) is allowed. Value null allows traffic from any IP address (according to the selected ipVersion).
 SourceIp *string `json:"sourceIp,omitempty"`
- // If the target NIC has multiple IP addresses, only the traffic directed to the respective IP address of the NIC is allowed. Value null Value null allows traffic to any target IP address.
+ // If the target NIC has multiple IP addresses, only the traffic directed to the respective IP address (or CIDR block) of the NIC is allowed. Value null allows traffic to any target IP address (according to the selected ipVersion).
 TargetIp *string `json:"targetIp,omitempty"`
- // Defines the allowed code (from 0 to 254) if protocol ICMP is chosen. Value null allows all codes.
+ // Defines the allowed code (from 0 to 254) if protocol ICMP or ICMPv6 is chosen. Value null allows all codes. IcmpCode *int32 `json:"icmpCode,omitempty"` - // Defines the allowed type (from 0 to 254) if the protocol ICMP is chosen. Value null allows all types. + // Defines the allowed type (from 0 to 254) if the protocol ICMP or ICMPv6 is chosen. Value null allows all types. IcmpType *int32 `json:"icmpType,omitempty"` // Defines the start range of the allowed port (from 1 to 65534) if protocol TCP or UDP is chosen. Leave portRangeStart and portRangeEnd value null to allow all ports. PortRangeStart *int32 `json:"portRangeStart,omitempty"` @@ -172,6 +174,44 @@ func (o *FirewallruleProperties) HasSourceMac() bool { return false } +// GetIpVersion returns the IpVersion field value +// If the value is explicit nil, the zero value for string will be returned +func (o *FirewallruleProperties) GetIpVersion() *string { + if o == nil { + return nil + } + + return o.IpVersion + +} + +// GetIpVersionOk returns a tuple with the IpVersion field value +// and a boolean to check if the value has been set. +// NOTE: If the value is an explicit nil, `nil, true` will be returned +func (o *FirewallruleProperties) GetIpVersionOk() (*string, bool) { + if o == nil { + return nil, false + } + + return o.IpVersion, true +} + +// SetIpVersion sets field value +func (o *FirewallruleProperties) SetIpVersion(v string) { + + o.IpVersion = &v + +} + +// HasIpVersion returns a boolean if a field has been set. +func (o *FirewallruleProperties) HasIpVersion() bool { + if o != nil && o.IpVersion != nil { + return true + } + + return false +} + // GetSourceIp returns the SourceIp field value // If the value is explicit nil, the zero value for string will be returned func (o *FirewallruleProperties) GetSourceIp() *string { @@ -447,6 +487,9 @@ func (o FirewallruleProperties) MarshalJSON() ([]byte, error) { toSerialize["protocol"] = o.Protocol } toSerialize["sourceMac"] = o.SourceMac + if o.IpVersion != nil { + toSerialize["ipVersion"] = o.IpVersion + } toSerialize["sourceIp"] = o.SourceIp toSerialize["targetIp"] = o.TargetIp toSerialize["icmpCode"] = o.IcmpCode diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_flow_log.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_flow_log.go index ca6a5435c..e8d74517f 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_flow_log.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_flow_log.go @@ -20,7 +20,7 @@ type FlowLog struct { Id *string `json:"id,omitempty"` // The type of object that has been created. Type *Type `json:"type,omitempty"` - // URL to the object representation (absolute path). + // The URL to the object representation (absolute path). Href *string `json:"href,omitempty"` Metadata *DatacenterElementMetadata `json:"metadata,omitempty"` Properties *FlowLogProperties `json:"properties"` diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_flow_log_properties.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_flow_log_properties.go index 7d3df9fae..dca17c214 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_flow_log_properties.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_flow_log_properties.go @@ -16,13 +16,13 @@ import ( // FlowLogProperties struct for FlowLogProperties type FlowLogProperties struct { - // The name of the resource. + // The resource name. Name *string `json:"name"` // Specifies the traffic action pattern. Action *string `json:"action"` // Specifies the traffic direction pattern. 
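// Returning to the FirewallruleProperties hunk above: a minimal godoc-style
// sketch of opting a rule into IPv6 via the new generated setter (assuming
// the ionoscloud import; protocol and version values are illustrative
// assumptions). Note that MarshalJSON above emits ipVersion only when it is
// non-nil:
//
//	func newIPv6IcmpRule() ionoscloud.FirewallruleProperties {
//		protocol := "ICMPv6"
//		props := ionoscloud.FirewallruleProperties{Protocol: &protocol}
//		// Optional: when sourceIp/targetIp are set, the API can deduce this.
//		props.SetIpVersion("IPv6")
//		return props
//	}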
Direction *string `json:"direction"` - // S3 bucket name of an existing IONOS Cloud S3 bucket. + // The S3 bucket name of an existing IONOS Cloud S3 bucket. Bucket *string `json:"bucket"` } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_flow_logs.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_flow_logs.go index 6ecbeac2b..48f070e31 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_flow_logs.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_flow_logs.go @@ -20,7 +20,7 @@ type FlowLogs struct { Id *string `json:"id,omitempty"` // The type of object that has been created. Type *Type `json:"type,omitempty"` - // URL to the object representation (absolute path). + // The URL to the object representation (absolute path). Href *string `json:"href,omitempty"` // Array of items in the collection. Items *[]FlowLog `json:"items,omitempty"` diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_group_properties.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_group_properties.go index f075b0bb6..c4c55d9b1 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_group_properties.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_group_properties.go @@ -16,7 +16,7 @@ import ( // GroupProperties struct for GroupProperties type GroupProperties struct { - // The name of the resource. + // The name of the resource. Name *string `json:"name,omitempty"` // Create data center privilege. CreateDataCenter *bool `json:"createDataCenter,omitempty"` @@ -44,6 +44,12 @@ type GroupProperties struct { AccessAndManageCertificates *bool `json:"accessAndManageCertificates,omitempty"` // Privilege for a group to manage DBaaS related functionality. ManageDBaaS *bool `json:"manageDBaaS,omitempty"` + // Privilege for a group to access and manage dns records. + AccessAndManageDns *bool `json:"accessAndManageDns,omitempty"` + // Privilege for group accessing container registry related functionality. + ManageRegistry *bool `json:"manageRegistry,omitempty"` + // Privilege for a group to access and manage Data Platform. + ManageDataplatform *bool `json:"manageDataplatform,omitempty"` } // NewGroupProperties instantiates a new GroupProperties object @@ -596,6 +602,120 @@ func (o *GroupProperties) HasManageDBaaS() bool { return false } +// GetAccessAndManageDns returns the AccessAndManageDns field value +// If the value is explicit nil, the zero value for bool will be returned +func (o *GroupProperties) GetAccessAndManageDns() *bool { + if o == nil { + return nil + } + + return o.AccessAndManageDns + +} + +// GetAccessAndManageDnsOk returns a tuple with the AccessAndManageDns field value +// and a boolean to check if the value has been set. +// NOTE: If the value is an explicit nil, `nil, true` will be returned +func (o *GroupProperties) GetAccessAndManageDnsOk() (*bool, bool) { + if o == nil { + return nil, false + } + + return o.AccessAndManageDns, true +} + +// SetAccessAndManageDns sets field value +func (o *GroupProperties) SetAccessAndManageDns(v bool) { + + o.AccessAndManageDns = &v + +} + +// HasAccessAndManageDns returns a boolean if a field has been set. 
+func (o *GroupProperties) HasAccessAndManageDns() bool { + if o != nil && o.AccessAndManageDns != nil { + return true + } + + return false +} + +// GetManageRegistry returns the ManageRegistry field value +// If the value is explicit nil, the zero value for bool will be returned +func (o *GroupProperties) GetManageRegistry() *bool { + if o == nil { + return nil + } + + return o.ManageRegistry + +} + +// GetManageRegistryOk returns a tuple with the ManageRegistry field value +// and a boolean to check if the value has been set. +// NOTE: If the value is an explicit nil, `nil, true` will be returned +func (o *GroupProperties) GetManageRegistryOk() (*bool, bool) { + if o == nil { + return nil, false + } + + return o.ManageRegistry, true +} + +// SetManageRegistry sets field value +func (o *GroupProperties) SetManageRegistry(v bool) { + + o.ManageRegistry = &v + +} + +// HasManageRegistry returns a boolean if a field has been set. +func (o *GroupProperties) HasManageRegistry() bool { + if o != nil && o.ManageRegistry != nil { + return true + } + + return false +} + +// GetManageDataplatform returns the ManageDataplatform field value +// If the value is explicit nil, the zero value for bool will be returned +func (o *GroupProperties) GetManageDataplatform() *bool { + if o == nil { + return nil + } + + return o.ManageDataplatform + +} + +// GetManageDataplatformOk returns a tuple with the ManageDataplatform field value +// and a boolean to check if the value has been set. +// NOTE: If the value is an explicit nil, `nil, true` will be returned +func (o *GroupProperties) GetManageDataplatformOk() (*bool, bool) { + if o == nil { + return nil, false + } + + return o.ManageDataplatform, true +} + +// SetManageDataplatform sets field value +func (o *GroupProperties) SetManageDataplatform(v bool) { + + o.ManageDataplatform = &v + +} + +// HasManageDataplatform returns a boolean if a field has been set. +func (o *GroupProperties) HasManageDataplatform() bool { + if o != nil && o.ManageDataplatform != nil { + return true + } + + return false +} + func (o GroupProperties) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} if o.Name != nil { @@ -640,6 +760,15 @@ func (o GroupProperties) MarshalJSON() ([]byte, error) { if o.ManageDBaaS != nil { toSerialize["manageDBaaS"] = o.ManageDBaaS } + if o.AccessAndManageDns != nil { + toSerialize["accessAndManageDns"] = o.AccessAndManageDns + } + if o.ManageRegistry != nil { + toSerialize["manageRegistry"] = o.ManageRegistry + } + if o.ManageDataplatform != nil { + toSerialize["manageDataplatform"] = o.ManageDataplatform + } return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_image_properties.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_image_properties.go index 94cc7e9ba..2ab0f5a65 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_image_properties.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_image_properties.go @@ -16,13 +16,13 @@ import ( // ImageProperties struct for ImageProperties type ImageProperties struct { - // The name of the resource. + // The resource name. Name *string `json:"name,omitempty"` // Human-readable description. Description *string `json:"description,omitempty"` - // Location of that image/snapshot. + // The location of this image/snapshot. Location *string `json:"location,omitempty"` - // The size of the image in GB. + // The image size in GB. Size *float32 `json:"size,omitempty"` // Hot-plug capable CPU (no reboot required). 
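// Tying together the GroupProperties additions above: the three new
// privilege flags follow the same pointer-plus-setter pattern, and
// MarshalJSON emits them only when set. A godoc-style sketch (assuming the
// ionoscloud import; the group name is an illustrative assumption):
//
//	func newDnsAdminGroup() ionoscloud.GroupProperties {
//		name := "dns-admins"
//		props := ionoscloud.GroupProperties{Name: &name}
//		props.SetAccessAndManageDns(true) // serialized as "accessAndManageDns"
//		props.SetManageRegistry(true)     // container registry functionality
//		props.SetManageDataplatform(false)
//		return props
//	}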
CpuHotPlug *bool `json:"cpuHotPlug,omitempty"` @@ -44,13 +44,13 @@ type ImageProperties struct { DiscScsiHotPlug *bool `json:"discScsiHotPlug,omitempty"` // Hot-unplug capable SCSI drive (no reboot required). Not supported with Windows VMs. DiscScsiHotUnplug *bool `json:"discScsiHotUnplug,omitempty"` - // OS type for this image. + // The OS type of this image. LicenceType *string `json:"licenceType"` // The image type. ImageType *string `json:"imageType,omitempty"` // Indicates whether the image is part of a public repository. Public *bool `json:"public,omitempty"` - // List of image aliases mapped for this Image + // List of image aliases mapped for this image ImageAliases *[]string `json:"imageAliases,omitempty"` // Cloud init compatibility. CloudInit *string `json:"cloudInit,omitempty"` diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_images.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_images.go index 42276184c..ff496fb19 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_images.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_images.go @@ -20,7 +20,7 @@ type Images struct { Id *string `json:"id,omitempty"` // The type of object that has been created. Type *Type `json:"type,omitempty"` - // URL to the object representation (absolute path). + // The URL to the object representation (absolute path). Href *string `json:"href,omitempty"` // Array of items in the collection. Items *[]Image `json:"items,omitempty"` diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_info.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_info.go index 123accc0b..19f4a1467 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_info.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_info.go @@ -16,11 +16,11 @@ import ( // Info struct for Info type Info struct { - // API entry point + // The API entry point. Href *string `json:"href,omitempty"` - // Name of the API + // The API name. Name *string `json:"name,omitempty"` - // Version of the API + // The API version. Version *string `json:"version,omitempty"` } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_auto_scaling.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_auto_scaling.go index 6ba3b8fde..24f95d7b3 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_auto_scaling.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_auto_scaling.go @@ -16,9 +16,9 @@ import ( // KubernetesAutoScaling struct for KubernetesAutoScaling type KubernetesAutoScaling struct { - // The minimum number of worker nodes that the managed node group can scale in. Should be set together with 'maxNodeCount'. Value for this attribute must be greater than equal to 1 and less than equal to maxNodeCount. + // The minimum number of working nodes that the managed node pool can scale must be >= 1 and >= nodeCount. Required if autoScaling is specified. MinNodeCount *int32 `json:"minNodeCount"` - // The maximum number of worker nodes that the managed node pool can scale-out. Should be set together with 'minNodeCount'. Value for this attribute must be greater than equal to 1 and minNodeCount. + // The maximum number of worker nodes that the managed node pool can scale in. Must be >= minNodeCount and must be >= nodeCount. Required if autoScaling is specified. 
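// In godoc-sketch form (assuming the ionoscloud import; node counts are
// illustrative assumptions), both bounds are supplied together when
// autoscaling is enabled, with minNodeCount >= 1 and maxNodeCount >=
// minNodeCount:
//
//	func newAutoScaling() ionoscloud.KubernetesAutoScaling {
//		minNodes, maxNodes := int32(1), int32(3)
//		return ionoscloud.KubernetesAutoScaling{
//			MinNodeCount: &minNodes,
//			MaxNodeCount: &maxNodes,
//		}
//	}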
MaxNodeCount *int32 `json:"maxNodeCount"` } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_cluster.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_cluster.go index 8931a1643..631bda922 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_cluster.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_cluster.go @@ -16,11 +16,11 @@ import ( // KubernetesCluster struct for KubernetesCluster type KubernetesCluster struct { - // The resource's unique identifier. + // The resource unique identifier. Id *string `json:"id,omitempty"` - // The type of object. + // The object type. Type *string `json:"type,omitempty"` - // URL to the object representation (absolute path). + // The URL to the object representation (absolute path). Href *string `json:"href,omitempty"` Metadata *DatacenterElementMetadata `json:"metadata,omitempty"` Properties *KubernetesClusterProperties `json:"properties"` diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_cluster_for_post.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_cluster_for_post.go index 8f375515a..1fbe331ad 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_cluster_for_post.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_cluster_for_post.go @@ -16,11 +16,11 @@ import ( // KubernetesClusterForPost struct for KubernetesClusterForPost type KubernetesClusterForPost struct { - // The resource's unique identifier. + // The resource unique identifier. Id *string `json:"id,omitempty"` - // The type of object. + // The object type. Type *string `json:"type,omitempty"` - // URL to the object representation (absolute path). + // The URL to the object representation (absolute path). Href *string `json:"href,omitempty"` Metadata *DatacenterElementMetadata `json:"metadata,omitempty"` Properties *KubernetesClusterPropertiesForPost `json:"properties"` diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_cluster_properties_for_post.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_cluster_properties_for_post.go index dd13952fa..b44a3227a 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_cluster_properties_for_post.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_cluster_properties_for_post.go @@ -18,12 +18,12 @@ import ( type KubernetesClusterPropertiesForPost struct { // A Kubernetes cluster name. Valid Kubernetes cluster name must be 63 characters or less and must be empty or begin and end with an alphanumeric character ([a-z0-9A-Z]) with dashes (-), underscores (_), dots (.), and alphanumerics between. Name *string `json:"name"` - // The Kubernetes version the cluster is running. This imposes restrictions on what Kubernetes versions can be run in a cluster's nodepools. Additionally, not all Kubernetes versions are viable upgrade targets for all prior versions. + // The Kubernetes version that the cluster is running. This limits which Kubernetes versions can run in a cluster's node pools. Also, not all Kubernetes versions are suitable upgrade targets for all earlier versions. K8sVersion *string `json:"k8sVersion,omitempty"` MaintenanceWindow *KubernetesMaintenanceWindow `json:"maintenanceWindow,omitempty"` - // Access to the K8s API server is restricted to these CIDRs. Traffic, internal to the cluster, is not affected by this restriction. If no allowlist is specified, access is not restricted. 
If an IP without subnet mask is provided, the default value is used: 32 for IPv4 and 128 for IPv6. + // Access to the K8s API server is restricted to these CIDRs. Intra-cluster traffic is not affected by this restriction. If no AllowList is specified, access is not limited. If an IP is specified without a subnet mask, the default value is 32 for IPv4 and 128 for IPv6. ApiSubnetAllowList *[]string `json:"apiSubnetAllowList,omitempty"` - // List of S3 bucket configured for K8s usage. For now it contains only an S3 bucket used to store K8s API audit logs + // List of S3 buckets configured for K8s usage. At the moment, it contains only one S3 bucket that is used to store K8s API audit logs. S3Buckets *[]S3Bucket `json:"s3Buckets,omitempty"` } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_cluster_properties_for_put.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_cluster_properties_for_put.go index c845c73bc..c2b9d44cd 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_cluster_properties_for_put.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_cluster_properties_for_put.go @@ -18,12 +18,12 @@ import ( type KubernetesClusterPropertiesForPut struct { // A Kubernetes cluster name. Valid Kubernetes cluster name must be 63 characters or less and must be empty or begin and end with an alphanumeric character ([a-z0-9A-Z]) with dashes (-), underscores (_), dots (.), and alphanumerics between. Name *string `json:"name"` - // The Kubernetes version the cluster is running. This imposes restrictions on what Kubernetes versions can be run in a cluster's nodepools. Additionally, not all Kubernetes versions are viable upgrade targets for all prior versions. + // The Kubernetes version that the cluster is running. This limits which Kubernetes versions can run in a cluster's node pools. Also, not all Kubernetes versions are suitable upgrade targets for all earlier versions. K8sVersion *string `json:"k8sVersion,omitempty"` MaintenanceWindow *KubernetesMaintenanceWindow `json:"maintenanceWindow,omitempty"` - // Access to the K8s API server is restricted to these CIDRs. Traffic, internal to the cluster, is not affected by this restriction. If no allowlist is specified, access is not restricted. If an IP without subnet mask is provided, the default value is used: 32 for IPv4 and 128 for IPv6. + // Access to the K8s API server is restricted to these CIDRs. Intra-cluster traffic is not affected by this restriction. If no AllowList is specified, access is not limited. If an IP is specified without a subnet mask, the default value is 32 for IPv4 and 128 for IPv6. ApiSubnetAllowList *[]string `json:"apiSubnetAllowList,omitempty"` - // List of S3 bucket configured for K8s usage. For now it contains only an S3 bucket used to store K8s API audit logs + // List of S3 buckets configured for K8s usage. At the moment, it contains only one S3 bucket that is used to store K8s API audit logs. 
S3Buckets *[]S3Bucket `json:"s3Buckets,omitempty"` } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_clusters.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_clusters.go index 4066927e6..f1416d0eb 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_clusters.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_clusters.go @@ -16,13 +16,13 @@ import ( // KubernetesClusters struct for KubernetesClusters type KubernetesClusters struct { - // A unique representation of the Kubernetes cluster as a resource collection. + // The unique representation of the K8s cluster as a resource collection. Id *string `json:"id,omitempty"` - // The type of resource within a collection. + // The resource type within a collection. Type *string `json:"type,omitempty"` - // URL to the collection representation (absolute path). + // The URL to the collection representation (absolute path). Href *string `json:"href,omitempty"` - // Array of items in the collection. + // Array of K8s clusters in the collection. Items *[]KubernetesCluster `json:"items,omitempty"` } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_maintenance_window.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_maintenance_window.go index e257e823b..d5decee54 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_maintenance_window.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_maintenance_window.go @@ -16,9 +16,9 @@ import ( // KubernetesMaintenanceWindow struct for KubernetesMaintenanceWindow type KubernetesMaintenanceWindow struct { - // The day of the week for a maintenance window. + // The weekday for a maintenance window. DayOfTheWeek *string `json:"dayOfTheWeek"` - // The time to use for a maintenance window. Accepted formats are: HH:mm:ss; HH:mm:ss\"Z\"; HH:mm:ssZ. This time may varies by 15 minutes. + // The time to use for a maintenance window. Accepted formats are: HH:mm:ss; HH:mm:ss\"Z\"; HH:mm:ssZ. This time may vary by 15 minutes. Time *string `json:"time"` } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_node.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_node.go index b3d8e2b81..936864aec 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_node.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_node.go @@ -18,9 +18,9 @@ import ( type KubernetesNode struct { // The resource's unique identifier. Id *string `json:"id,omitempty"` - // The type of object. + // The object type. Type *string `json:"type,omitempty"` - // URL to the object representation (absolute path). + // The URL to the object representation (absolute path). 
Href *string `json:"href,omitempty"` Metadata *KubernetesNodeMetadata `json:"metadata,omitempty"` Properties *KubernetesNodeProperties `json:"properties"` diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_node_metadata.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_node_metadata.go index 6d69ee2da..8110ddd61 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_node_metadata.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_node_metadata.go @@ -17,15 +17,15 @@ import ( // KubernetesNodeMetadata struct for KubernetesNodeMetadata type KubernetesNodeMetadata struct { - // Resource's Entity Tag as defined in http://www.w3.org/Protocols/rfc2616/rfc2616-sec3.html#sec3.11 Entity Tag is also added as an 'ETag response header to requests which don't use 'depth' parameter. + // The resource entity tag as defined in http://www.w3.org/Protocols/rfc2616/rfc2616-sec3.html#sec3.11 Entity tags are also added as 'ETag' response headers to requests that do not use the 'depth' parameter. Etag *string `json:"etag,omitempty"` - // The last time the resource was created. + // The date the resource was created. CreatedDate *IonosTime - // The last time the resource was modified. + // The date the resource was last modified. LastModifiedDate *IonosTime - // State of the resource. + // The resource state. State *string `json:"state,omitempty"` - // The last time the software was updated on the node. + // The date when the software on the node was last updated. LastSoftwareUpdatedDate *IonosTime } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_node_pool.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_node_pool.go index 04b859467..a634f0a8b 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_node_pool.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_node_pool.go @@ -18,9 +18,9 @@ import ( type KubernetesNodePool struct { // The resource's unique identifier. Id *string `json:"id,omitempty"` - // The type of object. + // The object type. Type *string `json:"type,omitempty"` - // URL to the object representation (absolute path). + // The URL to the object representation (absolute path). Href *string `json:"href,omitempty"` Metadata *DatacenterElementMetadata `json:"metadata,omitempty"` Properties *KubernetesNodePoolProperties `json:"properties"` diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_node_pool_for_post.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_node_pool_for_post.go index 8fd4e742d..9a89d3d18 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_node_pool_for_post.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_node_pool_for_post.go @@ -18,9 +18,9 @@ import ( type KubernetesNodePoolForPost struct { // The resource's unique identifier. Id *string `json:"id,omitempty"` - // The type of object. + // The object type. Type *string `json:"type,omitempty"` - // URL to the object representation (absolute path). + // The URL to the object representation (absolute path). 
Href *string `json:"href,omitempty"` Metadata *DatacenterElementMetadata `json:"metadata,omitempty"` Properties *KubernetesNodePoolPropertiesForPost `json:"properties"` diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_node_pool_for_put.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_node_pool_for_put.go index 46fda0f5f..10fa4f290 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_node_pool_for_put.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_node_pool_for_put.go @@ -18,9 +18,9 @@ import ( type KubernetesNodePoolForPut struct { // The resource's unique identifier. Id *string `json:"id,omitempty"` - // The type of object. + // The object type. Type *string `json:"type,omitempty"` - // URL to the object representation (absolute path). + // The URL to the object representation (absolute path). Href *string `json:"href,omitempty"` Metadata *DatacenterElementMetadata `json:"metadata,omitempty"` Properties *KubernetesNodePoolPropertiesForPut `json:"properties"` diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_node_pool_lan.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_node_pool_lan.go index 88b08990c..012c0f4b7 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_node_pool_lan.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_node_pool_lan.go @@ -16,11 +16,13 @@ import ( // KubernetesNodePoolLan struct for KubernetesNodePoolLan type KubernetesNodePoolLan struct { - // The LAN ID of an existing LAN at the related datacenter + // The datacenter ID, requires system privileges, for internal usage only + DatacenterId *string `json:"datacenterId,omitempty"` + // The LAN ID of an existing LAN at the related data center Id *int32 `json:"id"` - // Indicates if the Kubernetes node pool LAN will reserve an IP using DHCP. + // Specifies whether the Kubernetes node pool LAN reserves an IP with DHCP. Dhcp *bool `json:"dhcp,omitempty"` - // array of additional LANs attached to worker nodes + // The array of additional LANs attached to worker nodes. Routes *[]KubernetesNodePoolLanRoutes `json:"routes,omitempty"` } @@ -44,6 +46,44 @@ func NewKubernetesNodePoolLanWithDefaults() *KubernetesNodePoolLan { return &this } +// GetDatacenterId returns the DatacenterId field value +// If the value is explicit nil, the zero value for string will be returned +func (o *KubernetesNodePoolLan) GetDatacenterId() *string { + if o == nil { + return nil + } + + return o.DatacenterId + +} + +// GetDatacenterIdOk returns a tuple with the DatacenterId field value +// and a boolean to check if the value has been set. +// NOTE: If the value is an explicit nil, `nil, true` will be returned +func (o *KubernetesNodePoolLan) GetDatacenterIdOk() (*string, bool) { + if o == nil { + return nil, false + } + + return o.DatacenterId, true +} + +// SetDatacenterId sets field value +func (o *KubernetesNodePoolLan) SetDatacenterId(v string) { + + o.DatacenterId = &v + +} + +// HasDatacenterId returns a boolean if a field has been set. 
+func (o *KubernetesNodePoolLan) HasDatacenterId() bool {
+	if o != nil && o.DatacenterId != nil {
+		return true
+	}
+
+	return false
+}
+
 // GetId returns the Id field value
 // If the value is explicit nil, the zero value for int32 will be returned
 func (o *KubernetesNodePoolLan) GetId() *int32 {
@@ -160,6 +200,9 @@ func (o *KubernetesNodePoolLan) HasRoutes() bool {
 func (o KubernetesNodePoolLan) MarshalJSON() ([]byte, error) {
 	toSerialize := map[string]interface{}{}
+	if o.DatacenterId != nil {
+		toSerialize["datacenterId"] = o.DatacenterId
+	}
 	if o.Id != nil {
 		toSerialize["id"] = o.Id
 	}
diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_node_pool_properties.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_node_pool_properties.go
index e6fca9fa3..5e18701de 100644
--- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_node_pool_properties.go
+++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_node_pool_properties.go
@@ -18,35 +18,35 @@ import (
 type KubernetesNodePoolProperties struct {
 // A Kubernetes node pool name. Valid Kubernetes node pool name must be 63 characters or less and must be empty or begin and end with an alphanumeric character ([a-z0-9A-Z]) with dashes (-), underscores (_), dots (.), and alphanumerics between.
 Name *string `json:"name"`
- // A valid ID of the data center, to which user has access.
+ // The unique identifier of the VDC where the worker nodes of the node pool are provisioned. Note that the data center is located in the exact place where the parent cluster of the node pool is located.
 DatacenterId *string `json:"datacenterId"`
- // The number of nodes that make up the node pool.
+ // The number of worker nodes of the node pool.
 NodeCount *int32 `json:"nodeCount"`
- // A valid CPU family name.
+ // The CPU type for the nodes.
 CpuFamily *string `json:"cpuFamily"`
- // The number of cores for the node.
+ // The total number of cores for the nodes.
 CoresCount *int32 `json:"coresCount"`
- // The RAM size for the node. Must be set in multiples of 1024 MB, with minimum size is of 2048 MB.
+ // The RAM size for the nodes. Must be specified in multiples of 1024 MB, with a minimum size of 2048 MB.
 RamSize *int32 `json:"ramSize"`
 // The availability zone in which the target VM should be provisioned.
 AvailabilityZone *string `json:"availabilityZone"`
- // The type of hardware for the volume.
+ // The storage type for the nodes.
 StorageType *string `json:"storageType"`
- // The size of the volume in GB. The size should be greater than 10GB.
+ // The allocated volume size in GB. To achieve good performance, we recommend a size greater than 100GB for SSD.
 StorageSize *int32 `json:"storageSize"`
- // The Kubernetes version the nodepool is running. This imposes restrictions on what Kubernetes versions can be run in a cluster's nodepools. Additionally, not all Kubernetes versions are viable upgrade targets for all prior versions.
+ // The Kubernetes version running in the node pool. Note that this imposes restrictions on which Kubernetes versions can run in the node pools of a cluster. Also, not all Kubernetes versions are suitable upgrade targets for all earlier versions.
 K8sVersion *string `json:"k8sVersion,omitempty"`
 MaintenanceWindow *KubernetesMaintenanceWindow `json:"maintenanceWindow,omitempty"`
 AutoScaling *KubernetesAutoScaling `json:"autoScaling,omitempty"`
- // array of additional LANs attached to worker nodes
+ // The array of existing private LANs to attach to worker nodes.
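// A godoc-style sketch of such a LAN attachment (assuming the ionoscloud
// import; the LAN ID is an illustrative assumption). The new datacenterId
// field above is documented as internal-only, so a client would typically
// read it back rather than set it:
//
//	func newNodePoolLan() ionoscloud.KubernetesNodePoolLan {
//		lanID := int32(2)
//		dhcp := true
//		lan := ionoscloud.KubernetesNodePoolLan{Id: &lanID, Dhcp: &dhcp}
//		if id, ok := lan.GetDatacenterIdOk(); ok && id != nil {
//			_ = *id // populated server-side; requires system privileges
//		}
//		return lan
//	}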
 Lans *[]KubernetesNodePoolLan `json:"lans,omitempty"`
- // map of labels attached to node pool.
+ // The labels attached to the node pool.
 Labels *map[string]string `json:"labels,omitempty"`
- // map of annotations attached to node pool.
+ // The annotations attached to the node pool.
 Annotations *map[string]string `json:"annotations,omitempty"`
- // Optional array of reserved public IP addresses to be used by the nodes. IPs must be from same location as the data center used for the node pool. The array must contain one more IP than maximum number possible number of nodes (nodeCount+1 for fixed number of nodes or maxNodeCount+1 when auto scaling is used). The extra IP is used when the nodes are rebuilt.
+ // Optional array of reserved public IP addresses to be used by the nodes. The IPs must be from the exact location of the node pool's data center. If autoscaling is used, the array must contain one more IP than the maximum possible number of nodes (nodeCount+1 for a fixed number of nodes or maxNodeCount+1). The extra IP is used when the nodes are rebuilt.
 PublicIps *[]string `json:"publicIps,omitempty"`
- // List of available versions for upgrading the node pool.
+ // The list of available versions for upgrading the node pool.
 AvailableUpgradeVersions *[]string `json:"availableUpgradeVersions,omitempty"`
 }
 
diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_node_pool_properties_for_post.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_node_pool_properties_for_post.go
index 9716f7e9c..dff10d338 100644
--- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_node_pool_properties_for_post.go
+++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_node_pool_properties_for_post.go
@@ -18,33 +18,33 @@ import (
 type KubernetesNodePoolPropertiesForPost struct {
 // A Kubernetes node pool name. Valid Kubernetes node pool name must be 63 characters or less and must be empty or begin and end with an alphanumeric character ([a-z0-9A-Z]) with dashes (-), underscores (_), dots (.), and alphanumerics between.
 Name *string `json:"name"`
- // A valid ID of the data center, to which the user has access.
+ // The unique identifier of the VDC where the worker nodes of the node pool are provisioned. Note that the data center is located in the exact place where the parent cluster of the node pool is located.
 DatacenterId *string `json:"datacenterId"`
- // The number of nodes that make up the node pool.
+ // The number of worker nodes of the node pool.
 NodeCount *int32 `json:"nodeCount"`
- // A valid CPU family name.
+ // The CPU type for the nodes.
 CpuFamily *string `json:"cpuFamily"`
- // The number of cores for the node.
+ // The total number of cores for the nodes.
 CoresCount *int32 `json:"coresCount"`
- // The RAM size for the node. Must be set in multiples of 1024 MB, with minimum size is of 2048 MB.
+ // The RAM size for the nodes. Must be specified in multiples of 1024 MB, with a minimum size of 2048 MB.
 RamSize *int32 `json:"ramSize"`
 // The availability zone in which the target VM should be provisioned.
 AvailabilityZone *string `json:"availabilityZone"`
- // The type of hardware for the volume.
+ // The storage type for the nodes.
 StorageType *string `json:"storageType"`
- // The size of the volume in GB. The size should be greater than 10GB.
+ // The allocated volume size in GB. To achieve good performance, we recommend a size greater than 100GB for SSD.
StorageSize *int32 `json:"storageSize"` - // The Kubernetes version the nodepool is running. This imposes restrictions on what Kubernetes versions can be run in a cluster's nodepools. Additionally, not all Kubernetes versions are viable upgrade targets for all prior versions. + // The Kubernetes version running in the node pool. Note that this imposes restrictions on which Kubernetes versions can run in the node pools of a cluster. Also, not all Kubernetes versions are suitable upgrade targets for all earlier versions. K8sVersion *string `json:"k8sVersion,omitempty"` MaintenanceWindow *KubernetesMaintenanceWindow `json:"maintenanceWindow,omitempty"` AutoScaling *KubernetesAutoScaling `json:"autoScaling,omitempty"` - // array of additional LANs attached to worker nodes + // The array of existing private LANs to attach to worker nodes. Lans *[]KubernetesNodePoolLan `json:"lans,omitempty"` - // map of labels attached to node pool. + // The labels attached to the node pool. Labels *map[string]string `json:"labels,omitempty"` - // map of annotations attached to node pool. + // The annotations attached to the node pool. Annotations *map[string]string `json:"annotations,omitempty"` - // Optional array of reserved public IP addresses to be used by the nodes. IPs must be from same location as the data center used for the node pool. The array must contain one more IP than the maximum possible number of nodes (nodeCount+1 for fixed number of nodes or maxNodeCount+1 when auto scaling is used). The extra IP is used when the nodes are rebuilt. + // Optional array of reserved public IP addresses to be used by the nodes. The IPs must be from the exact location of the node pool's data center. If autoscaling is used, the array must contain one more IP than the maximum possible number of nodes (nodeCount+1 for a fixed number of nodes or maxNodeCount+1). The extra IP is used when the nodes are rebuilt. PublicIps *[]string `json:"publicIps,omitempty"` } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_node_pool_properties_for_put.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_node_pool_properties_for_put.go index 3b0081c3e..88c31def6 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_node_pool_properties_for_put.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_node_pool_properties_for_put.go @@ -18,19 +18,19 @@ import ( type KubernetesNodePoolPropertiesForPut struct { // A Kubernetes node pool name. Valid Kubernetes node pool name must be 63 characters or less and must be empty or begin and end with an alphanumeric character ([a-z0-9A-Z]) with dashes (-), underscores (_), dots (.), and alphanumerics between. Name *string `json:"name,omitempty"` - // The number of nodes that make up the node pool. + // The number of worker nodes of the node pool. NodeCount *int32 `json:"nodeCount"` - // The Kubernetes version the nodepool is running. This imposes restrictions on what Kubernetes versions can be run in a cluster's nodepools. Additionally, not all Kubernetes versions are viable upgrade targets for all prior versions. + // The Kubernetes version running in the node pool. Note that this imposes restrictions on which Kubernetes versions can run in the node pools of a cluster. Also, not all Kubernetes versions are suitable upgrade targets for all earlier versions. 
K8sVersion *string `json:"k8sVersion,omitempty"` MaintenanceWindow *KubernetesMaintenanceWindow `json:"maintenanceWindow,omitempty"` AutoScaling *KubernetesAutoScaling `json:"autoScaling,omitempty"` - // array of additional LANs attached to worker nodes + // The array of existing private LANs to attach to worker nodes. Lans *[]KubernetesNodePoolLan `json:"lans,omitempty"` - // map of labels attached to node pool. + // The labels attached to the node pool. Labels *map[string]string `json:"labels,omitempty"` - // map of annotations attached to node pool. + // The annotations attached to the node pool. Annotations *map[string]string `json:"annotations,omitempty"` - // Optional array of reserved public IP addresses to be used by the nodes. IPs must be from same location as the data center used for the node pool. The array must contain one more IP than the maximum possible number of nodes (nodeCount+1 for fixed number of nodes or maxNodeCount+1 when auto scaling is used). The extra IP is used when the nodes are rebuilt. + // Optional array of reserved public IP addresses to be used by the nodes. The IPs must be from the exact location of the node pool's data center. If autoscaling is used, the array must contain one more IP than the maximum possible number of nodes (nodeCount+1 for a fixed number of nodes or maxNodeCount+1). The extra IP is used when the nodes are rebuilt. PublicIps *[]string `json:"publicIps,omitempty"` } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_node_pools.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_node_pools.go index 9e9d8d80e..61df80e6d 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_node_pools.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_node_pools.go @@ -18,9 +18,9 @@ import ( type KubernetesNodePools struct { // A unique representation of the Kubernetes node pool as a resource collection. Id *string `json:"id,omitempty"` - // The type of resource within a collection. + // The resource type within a collection. Type *string `json:"type,omitempty"` - // URL to the collection representation (absolute path). + // The URL to the collection representation (absolute path). Href *string `json:"href,omitempty"` // Array of items in the collection. Items *[]KubernetesNodePool `json:"items,omitempty"` diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_node_properties.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_node_properties.go index eee05965c..652521484 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_node_properties.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_node_properties.go @@ -16,13 +16,13 @@ import ( // KubernetesNodeProperties struct for KubernetesNodeProperties type KubernetesNodeProperties struct { - // A Kubernetes node name. + // The Kubernetes node name. Name *string `json:"name"` - // A valid public IP. + // The public IP associated with the node. PublicIP *string `json:"publicIP,omitempty"` - // A valid private IP. + // The private IP associated with the node. PrivateIP *string `json:"privateIP,omitempty"` - // The Kubernetes version the nodepool is running. This imposes restrictions on what Kubernetes versions can be run in a cluster's nodepools. Additionally, not all Kubernetes versions are viable upgrade targets for all prior versions. + // The Kubernetes version running in the node pool. Note that this imposes restrictions on which Kubernetes versions can run in the node pools of a cluster. 
Also, not all Kubernetes versions are suitable upgrade targets for all earlier versions. K8sVersion *string `json:"k8sVersion"` } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_nodes.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_nodes.go index 8381bf4cc..e54f0c8d0 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_nodes.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_kubernetes_nodes.go @@ -18,9 +18,9 @@ import ( type KubernetesNodes struct { // A unique representation of the Kubernetes node pool as a resource collection. Id *string `json:"id,omitempty"` - // The type of resource within a collection. + // The resource type within a collection. Type *string `json:"type,omitempty"` - // URL to the collection representation (absolute path). + // The URL to the collection representation (absolute path). Href *string `json:"href,omitempty"` // Array of items in the collection. Items *[]KubernetesNode `json:"items,omitempty"` diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_lan_post.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_lan_post.go deleted file mode 100644 index 3af870c67..000000000 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_lan_post.go +++ /dev/null @@ -1,335 +0,0 @@ -/* - * CLOUD API - * - * IONOS Enterprise-grade Infrastructure as a Service (IaaS) solutions can be managed through the Cloud API, in addition or as an alternative to the \"Data Center Designer\" (DCD) browser-based tool. Both methods employ consistent concepts and features, deliver similar power and flexibility, and can be used to perform a multitude of management tasks, including adding servers, volumes, configuring networks, and so on. - * - * API version: 6.0 - */ - -// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. - -package ionoscloud - -import ( - "encoding/json" -) - -// LanPost struct for LanPost -type LanPost struct { - // The resource's unique identifier. - Id *string `json:"id,omitempty"` - // The type of object that has been created. - Type *Type `json:"type,omitempty"` - // URL to the object representation (absolute path). - Href *string `json:"href,omitempty"` - Metadata *DatacenterElementMetadata `json:"metadata,omitempty"` - Entities *LanEntities `json:"entities,omitempty"` - Properties *LanPropertiesPost `json:"properties"` -} - -// NewLanPost instantiates a new LanPost object -// This constructor will assign default values to properties that have it defined, -// and makes sure properties required by API are set, but the set of arguments -// will change when the set of required properties is changed -func NewLanPost(properties LanPropertiesPost) *LanPost { - this := LanPost{} - - this.Properties = &properties - - return &this -} - -// NewLanPostWithDefaults instantiates a new LanPost object -// This constructor will only assign default values to properties that have it defined, -// but it doesn't guarantee that properties required by API are set -func NewLanPostWithDefaults() *LanPost { - this := LanPost{} - return &this -} - -// GetId returns the Id field value -// If the value is explicit nil, the zero value for string will be returned -func (o *LanPost) GetId() *string { - if o == nil { - return nil - } - - return o.Id - -} - -// GetIdOk returns a tuple with the Id field value -// and a boolean to check if the value has been set. 
-// NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *LanPost) GetIdOk() (*string, bool) { - if o == nil { - return nil, false - } - - return o.Id, true -} - -// SetId sets field value -func (o *LanPost) SetId(v string) { - - o.Id = &v - -} - -// HasId returns a boolean if a field has been set. -func (o *LanPost) HasId() bool { - if o != nil && o.Id != nil { - return true - } - - return false -} - -// GetType returns the Type field value -// If the value is explicit nil, the zero value for Type will be returned -func (o *LanPost) GetType() *Type { - if o == nil { - return nil - } - - return o.Type - -} - -// GetTypeOk returns a tuple with the Type field value -// and a boolean to check if the value has been set. -// NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *LanPost) GetTypeOk() (*Type, bool) { - if o == nil { - return nil, false - } - - return o.Type, true -} - -// SetType sets field value -func (o *LanPost) SetType(v Type) { - - o.Type = &v - -} - -// HasType returns a boolean if a field has been set. -func (o *LanPost) HasType() bool { - if o != nil && o.Type != nil { - return true - } - - return false -} - -// GetHref returns the Href field value -// If the value is explicit nil, the zero value for string will be returned -func (o *LanPost) GetHref() *string { - if o == nil { - return nil - } - - return o.Href - -} - -// GetHrefOk returns a tuple with the Href field value -// and a boolean to check if the value has been set. -// NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *LanPost) GetHrefOk() (*string, bool) { - if o == nil { - return nil, false - } - - return o.Href, true -} - -// SetHref sets field value -func (o *LanPost) SetHref(v string) { - - o.Href = &v - -} - -// HasHref returns a boolean if a field has been set. -func (o *LanPost) HasHref() bool { - if o != nil && o.Href != nil { - return true - } - - return false -} - -// GetMetadata returns the Metadata field value -// If the value is explicit nil, the zero value for DatacenterElementMetadata will be returned -func (o *LanPost) GetMetadata() *DatacenterElementMetadata { - if o == nil { - return nil - } - - return o.Metadata - -} - -// GetMetadataOk returns a tuple with the Metadata field value -// and a boolean to check if the value has been set. -// NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *LanPost) GetMetadataOk() (*DatacenterElementMetadata, bool) { - if o == nil { - return nil, false - } - - return o.Metadata, true -} - -// SetMetadata sets field value -func (o *LanPost) SetMetadata(v DatacenterElementMetadata) { - - o.Metadata = &v - -} - -// HasMetadata returns a boolean if a field has been set. -func (o *LanPost) HasMetadata() bool { - if o != nil && o.Metadata != nil { - return true - } - - return false -} - -// GetEntities returns the Entities field value -// If the value is explicit nil, the zero value for LanEntities will be returned -func (o *LanPost) GetEntities() *LanEntities { - if o == nil { - return nil - } - - return o.Entities - -} - -// GetEntitiesOk returns a tuple with the Entities field value -// and a boolean to check if the value has been set. 
-// NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *LanPost) GetEntitiesOk() (*LanEntities, bool) { - if o == nil { - return nil, false - } - - return o.Entities, true -} - -// SetEntities sets field value -func (o *LanPost) SetEntities(v LanEntities) { - - o.Entities = &v - -} - -// HasEntities returns a boolean if a field has been set. -func (o *LanPost) HasEntities() bool { - if o != nil && o.Entities != nil { - return true - } - - return false -} - -// GetProperties returns the Properties field value -// If the value is explicit nil, the zero value for LanPropertiesPost will be returned -func (o *LanPost) GetProperties() *LanPropertiesPost { - if o == nil { - return nil - } - - return o.Properties - -} - -// GetPropertiesOk returns a tuple with the Properties field value -// and a boolean to check if the value has been set. -// NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *LanPost) GetPropertiesOk() (*LanPropertiesPost, bool) { - if o == nil { - return nil, false - } - - return o.Properties, true -} - -// SetProperties sets field value -func (o *LanPost) SetProperties(v LanPropertiesPost) { - - o.Properties = &v - -} - -// HasProperties returns a boolean if a field has been set. -func (o *LanPost) HasProperties() bool { - if o != nil && o.Properties != nil { - return true - } - - return false -} - -func (o LanPost) MarshalJSON() ([]byte, error) { - toSerialize := map[string]interface{}{} - if o.Id != nil { - toSerialize["id"] = o.Id - } - if o.Type != nil { - toSerialize["type"] = o.Type - } - if o.Href != nil { - toSerialize["href"] = o.Href - } - if o.Metadata != nil { - toSerialize["metadata"] = o.Metadata - } - if o.Entities != nil { - toSerialize["entities"] = o.Entities - } - if o.Properties != nil { - toSerialize["properties"] = o.Properties - } - return json.Marshal(toSerialize) -} - -type NullableLanPost struct { - value *LanPost - isSet bool -} - -func (v NullableLanPost) Get() *LanPost { - return v.value -} - -func (v *NullableLanPost) Set(val *LanPost) { - v.value = val - v.isSet = true -} - -func (v NullableLanPost) IsSet() bool { - return v.isSet -} - -func (v *NullableLanPost) Unset() { - v.value = nil - v.isSet = false -} - -func NewNullableLanPost(val *LanPost) *NullableLanPost { - return &NullableLanPost{value: val, isSet: true} -} - -func (v NullableLanPost) MarshalJSON() ([]byte, error) { - return json.Marshal(v.value) -} - -func (v *NullableLanPost) UnmarshalJSON(src []byte) error { - v.isSet = true - return json.Unmarshal(src, &v.value) -} diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_lan_properties_post.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_lan_properties_post.go deleted file mode 100644 index 3492c2079..000000000 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_lan_properties_post.go +++ /dev/null @@ -1,250 +0,0 @@ -/* - * CLOUD API - * - * IONOS Enterprise-grade Infrastructure as a Service (IaaS) solutions can be managed through the Cloud API, in addition or as an alternative to the \"Data Center Designer\" (DCD) browser-based tool. Both methods employ consistent concepts and features, deliver similar power and flexibility, and can be used to perform a multitude of management tasks, including adding servers, volumes, configuring networks, and so on. - * - * API version: 6.0 - */ - -// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. 
- -package ionoscloud - -import ( - "encoding/json" -) - -// LanPropertiesPost struct for LanPropertiesPost -type LanPropertiesPost struct { - // The name of the resource. - Name *string `json:"name,omitempty"` - // IP failover configurations for lan - IpFailover *[]IPFailover `json:"ipFailover,omitempty"` - // The unique identifier of the private Cross-Connect the LAN is connected to, if any. - Pcc *string `json:"pcc,omitempty"` - // This LAN faces the public Internet. - Public *bool `json:"public,omitempty"` -} - -// NewLanPropertiesPost instantiates a new LanPropertiesPost object -// This constructor will assign default values to properties that have it defined, -// and makes sure properties required by API are set, but the set of arguments -// will change when the set of required properties is changed -func NewLanPropertiesPost() *LanPropertiesPost { - this := LanPropertiesPost{} - - return &this -} - -// NewLanPropertiesPostWithDefaults instantiates a new LanPropertiesPost object -// This constructor will only assign default values to properties that have it defined, -// but it doesn't guarantee that properties required by API are set -func NewLanPropertiesPostWithDefaults() *LanPropertiesPost { - this := LanPropertiesPost{} - return &this -} - -// GetName returns the Name field value -// If the value is explicit nil, the zero value for string will be returned -func (o *LanPropertiesPost) GetName() *string { - if o == nil { - return nil - } - - return o.Name - -} - -// GetNameOk returns a tuple with the Name field value -// and a boolean to check if the value has been set. -// NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *LanPropertiesPost) GetNameOk() (*string, bool) { - if o == nil { - return nil, false - } - - return o.Name, true -} - -// SetName sets field value -func (o *LanPropertiesPost) SetName(v string) { - - o.Name = &v - -} - -// HasName returns a boolean if a field has been set. -func (o *LanPropertiesPost) HasName() bool { - if o != nil && o.Name != nil { - return true - } - - return false -} - -// GetIpFailover returns the IpFailover field value -// If the value is explicit nil, the zero value for []IPFailover will be returned -func (o *LanPropertiesPost) GetIpFailover() *[]IPFailover { - if o == nil { - return nil - } - - return o.IpFailover - -} - -// GetIpFailoverOk returns a tuple with the IpFailover field value -// and a boolean to check if the value has been set. -// NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *LanPropertiesPost) GetIpFailoverOk() (*[]IPFailover, bool) { - if o == nil { - return nil, false - } - - return o.IpFailover, true -} - -// SetIpFailover sets field value -func (o *LanPropertiesPost) SetIpFailover(v []IPFailover) { - - o.IpFailover = &v - -} - -// HasIpFailover returns a boolean if a field has been set. -func (o *LanPropertiesPost) HasIpFailover() bool { - if o != nil && o.IpFailover != nil { - return true - } - - return false -} - -// GetPcc returns the Pcc field value -// If the value is explicit nil, the zero value for string will be returned -func (o *LanPropertiesPost) GetPcc() *string { - if o == nil { - return nil - } - - return o.Pcc - -} - -// GetPccOk returns a tuple with the Pcc field value -// and a boolean to check if the value has been set. 
-// NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *LanPropertiesPost) GetPccOk() (*string, bool) { - if o == nil { - return nil, false - } - - return o.Pcc, true -} - -// SetPcc sets field value -func (o *LanPropertiesPost) SetPcc(v string) { - - o.Pcc = &v - -} - -// HasPcc returns a boolean if a field has been set. -func (o *LanPropertiesPost) HasPcc() bool { - if o != nil && o.Pcc != nil { - return true - } - - return false -} - -// GetPublic returns the Public field value -// If the value is explicit nil, the zero value for bool will be returned -func (o *LanPropertiesPost) GetPublic() *bool { - if o == nil { - return nil - } - - return o.Public - -} - -// GetPublicOk returns a tuple with the Public field value -// and a boolean to check if the value has been set. -// NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *LanPropertiesPost) GetPublicOk() (*bool, bool) { - if o == nil { - return nil, false - } - - return o.Public, true -} - -// SetPublic sets field value -func (o *LanPropertiesPost) SetPublic(v bool) { - - o.Public = &v - -} - -// HasPublic returns a boolean if a field has been set. -func (o *LanPropertiesPost) HasPublic() bool { - if o != nil && o.Public != nil { - return true - } - - return false -} - -func (o LanPropertiesPost) MarshalJSON() ([]byte, error) { - toSerialize := map[string]interface{}{} - if o.Name != nil { - toSerialize["name"] = o.Name - } - if o.IpFailover != nil { - toSerialize["ipFailover"] = o.IpFailover - } - if o.Pcc != nil { - toSerialize["pcc"] = o.Pcc - } - if o.Public != nil { - toSerialize["public"] = o.Public - } - return json.Marshal(toSerialize) -} - -type NullableLanPropertiesPost struct { - value *LanPropertiesPost - isSet bool -} - -func (v NullableLanPropertiesPost) Get() *LanPropertiesPost { - return v.value -} - -func (v *NullableLanPropertiesPost) Set(val *LanPropertiesPost) { - v.value = val - v.isSet = true -} - -func (v NullableLanPropertiesPost) IsSet() bool { - return v.isSet -} - -func (v *NullableLanPropertiesPost) Unset() { - v.value = nil - v.isSet = false -} - -func NewNullableLanPropertiesPost(val *LanPropertiesPost) *NullableLanPropertiesPost { - return &NullableLanPropertiesPost{value: val, isSet: true} -} - -func (v NullableLanPropertiesPost) MarshalJSON() ([]byte, error) { - return json.Marshal(v.value) -} - -func (v *NullableLanPropertiesPost) UnmarshalJSON(src []byte) error { - v.isSet = true - return json.Unmarshal(src, &v.value) -} diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_location_properties.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_location_properties.go index 15326ccab..9e8e623b6 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_location_properties.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_location_properties.go @@ -16,13 +16,13 @@ import ( // LocationProperties struct for LocationProperties type LocationProperties struct { - // The name of the resource. + // The location name. Name *string `json:"name,omitempty"` - // List of features supported by the location + // A list of available features in the location. Features *[]string `json:"features,omitempty"` - // List of image aliases available for the location + // A list of image aliases available in the location. ImageAliases *[]string `json:"imageAliases,omitempty"` - // Array of features and CPU families available in a location + // A list of available CPU types and related resources available in the location. 
CpuArchitecture *[]CpuArchitectureProperties `json:"cpuArchitecture,omitempty"` } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_network_load_balancer_forwarding_rule_properties.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_network_load_balancer_forwarding_rule_properties.go index d8ba812ab..35e452787 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_network_load_balancer_forwarding_rule_properties.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_network_load_balancer_forwarding_rule_properties.go @@ -22,7 +22,7 @@ type NetworkLoadBalancerForwardingRuleProperties struct { Algorithm *string `json:"algorithm"` // Balancing protocol Protocol *string `json:"protocol"` - // Listening (inbound) IP + // Listening (inbound) IP. ListenerIp *string `json:"listenerIp"` // Listening (inbound) port number; valid range is 1 to 65535. ListenerPort *int32 `json:"listenerPort"` diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_nic_properties.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_nic_properties.go index 3e4f49a1e..20d0323ac 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_nic_properties.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_nic_properties.go @@ -34,8 +34,6 @@ type NicProperties struct { DeviceNumber *int32 `json:"deviceNumber,omitempty"` // The PCI slot number for the NIC. PciSlot *int32 `json:"pciSlot,omitempty"` - // The vnet ID that belongs to this NIC; Requires system privileges - Vnet *string `json:"vnet,omitempty"` } // NewNicProperties instantiates a new NicProperties object @@ -45,6 +43,8 @@ type NicProperties struct { func NewNicProperties(lan int32) *NicProperties { this := NicProperties{} + var dhcp bool = true + this.Dhcp = &dhcp this.Lan = &lan return &this @@ -55,6 +55,8 @@ func NewNicProperties(lan int32) *NicProperties { // but it doesn't guarantee that properties required by API are set func NewNicPropertiesWithDefaults() *NicProperties { this := NicProperties{} + var dhcp bool = true + this.Dhcp = &dhcp return &this } @@ -400,44 +402,6 @@ func (o *NicProperties) HasPciSlot() bool { return false } -// GetVnet returns the Vnet field value -// If the value is explicit nil, the zero value for string will be returned -func (o *NicProperties) GetVnet() *string { - if o == nil { - return nil - } - - return o.Vnet - -} - -// GetVnetOk returns a tuple with the Vnet field value -// and a boolean to check if the value has been set. -// NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *NicProperties) GetVnetOk() (*string, bool) { - if o == nil { - return nil, false - } - - return o.Vnet, true -} - -// SetVnet sets field value -func (o *NicProperties) SetVnet(v string) { - - o.Vnet = &v - -} - -// HasVnet returns a boolean if a field has been set. 
-func (o *NicProperties) HasVnet() bool { - if o != nil && o.Vnet != nil { - return true - } - - return false -} - func (o NicProperties) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} if o.Name != nil { @@ -465,9 +429,6 @@ func (o NicProperties) MarshalJSON() ([]byte, error) { if o.PciSlot != nil { toSerialize["pciSlot"] = o.PciSlot } - if o.Vnet != nil { - toSerialize["vnet"] = o.Vnet - } return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_resource_limits.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_resource_limits.go index 4acc22998..8dbbe386e 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_resource_limits.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_resource_limits.go @@ -16,49 +16,49 @@ import ( // ResourceLimits struct for ResourceLimits type ResourceLimits struct { - // The maximum number of cores per server. + // The maximum number of CPU cores per server. CoresPerServer *int32 `json:"coresPerServer"` - // The maximum number of cores per contract. + // The maximum number of CPU cores per contract. CoresPerContract *int32 `json:"coresPerContract"` - // The number of cores provisioned. + // The number of CPU cores provisioned. CoresProvisioned *int32 `json:"coresProvisioned"` - // The maximum RAM per server. + // The maximum amount of RAM (in MB) that can be provisioned for a particular server under this contract. RamPerServer *int32 `json:"ramPerServer"` - // The maximum RAM per contract. + // The maximum amount of RAM (in MB) that can be provisioned under this contract. RamPerContract *int32 `json:"ramPerContract"` - // RAM provisioned. + // The amount of RAM (in MB) provisioned under this contract. RamProvisioned *int32 `json:"ramProvisioned"` - // HDD limit per volume. + // The maximum size (in MB) of an individual hard disk volume. HddLimitPerVolume *int64 `json:"hddLimitPerVolume"` - // HDD limit per contract. + // The maximum amount of disk space (in MB) that can be provided under this contract. HddLimitPerContract *int64 `json:"hddLimitPerContract"` - // HDD volume provisioned. + // The amount of hard disk space (in MB) that is currently provisioned. HddVolumeProvisioned *int64 `json:"hddVolumeProvisioned"` - // SSD limit per volume. + // The maximum size (in MB) of an individual solid state disk volume. SsdLimitPerVolume *int64 `json:"ssdLimitPerVolume"` - // SSD limit per contract. + // The maximum amount of solid state disk space (in MB) that can be provisioned under this contract. SsdLimitPerContract *int64 `json:"ssdLimitPerContract"` - // SSD volume provisioned. + // The amount of solid state disk space (in MB) that is currently provisioned. SsdVolumeProvisioned *int64 `json:"ssdVolumeProvisioned"` - // DAS (Direct Attached Storage) volume provisioned. + // The amount of DAS disk space (in MB) in a Cube server that is currently provisioned. DasVolumeProvisioned *int64 `json:"dasVolumeProvisioned"` - // Total reservable IP limit for the customer. + // The maximum number of static public IP addresses that can be reserved by this customer across contracts. ReservableIps *int32 `json:"reservableIps"` - // Reserved ips for the contract. + // The maximum number of static public IP addresses that can be reserved for this contract. ReservedIpsOnContract *int32 `json:"reservedIpsOnContract"` - // Reserved ips in use. + // The number of static public IP addresses in use. ReservedIpsInUse *int32 `json:"reservedIpsInUse"` - // K8s clusters total limit. 
+ // The maximum number of Kubernetes clusters that can be created under this contract. K8sClusterLimitTotal *int32 `json:"k8sClusterLimitTotal"` - // K8s clusters provisioned. + // The number of Kubernetes clusters currently provisioned. K8sClustersProvisioned *int32 `json:"k8sClustersProvisioned"` - // NLB total limit. + // The NLB total limit. NlbLimitTotal *int32 `json:"nlbLimitTotal"` - // NLBs provisioned. + // The NLBs provisioned. NlbProvisioned *int32 `json:"nlbProvisioned"` - // NAT Gateway total limit. + // The NAT Gateway total limit. NatGatewayLimitTotal *int32 `json:"natGatewayLimitTotal"` - // NAT Gateways provisioned. + // The NAT Gateways provisioned. NatGatewayProvisioned *int32 `json:"natGatewayProvisioned"` } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_s3_bucket.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_s3_bucket.go index cf175f538..bede84667 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_s3_bucket.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_s3_bucket.go @@ -16,7 +16,7 @@ import ( // S3Bucket struct for S3Bucket type S3Bucket struct { - // Name of the S3 bucket + // The name of the S3 bucket. Name *string `json:"name"` } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_server_properties.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_server_properties.go index 7c5940e40..4f292cde9 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_server_properties.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_server_properties.go @@ -20,21 +20,19 @@ type ServerProperties struct { TemplateUuid *string `json:"templateUuid,omitempty"` // The name of the resource. Name *string `json:"name,omitempty"` - // The total number of cores for the server. - Cores *int32 `json:"cores"` - // The memory size for the server in MB, such as 2048. Size must be specified in multiples of 256 MB with a minimum of 256 MB; however, if you set ramHotPlug to TRUE then you must use a minimum of 1024 MB. If you set the RAM size more than 240GB, then ramHotPlug will be set to FALSE and can not be set to TRUE unless RAM size not set to less than 240GB. - Ram *int32 `json:"ram"` - // The placement group ID that belongs to this server; Requires system privileges - PlacementGroupId *string `json:"placementGroupId,omitempty"` + // The total number of cores for the enterprise server. + Cores *int32 `json:"cores,omitempty"` + // The memory size for the enterprise server in MB, such as 2048. Size must be specified in multiples of 256 MB with a minimum of 256 MB; however, if you set ramHotPlug to TRUE then you must use a minimum of 1024 MB. If you set the RAM size to more than 240 GB, ramHotPlug will be set to FALSE and cannot be set to TRUE unless the RAM size is set back to less than 240 GB. + Ram *int32 `json:"ram,omitempty"` // The availability zone in which the server should be provisioned. AvailabilityZone *string `json:"availabilityZone,omitempty"` // Status of the virtual machine. VmState *string `json:"vmState,omitempty"` BootCdrom *ResourceReference `json:"bootCdrom,omitempty"` BootVolume *ResourceReference `json:"bootVolume,omitempty"` - // CPU architecture on which server gets provisioned; not all CPU architectures are available in all datacenter regions; available CPU architectures can be retrieved from the datacenter resource. 
+ // CPU architecture on which server gets provisioned; not all CPU architectures are available in all datacenter regions; available CPU architectures can be retrieved from the datacenter resource; must not be provided for CUBE servers. CpuFamily *string `json:"cpuFamily,omitempty"` - // server usages: ENTERPRISE or CUBE + // Server type. Type *string `json:"type,omitempty"` } @@ -42,12 +40,9 @@ type ServerProperties struct { // This constructor will assign default values to properties that have it defined, // and makes sure properties required by API are set, but the set of arguments // will change when the set of required properties is changed -func NewServerProperties(cores int32, ram int32) *ServerProperties { +func NewServerProperties() *ServerProperties { this := ServerProperties{} - this.Cores = &cores - this.Ram = &ram - return &this } @@ -211,44 +206,6 @@ func (o *ServerProperties) HasRam() bool { return false } -// GetPlacementGroupId returns the PlacementGroupId field value -// If the value is explicit nil, the zero value for string will be returned -func (o *ServerProperties) GetPlacementGroupId() *string { - if o == nil { - return nil - } - - return o.PlacementGroupId - -} - -// GetPlacementGroupIdOk returns a tuple with the PlacementGroupId field value -// and a boolean to check if the value has been set. -// NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *ServerProperties) GetPlacementGroupIdOk() (*string, bool) { - if o == nil { - return nil, false - } - - return o.PlacementGroupId, true -} - -// SetPlacementGroupId sets field value -func (o *ServerProperties) SetPlacementGroupId(v string) { - - o.PlacementGroupId = &v - -} - -// HasPlacementGroupId returns a boolean if a field has been set. -func (o *ServerProperties) HasPlacementGroupId() bool { - if o != nil && o.PlacementGroupId != nil { - return true - } - - return false -} - // GetAvailabilityZone returns the AvailabilityZone field value // If the value is explicit nil, the zero value for string will be returned func (o *ServerProperties) GetAvailabilityZone() *string { @@ -491,9 +448,6 @@ func (o ServerProperties) MarshalJSON() ([]byte, error) { if o.Ram != nil { toSerialize["ram"] = o.Ram } - if o.PlacementGroupId != nil { - toSerialize["placementGroupId"] = o.PlacementGroupId - } if o.AvailabilityZone != nil { toSerialize["availabilityZone"] = o.AvailabilityZone } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_target_group.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_target_group.go index e6e53ca9f..95d71efea 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_target_group.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_target_group.go @@ -20,7 +20,7 @@ type TargetGroup struct { Id *string `json:"id,omitempty"` // The type of object that has been created. Type *Type `json:"type,omitempty"` - // URL to the object representation (absolute path). + // The URL to the object representation (absolute path). 
Href *string `json:"href,omitempty"` Metadata *DatacenterElementMetadata `json:"metadata,omitempty"` Properties *TargetGroupProperties `json:"properties"` diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_target_group_health_check.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_target_group_health_check.go index 9f55b7d74..5c8dfdfc6 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_target_group_health_check.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_target_group_health_check.go @@ -16,11 +16,11 @@ import ( // TargetGroupHealthCheck struct for TargetGroupHealthCheck type TargetGroupHealthCheck struct { - // The maximum time in milliseconds to wait for a target to respond to a check. For target VMs with 'Check Interval' set, the lesser of the two values is used once the TCP connection is established. + // The maximum time in milliseconds to wait for a target to respond to a check. For target VMs with a 'Check Interval' set, the smaller of the two values is used once the TCP connection is established. CheckTimeout *int32 `json:"checkTimeout,omitempty"` - // The interval in milliseconds between consecutive health checks; default is 2000. + // The interval in milliseconds between consecutive health checks; the default value is '2000'. CheckInterval *int32 `json:"checkInterval,omitempty"` - // The maximum number of attempts to reconnect to a target after a connection failure. Valid range is 0 to 65535, and default is three reconnection attempts. + // The maximum number of attempts to reconnect to a target after a connection failure. The valid range is '0 to 65535'; the default value is '3'. Retries *int32 `json:"retries,omitempty"` } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_target_group_http_health_check.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_target_group_http_health_check.go index ca2dd8f3e..274f7b911 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_target_group_http_health_check.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_target_group_http_health_check.go @@ -16,17 +16,17 @@ import ( // TargetGroupHttpHealthCheck struct for TargetGroupHttpHealthCheck type TargetGroupHttpHealthCheck struct { - // The path (destination URL) for the HTTP health check request; the default is /. + // The destination URL for the HTTP health check; the default is '/'. Path *string `json:"path,omitempty"` - // The method for the HTTP health check. + // The method used for the health check request. Method *string `json:"method,omitempty"` - // + // Specifies the target's response type to match ALB's request. MatchType *string `json:"matchType"` - // The response returned by the request, depending on the match type. + // The response returned by the request. It can be a status code or a response body depending on the definition of 'matchType'. Response *string `json:"response"` - // + // Specifies whether to use a regular expression to parse the response body; the default value is 'FALSE'. By using regular expressions, you can flexibly customize the expected response from a healthy server. Regex *bool `json:"regex,omitempty"` - // + // Specifies whether to negate an individual entry; the default value is 'FALSE'. 
Negate *bool `json:"negate,omitempty"` } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_target_group_properties.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_target_group_properties.go index e5d19e596..3a79b8bc7 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_target_group_properties.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_target_group_properties.go @@ -16,11 +16,11 @@ import ( // TargetGroupProperties struct for TargetGroupProperties type TargetGroupProperties struct { - // The name of the target group. + // The target group name. Name *string `json:"name"` - // Balancing algorithm + // The balancing algorithm. A balancing algorithm consists of predefined rules with the logic that a load balancer uses to distribute network traffic between servers. - **Round Robin**: Targets are served alternately according to their weighting. - **Least Connection**: The target with the least active connection is served. - **Random**: The targets are served based on a consistent pseudorandom algorithm. - **Source IP**: It is ensured that the same client IP address reaches the same target. Algorithm *string `json:"algorithm"` - // Balancing protocol + // The forwarding protocol. Only the value 'HTTP' is allowed. Protocol *string `json:"protocol"` // Array of items in the collection. Targets *[]TargetGroupTarget `json:"targets,omitempty"` diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_target_group_put.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_target_group_put.go index a6221b157..e5f5afd56 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_target_group_put.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_target_group_put.go @@ -20,7 +20,7 @@ type TargetGroupPut struct { Id *string `json:"id,omitempty"` // The type of object that has been created. Type *Type `json:"type,omitempty"` - // URL to the object representation (absolute path). + // The URL to the object representation (absolute path). Href *string `json:"href,omitempty"` Properties *TargetGroupProperties `json:"properties"` } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_target_group_target.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_target_group_target.go index 58f95f132..bad8c42d5 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_target_group_target.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_target_group_target.go @@ -16,11 +16,11 @@ import ( // TargetGroupTarget struct for TargetGroupTarget type TargetGroupTarget struct { - // The IP of the balanced target VM. + // The IP address of the balanced target. Ip *string `json:"ip"` - // The port of the balanced target service; valid range is 1 to 65535. + // The port of the balanced target service; the valid range is 1 to 65535. Port *int32 `json:"port"` - // Traffic is distributed in proportion to target weight, relative to the combined weight of all targets. A target with higher weight receives a greater share of traffic. Valid range is 0 to 256 and default is 1; targets with weight of 0 do not participate in load balancing but still accept persistent connections. It is best use values in the middle of the range to leave room for later adjustments. + // The traffic is distributed in proportion to the target weight relative to the combined weight of all targets. A target with higher weight receives a larger share of traffic. The valid range is from 0 to 256; the default value is '1'. 
Targets with a weight of '0' do not participate in load balancing but still accept persistent connections. We recommend using values in the middle range to leave room for later adjustments. Weight *int32 `json:"weight"` // When the health check is enabled, the target is available only when it accepts regular TCP or HTTP connection attempts for state checking. The state check consists of one connection attempt with the target's address and port. The default value is 'TRUE'. HealthCheckEnabled *bool `json:"healthCheckEnabled,omitempty"` diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_target_groups.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_target_groups.go index 8daf0d5cf..ca5778f54 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_target_groups.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_target_groups.go @@ -20,7 +20,7 @@ type TargetGroups struct { Id *string `json:"id,omitempty"` // The type of object that has been created. Type *Type `json:"type,omitempty"` - // URL to the object representation (absolute path). + // The URL to the object representation (absolute path). Href *string `json:"href,omitempty"` // Array of items in the collection. Items *[]TargetGroup `json:"items,omitempty"` diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_template.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_template.go index 709587e91..48156f106 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_template.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_template.go @@ -20,7 +20,7 @@ type Template struct { Id *string `json:"id,omitempty"` // The type of object that has been created. Type *Type `json:"type,omitempty"` - // URL to the object representation (absolute path). + // The URL to the object representation (absolute path). Href *string `json:"href,omitempty"` Metadata *DatacenterElementMetadata `json:"metadata,omitempty"` Properties *TemplateProperties `json:"properties"` diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_template_properties.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_template_properties.go index 4784c6102..5d661eee2 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_template_properties.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_template_properties.go @@ -16,7 +16,7 @@ import ( // TemplateProperties struct for TemplateProperties type TemplateProperties struct { - // The name of the resource. + // The resource name. Name *string `json:"name"` // The CPU cores count. Cores *float32 `json:"cores"` diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_templates.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_templates.go index d849d16c2..1d6e9640c 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_templates.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_templates.go @@ -20,7 +20,7 @@ type Templates struct { Id *string `json:"id,omitempty"` // The type of object that has been created. Type *Type `json:"type,omitempty"` - // URL to the object representation (absolute path). + // The URL to the object representation (absolute path). Href *string `json:"href,omitempty"` // Array of items in the collection. 
Items *[]Template `json:"items,omitempty"` diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_volume.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_volume.go index 52f4eca7a..d0b7b862e 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_volume.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_volume.go @@ -20,7 +20,7 @@ type Volume struct { Id *string `json:"id,omitempty"` // The type of object that has been created. Type *Type `json:"type,omitempty"` - // URL to the object representation (absolute path). + // The URL to the object representation (absolute path). Href *string `json:"href,omitempty"` Metadata *DatacenterElementMetadata `json:"metadata,omitempty"` Properties *VolumeProperties `json:"properties"` diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/model_volume_properties.go b/vendor/github.com/ionos-cloud/sdk-go/v6/model_volume_properties.go index e5ffe9c9d..15ed6a148 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/model_volume_properties.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/model_volume_properties.go @@ -69,7 +69,7 @@ func NewVolumeProperties(size float32) *VolumeProperties { this := VolumeProperties{} this.Size = &size - var bootOrder string = "AUTO" + var bootOrder = "AUTO" this.BootOrder = &bootOrder return &this @@ -80,7 +80,7 @@ func NewVolumeProperties(size float32) *VolumeProperties { // but it doesn't guarantee that properties required by API are set func NewVolumePropertiesWithDefaults() *VolumeProperties { this := VolumeProperties{} - var bootOrder string = "AUTO" + var bootOrder = "AUTO" this.BootOrder = &bootOrder return &this } @@ -986,9 +986,7 @@ func (o VolumeProperties) MarshalJSON() ([]byte, error) { if o.BootServer != nil { toSerialize["bootServer"] = o.BootServer } - if o.BootOrder != nil { - toSerialize["bootOrder"] = o.BootOrder - } + toSerialize["bootOrder"] = o.BootOrder return json.Marshal(toSerialize) } diff --git a/vendor/github.com/ionos-cloud/sdk-go/v6/utils.go b/vendor/github.com/ionos-cloud/sdk-go/v6/utils.go index aa9652cd5..ee096c731 100644 --- a/vendor/github.com/ionos-cloud/sdk-go/v6/utils.go +++ b/vendor/github.com/ionos-cloud/sdk-go/v6/utils.go @@ -12,10 +12,34 @@ package ionoscloud import ( "encoding/json" + "reflect" "strings" "time" ) +// ToPtr - returns a pointer to the given value. +func ToPtr[T any](v T) *T { + return &v +} + +// ToValue - returns the value of the pointer passed in +func ToValue[T any](ptr *T) T { + return *ptr +} + +// ToValueDefault - returns the value of the pointer passed in, or the default type value if the pointer is nil +func ToValueDefault[T any](ptr *T) T { + var defaultVal T + if ptr == nil { + return defaultVal + } + return *ptr +} + +func SliceToValueDefault[T any](ptrSlice *[]T) []T { + return append([]T{}, *ptrSlice...) +} + // PtrBool - returns a pointer to given boolean value. 
func PtrBool(v bool) *bool { return &v } @@ -742,3 +766,17 @@ func (t *IonosTime) UnmarshalJSON(data []byte) error { *t = IonosTime{tt} return nil } + +// IsNil checks if an input is nil +func IsNil(i interface{}) bool { + if i == nil { + return true + } + switch reflect.TypeOf(i).Kind() { + case reflect.Chan, reflect.Func, reflect.Map, reflect.Ptr, reflect.UnsafePointer, reflect.Interface, reflect.Slice: + return reflect.ValueOf(i).IsNil() + case reflect.Array: + return reflect.ValueOf(i).IsZero() + } + return false +} diff --git a/vendor/github.com/linode/linodego/README.md b/vendor/github.com/linode/linodego/README.md index 550cc9ef5..cf73b4dfd 100644 --- a/vendor/github.com/linode/linodego/README.md +++ b/vendor/github.com/linode/linodego/README.md @@ -1,10 +1,9 @@ # linodego -![Build](https://img.shields.io/github/workflow/status/linode/linodego/Testing/main?label=tests) +![Tests](https://img.shields.io/github/actions/workflow/status/linode/linodego/test.yml?branch=main) [![Release](https://img.shields.io/github/v/release/linode/linodego)](https://github.com/linode/linodego/releases/latest) [![GoDoc](https://godoc.org/github.com/linode/linodego?status.svg)](https://godoc.org/github.com/linode/linodego) [![Go Report Card](https://goreportcard.com/badge/github.com/linode/linodego)](https://goreportcard.com/report/github.com/linode/linodego) -[![codecov](https://codecov.io/gh/linode/linodego/branch/main/graph/badge.svg)](https://codecov.io/gh/linode/linodego) Go client for [Linode REST v4 API](https://developers.linode.com/api/v4) diff --git a/vendor/github.com/linode/linodego/account.go b/vendor/github.com/linode/linodego/account.go index f6cca054e..03d3297fd 100644 --- a/vendor/github.com/linode/linodego/account.go +++ b/vendor/github.com/linode/linodego/account.go @@ -19,6 +19,7 @@ type Account struct { TaxID string `json:"tax_id"` Phone string `json:"phone"` CreditCard *CreditCard `json:"credit_card"` + EUUID string `json:"euuid"` } // CreditCard information associated with the Account. diff --git a/vendor/github.com/linode/linodego/account_logins.go b/vendor/github.com/linode/linodego/account_logins.go index 3fbf3cfb2..9b5f69a99 100644 --- a/vendor/github.com/linode/linodego/account_logins.go +++ b/vendor/github.com/linode/linodego/account_logins.go @@ -16,6 +16,7 @@ type Login struct { IP string `json:"ip"` Restricted bool `json:"restricted"` Username string `json:"username"` + Status string `json:"status"` } type LoginsPagedResponse struct { diff --git a/vendor/github.com/linode/linodego/account_payments.go b/vendor/github.com/linode/linodego/account_payments.go index 7b941fabf..a2fe17b64 100644 --- a/vendor/github.com/linode/linodego/account_payments.go +++ b/vendor/github.com/linode/linodego/account_payments.go @@ -16,7 +16,7 @@ type Payment struct { ID int `json:"id"` // The amount, in US dollars, of the Payment. - USD json.Number `json:"usd,Number"` + USD json.Number `json:"usd"` // When the Payment was made. 
Date *time.Time `json:"-"` @@ -28,7 +28,7 @@ type PaymentCreateOptions struct { CVV string `json:"cvv,omitempty"` // The amount, in US dollars, of the Payment - USD json.Number `json:"usd,Number"` + USD json.Number `json:"usd"` } // UnmarshalJSON implements the json.Unmarshaler interface diff --git a/vendor/github.com/linode/linodego/client.go b/vendor/github.com/linode/linodego/client.go index 1a6577778..bb992ea12 100644 --- a/vendor/github.com/linode/linodego/client.go +++ b/vendor/github.com/linode/linodego/client.go @@ -294,7 +294,7 @@ func (c *Client) InvalidateCache() { func (c *Client) InvalidateCacheEndpoint(endpoint string) error { u, err := url.Parse(endpoint) if err != nil { - return fmt.Errorf("failed to parse URL for caching: %s", err) + return fmt.Errorf("failed to parse URL for caching: %w", err) } c.cachedEntryLock.Lock() @@ -439,7 +439,7 @@ func NewClientFromEnv(hc *http.Client) (*Client, error) { // We should only load the config if the config file exists if _, err := os.Stat(configPath); err != nil { - return nil, fmt.Errorf("error loading config file %s: %s", configPath, err) + return nil, fmt.Errorf("error loading config file %s: %w", configPath, err) } err = client.preLoadConfig(configPath) diff --git a/vendor/github.com/linode/linodego/config.go b/vendor/github.com/linode/linodego/config.go index db36abc0c..0d0b2f507 100644 --- a/vendor/github.com/linode/linodego/config.go +++ b/vendor/github.com/linode/linodego/config.go @@ -65,7 +65,7 @@ func (c *Client) LoadConfig(options *LoadConfigOptions) error { if cfg.HasSection("default") { err := cfg.Section("default").MapTo(&defaultConfig) if err != nil { - return fmt.Errorf("failed to map default profile: %s", err) + return fmt.Errorf("failed to map default profile: %w", err) } } @@ -76,7 +76,7 @@ func (c *Client) LoadConfig(options *LoadConfigOptions) error { f := defaultConfig if err := profile.MapTo(&f); err != nil { - return fmt.Errorf("failed to map values: %s", err) + return fmt.Errorf("failed to map values: %w", err) } result[name] = f @@ -86,7 +86,7 @@ func (c *Client) LoadConfig(options *LoadConfigOptions) error { if !options.SkipLoadProfile { if err := c.UseProfile(profileOption); err != nil { - return fmt.Errorf("unable to use profile %s: %s", profileOption, err) + return fmt.Errorf("unable to use profile %s: %w", profileOption, err) } } diff --git a/vendor/github.com/linode/linodego/go.work.sum b/vendor/github.com/linode/linodego/go.work.sum index 5058f43d8..ea20c135a 100644 --- a/vendor/github.com/linode/linodego/go.work.sum +++ b/vendor/github.com/linode/linodego/go.work.sum @@ -34,14 +34,28 @@ github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1Cpa github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b h1:PxfKdU9lEEDYjdIzOtC4qFWgkU2rGHdKlKowJSMN9h0= 
+golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.6.0 h1:clScbb1cHjoCkyRbWwBEUZ5H/tIFu5TAXIqaZD0Gcjw= +golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= k8s.io/api v0.18.3/go.mod h1:UOaMwERbqJMfeeeHc8XJKawj4P9TgDRnViIqqBeH2QA= k8s.io/apimachinery v0.18.3/go.mod h1:OaXp26zu/5J7p0f92ASynJa1pZo06YlV9fG7BoWbCko= k8s.io/client-go v0.18.3/go.mod h1:4a/dpQEvzAhT1BbuWW09qvIaGw6Gbu1gZYiQZIi1DMw= diff --git a/vendor/github.com/linode/linodego/lke_clusters.go b/vendor/github.com/linode/linodego/lke_clusters.go index 6e9570d5b..f764c7c15 100644 --- a/vendor/github.com/linode/linodego/lke_clusters.go +++ b/vendor/github.com/linode/linodego/lke_clusters.go @@ -75,6 +75,12 @@ type LKEVersion struct { ID string `json:"id"` } +// LKEClusterRegenerateOptions fields are those accepted by RegenerateLKECluster +type LKEClusterRegenerateOptions struct { + KubeConfig bool `json:"kubeconfig"` + ServiceToken bool `json:"servicetoken"` +} + // UnmarshalJSON implements the json.Unmarshaler interface func (i *LKECluster) UnmarshalJSON(b []byte) error { type Mask LKECluster @@ -322,3 +328,26 @@ func (c *Client) RecycleLKEClusterNodes(ctx context.Context, clusterID int) erro _, err := coupleAPIErrors(c.R(ctx).Post(e)) return err } + +// RegenerateLKECluster regenerates the Kubeconfig file and/or the service account token for the specified LKE Cluster. +func (c *Client) RegenerateLKECluster(ctx context.Context, clusterID int, opts LKEClusterRegenerateOptions) (*LKECluster, error) { + body, err := json.Marshal(opts) + if err != nil { + return nil, err + } + + e := fmt.Sprintf("lke/clusters/%d/regenerate", clusterID) + req := c.R(ctx).SetResult(&LKECluster{}).SetBody(string(body)) + r, err := coupleAPIErrors(req.Post(e)) + if err != nil { + return nil, err + } + return r.Result().(*LKECluster), nil +} + +// DeleteLKEClusterServiceToken deletes and regenerates the service account token for a Cluster. 
+func (c *Client) DeleteLKEClusterServiceToken(ctx context.Context, clusterID int) error { + e := fmt.Sprintf("lke/clusters/%d/servicetoken", clusterID) + _, err := coupleAPIErrors(c.R(ctx).Delete(e)) + return err +} diff --git a/vendor/github.com/linode/linodego/object_storage_buckets.go b/vendor/github.com/linode/linodego/object_storage_buckets.go index 4d640b457..9567eac66 100644 --- a/vendor/github.com/linode/linodego/object_storage_buckets.go +++ b/vendor/github.com/linode/linodego/object_storage_buckets.go @@ -4,6 +4,7 @@ import ( "context" "encoding/json" "fmt" + "net/url" "time" "github.com/go-resty/resty/v2" @@ -17,6 +18,8 @@ type ObjectStorageBucket struct { Created *time.Time `json:"-"` Hostname string `json:"hostname"` + Objects int `json:"objects"` + Size int `json:"size"` } // ObjectStorageBucketAccess holds Object Storage access info @@ -78,8 +81,12 @@ type ObjectStorageBucketsPagedResponse struct { } // endpoint gets the endpoint URL for ObjectStorageBucket -func (ObjectStorageBucketsPagedResponse) endpoint(_ ...any) string { - return "object-storage/buckets" +func (ObjectStorageBucketsPagedResponse) endpoint(args ...any) string { + endpoint := "object-storage/buckets" + if len(args) > 0 { + endpoint = fmt.Sprintf(endpoint+"/%s", args[0]) + } + return endpoint } func (resp *ObjectStorageBucketsPagedResponse) castResult(r *resty.Request, e string) (int, int, error) { @@ -102,8 +109,19 @@ func (c *Client) ListObjectStorageBuckets(ctx context.Context, opts *ListOptions return response.Data, nil } +// ListObjectStorageBucketsInCluster lists all ObjectStorageBuckets of a cluster +func (c *Client) ListObjectStorageBucketsInCluster(ctx context.Context, opts *ListOptions, clusterID string) ([]ObjectStorageBucket, error) { + response := ObjectStorageBucketsPagedResponse{} + err := c.listHelper(ctx, &response, opts, clusterID) + if err != nil { + return nil, err + } + return response.Data, nil +} + // GetObjectStorageBucket gets the ObjectStorageBucket with the provided label func (c *Client) GetObjectStorageBucket(ctx context.Context, clusterID, label string) (*ObjectStorageBucket, error) { + label = url.PathEscape(label) e := fmt.Sprintf("object-storage/buckets/%s/%s", clusterID, label) req := c.R(ctx).SetResult(&ObjectStorageBucket{}) r, err := coupleAPIErrors(req.Get(e)) @@ -131,6 +149,7 @@ func (c *Client) CreateObjectStorageBucket(ctx context.Context, opts ObjectStora // GetObjectStorageBucketAccess gets the current access config for a bucket func (c *Client) GetObjectStorageBucketAccess(ctx context.Context, clusterID, label string) (*ObjectStorageBucketAccess, error) { + label = url.PathEscape(label) e := fmt.Sprintf("object-storage/buckets/%s/%s/access", clusterID, label) req := c.R(ctx).SetResult(&ObjectStorageBucketAccess{}) r, err := coupleAPIErrors(req.Get(e)) @@ -148,6 +167,7 @@ func (c *Client) UpdateObjectStorageBucketAccess(ctx context.Context, clusterID, return err } + label = url.PathEscape(label) e := fmt.Sprintf("object-storage/buckets/%s/%s/access", clusterID, label) _, err = coupleAPIErrors(c.R(ctx).SetBody(string(body)).Post(e)) if err != nil { @@ -159,6 +179,7 @@ func (c *Client) UpdateObjectStorageBucketAccess(ctx context.Context, clusterID, // DeleteObjectStorageBucket deletes the ObjectStorageBucket with the specified label func (c *Client) DeleteObjectStorageBucket(ctx context.Context, clusterID, label string) error { + label = url.PathEscape(label) e := fmt.Sprintf("object-storage/buckets/%s/%s", clusterID, label) _, err := 
coupleAPIErrors(c.R(ctx).Delete(e)) return err diff --git a/vendor/github.com/linode/linodego/object_storage_object.go b/vendor/github.com/linode/linodego/object_storage_object.go index 612d4c8d3..f18f03688 100644 --- a/vendor/github.com/linode/linodego/object_storage_object.go +++ b/vendor/github.com/linode/linodego/object_storage_object.go @@ -4,14 +4,15 @@ import ( "context" "encoding/json" "fmt" + "net/url" ) type ObjectStorageObjectURLCreateOptions struct { Name string `json:"name"` Method string `json:"method"` - ContentType string `json:"content_type,omit_empty"` - ContentDisposition string `json:"content_disposition,omit_empty"` - ExpiresIn *int `json:"expires_in,omit_empty"` + ContentType string `json:"content_type,omitempty"` + ContentDisposition string `json:"content_disposition,omitempty"` + ExpiresIn *int `json:"expires_in,omitempty"` } type ObjectStorageObjectURL struct { @@ -35,6 +36,7 @@ func (c *Client) CreateObjectStorageObjectURL(ctx context.Context, objectID, lab return nil, err } + label = url.PathEscape(label) e := fmt.Sprintf("object-storage/buckets/%s/%s/object-url", objectID, label) req := c.R(ctx).SetResult(&ObjectStorageObjectURL{}).SetBody(string(body)) r, err := coupleAPIErrors(req.Post(e)) @@ -42,6 +44,7 @@ func (c *Client) CreateObjectStorageObjectURL(ctx context.Context, objectID, lab } func (c *Client) GetObjectStorageObjectACLConfig(ctx context.Context, objectID, label, object string) (*ObjectStorageObjectACLConfig, error) { + label = url.PathEscape(label) e := fmt.Sprintf("object-storage/buckets/%s/%s/object-acl?name=%s", objectID, label, object) req := c.R(ctx).SetResult(&ObjectStorageObjectACLConfig{}) r, err := coupleAPIErrors(req.Get(e)) @@ -54,6 +57,7 @@ func (c *Client) UpdateObjectStorageObjectACLConfig(ctx context.Context, objectI return nil, err } + label = url.PathEscape(label) e := fmt.Sprintf("object-storage/buckets/%s/%s/object-acl", objectID, label) req := c.R(ctx).SetResult(&ObjectStorageObjectACLConfig{}).SetBody(string(body)) r, err := coupleAPIErrors(req.Put(e)) diff --git a/vendor/github.com/linode/linodego/pagination.go b/vendor/github.com/linode/linodego/pagination.go index 7cc5c4277..2de2cfe32 100644 --- a/vendor/github.com/linode/linodego/pagination.go +++ b/vendor/github.com/linode/linodego/pagination.go @@ -39,7 +39,7 @@ func NewListOptions(page int, filter string) *ListOptions { func (l ListOptions) Hash() (string, error) { data, err := json.Marshal(l) if err != nil { - return "", fmt.Errorf("failed to cache ListOptions: %s", err) + return "", fmt.Errorf("failed to cache ListOptions: %w", err) } h := sha256.New() diff --git a/vendor/github.com/linode/linodego/profile_logins.go b/vendor/github.com/linode/linodego/profile_logins.go new file mode 100644 index 000000000..ce5d87bde --- /dev/null +++ b/vendor/github.com/linode/linodego/profile_logins.go @@ -0,0 +1,83 @@ +package linodego + +import ( + "context" + "encoding/json" + "fmt" + "time" + + "github.com/go-resty/resty/v2" + "github.com/linode/linodego/internal/parseabletime" +) + +// ProfileLogin represents a ProfileLogin object +type ProfileLogin struct { + Datetime *time.Time `json:"datetime"` + ID int `json:"id"` + IP string `json:"ip"` + Restricted bool `json:"restricted"` + Status string `json:"status"` + Username string `json:"username"` +} + +type ProfileLoginsPagedResponse struct { + *PageOptions + Data []ProfileLogin `json:"data"` +} + +func (ProfileLoginsPagedResponse) endpoint(_ ...any) string { + return "profile/logins" +} + +func (resp *ProfileLoginsPagedResponse) 
castResult(r *resty.Request, e string) (int, int, error) { + res, err := coupleAPIErrors(r.SetResult(ProfileLoginsPagedResponse{}).Get(e)) + if err != nil { + return 0, 0, err + } + castedRes := res.Result().(*ProfileLoginsPagedResponse) + resp.Data = append(resp.Data, castedRes.Data...) + return castedRes.Pages, castedRes.Results, nil +} + +// UnmarshalJSON implements the json.Unmarshaler interface +func (i *ProfileLogin) UnmarshalJSON(b []byte) error { + type Mask ProfileLogin + + l := struct { + *Mask + Datetime *parseabletime.ParseableTime `json:"datetime"` + }{ + Mask: (*Mask)(i), + } + + if err := json.Unmarshal(b, &l); err != nil { + return err + } + + i.Datetime = (*time.Time)(l.Datetime) + + return nil +} + +// GetProfileLogin returns the Profile Login of the authenticated user +func (c *Client) GetProfileLogin(ctx context.Context, id int) (*ProfileLogin, error) { + e := fmt.Sprintf("profile/logins/%d", id) + + req := c.R(ctx).SetResult(&ProfileLogin{}) + r, err := coupleAPIErrors(req.Get(e)) + if err != nil { + return nil, err + } + return r.Result().(*ProfileLogin), nil +} + +// ListProfileLogins lists Profile Logins of the authenticated user +func (c *Client) ListProfileLogins(ctx context.Context, opts *ListOptions) ([]ProfileLogin, error) { + response := ProfileLoginsPagedResponse{} + err := c.listHelper(ctx, &response, opts) + if err != nil { + return nil, err + } + + return response.Data, nil +} diff --git a/vendor/github.com/linode/linodego/tags.go b/vendor/github.com/linode/linodego/tags.go index 53a7dff8f..396708e2f 100644 --- a/vendor/github.com/linode/linodego/tags.go +++ b/vendor/github.com/linode/linodego/tags.go @@ -5,6 +5,7 @@ import ( "encoding/json" "errors" "fmt" + "net/url" "github.com/go-resty/resty/v2" ) @@ -147,6 +148,7 @@ func (i *TaggedObject) fixData() (*TaggedObject, error) { // ListTaggedObjects lists Tagged Objects func (c *Client) ListTaggedObjects(ctx context.Context, label string, opts *ListOptions) (TaggedObjectList, error) { response := TaggedObjectsPagedResponse{} + label = url.PathEscape(label) err := c.listHelper(ctx, &response, opts, label) if err != nil { return nil, err @@ -219,6 +221,7 @@ func (c *Client) CreateTag(ctx context.Context, opts TagCreateOptions) (*Tag, er // DeleteTag deletes the Tag with the specified id func (c *Client) DeleteTag(ctx context.Context, label string) error { + label = url.PathEscape(label) e := fmt.Sprintf("tags/%s", label) _, err := coupleAPIErrors(c.R(ctx).Delete(e)) return err diff --git a/vendor/github.com/linode/linodego/vlans.go b/vendor/github.com/linode/linodego/vlans.go index 30db80db0..7f6433b0d 100644 --- a/vendor/github.com/linode/linodego/vlans.go +++ b/vendor/github.com/linode/linodego/vlans.go @@ -74,12 +74,12 @@ func (c *Client) GetVLANIPAMAddress(ctx context.Context, linodeID int, vlanLabel f.AddField(Eq, "interfaces", vlanLabel) vlanFilter, err := f.MarshalJSON() if err != nil { - return "", fmt.Errorf("Unable to convert VLAN label: %s to a filterable object: %s", vlanLabel, err) + return "", fmt.Errorf("Unable to convert VLAN label: %s to a filterable object: %w", vlanLabel, err) } cfgs, err := c.ListInstanceConfigs(ctx, linodeID, &ListOptions{Filter: string(vlanFilter)}) if err != nil { - return "", fmt.Errorf("Fetching configs for instance %v failed: %s", linodeID, err) + return "", fmt.Errorf("Fetching configs for instance %v failed: %w", linodeID, err) } interfaces := cfgs[0].Interfaces diff --git a/vendor/github.com/linode/linodego/waitfor.go b/vendor/github.com/linode/linodego/waitfor.go 
index c6ca1e2b6..60fbf9ec3 100644 --- a/vendor/github.com/linode/linodego/waitfor.go +++ b/vendor/github.com/linode/linodego/waitfor.go @@ -41,7 +41,7 @@ func (client Client) WaitForInstanceStatus(ctx context.Context, instanceID int, return instance, nil } case <-ctx.Done(): - return nil, fmt.Errorf("Error waiting for Instance %d status %s: %s", instanceID, status, ctx.Err()) + return nil, fmt.Errorf("Error waiting for Instance %d status %s: %w", instanceID, status, ctx.Err()) } } } @@ -77,7 +77,7 @@ func (client Client) WaitForInstanceDiskStatus(ctx context.Context, instanceID i } } case <-ctx.Done(): - return nil, fmt.Errorf("Error waiting for Instance %d Disk %d status %s: %s", instanceID, diskID, status, ctx.Err()) + return nil, fmt.Errorf("Error waiting for Instance %d Disk %d status %s: %w", instanceID, diskID, status, ctx.Err()) } } } @@ -104,7 +104,7 @@ func (client Client) WaitForVolumeStatus(ctx context.Context, volumeID int, stat return volume, nil } case <-ctx.Done(): - return nil, fmt.Errorf("Error waiting for Volume %d status %s: %s", volumeID, status, ctx.Err()) + return nil, fmt.Errorf("Error waiting for Volume %d status %s: %w", volumeID, status, ctx.Err()) } } } @@ -131,7 +131,7 @@ func (client Client) WaitForSnapshotStatus(ctx context.Context, instanceID int, return snapshot, nil } case <-ctx.Done(): - return nil, fmt.Errorf("Error waiting for Instance %d Snapshot %d status %s: %s", instanceID, snapshotID, status, ctx.Err()) + return nil, fmt.Errorf("Error waiting for Instance %d Snapshot %d status %s: %w", instanceID, snapshotID, status, ctx.Err()) } } } @@ -164,7 +164,7 @@ func (client Client) WaitForVolumeLinodeID(ctx context.Context, volumeID int, li return volume, nil } case <-ctx.Done(): - return nil, fmt.Errorf("Error waiting for Volume %d to have Instance %v: %s", volumeID, linodeID, ctx.Err()) + return nil, fmt.Errorf("Error waiting for Volume %d to have Instance %v: %w", volumeID, linodeID, ctx.Err()) } } } @@ -191,7 +191,7 @@ func (client Client) WaitForLKEClusterStatus(ctx context.Context, clusterID int, return cluster, nil } case <-ctx.Done(): - return nil, fmt.Errorf("Error waiting for Cluster %d status %s: %s", clusterID, status, ctx.Err()) + return nil, fmt.Errorf("Error waiting for Cluster %d status %s: %w", clusterID, status, ctx.Err()) } } } @@ -234,7 +234,7 @@ func (client Client) WaitForLKEClusterConditions( lkeKubeConfig, err := client.GetLKEClusterKubeconfig(ctx, clusterID) if err != nil { - return fmt.Errorf("failed to get Kubeconfig for LKE cluster %d: %s", clusterID, err) + return fmt.Errorf("failed to get Kubeconfig for LKE cluster %d: %w", clusterID, err) } ticker := time.NewTicker(client.millisecondsPerPoll * time.Millisecond) @@ -260,7 +260,7 @@ func (client Client) WaitForLKEClusterConditions( } case <-ctx.Done(): - return fmt.Errorf("Error waiting for cluster %d conditions: %s", clusterID, ctx.Err()) + return fmt.Errorf("Error waiting for cluster %d conditions: %w", clusterID, ctx.Err()) } } } @@ -291,7 +291,7 @@ func (client Client) WaitForEventFinished(ctx context.Context, id any, entityTyp // All of the filter supported types have int ids filterableEntityID, err := strconv.Atoi(fmt.Sprintf("%v", id)) if err != nil { - return nil, fmt.Errorf("Error parsing Entity ID %q for optimized WaitForEventFinished EventType %q: %s", id, entityType, err) + return nil, fmt.Errorf("Error parsing Entity ID %q for optimized WaitForEventFinished EventType %q: %w", id, entityType, err) } filter.AddField(Eq, "entity.id", filterableEntityID) 
filter.AddField(Eq, "entity.type", entityType) @@ -397,7 +397,7 @@ func (client Client) WaitForEventFinished(ctx context.Context, id any, entityTyp lastLog = nextLog } case <-ctx.Done(): - return nil, fmt.Errorf("Error waiting for Event Status '%s' of %s %v action '%s': %s", EventFinished, titledEntityType, id, action, ctx.Err()) + return nil, fmt.Errorf("Error waiting for Event Status '%s' of %s %v action '%s': %w", EventFinished, titledEntityType, id, action, ctx.Err()) } } } @@ -424,7 +424,7 @@ func (client Client) WaitForImageStatus(ctx context.Context, imageID string, sta return image, nil } case <-ctx.Done(): - return nil, fmt.Errorf("failed to wait for Image %s status %s: %s", imageID, status, ctx.Err()) + return nil, fmt.Errorf("failed to wait for Image %s status %s: %w", imageID, status, ctx.Err()) } } } @@ -451,7 +451,7 @@ func (client Client) WaitForMySQLDatabaseBackup(ctx context.Context, dbID int, l } } case <-ctx.Done(): - return nil, fmt.Errorf("failed to wait for backup %s: %s", label, ctx.Err()) + return nil, fmt.Errorf("failed to wait for backup %s: %w", label, ctx.Err()) } } } @@ -478,7 +478,7 @@ func (client Client) WaitForMongoDatabaseBackup(ctx context.Context, dbID int, l } } case <-ctx.Done(): - return nil, fmt.Errorf("failed to wait for backup %s: %s", label, ctx.Err()) + return nil, fmt.Errorf("failed to wait for backup %s: %w", label, ctx.Err()) } } } @@ -505,7 +505,7 @@ func (client Client) WaitForPostgresDatabaseBackup(ctx context.Context, dbID int } } case <-ctx.Done(): - return nil, fmt.Errorf("failed to wait for backup %s: %s", label, ctx.Err()) + return nil, fmt.Errorf("failed to wait for backup %s: %w", label, ctx.Err()) } } } @@ -559,14 +559,14 @@ func (client Client) WaitForDatabaseStatus( currentStatus, err := statusHandler(ctx, client, dbID) if err != nil { - return fmt.Errorf("failed to get db status: %s", err) + return fmt.Errorf("failed to get db status: %w", err) } if currentStatus == status { return nil } case <-ctx.Done(): - return fmt.Errorf("failed to wait for database %d status: %s", dbID, ctx.Err()) + return fmt.Errorf("failed to wait for database %d status: %w", dbID, ctx.Err()) } } } @@ -585,7 +585,7 @@ func (client Client) NewEventPoller( } if err := result.PreTask(ctx); err != nil { - return nil, fmt.Errorf("failed to run pretask: %s", err) + return nil, fmt.Errorf("failed to run pretask: %w", err) } return &result, nil @@ -632,7 +632,7 @@ func (p *EventPoller) PreTask(ctx context.Context) error { PageOptions: &PageOptions{Page: 1}, }) if err != nil { - return fmt.Errorf("failed to list events: %s", err) + return fmt.Errorf("failed to list events: %w", err) } eventIDs := make(map[int]bool, len(events)) @@ -672,7 +672,7 @@ func (p *EventPoller) WaitForLatestUnknownEvent(ctx context.Context) (*Event, er case <-ticker.C: events, err := p.client.ListEvents(ctx, &listOpts) if err != nil { - return nil, fmt.Errorf("failed to list events: %s", err) + return nil, fmt.Errorf("failed to list events: %w", err) } for _, event := range events { @@ -685,7 +685,7 @@ func (p *EventPoller) WaitForLatestUnknownEvent(ctx context.Context) (*Event, er } } case <-ctx.Done(): - return nil, fmt.Errorf("failed to wait for event: %s", ctx.Err()) + return nil, fmt.Errorf("failed to wait for event: %w", ctx.Err()) } } } @@ -702,7 +702,7 @@ func (p *EventPoller) WaitForFinished( event, err := p.WaitForLatestUnknownEvent(ctx) if err != nil { - return nil, fmt.Errorf("failed to wait for event: %s", err) + return nil, fmt.Errorf("failed to wait for event: %w", err) } 
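These waitfor.go hunks switch the fmt.Errorf verb from %s to %w, so the underlying error is wrapped rather than flattened into text. A minimal sketch, with a hypothetical helper standing in for the WaitFor* functions, of what wrapping buys callers:

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// waitForThing stands in for the linodego WaitFor* helpers: it wraps the
// context error with %w instead of formatting it with %s.
func waitForThing(ctx context.Context) error {
	<-ctx.Done()
	return fmt.Errorf("Error waiting for Thing: %w", ctx.Err())
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
	defer cancel()

	err := waitForThing(ctx)

	// With %w the original sentinel survives, so callers can branch on it.
	// With %s this would print "timeout: false", because only the message
	// text would remain.
	fmt.Println("timeout:", errors.Is(err, context.DeadlineExceeded))
	fmt.Println(err)
}
```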
for { @@ -710,7 +710,7 @@ func (p *EventPoller) WaitForFinished( case <-ticker.C: event, err := p.client.GetEvent(ctx, event.ID) if err != nil { - return nil, fmt.Errorf("failed to get event: %s", err) + return nil, fmt.Errorf("failed to get event: %w", err) } switch event.Status { @@ -722,7 +722,7 @@ func (p *EventPoller) WaitForFinished( continue } case <-ctx.Done(): - return nil, fmt.Errorf("failed to wait for event: %s", ctx.Err()) + return nil, fmt.Errorf("failed to wait for event: %w", ctx.Err()) } } } diff --git a/vendor/github.com/miekg/dns/README.md b/vendor/github.com/miekg/dns/README.md index ec9540392..06bea9fab 100644 --- a/vendor/github.com/miekg/dns/README.md +++ b/vendor/github.com/miekg/dns/README.md @@ -78,7 +78,7 @@ A not-so-up-to-date-list-that-may-be-actually-current: * https://fleetdeck.io/ * https://github.com/markdingo/autoreverse * https://github.com/slackhq/nebula -* https://github.com/dnschecktool/dow-proxy +* https://addr.tools/ * https://dnscheck.tools/ * https://github.com/egbakou/domainverifier diff --git a/vendor/github.com/miekg/dns/client.go b/vendor/github.com/miekg/dns/client.go index f694ea589..9051ae007 100644 --- a/vendor/github.com/miekg/dns/client.go +++ b/vendor/github.com/miekg/dns/client.go @@ -106,7 +106,6 @@ func (c *Client) Dial(address string) (conn *Conn, err error) { } // DialContext connects to the address on the named network, with a context.Context. -// For TLS over TCP (DoT) the context isn't used yet. This will be enabled when Go 1.18 is released. func (c *Client) DialContext(ctx context.Context, address string) (conn *Conn, err error) { // create a new dialer with the appropriate timeout var d net.Dialer @@ -127,15 +126,11 @@ func (c *Client) DialContext(ctx context.Context, address string) (conn *Conn, e if useTLS { network = strings.TrimSuffix(network, "-tls") - // TODO(miekg): Enable after Go 1.18 is released, to be able to support two prev. releases. - /* - tlsDialer := tls.Dialer{ - NetDialer: &d, - Config: c.TLSConfig, - } - conn.Conn, err = tlsDialer.DialContext(ctx, network, address) - */ - conn.Conn, err = tls.DialWithDialer(&d, network, address, c.TLSConfig) + tlsDialer := tls.Dialer{ + NetDialer: &d, + Config: c.TLSConfig, + } + conn.Conn, err = tlsDialer.DialContext(ctx, network, address) } else { conn.Conn, err = d.DialContext(ctx, network, address) } diff --git a/vendor/github.com/miekg/dns/clientconfig.go b/vendor/github.com/miekg/dns/clientconfig.go index e11b630df..d00ac62fb 100644 --- a/vendor/github.com/miekg/dns/clientconfig.go +++ b/vendor/github.com/miekg/dns/clientconfig.go @@ -68,7 +68,7 @@ func ClientConfigFromReader(resolvconf io.Reader) (*ClientConfig, error) { } case "search": // set search path to given servers - c.Search = append([]string(nil), f[1:]...) 
+ c.Search = cloneSlice(f[1:]) case "options": // magic options for _, s := range f[1:] { diff --git a/vendor/github.com/miekg/dns/defaults.go b/vendor/github.com/miekg/dns/defaults.go index f2cdbf430..75b17f0c1 100644 --- a/vendor/github.com/miekg/dns/defaults.go +++ b/vendor/github.com/miekg/dns/defaults.go @@ -208,7 +208,7 @@ func IsDomainName(s string) (labels int, ok bool) { } // check for \DDD - if i+3 < len(s) && isDigit(s[i+1]) && isDigit(s[i+2]) && isDigit(s[i+3]) { + if isDDD(s[i+1:]) { i += 3 begin += 3 } else { diff --git a/vendor/github.com/miekg/dns/dnssec.go b/vendor/github.com/miekg/dns/dnssec.go index ea01aa81f..1be87eae6 100644 --- a/vendor/github.com/miekg/dns/dnssec.go +++ b/vendor/github.com/miekg/dns/dnssec.go @@ -128,10 +128,6 @@ type dnskeyWireFmt struct { /* Nothing is left out */ } -func divRoundUp(a, b int) int { - return (a + b - 1) / b -} - // KeyTag calculates the keytag (or key-id) of the DNSKEY. func (k *DNSKEY) KeyTag() uint16 { if k == nil { @@ -417,11 +413,11 @@ func (rr *RRSIG) Verify(k *DNSKEY, rrset []RR) error { return err } - sigbuf := rr.sigBuf() // Get the binary signature data - if rr.Algorithm == PRIVATEDNS { // PRIVATEOID - // TODO(miek) - // remove the domain name and assume its ours? - } + sigbuf := rr.sigBuf() // Get the binary signature data + // TODO(miek) + // remove the domain name and assume its ours? + // if rr.Algorithm == PRIVATEDNS { // PRIVATEOID + // } h, cryptohash, err := hashFromAlgorithm(rr.Algorithm) if err != nil { diff --git a/vendor/github.com/miekg/dns/edns.go b/vendor/github.com/miekg/dns/edns.go index 1c0677c82..b5bdac816 100644 --- a/vendor/github.com/miekg/dns/edns.go +++ b/vendor/github.com/miekg/dns/edns.go @@ -263,7 +263,7 @@ func (e *EDNS0_NSID) copy() EDNS0 { return &EDNS0_NSID{e.Code, e.Nsid} // o.Hdr.Name = "." // o.Hdr.Rrtype = dns.TypeOPT // e := new(dns.EDNS0_SUBNET) -// e.Code = dns.EDNS0SUBNET // by default this is filled in through unpacking OPT packets (unpackDataOpt) +// e.Code = dns.EDNS0SUBNET // by default this is filled in through unpacking OPT packets (unpackDataOpt) // e.Family = 1 // 1 for IPv4 source address, 2 for IPv6 // e.SourceNetmask = 32 // 32 for IPV4, 128 for IPv6 // e.SourceScope = 0 @@ -520,8 +520,8 @@ type EDNS0_DAU struct { // Option implements the EDNS0 interface. func (e *EDNS0_DAU) Option() uint16 { return EDNS0DAU } -func (e *EDNS0_DAU) pack() ([]byte, error) { return e.AlgCode, nil } -func (e *EDNS0_DAU) unpack(b []byte) error { e.AlgCode = b; return nil } +func (e *EDNS0_DAU) pack() ([]byte, error) { return cloneSlice(e.AlgCode), nil } +func (e *EDNS0_DAU) unpack(b []byte) error { e.AlgCode = cloneSlice(b); return nil } func (e *EDNS0_DAU) String() string { s := "" @@ -544,8 +544,8 @@ type EDNS0_DHU struct { // Option implements the EDNS0 interface. func (e *EDNS0_DHU) Option() uint16 { return EDNS0DHU } -func (e *EDNS0_DHU) pack() ([]byte, error) { return e.AlgCode, nil } -func (e *EDNS0_DHU) unpack(b []byte) error { e.AlgCode = b; return nil } +func (e *EDNS0_DHU) pack() ([]byte, error) { return cloneSlice(e.AlgCode), nil } +func (e *EDNS0_DHU) unpack(b []byte) error { e.AlgCode = cloneSlice(b); return nil } func (e *EDNS0_DHU) String() string { s := "" @@ -568,8 +568,8 @@ type EDNS0_N3U struct { // Option implements the EDNS0 interface. 
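The EDNS0 hunks around this point (DAU and DHU above, N3U, LOCAL, and PADDING below) stop handing out the option's internal byte slice: pack and unpack now go through cloneSlice. A minimal sketch, with illustrative types only, of the aliasing bug that defensive copying prevents:

```go
package main

import "fmt"

// option models an EDNS0 option whose unpack used to retain the wire
// buffer it was handed.
type option struct{ data []byte }

func (o *option) unpackAliasing(b []byte) { o.data = b }
func (o *option) unpackCloning(b []byte)  { o.data = append([]byte(nil), b...) }

func main() {
	wire := []byte{0x00, 0x01, 0x02}

	var aliased, cloned option
	aliased.unpackAliasing(wire)
	cloned.unpackCloning(wire)

	// Reusing the message buffer, a common optimization, silently
	// rewrites the aliased option's payload.
	wire[0] = 0xff

	fmt.Printf("aliased: % x\n", aliased.data) // ff 01 02: corrupted
	fmt.Printf("cloned:  % x\n", cloned.data)  // 00 01 02: stable
}
```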
func (e *EDNS0_N3U) Option() uint16 { return EDNS0N3U } -func (e *EDNS0_N3U) pack() ([]byte, error) { return e.AlgCode, nil } -func (e *EDNS0_N3U) unpack(b []byte) error { e.AlgCode = b; return nil } +func (e *EDNS0_N3U) pack() ([]byte, error) { return cloneSlice(e.AlgCode), nil } +func (e *EDNS0_N3U) unpack(b []byte) error { e.AlgCode = cloneSlice(b); return nil } func (e *EDNS0_N3U) String() string { // Re-use the hash map @@ -646,30 +646,21 @@ type EDNS0_LOCAL struct { // Option implements the EDNS0 interface. func (e *EDNS0_LOCAL) Option() uint16 { return e.Code } + func (e *EDNS0_LOCAL) String() string { return strconv.FormatInt(int64(e.Code), 10) + ":0x" + hex.EncodeToString(e.Data) } + func (e *EDNS0_LOCAL) copy() EDNS0 { - b := make([]byte, len(e.Data)) - copy(b, e.Data) - return &EDNS0_LOCAL{e.Code, b} + return &EDNS0_LOCAL{e.Code, cloneSlice(e.Data)} } func (e *EDNS0_LOCAL) pack() ([]byte, error) { - b := make([]byte, len(e.Data)) - copied := copy(b, e.Data) - if copied != len(e.Data) { - return nil, ErrBuf - } - return b, nil + return cloneSlice(e.Data), nil } func (e *EDNS0_LOCAL) unpack(b []byte) error { - e.Data = make([]byte, len(b)) - copied := copy(e.Data, b) - if copied != len(b) { - return ErrBuf - } + e.Data = cloneSlice(b) return nil } @@ -732,14 +723,10 @@ type EDNS0_PADDING struct { // Option implements the EDNS0 interface. func (e *EDNS0_PADDING) Option() uint16 { return EDNS0PADDING } -func (e *EDNS0_PADDING) pack() ([]byte, error) { return e.Padding, nil } -func (e *EDNS0_PADDING) unpack(b []byte) error { e.Padding = b; return nil } +func (e *EDNS0_PADDING) pack() ([]byte, error) { return cloneSlice(e.Padding), nil } +func (e *EDNS0_PADDING) unpack(b []byte) error { e.Padding = cloneSlice(b); return nil } func (e *EDNS0_PADDING) String() string { return fmt.Sprintf("%0X", e.Padding) } -func (e *EDNS0_PADDING) copy() EDNS0 { - b := make([]byte, len(e.Padding)) - copy(b, e.Padding) - return &EDNS0_PADDING{b} -} +func (e *EDNS0_PADDING) copy() EDNS0 { return &EDNS0_PADDING{cloneSlice(e.Padding)} } // Extended DNS Error Codes (RFC 8914). 
const ( @@ -826,7 +813,7 @@ func (e *EDNS0_EDE) String() string { func (e *EDNS0_EDE) pack() ([]byte, error) { b := make([]byte, 2+len(e.ExtraText)) binary.BigEndian.PutUint16(b[0:], e.InfoCode) - copy(b[2:], []byte(e.ExtraText)) + copy(b[2:], e.ExtraText) return b, nil } diff --git a/vendor/github.com/miekg/dns/listen_no_reuseport.go b/vendor/github.com/miekg/dns/listen_no_reuseport.go index 65ac91021..6ed50f86b 100644 --- a/vendor/github.com/miekg/dns/listen_no_reuseport.go +++ b/vendor/github.com/miekg/dns/listen_no_reuseport.go @@ -1,5 +1,5 @@ -//go:build !go1.11 || (!aix && !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd) -// +build !go1.11 !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd +//go:build !aix && !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd +// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd package dns diff --git a/vendor/github.com/miekg/dns/listen_reuseport.go b/vendor/github.com/miekg/dns/listen_reuseport.go index 89e6c98bc..89bac9034 100644 --- a/vendor/github.com/miekg/dns/listen_reuseport.go +++ b/vendor/github.com/miekg/dns/listen_reuseport.go @@ -1,5 +1,4 @@ -//go:build go1.11 && (aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd) -// +build go1.11 +//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd // +build aix darwin dragonfly freebsd linux netbsd openbsd package dns diff --git a/vendor/github.com/miekg/dns/msg.go b/vendor/github.com/miekg/dns/msg.go index f4d334e4f..d5049a4f9 100644 --- a/vendor/github.com/miekg/dns/msg.go +++ b/vendor/github.com/miekg/dns/msg.go @@ -252,7 +252,7 @@ loop: } // check for \DDD - if i+3 < ls && isDigit(bs[i+1]) && isDigit(bs[i+2]) && isDigit(bs[i+3]) { + if isDDD(bs[i+1:]) { bs[i] = dddToByte(bs[i+1:]) copy(bs[i+1:ls-3], bs[i+4:]) ls -= 3 @@ -448,7 +448,7 @@ Loop: return string(s), off1, nil } -func packTxt(txt []string, msg []byte, offset int, tmp []byte) (int, error) { +func packTxt(txt []string, msg []byte, offset int) (int, error) { if len(txt) == 0 { if offset >= len(msg) { return offset, ErrBuf @@ -458,10 +458,7 @@ func packTxt(txt []string, msg []byte, offset int, tmp []byte) (int, error) { } var err error for _, s := range txt { - if len(s) > len(tmp) { - return offset, ErrBuf - } - offset, err = packTxtString(s, msg, offset, tmp) + offset, err = packTxtString(s, msg, offset) if err != nil { return offset, err } @@ -469,32 +466,30 @@ func packTxt(txt []string, msg []byte, offset int, tmp []byte) (int, error) { return offset, nil } -func packTxtString(s string, msg []byte, offset int, tmp []byte) (int, error) { +func packTxtString(s string, msg []byte, offset int) (int, error) { lenByteOffset := offset - if offset >= len(msg) || len(s) > len(tmp) { + if offset >= len(msg) || len(s) > 256*4+1 /* If all \DDD */ { return offset, ErrBuf } offset++ - bs := tmp[:len(s)] - copy(bs, s) - for i := 0; i < len(bs); i++ { + for i := 0; i < len(s); i++ { if len(msg) <= offset { return offset, ErrBuf } - if bs[i] == '\\' { + if s[i] == '\\' { i++ - if i == len(bs) { + if i == len(s) { break } // check for \DDD - if i+2 < len(bs) && isDigit(bs[i]) && isDigit(bs[i+1]) && isDigit(bs[i+2]) { - msg[offset] = dddToByte(bs[i:]) + if isDDD(s[i:]) { + msg[offset] = dddToByte(s[i:]) i += 2 } else { - msg[offset] = bs[i] + msg[offset] = s[i] } } else { - msg[offset] = bs[i] + msg[offset] = s[i] } offset++ } @@ -522,7 +517,7 @@ func packOctetString(s string, msg []byte, offset int, tmp []byte) (int, error) break } // check for \DDD - if 
i+2 < len(bs) && isDigit(bs[i]) && isDigit(bs[i+1]) && isDigit(bs[i+2]) { + if isDDD(bs[i:]) { msg[offset] = dddToByte(bs[i:]) i += 2 } else { @@ -551,12 +546,11 @@ func unpackTxt(msg []byte, off0 int) (ss []string, off int, err error) { // Helpers for dealing with escaped bytes func isDigit(b byte) bool { return b >= '0' && b <= '9' } -func dddToByte(s []byte) byte { - _ = s[2] // bounds check hint to compiler; see golang.org/issue/14808 - return byte((s[0]-'0')*100 + (s[1]-'0')*10 + (s[2] - '0')) +func isDDD[T ~[]byte | ~string](s T) bool { + return len(s) >= 3 && isDigit(s[0]) && isDigit(s[1]) && isDigit(s[2]) } -func dddStringToByte(s string) byte { +func dddToByte[T ~[]byte | ~string](s T) byte { _ = s[2] // bounds check hint to compiler; see golang.org/issue/14808 return byte((s[0]-'0')*100 + (s[1]-'0')*10 + (s[2] - '0')) } @@ -866,7 +860,7 @@ func (dns *Msg) unpack(dh Header, msg []byte, off int) (err error) { // The header counts might have been wrong so we need to update it dh.Nscount = uint16(len(dns.Ns)) if err == nil { - dns.Extra, off, err = unpackRRslice(int(dh.Arcount), msg, off) + dns.Extra, _, err = unpackRRslice(int(dh.Arcount), msg, off) } // The header counts might have been wrong so we need to update it dh.Arcount = uint16(len(dns.Extra)) @@ -876,11 +870,11 @@ func (dns *Msg) unpack(dh Header, msg []byte, off int) (err error) { dns.Rcode |= opt.ExtendedRcode() } - if off != len(msg) { - // TODO(miek) make this an error? - // use PackOpt to let people tell how detailed the error reporting should be? - // println("dns: extra bytes in dns packet", off, "<", len(msg)) - } + // TODO(miek) make this an error? + // use PackOpt to let people tell how detailed the error reporting should be? + // if off != len(msg) { + // // println("dns: extra bytes in dns packet", off, "<", len(msg)) + // } return err } @@ -1024,7 +1018,7 @@ func escapedNameLen(s string) int { continue } - if i+3 < len(s) && isDigit(s[i+1]) && isDigit(s[i+2]) && isDigit(s[i+3]) { + if isDDD(s[i+1:]) { nameLen -= 3 i += 3 } else { @@ -1065,8 +1059,8 @@ func (dns *Msg) CopyTo(r1 *Msg) *Msg { r1.Compress = dns.Compress if len(dns.Question) > 0 { - r1.Question = make([]Question, len(dns.Question)) - copy(r1.Question, dns.Question) // TODO(miek): Question is an immutable value, ok to do a shallow-copy + // TODO(miek): Question is an immutable value, ok to do a shallow-copy + r1.Question = cloneSlice(dns.Question) } rrArr := make([]RR, len(dns.Answer)+len(dns.Ns)+len(dns.Extra)) diff --git a/vendor/github.com/miekg/dns/msg_helpers.go b/vendor/github.com/miekg/dns/msg_helpers.go index 42d5cd535..8582fc0ad 100644 --- a/vendor/github.com/miekg/dns/msg_helpers.go +++ b/vendor/github.com/miekg/dns/msg_helpers.go @@ -299,8 +299,7 @@ func unpackString(msg []byte, off int) (string, int, error) { } func packString(s string, msg []byte, off int) (int, error) { - txtTmp := make([]byte, 256*4+1) - off, err := packTxtString(s, msg, off, txtTmp) + off, err := packTxtString(s, msg, off) if err != nil { return len(msg), err } @@ -402,8 +401,7 @@ func unpackStringTxt(msg []byte, off int) ([]string, int, error) { } func packStringTxt(s []string, msg []byte, off int) (int, error) { - txtTmp := make([]byte, 256*4+1) // If the whole string consists out of \DDD we need this many. 
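The msg.go hunk above introduces isDDD and dddToByte as generics over `~[]byte | ~string`, which is what lets packTxtString index the escaped string directly instead of first copying it into the removed txtTmp scratch buffer. A self-contained sketch of the same pattern, reproducing the two helpers with a toy driver:

```go
package main

import "fmt"

// isDigit mirrors the helper used by the dns package.
func isDigit(b byte) bool { return b >= '0' && b <= '9' }

// isDDD reports whether s begins with three decimal digits, accepting
// either a string or a byte slice without conversion or copying.
func isDDD[T ~[]byte | ~string](s T) bool {
	return len(s) >= 3 && isDigit(s[0]) && isDigit(s[1]) && isDigit(s[2])
}

// dddToByte decodes a \DDD escape such as "065" into the byte 'A'.
func dddToByte[T ~[]byte | ~string](s T) byte {
	_ = s[2] // bounds check hint to the compiler
	return byte((s[0]-'0')*100 + (s[1]-'0')*10 + (s[2] - '0'))
}

func main() {
	fmt.Println(isDDD("065x"), isDDD([]byte("6x"))) // true false
	fmt.Printf("%c\n", dddToByte("065"))            // A
}
```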
- off, err := packTxt(s, msg, off, txtTmp) + off, err := packTxt(s, msg, off) if err != nil { return len(msg), err } @@ -625,7 +623,7 @@ func unpackDataSVCB(msg []byte, off int) ([]SVCBKeyValue, int, error) { } func packDataSVCB(pairs []SVCBKeyValue, msg []byte, off int) (int, error) { - pairs = append([]SVCBKeyValue(nil), pairs...) + pairs = cloneSlice(pairs) sort.Slice(pairs, func(i, j int) bool { return pairs[i].Key() < pairs[j].Key() }) diff --git a/vendor/github.com/miekg/dns/scan_rr.go b/vendor/github.com/miekg/dns/scan_rr.go index 68dbff690..2d44a3987 100644 --- a/vendor/github.com/miekg/dns/scan_rr.go +++ b/vendor/github.com/miekg/dns/scan_rr.go @@ -1249,7 +1249,7 @@ func (rr *IPSECKEY) parse(c *zlexer, o string) *ParseError { rr.GatewayAddr, rr.GatewayHost, err = parseAddrHostUnion(l.token, o, rr.GatewayType) if err != nil { - return &ParseError{"", "AMTRELAY " + err.Error(), l} + return &ParseError{"", "IPSECKEY " + err.Error(), l} } c.Next() // zBlank diff --git a/vendor/github.com/miekg/dns/server.go b/vendor/github.com/miekg/dns/server.go index 508e9cb37..64e388546 100644 --- a/vendor/github.com/miekg/dns/server.go +++ b/vendor/github.com/miekg/dns/server.go @@ -224,7 +224,7 @@ type Server struct { // Maximum number of TCP queries before we close the socket. Default is maxTCPQueries (unlimited if -1). MaxTCPQueries int // Whether to set the SO_REUSEPORT socket option, allowing multiple listeners to be bound to a single address. - // It is only supported on go1.11+ and when using ListenAndServe. + // It is only supported on certain GOOSes and when using ListenAndServe. ReusePort bool // AcceptMsgFunc will check the incoming message and will reject it early in the process. // By default DefaultMsgAcceptFunc will be used. diff --git a/vendor/github.com/miekg/dns/svcb.go b/vendor/github.com/miekg/dns/svcb.go index e17132369..6d496d74d 100644 --- a/vendor/github.com/miekg/dns/svcb.go +++ b/vendor/github.com/miekg/dns/svcb.go @@ -289,7 +289,7 @@ func (s *SVCBMandatory) String() string { } func (s *SVCBMandatory) pack() ([]byte, error) { - codes := append([]SVCBKey(nil), s.Code...) + codes := cloneSlice(s.Code) sort.Slice(codes, func(i, j int) bool { return codes[i] < codes[j] }) @@ -328,9 +328,7 @@ func (s *SVCBMandatory) len() int { } func (s *SVCBMandatory) copy() SVCBKeyValue { - return &SVCBMandatory{ - append([]SVCBKey(nil), s.Code...), - } + return &SVCBMandatory{cloneSlice(s.Code)} } // SVCBAlpn pair is used to list supported connection protocols. @@ -481,9 +479,7 @@ func (s *SVCBAlpn) len() int { } func (s *SVCBAlpn) copy() SVCBKeyValue { - return &SVCBAlpn{ - append([]string(nil), s.Alpn...), - } + return &SVCBAlpn{cloneSlice(s.Alpn)} } // SVCBNoDefaultAlpn pair signifies no support for default connection protocols. @@ -595,6 +591,7 @@ func (s *SVCBIPv4Hint) unpack(b []byte) error { if len(b) == 0 || len(b)%4 != 0 { return errors.New("dns: svcbipv4hint: ipv4 address byte array length is not a multiple of 4") } + b = cloneSlice(b) x := make([]net.IP, 0, len(b)/4) for i := 0; i < len(b); i += 4 { x = append(x, net.IP(b[i:i+4])) @@ -635,12 +632,9 @@ func (s *SVCBIPv4Hint) parse(b string) error { func (s *SVCBIPv4Hint) copy() SVCBKeyValue { hint := make([]net.IP, len(s.Hint)) for i, ip := range s.Hint { - hint[i] = copyIP(ip) - } - - return &SVCBIPv4Hint{ - Hint: hint, + hint[i] = cloneSlice(ip) } + return &SVCBIPv4Hint{Hint: hint} } // SVCBECHConfig pair contains the ECHConfig structure defined in draft-ietf-tls-esni [RFC xxxx]. 
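packDataSVCB and SVCBMandatory.pack above clone before sorting, since sort.Slice works in place and would otherwise reorder the record the caller still holds. A minimal sketch of the clone-then-sort pattern, with hypothetical names:

```go
package main

import (
	"fmt"
	"sort"
)

// packCodes emits codes in canonical ascending order without reordering
// the caller's slice, mirroring what SVCBMandatory.pack does.
func packCodes(codes []uint16) []uint16 {
	codes = append([]uint16(nil), codes...) // clone before sorting
	sort.Slice(codes, func(i, j int) bool { return codes[i] < codes[j] })
	return codes
}

func main() {
	original := []uint16{3, 1, 2}
	fmt.Println(packCodes(original)) // [1 2 3]
	fmt.Println(original)            // [3 1 2]: caller's order preserved
}
```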
@@ -660,19 +654,18 @@ func (s *SVCBECHConfig) String() string { return toBase64(s.ECH) } func (s *SVCBECHConfig) len() int { return len(s.ECH) } func (s *SVCBECHConfig) pack() ([]byte, error) { - return append([]byte(nil), s.ECH...), nil + return cloneSlice(s.ECH), nil } func (s *SVCBECHConfig) copy() SVCBKeyValue { - return &SVCBECHConfig{ - append([]byte(nil), s.ECH...), - } + return &SVCBECHConfig{cloneSlice(s.ECH)} } func (s *SVCBECHConfig) unpack(b []byte) error { - s.ECH = append([]byte(nil), b...) + s.ECH = cloneSlice(b) return nil } + func (s *SVCBECHConfig) parse(b string) error { x, err := fromBase64([]byte(b)) if err != nil { @@ -715,6 +708,7 @@ func (s *SVCBIPv6Hint) unpack(b []byte) error { if len(b) == 0 || len(b)%16 != 0 { return errors.New("dns: svcbipv6hint: ipv6 address byte array length not a multiple of 16") } + b = cloneSlice(b) x := make([]net.IP, 0, len(b)/16) for i := 0; i < len(b); i += 16 { ip := net.IP(b[i : i+16]) @@ -758,12 +752,9 @@ func (s *SVCBIPv6Hint) parse(b string) error { func (s *SVCBIPv6Hint) copy() SVCBKeyValue { hint := make([]net.IP, len(s.Hint)) for i, ip := range s.Hint { - hint[i] = copyIP(ip) - } - - return &SVCBIPv6Hint{ - Hint: hint, + hint[i] = cloneSlice(ip) } + return &SVCBIPv6Hint{Hint: hint} } // SVCBDoHPath pair is used to indicate the URI template that the @@ -831,11 +822,11 @@ type SVCBLocal struct { func (s *SVCBLocal) Key() SVCBKey { return s.KeyCode } func (s *SVCBLocal) String() string { return svcbParamToStr(s.Data) } -func (s *SVCBLocal) pack() ([]byte, error) { return append([]byte(nil), s.Data...), nil } +func (s *SVCBLocal) pack() ([]byte, error) { return cloneSlice(s.Data), nil } func (s *SVCBLocal) len() int { return len(s.Data) } func (s *SVCBLocal) unpack(b []byte) error { - s.Data = append([]byte(nil), b...) + s.Data = cloneSlice(b) return nil } @@ -849,9 +840,7 @@ func (s *SVCBLocal) parse(b string) error { } func (s *SVCBLocal) copy() SVCBKeyValue { - return &SVCBLocal{s.KeyCode, - append([]byte(nil), s.Data...), - } + return &SVCBLocal{s.KeyCode, cloneSlice(s.Data)} } func (rr *SVCB) String() string { @@ -867,8 +856,8 @@ func (rr *SVCB) String() string { // areSVCBPairArraysEqual checks if SVCBKeyValue arrays are equal after sorting their // copies. arrA and arrB have equal lengths, otherwise zduplicate.go wouldn't call this function. func areSVCBPairArraysEqual(a []SVCBKeyValue, b []SVCBKeyValue) bool { - a = append([]SVCBKeyValue(nil), a...) - b = append([]SVCBKeyValue(nil), b...) + a = cloneSlice(a) + b = cloneSlice(b) sort.Slice(a, func(i, j int) bool { return a[i].Key() < a[j].Key() }) sort.Slice(b, func(i, j int) bool { return b[i].Key() < b[j].Key() }) for i, e := range a { diff --git a/vendor/github.com/miekg/dns/types.go b/vendor/github.com/miekg/dns/types.go index a34ab602f..03afeccda 100644 --- a/vendor/github.com/miekg/dns/types.go +++ b/vendor/github.com/miekg/dns/types.go @@ -198,7 +198,7 @@ const ( _CD = 1 << 4 // checking disabled ) -// Various constants used in the LOC RR. See RFC 1887. +// Various constants used in the LOC RR. See RFC 1876. const ( LOC_EQUATOR = 1 << 31 // RFC 1876, Section 2. LOC_PRIMEMERIDIAN = 1 << 31 // RFC 1876, Section 2. 
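For context on the LOC hunks below: RFC 1876 packs SIZE, HORIZ PRE, and VERT PRE into one octet each, a base-10 mantissa in the high nibble and a decimal exponent in the low nibble, denominated in centimeters, and the reworked cmToM now does the nibble splitting itself. A small decoding sketch under that reading; the helper name is hypothetical:

```go
package main

import "fmt"

// rfc1876ToCm expands one packed precision octet: the high nibble is a
// mantissa 0-9 and the low nibble a power of ten, both in centimeters.
func rfc1876ToCm(x uint8) uint64 {
	m := uint64(x & 0xf0 >> 4)
	e := uint64(x & 0x0f)
	for ; e > 0; e-- {
		m *= 10
	}
	return m
}

func main() {
	// 0x12 -> 1 * 10^2 cm = 1 m, the RFC 1876 default SIZE.
	fmt.Println(rfc1876ToCm(0x12), "cm") // 100 cm
	// 0x29 -> 2 * 10^9 cm = 20,000 km.
	fmt.Println(rfc1876ToCm(0x29), "cm") // 2000000000 cm
}
```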
@@ -631,8 +631,8 @@ func nextByte(s string, offset int) (byte, int) { return 0, 0 case 2, 3: // too short to be \ddd default: // maybe \ddd - if isDigit(s[offset+1]) && isDigit(s[offset+2]) && isDigit(s[offset+3]) { - return dddStringToByte(s[offset+1:]), 4 + if isDDD(s[offset+1:]) { + return dddToByte(s[offset+1:]), 4 } } // not \ddd, just an RFC 1035 "quoted" character @@ -792,7 +792,10 @@ type LOC struct { // cmToM takes a cm value expressed in RFC 1876 SIZE mantissa/exponent // format and returns a string in m (two decimals for the cm). -func cmToM(m, e uint8) string { +func cmToM(x uint8) string { + m := x & 0xf0 >> 4 + e := x & 0x0f + if e < 2 { if e == 1 { m *= 10 @@ -848,10 +851,9 @@ func (rr *LOC) String() string { s += fmt.Sprintf("%.0fm ", alt) } - s += cmToM(rr.Size&0xf0>>4, rr.Size&0x0f) + "m " - s += cmToM(rr.HorizPre&0xf0>>4, rr.HorizPre&0x0f) + "m " - s += cmToM(rr.VertPre&0xf0>>4, rr.VertPre&0x0f) + "m" - + s += cmToM(rr.Size) + "m " + s += cmToM(rr.HorizPre) + "m " + s += cmToM(rr.VertPre) + "m" return s } @@ -1531,7 +1533,7 @@ func (a *APLPrefix) str() string { // equals reports whether two APL prefixes are identical. func (a *APLPrefix) equals(b *APLPrefix) bool { return a.Negation == b.Negation && - bytes.Equal(a.Network.IP, b.Network.IP) && + a.Network.IP.Equal(b.Network.IP) && bytes.Equal(a.Network.Mask, b.Network.Mask) } @@ -1599,21 +1601,19 @@ func euiToString(eui uint64, bits int) (hex string) { return } -// copyIP returns a copy of ip. -func copyIP(ip net.IP) net.IP { - p := make(net.IP, len(ip)) - copy(p, ip) - return p +// cloneSlice returns a shallow copy of s. +func cloneSlice[E any, S ~[]E](s S) S { + if s == nil { + return nil + } + return append(S(nil), s...) } // copyNet returns a copy of a subnet. func copyNet(n net.IPNet) net.IPNet { - m := make(net.IPMask, len(n.Mask)) - copy(m, n.Mask) - return net.IPNet{ - IP: copyIP(n.IP), - Mask: m, + IP: cloneSlice(n.IP), + Mask: cloneSlice(n.Mask), } } diff --git a/vendor/github.com/miekg/dns/udp_windows.go b/vendor/github.com/miekg/dns/udp_windows.go index 8dffc4bd0..a259b67e4 100644 --- a/vendor/github.com/miekg/dns/udp_windows.go +++ b/vendor/github.com/miekg/dns/udp_windows.go @@ -1,6 +1,9 @@ //go:build windows // +build windows +// TODO(tmthrgd): Remove this Windows-specific code if go.dev/issue/7175 and +// go.dev/issue/7174 are ever fixed. + package dns import "net" @@ -15,7 +18,6 @@ func (s *SessionUDP) RemoteAddr() net.Addr { return s.raddr } // ReadFromSessionUDP acts just like net.UDPConn.ReadFrom(), but returns a session object instead of a // net.UDPAddr. -// TODO(fastest963): Once go1.10 is released, use ReadMsgUDP. func ReadFromSessionUDP(conn *net.UDPConn, b []byte) (int, *SessionUDP, error) { n, raddr, err := conn.ReadFrom(b) if err != nil { @@ -25,12 +27,9 @@ func ReadFromSessionUDP(conn *net.UDPConn, b []byte) (int, *SessionUDP, error) { } // WriteToSessionUDP acts just like net.UDPConn.WriteTo(), but uses a *SessionUDP instead of a net.Addr. -// TODO(fastest963): Once go1.10 is released, use WriteMsgUDP. func WriteToSessionUDP(conn *net.UDPConn, b []byte, session *SessionUDP) (int, error) { return conn.WriteTo(b, session.raddr) } -// TODO(fastest963): Once go1.10 is released and we can use *MsgUDP methods -// use the standard method in udp.go for these. 
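The cloneSlice generic added in types.go above replaces copyIP and several open-coded make-and-copy blocks. One behavioral detail worth noting: it preserves nil (a nil input clones to nil), whereas the old make-then-copy approach turned a nil IP into an empty non-nil slice. A short demonstration reusing the helper as defined in the patch:

```go
package main

import "fmt"

// cloneSlice matches the generic helper added in types.go.
func cloneSlice[E any, S ~[]E](s S) S {
	if s == nil {
		return nil
	}
	return append(S(nil), s...)
}

func main() {
	var nilIP []byte
	ip := []byte{192, 0, 2, 1}

	fmt.Println(cloneSlice(nilIP) == nil) // true: nil stays nil

	c := cloneSlice(ip)
	c[0] = 10
	fmt.Println(ip[0], c[0]) // 192 10: independent backing arrays
}
```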
func setUDPSocketOptions(*net.UDPConn) error { return nil } func parseDstFromOOB([]byte, net.IP) net.IP { return nil } diff --git a/vendor/github.com/miekg/dns/version.go b/vendor/github.com/miekg/dns/version.go index 556022216..f03a169c2 100644 --- a/vendor/github.com/miekg/dns/version.go +++ b/vendor/github.com/miekg/dns/version.go @@ -3,7 +3,7 @@ package dns import "fmt" // Version is current version of this library. -var Version = v{1, 1, 51} +var Version = v{1, 1, 53} // v holds the version of this library. type v struct { diff --git a/vendor/github.com/miekg/dns/ztypes.go b/vendor/github.com/miekg/dns/ztypes.go index 17543ea47..1b6f43200 100644 --- a/vendor/github.com/miekg/dns/ztypes.go +++ b/vendor/github.com/miekg/dns/ztypes.go @@ -263,6 +263,7 @@ func (rr *A) len(off int, compression map[string]struct{}) int { } return l } + func (rr *AAAA) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) if len(rr.AAAA) != 0 { @@ -270,12 +271,14 @@ func (rr *AAAA) len(off int, compression map[string]struct{}) int { } return l } + func (rr *AFSDB) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += 2 // Subtype l += domainNameLen(rr.Hostname, off+l, compression, false) return l } + func (rr *AMTRELAY) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l++ // Precedence @@ -290,10 +293,12 @@ func (rr *AMTRELAY) len(off int, compression map[string]struct{}) int { } return l } + func (rr *ANY) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) return l } + func (rr *APL) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) for _, x := range rr.Prefixes { @@ -301,6 +306,7 @@ func (rr *APL) len(off int, compression map[string]struct{}) int { } return l } + func (rr *AVC) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) for _, x := range rr.Txt { @@ -308,6 +314,7 @@ func (rr *AVC) len(off int, compression map[string]struct{}) int { } return l } + func (rr *CAA) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l++ // Flag @@ -315,6 +322,7 @@ func (rr *CAA) len(off int, compression map[string]struct{}) int { l += len(rr.Value) return l } + func (rr *CERT) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += 2 // Type @@ -323,21 +331,25 @@ func (rr *CERT) len(off int, compression map[string]struct{}) int { l += base64.StdEncoding.DecodedLen(len(rr.Certificate)) return l } + func (rr *CNAME) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += domainNameLen(rr.Target, off+l, compression, true) return l } + func (rr *DHCID) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += base64.StdEncoding.DecodedLen(len(rr.Digest)) return l } + func (rr *DNAME) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += domainNameLen(rr.Target, off+l, compression, false) return l } + func (rr *DNSKEY) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += 2 // Flags @@ -346,6 +358,7 @@ func (rr *DNSKEY) len(off int, compression map[string]struct{}) int { l += base64.StdEncoding.DecodedLen(len(rr.PublicKey)) return l } + func (rr *DS) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += 2 // KeyTag @@ -354,26 +367,31 @@ func (rr *DS) len(off int, 
compression map[string]struct{}) int { l += len(rr.Digest) / 2 return l } + func (rr *EID) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += len(rr.Endpoint) / 2 return l } + func (rr *EUI48) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += 6 // Address return l } + func (rr *EUI64) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += 8 // Address return l } + func (rr *GID) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += 4 // Gid return l } + func (rr *GPOS) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += len(rr.Longitude) + 1 @@ -381,12 +399,14 @@ func (rr *GPOS) len(off int, compression map[string]struct{}) int { l += len(rr.Altitude) + 1 return l } + func (rr *HINFO) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += len(rr.Cpu) + 1 l += len(rr.Os) + 1 return l } + func (rr *HIP) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l++ // HitLength @@ -399,6 +419,7 @@ func (rr *HIP) len(off int, compression map[string]struct{}) int { } return l } + func (rr *IPSECKEY) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l++ // Precedence @@ -415,12 +436,14 @@ func (rr *IPSECKEY) len(off int, compression map[string]struct{}) int { l += base64.StdEncoding.DecodedLen(len(rr.PublicKey)) return l } + func (rr *KX) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += 2 // Preference l += domainNameLen(rr.Exchanger, off+l, compression, false) return l } + func (rr *L32) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += 2 // Preference @@ -429,12 +452,14 @@ func (rr *L32) len(off int, compression map[string]struct{}) int { } return l } + func (rr *L64) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += 2 // Preference l += 8 // Locator64 return l } + func (rr *LOC) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l++ // Version @@ -446,49 +471,58 @@ func (rr *LOC) len(off int, compression map[string]struct{}) int { l += 4 // Altitude return l } + func (rr *LP) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += 2 // Preference l += domainNameLen(rr.Fqdn, off+l, compression, false) return l } + func (rr *MB) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += domainNameLen(rr.Mb, off+l, compression, true) return l } + func (rr *MD) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += domainNameLen(rr.Md, off+l, compression, true) return l } + func (rr *MF) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += domainNameLen(rr.Mf, off+l, compression, true) return l } + func (rr *MG) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += domainNameLen(rr.Mg, off+l, compression, true) return l } + func (rr *MINFO) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += domainNameLen(rr.Rmail, off+l, compression, true) l += domainNameLen(rr.Email, off+l, compression, true) return l } + func (rr *MR) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += domainNameLen(rr.Mr, off+l, 
compression, true) return l } + func (rr *MX) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += 2 // Preference l += domainNameLen(rr.Mx, off+l, compression, true) return l } + func (rr *NAPTR) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += 2 // Order @@ -499,17 +533,20 @@ func (rr *NAPTR) len(off int, compression map[string]struct{}) int { l += domainNameLen(rr.Replacement, off+l, compression, false) return l } + func (rr *NID) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += 2 // Preference l += 8 // NodeID return l } + func (rr *NIMLOC) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += len(rr.Locator) / 2 return l } + func (rr *NINFO) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) for _, x := range rr.ZSData { @@ -517,16 +554,19 @@ func (rr *NINFO) len(off int, compression map[string]struct{}) int { } return l } + func (rr *NS) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += domainNameLen(rr.Ns, off+l, compression, true) return l } + func (rr *NSAPPTR) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += domainNameLen(rr.Ptr, off+l, compression, false) return l } + func (rr *NSEC3PARAM) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l++ // Hash @@ -536,21 +576,25 @@ func (rr *NSEC3PARAM) len(off int, compression map[string]struct{}) int { l += len(rr.Salt) / 2 return l } + func (rr *NULL) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += len(rr.Data) return l } + func (rr *OPENPGPKEY) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += base64.StdEncoding.DecodedLen(len(rr.PublicKey)) return l } + func (rr *PTR) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += domainNameLen(rr.Ptr, off+l, compression, true) return l } + func (rr *PX) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += 2 // Preference @@ -558,11 +602,13 @@ func (rr *PX) len(off int, compression map[string]struct{}) int { l += domainNameLen(rr.Mapx400, off+l, compression, false) return l } + func (rr *RFC3597) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += len(rr.Rdata) / 2 return l } + func (rr *RKEY) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += 2 // Flags @@ -571,12 +617,14 @@ func (rr *RKEY) len(off int, compression map[string]struct{}) int { l += base64.StdEncoding.DecodedLen(len(rr.PublicKey)) return l } + func (rr *RP) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += domainNameLen(rr.Mbox, off+l, compression, false) l += domainNameLen(rr.Txt, off+l, compression, false) return l } + func (rr *RRSIG) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += 2 // TypeCovered @@ -590,12 +638,14 @@ func (rr *RRSIG) len(off int, compression map[string]struct{}) int { l += base64.StdEncoding.DecodedLen(len(rr.Signature)) return l } + func (rr *RT) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += 2 // Preference l += domainNameLen(rr.Host, off+l, compression, false) return l } + func (rr *SMIMEA) len(off int, compression map[string]struct{}) 
int { l := rr.Hdr.len(off, compression) l++ // Usage @@ -604,6 +654,7 @@ func (rr *SMIMEA) len(off int, compression map[string]struct{}) int { l += len(rr.Certificate) / 2 return l } + func (rr *SOA) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += domainNameLen(rr.Ns, off+l, compression, true) @@ -615,6 +666,7 @@ func (rr *SOA) len(off int, compression map[string]struct{}) int { l += 4 // Minttl return l } + func (rr *SPF) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) for _, x := range rr.Txt { @@ -622,6 +674,7 @@ func (rr *SPF) len(off int, compression map[string]struct{}) int { } return l } + func (rr *SRV) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += 2 // Priority @@ -630,6 +683,7 @@ func (rr *SRV) len(off int, compression map[string]struct{}) int { l += domainNameLen(rr.Target, off+l, compression, false) return l } + func (rr *SSHFP) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l++ // Algorithm @@ -637,6 +691,7 @@ func (rr *SSHFP) len(off int, compression map[string]struct{}) int { l += len(rr.FingerPrint) / 2 return l } + func (rr *SVCB) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += 2 // Priority @@ -646,6 +701,7 @@ func (rr *SVCB) len(off int, compression map[string]struct{}) int { } return l } + func (rr *TA) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += 2 // KeyTag @@ -654,12 +710,14 @@ func (rr *TA) len(off int, compression map[string]struct{}) int { l += len(rr.Digest) / 2 return l } + func (rr *TALINK) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += domainNameLen(rr.PreviousName, off+l, compression, false) l += domainNameLen(rr.NextName, off+l, compression, false) return l } + func (rr *TKEY) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += domainNameLen(rr.Algorithm, off+l, compression, false) @@ -673,6 +731,7 @@ func (rr *TKEY) len(off int, compression map[string]struct{}) int { l += len(rr.OtherData) / 2 return l } + func (rr *TLSA) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l++ // Usage @@ -681,6 +740,7 @@ func (rr *TLSA) len(off int, compression map[string]struct{}) int { l += len(rr.Certificate) / 2 return l } + func (rr *TSIG) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += domainNameLen(rr.Algorithm, off+l, compression, false) @@ -694,6 +754,7 @@ func (rr *TSIG) len(off int, compression map[string]struct{}) int { l += len(rr.OtherData) / 2 return l } + func (rr *TXT) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) for _, x := range rr.Txt { @@ -701,16 +762,19 @@ func (rr *TXT) len(off int, compression map[string]struct{}) int { } return l } + func (rr *UID) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += 4 // Uid return l } + func (rr *UINFO) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += len(rr.Uinfo) + 1 return l } + func (rr *URI) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += 2 // Priority @@ -718,11 +782,13 @@ func (rr *URI) len(off int, compression map[string]struct{}) int { l += len(rr.Target) return l } + func (rr *X25) len(off int, compression map[string]struct{}) int { l 
:= rr.Hdr.len(off, compression) l += len(rr.PSDNAddress) + 1 return l } + func (rr *ZONEMD) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += 4 // Serial @@ -734,20 +800,31 @@ func (rr *ZONEMD) len(off int, compression map[string]struct{}) int { // copy() functions func (rr *A) copy() RR { - return &A{rr.Hdr, copyIP(rr.A)} + return &A{rr.Hdr, cloneSlice(rr.A)} } + func (rr *AAAA) copy() RR { - return &AAAA{rr.Hdr, copyIP(rr.AAAA)} + return &AAAA{rr.Hdr, cloneSlice(rr.AAAA)} } + func (rr *AFSDB) copy() RR { return &AFSDB{rr.Hdr, rr.Subtype, rr.Hostname} } + func (rr *AMTRELAY) copy() RR { - return &AMTRELAY{rr.Hdr, rr.Precedence, rr.GatewayType, copyIP(rr.GatewayAddr), rr.GatewayHost} + return &AMTRELAY{ + rr.Hdr, + rr.Precedence, + rr.GatewayType, + cloneSlice(rr.GatewayAddr), + rr.GatewayHost, + } } + func (rr *ANY) copy() RR { return &ANY{rr.Hdr} } + func (rr *APL) copy() RR { Prefixes := make([]APLPrefix, len(rr.Prefixes)) for i, e := range rr.Prefixes { @@ -755,153 +832,270 @@ func (rr *APL) copy() RR { } return &APL{rr.Hdr, Prefixes} } + func (rr *AVC) copy() RR { - Txt := make([]string, len(rr.Txt)) - copy(Txt, rr.Txt) - return &AVC{rr.Hdr, Txt} + return &AVC{rr.Hdr, cloneSlice(rr.Txt)} } + func (rr *CAA) copy() RR { - return &CAA{rr.Hdr, rr.Flag, rr.Tag, rr.Value} + return &CAA{ + rr.Hdr, + rr.Flag, + rr.Tag, + rr.Value, + } } + func (rr *CDNSKEY) copy() RR { return &CDNSKEY{*rr.DNSKEY.copy().(*DNSKEY)} } + func (rr *CDS) copy() RR { return &CDS{*rr.DS.copy().(*DS)} } + func (rr *CERT) copy() RR { - return &CERT{rr.Hdr, rr.Type, rr.KeyTag, rr.Algorithm, rr.Certificate} + return &CERT{ + rr.Hdr, + rr.Type, + rr.KeyTag, + rr.Algorithm, + rr.Certificate, + } } + func (rr *CNAME) copy() RR { return &CNAME{rr.Hdr, rr.Target} } + func (rr *CSYNC) copy() RR { - TypeBitMap := make([]uint16, len(rr.TypeBitMap)) - copy(TypeBitMap, rr.TypeBitMap) - return &CSYNC{rr.Hdr, rr.Serial, rr.Flags, TypeBitMap} + return &CSYNC{ + rr.Hdr, + rr.Serial, + rr.Flags, + cloneSlice(rr.TypeBitMap), + } } + func (rr *DHCID) copy() RR { return &DHCID{rr.Hdr, rr.Digest} } + func (rr *DLV) copy() RR { return &DLV{*rr.DS.copy().(*DS)} } + func (rr *DNAME) copy() RR { return &DNAME{rr.Hdr, rr.Target} } + func (rr *DNSKEY) copy() RR { - return &DNSKEY{rr.Hdr, rr.Flags, rr.Protocol, rr.Algorithm, rr.PublicKey} + return &DNSKEY{ + rr.Hdr, + rr.Flags, + rr.Protocol, + rr.Algorithm, + rr.PublicKey, + } } + func (rr *DS) copy() RR { - return &DS{rr.Hdr, rr.KeyTag, rr.Algorithm, rr.DigestType, rr.Digest} + return &DS{ + rr.Hdr, + rr.KeyTag, + rr.Algorithm, + rr.DigestType, + rr.Digest, + } } + func (rr *EID) copy() RR { return &EID{rr.Hdr, rr.Endpoint} } + func (rr *EUI48) copy() RR { return &EUI48{rr.Hdr, rr.Address} } + func (rr *EUI64) copy() RR { return &EUI64{rr.Hdr, rr.Address} } + func (rr *GID) copy() RR { return &GID{rr.Hdr, rr.Gid} } + func (rr *GPOS) copy() RR { - return &GPOS{rr.Hdr, rr.Longitude, rr.Latitude, rr.Altitude} + return &GPOS{ + rr.Hdr, + rr.Longitude, + rr.Latitude, + rr.Altitude, + } } + func (rr *HINFO) copy() RR { return &HINFO{rr.Hdr, rr.Cpu, rr.Os} } + func (rr *HIP) copy() RR { - RendezvousServers := make([]string, len(rr.RendezvousServers)) - copy(RendezvousServers, rr.RendezvousServers) - return &HIP{rr.Hdr, rr.HitLength, rr.PublicKeyAlgorithm, rr.PublicKeyLength, rr.Hit, rr.PublicKey, RendezvousServers} + return &HIP{ + rr.Hdr, + rr.HitLength, + rr.PublicKeyAlgorithm, + rr.PublicKeyLength, + rr.Hit, + rr.PublicKey, + 
cloneSlice(rr.RendezvousServers), + } } + func (rr *HTTPS) copy() RR { return &HTTPS{*rr.SVCB.copy().(*SVCB)} } + func (rr *IPSECKEY) copy() RR { - return &IPSECKEY{rr.Hdr, rr.Precedence, rr.GatewayType, rr.Algorithm, copyIP(rr.GatewayAddr), rr.GatewayHost, rr.PublicKey} + return &IPSECKEY{ + rr.Hdr, + rr.Precedence, + rr.GatewayType, + rr.Algorithm, + cloneSlice(rr.GatewayAddr), + rr.GatewayHost, + rr.PublicKey, + } } + func (rr *KEY) copy() RR { return &KEY{*rr.DNSKEY.copy().(*DNSKEY)} } + func (rr *KX) copy() RR { return &KX{rr.Hdr, rr.Preference, rr.Exchanger} } + func (rr *L32) copy() RR { - return &L32{rr.Hdr, rr.Preference, copyIP(rr.Locator32)} + return &L32{rr.Hdr, rr.Preference, cloneSlice(rr.Locator32)} } + func (rr *L64) copy() RR { return &L64{rr.Hdr, rr.Preference, rr.Locator64} } + func (rr *LOC) copy() RR { - return &LOC{rr.Hdr, rr.Version, rr.Size, rr.HorizPre, rr.VertPre, rr.Latitude, rr.Longitude, rr.Altitude} + return &LOC{ + rr.Hdr, + rr.Version, + rr.Size, + rr.HorizPre, + rr.VertPre, + rr.Latitude, + rr.Longitude, + rr.Altitude, + } } + func (rr *LP) copy() RR { return &LP{rr.Hdr, rr.Preference, rr.Fqdn} } + func (rr *MB) copy() RR { return &MB{rr.Hdr, rr.Mb} } + func (rr *MD) copy() RR { return &MD{rr.Hdr, rr.Md} } + func (rr *MF) copy() RR { return &MF{rr.Hdr, rr.Mf} } + func (rr *MG) copy() RR { return &MG{rr.Hdr, rr.Mg} } + func (rr *MINFO) copy() RR { return &MINFO{rr.Hdr, rr.Rmail, rr.Email} } + func (rr *MR) copy() RR { return &MR{rr.Hdr, rr.Mr} } + func (rr *MX) copy() RR { return &MX{rr.Hdr, rr.Preference, rr.Mx} } + func (rr *NAPTR) copy() RR { - return &NAPTR{rr.Hdr, rr.Order, rr.Preference, rr.Flags, rr.Service, rr.Regexp, rr.Replacement} + return &NAPTR{ + rr.Hdr, + rr.Order, + rr.Preference, + rr.Flags, + rr.Service, + rr.Regexp, + rr.Replacement, + } } + func (rr *NID) copy() RR { return &NID{rr.Hdr, rr.Preference, rr.NodeID} } + func (rr *NIMLOC) copy() RR { return &NIMLOC{rr.Hdr, rr.Locator} } + func (rr *NINFO) copy() RR { - ZSData := make([]string, len(rr.ZSData)) - copy(ZSData, rr.ZSData) - return &NINFO{rr.Hdr, ZSData} + return &NINFO{rr.Hdr, cloneSlice(rr.ZSData)} } + func (rr *NS) copy() RR { return &NS{rr.Hdr, rr.Ns} } + func (rr *NSAPPTR) copy() RR { return &NSAPPTR{rr.Hdr, rr.Ptr} } + func (rr *NSEC) copy() RR { - TypeBitMap := make([]uint16, len(rr.TypeBitMap)) - copy(TypeBitMap, rr.TypeBitMap) - return &NSEC{rr.Hdr, rr.NextDomain, TypeBitMap} + return &NSEC{rr.Hdr, rr.NextDomain, cloneSlice(rr.TypeBitMap)} } + func (rr *NSEC3) copy() RR { - TypeBitMap := make([]uint16, len(rr.TypeBitMap)) - copy(TypeBitMap, rr.TypeBitMap) - return &NSEC3{rr.Hdr, rr.Hash, rr.Flags, rr.Iterations, rr.SaltLength, rr.Salt, rr.HashLength, rr.NextDomain, TypeBitMap} + return &NSEC3{ + rr.Hdr, + rr.Hash, + rr.Flags, + rr.Iterations, + rr.SaltLength, + rr.Salt, + rr.HashLength, + rr.NextDomain, + cloneSlice(rr.TypeBitMap), + } } + func (rr *NSEC3PARAM) copy() RR { - return &NSEC3PARAM{rr.Hdr, rr.Hash, rr.Flags, rr.Iterations, rr.SaltLength, rr.Salt} + return &NSEC3PARAM{ + rr.Hdr, + rr.Hash, + rr.Flags, + rr.Iterations, + rr.SaltLength, + rr.Salt, + } } + func (rr *NULL) copy() RR { return &NULL{rr.Hdr, rr.Data} } + func (rr *OPENPGPKEY) copy() RR { return &OPENPGPKEY{rr.Hdr, rr.PublicKey} } + func (rr *OPT) copy() RR { Option := make([]EDNS0, len(rr.Option)) for i, e := range rr.Option { @@ -909,86 +1103,205 @@ func (rr *OPT) copy() RR { } return &OPT{rr.Hdr, Option} } + func (rr *PTR) copy() RR { return &PTR{rr.Hdr, rr.Ptr} } + func (rr *PX) copy() RR { - return 
&PX{rr.Hdr, rr.Preference, rr.Map822, rr.Mapx400} + return &PX{ + rr.Hdr, + rr.Preference, + rr.Map822, + rr.Mapx400, + } } + func (rr *RFC3597) copy() RR { return &RFC3597{rr.Hdr, rr.Rdata} } + func (rr *RKEY) copy() RR { - return &RKEY{rr.Hdr, rr.Flags, rr.Protocol, rr.Algorithm, rr.PublicKey} + return &RKEY{ + rr.Hdr, + rr.Flags, + rr.Protocol, + rr.Algorithm, + rr.PublicKey, + } } + func (rr *RP) copy() RR { return &RP{rr.Hdr, rr.Mbox, rr.Txt} } + func (rr *RRSIG) copy() RR { - return &RRSIG{rr.Hdr, rr.TypeCovered, rr.Algorithm, rr.Labels, rr.OrigTtl, rr.Expiration, rr.Inception, rr.KeyTag, rr.SignerName, rr.Signature} + return &RRSIG{ + rr.Hdr, + rr.TypeCovered, + rr.Algorithm, + rr.Labels, + rr.OrigTtl, + rr.Expiration, + rr.Inception, + rr.KeyTag, + rr.SignerName, + rr.Signature, + } } + func (rr *RT) copy() RR { return &RT{rr.Hdr, rr.Preference, rr.Host} } + func (rr *SIG) copy() RR { return &SIG{*rr.RRSIG.copy().(*RRSIG)} } + func (rr *SMIMEA) copy() RR { - return &SMIMEA{rr.Hdr, rr.Usage, rr.Selector, rr.MatchingType, rr.Certificate} + return &SMIMEA{ + rr.Hdr, + rr.Usage, + rr.Selector, + rr.MatchingType, + rr.Certificate, + } } + func (rr *SOA) copy() RR { - return &SOA{rr.Hdr, rr.Ns, rr.Mbox, rr.Serial, rr.Refresh, rr.Retry, rr.Expire, rr.Minttl} + return &SOA{ + rr.Hdr, + rr.Ns, + rr.Mbox, + rr.Serial, + rr.Refresh, + rr.Retry, + rr.Expire, + rr.Minttl, + } } + func (rr *SPF) copy() RR { - Txt := make([]string, len(rr.Txt)) - copy(Txt, rr.Txt) - return &SPF{rr.Hdr, Txt} + return &SPF{rr.Hdr, cloneSlice(rr.Txt)} } + func (rr *SRV) copy() RR { - return &SRV{rr.Hdr, rr.Priority, rr.Weight, rr.Port, rr.Target} + return &SRV{ + rr.Hdr, + rr.Priority, + rr.Weight, + rr.Port, + rr.Target, + } } + func (rr *SSHFP) copy() RR { - return &SSHFP{rr.Hdr, rr.Algorithm, rr.Type, rr.FingerPrint} + return &SSHFP{ + rr.Hdr, + rr.Algorithm, + rr.Type, + rr.FingerPrint, + } } + func (rr *SVCB) copy() RR { Value := make([]SVCBKeyValue, len(rr.Value)) for i, e := range rr.Value { Value[i] = e.copy() } - return &SVCB{rr.Hdr, rr.Priority, rr.Target, Value} + return &SVCB{ + rr.Hdr, + rr.Priority, + rr.Target, + Value, + } } + func (rr *TA) copy() RR { - return &TA{rr.Hdr, rr.KeyTag, rr.Algorithm, rr.DigestType, rr.Digest} + return &TA{ + rr.Hdr, + rr.KeyTag, + rr.Algorithm, + rr.DigestType, + rr.Digest, + } } + func (rr *TALINK) copy() RR { return &TALINK{rr.Hdr, rr.PreviousName, rr.NextName} } + func (rr *TKEY) copy() RR { - return &TKEY{rr.Hdr, rr.Algorithm, rr.Inception, rr.Expiration, rr.Mode, rr.Error, rr.KeySize, rr.Key, rr.OtherLen, rr.OtherData} + return &TKEY{ + rr.Hdr, + rr.Algorithm, + rr.Inception, + rr.Expiration, + rr.Mode, + rr.Error, + rr.KeySize, + rr.Key, + rr.OtherLen, + rr.OtherData, + } } + func (rr *TLSA) copy() RR { - return &TLSA{rr.Hdr, rr.Usage, rr.Selector, rr.MatchingType, rr.Certificate} + return &TLSA{ + rr.Hdr, + rr.Usage, + rr.Selector, + rr.MatchingType, + rr.Certificate, + } } + func (rr *TSIG) copy() RR { - return &TSIG{rr.Hdr, rr.Algorithm, rr.TimeSigned, rr.Fudge, rr.MACSize, rr.MAC, rr.OrigId, rr.Error, rr.OtherLen, rr.OtherData} + return &TSIG{ + rr.Hdr, + rr.Algorithm, + rr.TimeSigned, + rr.Fudge, + rr.MACSize, + rr.MAC, + rr.OrigId, + rr.Error, + rr.OtherLen, + rr.OtherData, + } } + func (rr *TXT) copy() RR { - Txt := make([]string, len(rr.Txt)) - copy(Txt, rr.Txt) - return &TXT{rr.Hdr, Txt} + return &TXT{rr.Hdr, cloneSlice(rr.Txt)} } + func (rr *UID) copy() RR { return &UID{rr.Hdr, rr.Uid} } + func (rr *UINFO) copy() RR { return &UINFO{rr.Hdr, rr.Uinfo} } + 
func (rr *URI) copy() RR { - return &URI{rr.Hdr, rr.Priority, rr.Weight, rr.Target} + return &URI{ + rr.Hdr, + rr.Priority, + rr.Weight, + rr.Target, + } } + func (rr *X25) copy() RR { return &X25{rr.Hdr, rr.PSDNAddress} } + func (rr *ZONEMD) copy() RR { - return &ZONEMD{rr.Hdr, rr.Serial, rr.Scheme, rr.Hash, rr.Digest} + return &ZONEMD{ + rr.Hdr, + rr.Serial, + rr.Scheme, + rr.Hash, + rr.Digest, + } } diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsemfexporter/README.md b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsemfexporter/README.md index 83221bca0..e80b62911 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsemfexporter/README.md +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsemfexporter/README.md @@ -30,8 +30,8 @@ The following exporter configuration parameters are supported. |:---------------------------------------------|:----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| ------- | | `log_group_name` | Customized log group name which supports `{ClusterName}` and `{TaskId}` placeholders. One valid example is `/aws/metrics/{ClusterName}`. It will search for `ClusterName` (or `aws.ecs.cluster.name`) resource attribute in the metrics data and replace with the actual cluster name. If none of them are found in the resource attribute map, `{ClusterName}` will be replaced by `undefined`. Similar way, for the `{TaskId}`, it searches for `TaskId` (or `aws.ecs.task.id`) key in the resource attribute map. For `{NodeName}`, it searches for `NodeName` (or `k8s.node.name`) |"/metrics/default"| | `log_stream_name` | Customized log stream name which supports `{TaskId}`, `{ClusterName}`, `{NodeName}`, `{ContainerInstanceId}`, and `{TaskDefinitionFamily}` placeholders. One valid example is `{TaskId}`. It will search for `TaskId` (or `aws.ecs.task.id`) resource attribute in the metrics data and replace with the actual task id. If none of them are found in the resource attribute map, `{TaskId}` will be replaced by `undefined`. Similarly, for the `{TaskDefinitionFamily}`, it searches for `TaskDefinitionFamily` (or `aws.ecs.task.family`). For the `{ClusterName}`, it searches for `ClusterName` (or `aws.ecs.cluster.name`). For `{NodeName}`, it searches for `NodeName` (or `k8s.node.name`). For `{ContainerInstanceId}`, it searches for `ContainerInstanceId` (or `aws.ecs.container.instance.id`). (Note: ContainerInstanceId (or `aws.ecs.container.instance.id`) only works for AWS ECS EC2 launch type. |"otel-stream"| -| `log_retention` | LogRetention is the option to set the log retention policy for only newly created CloudWatch Log Groups. 
Defaults to Never Expire if not specified or set to 0. Possible values for retention in days are 1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180, 365, 400, 545, 731, 1827, 2192, 2557, 2922, 3288, or 3653. |"Never Expire"| -| `tags` | Tags is the option to set tags for the CloudWatch Log Group. If specified, please add at most 50 tags. Input is a string to string map like so: { 'key': 'value' }. Keys must be between 1-128 characters and follow the regex pattern: `^([\p{L}\p{Z}\p{N}_.:/=+\-@]+)$`(alphanumerics, whitespace, and _.:/=+-!). Values must be between 1-256 characters and follow the regex pattern: `^([\p{L}\p{Z}\p{N}_.:/=+\-@]*)$`(alphanumerics, whitespace, and _.:/=+-!). [Link to tagging restrictions](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_CreateLogGroup.html#:~:text=Required%3A%20Yes-,tags,-The%20key%2Dvalue) | No tags set | +| `log_retention` | LogRetention is the option to set the log retention policy for only newly created CloudWatch Log Groups. Defaults to Never Expire if not specified or set to 0. Possible values for retention in days are 1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180, 365, 400, 545, 731, 1827, 2192, 2557, 2922, 3288, or 3653. |"Never Expire"| +| `tags` | Tags is the option to set tags for the CloudWatch Log Group. If specified, please add at most 50 tags. Input is a string to string map like so: { 'key': 'value' }. Keys must be between 1-128 characters and follow the regex pattern: `^([\p{L}\p{Z}\p{N}_.:/=+\-@]+)$` (alphanumerics, whitespace, and _.:/=+-@). Values must be between 1-256 characters and follow the regex pattern: `^([\p{L}\p{Z}\p{N}_.:/=+\-@]*)$` (alphanumerics, whitespace, and _.:/=+-@). [Link to tagging restrictions](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_CreateLogGroup.html#:~:text=Required%3A%20Yes-,tags,-The%20key%2Dvalue) | No tags set | | `namespace` | Customized CloudWatch metrics namespace | "default" | | `endpoint` | Optionally override the default CloudWatch service endpoint. | | | `no_verify_ssl` | Enable or disable TLS certificate verification. | false | @@ -39,10 +39,10 @@ The following exporter configuration parameters are supported. | `region` | Send Structured Logs to AWS CloudWatch in a specific region. If this field is not present in config, environment variable "AWS_REGION" can then be used to set region. | determined by metadata | | `role_arn` | IAM role to upload segments to a different account. | | | `max_retries` | Maximum number of retries before abandoning an attempt to post data. | 1 | -| `dimension_rollup_option` | DimensionRollupOption is the option for metrics dimension rollup. Three options are available: `NoDimensionRollup`, `SingleDimensionRollupOnly` and `ZeroAndSingleDimensionRollup` |"ZeroAndSingleDimensionRollup" (Enable both zero dimension rollup and single dimension rollup)| +| `dimension_rollup_option` | DimensionRollupOption is the option for metrics dimension rollup. Three options are available: `NoDimensionRollup`, `SingleDimensionRollupOnly` and `ZeroAndSingleDimensionRollup`. The default value is `ZeroAndSingleDimensionRollup`. Enabling the feature gate `awsemf.nodimrollupdefault` will set the default to `NoDimensionRollup`. |"ZeroAndSingleDimensionRollup" (Enable both zero dimension rollup and single dimension rollup)| | `resource_to_telemetry_conversion` | "resource_to_telemetry_conversion" is the option for converting resource attributes to telemetry attributes. It has only one config option, `enabled`.
For metrics, if `enabled=true`, all the resource attributes will be converted to metric labels by default. See `Resource Attributes to Metric Labels` section below for examples. | `enabled=false` | | `output_destination` | "output_destination" is an option to specify the EMFExporter output. Currently, two options are available. "cloudwatch" or "stdout" | `cloudwatch` | -| `detailed_metrics` | Retain detailed datapoint values in exported metrics (e.g instead of exporting a quantile as a statistical value, preserve the quantile's population) | `false` | +| `detailed_metrics` | Retain detailed datapoint values in exported metrics (e.g instead of exporting a quantile as a statistical value, preserve the quantile's population) | `false` | | `parse_json_encoded_attr_values` | List of attribute keys whose corresponding values are JSON-encoded strings and will be converted to JSON structures in emf logs. For example, the attribute string value "{\\"x\\":5,\\"y\\":6}" will be converted to a json object: ```{"x": 5, "y": 6}``` | [ ] | | [`metric_declarations`](#metric_declaration) | List of rules for filtering exported metrics and their dimensions. | [ ] | | [`metric_descriptors`](#metric_descriptor) | List of rules for inserting or updating metric descriptors. | [ ] | diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsemfexporter/datapoint.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsemfexporter/datapoint.go index a3b1ccdd0..4aba2c5bb 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsemfexporter/datapoint.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsemfexporter/datapoint.go @@ -22,10 +22,10 @@ const ( summarySumSuffix = "_sum" ) -var ( - deltaMetricCalculator = aws.NewFloat64DeltaCalculator() - summaryMetricCalculator = aws.NewMetricCalculator(calculateSummaryDelta) -) +type emfCalculators struct { + delta aws.MetricCalculator + summary aws.MetricCalculator +} func calculateSummaryDelta(prev *aws.MetricValue, val interface{}, _ time.Time) (interface{}, bool) { metricEntry := val.(summaryMetricEntry) @@ -60,7 +60,7 @@ type dataPoints interface { // dataPoint: the adjusted data point // retained: indicates whether the data point is valid for further process // NOTE: It is an expensive call as it calculates the metric value. - CalculateDeltaDatapoints(i int, instrumentationScopeName string, detailedMetrics bool) (dataPoint []dataPoint, retained bool) + CalculateDeltaDatapoints(i int, instrumentationScopeName string, detailedMetrics bool, calculators *emfCalculators) (dataPoint []dataPoint, retained bool) } // deltaMetricMetadata contains the metadata required to perform rate/delta calculation @@ -106,7 +106,7 @@ type summaryMetricEntry struct { } // CalculateDeltaDatapoints retrieves the NumberDataPoint at the given index and performs rate/delta calculation if necessary. 
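The key refactor in this datapoint.go hunk is that emfCalculators gathers what were previously package-level calculators into per-translator state, and each CalculateDeltaDatapoints implementation below now receives it as an explicit parameter. A toy illustration of why per-instance state matters, using hypothetical names rather than the vendored aws.MetricCalculator API:

```go
package main

import "fmt"

// deltaCalc is a toy stand-in for the real calculator: it remembers the
// previous value per key and reports the difference.
type deltaCalc struct{ prev map[string]float64 }

func newDeltaCalc() *deltaCalc { return &deltaCalc{prev: map[string]float64{}} }

// delta returns (current - previous, true), or (0, false) on first sight.
func (c *deltaCalc) delta(key string, v float64) (float64, bool) {
	p, seen := c.prev[key]
	c.prev[key] = v
	if !seen {
		return 0, false
	}
	return v - p, true
}

func main() {
	// Two exporter instances, two calculators: their cumulative counters
	// no longer share (or clobber) each other's previous-value history.
	a, b := newDeltaCalc(), newDeltaCalc()
	a.delta("requests", 10)
	fmt.Println(a.delta("requests", 25)) // 15 true
	fmt.Println(b.delta("requests", 5))  // 0 false: b has its own history
}
```

With package-level globals, two exporter instances feeding metrics with identical keys could corrupt each other's history; per-instance calculators remove that coupling and give Shutdown a clear owner.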
-func (dps numberDataPointSlice) CalculateDeltaDatapoints(i int, instrumentationScopeName string, _ bool) ([]dataPoint, bool) { +func (dps numberDataPointSlice) CalculateDeltaDatapoints(i int, instrumentationScopeName string, _ bool, calculators *emfCalculators) ([]dataPoint, bool) { metric := dps.NumberDataPointSlice.At(i) labels := createLabels(metric.Attributes(), instrumentationScopeName) timestampMs := unixNanoToMilliseconds(metric.Timestamp()) @@ -124,7 +124,7 @@ func (dps numberDataPointSlice) CalculateDeltaDatapoints(i int, instrumentationS if dps.adjustToDelta { var deltaVal interface{} mKey := aws.NewKey(dps.deltaMetricMetadata, labels) - deltaVal, retained = deltaMetricCalculator.Calculate(mKey, metricVal, metric.Timestamp().AsTime()) + deltaVal, retained = calculators.delta.Calculate(mKey, metricVal, metric.Timestamp().AsTime()) // If a delta to the previous data point could not be computed use the current metric value instead if !retained && dps.retainInitialValueForDelta { @@ -146,7 +146,7 @@ func (dps numberDataPointSlice) CalculateDeltaDatapoints(i int, instrumentationS } // CalculateDeltaDatapoints retrieves the HistogramDataPoint at the given index. -func (dps histogramDataPointSlice) CalculateDeltaDatapoints(i int, instrumentationScopeName string, _ bool) ([]dataPoint, bool) { +func (dps histogramDataPointSlice) CalculateDeltaDatapoints(i int, instrumentationScopeName string, _ bool, _ *emfCalculators) ([]dataPoint, bool) { metric := dps.HistogramDataPointSlice.At(i) labels := createLabels(metric.Attributes(), instrumentationScopeName) timestamp := unixNanoToMilliseconds(metric.Timestamp()) @@ -165,7 +165,7 @@ func (dps histogramDataPointSlice) CalculateDeltaDatapoints(i int, instrumentati } // CalculateDeltaDatapoints retrieves the ExponentialHistogramDataPoint at the given index. -func (dps exponentialHistogramDataPointSlice) CalculateDeltaDatapoints(idx int, instrumentationScopeName string, _ bool) ([]dataPoint, bool) { +func (dps exponentialHistogramDataPointSlice) CalculateDeltaDatapoints(idx int, instrumentationScopeName string, _ bool, _ *emfCalculators) ([]dataPoint, bool) { metric := dps.ExponentialHistogramDataPointSlice.At(idx) scale := metric.Scale() @@ -247,7 +247,7 @@ func (dps exponentialHistogramDataPointSlice) CalculateDeltaDatapoints(idx int, } // CalculateDeltaDatapoints retrieves the SummaryDataPoint at the given index and perform calculation with sum and count while retain the quantile value. 
-func (dps summaryDataPointSlice) CalculateDeltaDatapoints(i int, instrumentationScopeName string, detailedMetrics bool) ([]dataPoint, bool) { +func (dps summaryDataPointSlice) CalculateDeltaDatapoints(i int, instrumentationScopeName string, detailedMetrics bool, calculators *emfCalculators) ([]dataPoint, bool) { metric := dps.SummaryDataPointSlice.At(i) labels := createLabels(metric.Attributes(), instrumentationScopeName) timestampMs := unixNanoToMilliseconds(metric.Timestamp()) @@ -261,7 +261,7 @@ func (dps summaryDataPointSlice) CalculateDeltaDatapoints(i int, instrumentation if dps.adjustToDelta { var delta interface{} mKey := aws.NewKey(dps.deltaMetricMetadata, labels) - delta, retained = summaryMetricCalculator.Calculate(mKey, summaryMetricEntry{sum, count}, metric.Timestamp().AsTime()) + delta, retained = calculators.summary.Calculate(mKey, summaryMetricEntry{sum, count}, metric.Timestamp().AsTime()) // If a delta to the previous data point could not be computed use the current metric value instead if !retained && dps.retainInitialValueForDelta { @@ -333,6 +333,7 @@ func getDataPoints(pmd pmetric.Metric, metadata cWMetricMetadata, logger *zap.Lo var dps dataPoints + //exhaustive:enforce switch pmd.Type() { case pmetric.MetricTypeGauge: metric := pmd.Gauge()
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsemfexporter/emf_exporter.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsemfexporter/emf_exporter.go index f861c026e..97de506a8 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsemfexporter/emf_exporter.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsemfexporter/emf_exporter.go @@ -71,6 +71,10 @@ func newEmfExporter(config *Config, set exporter.CreateSettings) (*emfExporter, pusherMap: map[cwlogs.PusherKey]cwlogs.Pusher{}, } + config.logger.Warn("the default value for DimensionRollupOption will be changing to NoDimensionRollup " + + "in a future release. See https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/23997 for more " + + "information") + return emfExporter, nil } @@ -177,7 +181,7 @@ func (emf *emfExporter) shutdown(_ context.Context) error { } } - return nil + return emf.metricTranslator.Shutdown() } func wrapErrorIfBadRequest(err error) error {
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsemfexporter/factory.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsemfexporter/factory.go index 8741cb771..d0cf78a5d 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsemfexporter/factory.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsemfexporter/factory.go @@ -9,6 +9,7 @@ import ( "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/exporter" "go.opentelemetry.io/collector/exporter/exporterhelper" + "go.opentelemetry.io/collector/featuregate" "go.uber.org/zap" "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsemfexporter/internal/metadata" @@ -16,6 +17,11 @@ import ( "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourcetotelemetry" ) +var defaultNoRollupfg = featuregate.GlobalRegistry().MustRegister("awsemf.nodimrollupdefault", featuregate.StageAlpha, + featuregate.WithRegisterFromVersion("v0.83.0"), + featuregate.WithRegisterDescription("Changes the default AWS EMF Exporter Dimension rollup option to "+ + "NoDimensionRollup")) + // NewFactory creates a factory for AWS EMF exporter. func NewFactory() exporter.Factory { return exporter.NewFactory( @@ -26,12 +32,18 @@ // CreateDefaultConfig creates the default configuration for exporter.
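The defaultNoRollupfg gate registered above follows the collector's standard featuregate pattern: register once at package init, then branch on IsEnabled(). A minimal sketch of the same pattern, using a hypothetical gate ID:

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/featuregate"
)

// exampleGate is a hypothetical gate ID, used purely for illustration.
var exampleGate = featuregate.GlobalRegistry().MustRegister(
	"example.newdefault",
	featuregate.StageAlpha, // alpha gates are disabled unless explicitly enabled
	featuregate.WithRegisterDescription("switches some default behavior"),
)

func main() {
	if exampleGate.IsEnabled() {
		fmt.Println("new default")
	} else {
		fmt.Println("old default") // what users get out of the box
	}
}
```

Alpha gates are off by default (enabled via the collector's --feature-gates flag), so the shipped default remains ZeroAndSingleDimensionRollup until a user opts in, which is exactly what createDefaultConfig checks next.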
func createDefaultConfig() component.Config { + var defaultDimensionRollupOption string + if defaultNoRollupfg.IsEnabled() { + defaultDimensionRollupOption = "NoDimensionRollup" + } else { + defaultDimensionRollupOption = "ZeroAndSingleDimensionRollup" + } return &Config{ AWSSessionSettings: awsutil.CreateDefaultSessionConfig(), LogGroupName: "", LogStreamName: "", Namespace: "", - DimensionRollupOption: "ZeroAndSingleDimensionRollup", + DimensionRollupOption: defaultDimensionRollupOption, Version: "1", RetainInitialValueOfDeltaMetric: false, OutputDestination: "cloudwatch", diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsemfexporter/grouped_metric.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsemfexporter/grouped_metric.go index 6161113f6..f26491218 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsemfexporter/grouped_metric.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsemfexporter/grouped_metric.go @@ -27,7 +27,7 @@ type metricInfo struct { } // addToGroupedMetric processes OT metrics and adds them into GroupedMetric buckets -func addToGroupedMetric(pmd pmetric.Metric, groupedMetrics map[interface{}]*groupedMetric, metadata cWMetricMetadata, patternReplaceSucceeded bool, logger *zap.Logger, descriptor map[string]MetricDescriptor, config *Config) error { +func addToGroupedMetric(pmd pmetric.Metric, groupedMetrics map[interface{}]*groupedMetric, metadata cWMetricMetadata, patternReplaceSucceeded bool, logger *zap.Logger, descriptor map[string]MetricDescriptor, config *Config, calculators *emfCalculators) error { dps := getDataPoints(pmd, metadata, logger) if dps == nil || dps.Len() == 0 { @@ -35,7 +35,7 @@ func addToGroupedMetric(pmd pmetric.Metric, groupedMetrics map[interface{}]*grou } for i := 0; i < dps.Len(); i++ { - dps, retained := dps.CalculateDeltaDatapoints(i, metadata.instrumentationScopeName, config.DetailedMetrics) + dps, retained := dps.CalculateDeltaDatapoints(i, metadata.instrumentationScopeName, config.DetailedMetrics, calculators) if !retained { continue } diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsemfexporter/metric_translator.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsemfexporter/metric_translator.go index e1fd70a5b..2c3b817be 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsemfexporter/metric_translator.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsemfexporter/metric_translator.go @@ -10,9 +10,11 @@ import ( "time" "go.opentelemetry.io/collector/pdata/pmetric" + "go.uber.org/multierr" "go.uber.org/zap" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/cwlogs" + aws "github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/metrics" ) const ( @@ -85,6 +87,7 @@ type cWMetricMetadata struct { type metricTranslator struct { metricDescriptor map[string]MetricDescriptor + calculators *emfCalculators } func newMetricTranslator(config Config) metricTranslator { @@ -94,15 +97,27 @@ func newMetricTranslator(config Config) metricTranslator { } return metricTranslator{ metricDescriptor: mt, + calculators: &emfCalculators{ + delta: aws.NewFloat64DeltaCalculator(), + summary: aws.NewMetricCalculator(calculateSummaryDelta), + }, } } +func (mt metricTranslator) Shutdown() error { + var errs error + errs = multierr.Append(errs, 
mt.calculators.delta.Shutdown()) + errs = multierr.Append(errs, mt.calculators.summary.Shutdown()) + return errs +} + // translateOTelToGroupedMetric converts OT metrics to Grouped Metric format. func (mt metricTranslator) translateOTelToGroupedMetric(rm pmetric.ResourceMetrics, groupedMetrics map[interface{}]*groupedMetric, config *Config) error { timestamp := time.Now().UnixNano() / int64(time.Millisecond) var instrumentationScopeName string cWNamespace := getNamespace(rm, config.Namespace) logGroup, logStream, patternReplaceSucceeded := getLogInfo(rm, cWNamespace, config) + deltaInitialValue := config.RetainInitialValueOfDeltaMetric ilms := rm.ScopeMetrics() var metricReceiver string @@ -120,16 +135,17 @@ metric := metrics.At(k) metadata := cWMetricMetadata{ groupedMetricMetadata: groupedMetricMetadata{ - namespace: cWNamespace, - timestampMs: timestamp, - logGroup: logGroup, - logStream: logStream, - metricDataType: metric.Type(), + namespace: cWNamespace, + timestampMs: timestamp, + logGroup: logGroup, + logStream: logStream, + metricDataType: metric.Type(), + retainInitialValueForDelta: deltaInitialValue, }, instrumentationScopeName: instrumentationScopeName, receiver: metricReceiver, } - err := addToGroupedMetric(metric, groupedMetrics, metadata, patternReplaceSucceeded, config.logger, mt.metricDescriptor, config) + err := addToGroupedMetric(metric, groupedMetrics, metadata, patternReplaceSucceeded, config.logger, mt.metricDescriptor, config, mt.calculators) if err != nil { return err }
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsxrayexporter/internal/translator/cause.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsxrayexporter/internal/translator/cause.go index 477861a8a..b5d477a0e 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsxrayexporter/internal/translator/cause.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsxrayexporter/internal/translator/cause.go @@ -49,6 +49,10 @@ func makeCause(span ptrace.Span, attributes map[string]pcommon.Value, resource p if val, ok := resource.Attributes().Get(conventions.AttributeTelemetrySDKLanguage); ok { language = val.Str() } + isRemote := false + if span.Kind() == ptrace.SpanKindClient || span.Kind() == ptrace.SpanKindProducer { + isRemote = true + } var exceptions []awsxray.Exception for i := 0; i < span.Events().Len(); i++ { @@ -70,7 +74,7 @@ func makeCause(span ptrace.Span, attributes map[string]pcommon.Value, resource p stacktrace = val.Str() } - parsed := parseException(exceptionType, message, stacktrace, language) + parsed := parseException(exceptionType, message, stacktrace, isRemote, language) exceptions = append(exceptions, parsed...) } } @@ -117,24 +121,35 @@ val, ok := span.Attributes().Get(conventions.AttributeHTTPStatusCode) switch { - case status.Code() != ptrace.StatusCodeError: - isError = false - isThrottle = false - isFault = false + // The segment status for HTTP spans is based on their http.statuscode, since we found that some HTTP + // spans do not set status.Code() but are always filled with http.statuscode case ok: code := val.Int() // We only differentiate between faults (server errors) and errors (client errors) for HTTP spans.
- if code >= 400 && code <= 499 { + switch { + case code >= 400 && code <= 499: isError = true isFault = false if code == 429 { isThrottle = true } - } else { + case code >= 500 && code <= 599: + isError = false + isThrottle = false + isFault = true + case status.Code() == ptrace.StatusCodeError: isError = false isThrottle = false isFault = true + default: + isError = false + isThrottle = false + isFault = false } + case status.Code() != ptrace.StatusCodeError: + isError = false + isThrottle = false + isFault = false default: isError = false isThrottle = false @@ -144,12 +159,13 @@ func makeCause(span ptrace.Span, attributes map[string]pcommon.Value, resource p return isError, isFault, isThrottle, filtered, cause } -func parseException(exceptionType string, message string, stacktrace string, language string) []awsxray.Exception { +func parseException(exceptionType string, message string, stacktrace string, isRemote bool, language string) []awsxray.Exception { exceptions := make([]awsxray.Exception, 0, 1) segmentID := newSegmentID() exceptions = append(exceptions, awsxray.Exception{ ID: aws.String(hex.EncodeToString(segmentID[:])), Type: aws.String(exceptionType), + Remote: aws.Bool(isRemote), Message: aws.String(message), }) @@ -181,6 +197,7 @@ func fillJavaStacktrace(stacktrace string, exceptions []awsxray.Exception) []aws // Skip first line containing top level message exception := &exceptions[0] + isRemote := exception.Remote _, err := r.ReadLine() if err != nil { return exceptions @@ -248,6 +265,7 @@ func fillJavaStacktrace(stacktrace string, exceptions []awsxray.Exception) []aws exceptions = append(exceptions, awsxray.Exception{ ID: aws.String(hex.EncodeToString(segmentID[:])), Type: aws.String(causeType), + Remote: isRemote, Message: aws.String(causeMessage), Stack: nil, }) @@ -287,6 +305,7 @@ func fillPythonStacktrace(stacktrace string, exceptions []awsxray.Exception) []a } line := lines[lineIdx] exception := &exceptions[0] + isRemote := exception.Remote exception.Stack = nil for { @@ -344,6 +363,7 @@ func fillPythonStacktrace(stacktrace string, exceptions []awsxray.Exception) []a exceptions = append(exceptions, awsxray.Exception{ ID: aws.String(hex.EncodeToString(segmentID[:])), Type: aws.String(causeType), + Remote: isRemote, Message: aws.String(causeMessage), }) // when append causes `exceptions` to outgrow its existing diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter/README.md b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter/README.md index 9d760c14e..94de8e364 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter/README.md +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter/README.md @@ -7,7 +7,7 @@ | | [beta]: traces, metrics | | Distributions | [contrib], [aws], [observiq] | | Issues | [![Open issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aopen%20label%3Aexporter%2Fdatadog%20&label=open&color=orange&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aopen+is%3Aissue+label%3Aexporter%2Fdatadog) [![Closed 
issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aclosed%20label%3Aexporter%2Fdatadog%20&label=closed&color=blue&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aclosed+is%3Aissue+label%3Aexporter%2Fdatadog) | -| [Code Owners](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/CONTRIBUTING.md#becoming-a-code-owner) | [@mx-psi](https://www.github.com/mx-psi), [@gbbr](https://www.github.com/gbbr), [@dineshg13](https://www.github.com/dineshg13), [@liustanley](https://www.github.com/liustanley), [@songy23](https://www.github.com/songy23) | +| [Code Owners](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/CONTRIBUTING.md#becoming-a-code-owner) | [@mx-psi](https://www.github.com/mx-psi), [@gbbr](https://www.github.com/gbbr), [@dineshg13](https://www.github.com/dineshg13), [@liustanley](https://www.github.com/liustanley), [@songy23](https://www.github.com/songy23), [@mackjmr](https://www.github.com/mackjmr) | [alpha]: https://github.com/open-telemetry/opentelemetry-collector#alpha [beta]: https://github.com/open-telemetry/opentelemetry-collector#beta diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter/factory.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter/factory.go index df0423b26..7beee5bdc 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter/factory.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter/factory.go @@ -275,6 +275,8 @@ func (f *factory) createMetricsExporter( exporterhelper.WithTimeout(exporterhelper.TimeoutSettings{Timeout: 0 * time.Second}), // We use our own custom mechanism for retries, since we hit several endpoints. 
exporterhelper.WithRetry(exporterhelper.RetrySettings{Enabled: false}), + // The metrics remapping code mutates data + exporterhelper.WithCapabilities(consumer.Capabilities{MutatesData: true}), exporterhelper.WithQueue(cfg.QueueSettings), exporterhelper.WithShutdown(func(context.Context) error { cancel() diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter/internal/hostmetadata/provider/provider.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter/internal/hostmetadata/provider/provider.go index 7cf09ed04..c713309cb 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter/internal/hostmetadata/provider/provider.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter/internal/hostmetadata/provider/provider.go @@ -37,32 +37,28 @@ func (p *chainProvider) Source(ctx context.Context) (source.Source, error) { for i, source := range p.priorityList { provider := p.providers[source] replies[i] = make(chan reply) - - go func(i int, source string) { - zapProvider := zap.String("provider", source) - p.logger.Debug("Trying out source provider", zapProvider) - + p.logger.Debug("Trying out source provider", zap.String("provider", source)) + go func(i int) { src, err := provider.Source(ctx) - if err != nil { - p.logger.Debug("Unavailable source provider", zapProvider, zap.Error(err)) - } - replies[i] <- reply{src: src, err: err} - }(i, source) + }(i) } // Check provider responses in order to ensure priority for i, ch := range replies { - reply := <-ch - if reply.err != nil { - // Provider was unavailable, error was logged on goroutine - continue - } + zapProvider := zap.String("provider", p.priorityList[i]) + select { + case <-ctx.Done(): + return source.Source{}, fmt.Errorf("context was cancelled: %w", ctx.Err()) + case reply := <-ch: + if reply.err != nil { + p.logger.Debug("Unavailable source provider", zapProvider, zap.Error(reply.err)) + continue + } - p.logger.Info("Resolved source", - zap.String("provider", p.priorityList[i]), zap.Any("source", reply.src), - ) - return reply.src, nil + p.logger.Info("Resolved source", zapProvider, zap.Any("source", reply.src)) + return reply.src, nil + } } return source.Source{}, fmt.Errorf("no source provider was available") diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter/internal/metrics/consumer.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter/internal/metrics/consumer.go index 18bf67868..0f7ad4602 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter/internal/metrics/consumer.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter/internal/metrics/consumer.go @@ -6,7 +6,7 @@ package metrics // import "github.com/open-telemetry/opentelemetry-collector-con import ( "context" - "github.com/DataDog/datadog-agent/pkg/trace/pb" + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" "github.com/DataDog/datadog-api-client-go/v2/api/datadog" "github.com/DataDog/datadog-api-client-go/v2/api/datadogV2" "github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics" @@ -26,7 +26,7 @@ var _ metrics.APMStatsConsumer = (*Consumer)(nil) type Consumer struct { ms []datadogV2.MetricSeries sl sketches.SketchSeriesList - as []pb.ClientStatsPayload + as []*pb.ClientStatsPayload seenHosts map[string]struct{} 
seenTags map[string]struct{} } @@ -80,7 +80,7 @@ func (c *Consumer) runningMetrics(timestamp uint64, buildInfo component.BuildInf } // All gets all metrics (consumed metrics and running metrics). -func (c *Consumer) All(timestamp uint64, buildInfo component.BuildInfo, tags []string, metadata metrics.Metadata) ([]datadogV2.MetricSeries, sketches.SketchSeriesList, []pb.ClientStatsPayload) { +func (c *Consumer) All(timestamp uint64, buildInfo component.BuildInfo, tags []string, metadata metrics.Metadata) ([]datadogV2.MetricSeries, sketches.SketchSeriesList, []*pb.ClientStatsPayload) { series := c.ms series = append(series, c.runningMetrics(timestamp, buildInfo, metadata)...) if len(tags) == 0 { @@ -99,7 +99,7 @@ func (c *Consumer) All(timestamp uint64, buildInfo component.BuildInfo, tags []s } // ConsumeAPMStats implements metrics.APMStatsConsumer. -func (c *Consumer) ConsumeAPMStats(s pb.ClientStatsPayload) { +func (c *Consumer) ConsumeAPMStats(s *pb.ClientStatsPayload) { c.as = append(c.as, s) } diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter/internal/metrics/consumer_deprecated.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter/internal/metrics/consumer_deprecated.go index 72f291cc9..a22db4b06 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter/internal/metrics/consumer_deprecated.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter/internal/metrics/consumer_deprecated.go @@ -6,7 +6,7 @@ package metrics // import "github.com/open-telemetry/opentelemetry-collector-con import ( "context" - "github.com/DataDog/datadog-agent/pkg/trace/pb" + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" "github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics" "github.com/DataDog/opentelemetry-mapping-go/pkg/quantile" "go.opentelemetry.io/collector/component" @@ -25,7 +25,7 @@ var _ metrics.APMStatsConsumer = (*ZorkianConsumer)(nil) type ZorkianConsumer struct { ms []zorkian.Metric sl sketches.SketchSeriesList - as []pb.ClientStatsPayload + as []*pb.ClientStatsPayload seenHosts map[string]struct{} seenTags map[string]struct{} } @@ -72,7 +72,7 @@ func (c *ZorkianConsumer) runningMetrics(timestamp uint64, buildInfo component.B } // All gets all metrics (consumed metrics and running metrics). -func (c *ZorkianConsumer) All(timestamp uint64, buildInfo component.BuildInfo, tags []string) ([]zorkian.Metric, sketches.SketchSeriesList, []pb.ClientStatsPayload) { +func (c *ZorkianConsumer) All(timestamp uint64, buildInfo component.BuildInfo, tags []string) ([]zorkian.Metric, sketches.SketchSeriesList, []*pb.ClientStatsPayload) { series := c.ms series = append(series, c.runningMetrics(timestamp, buildInfo)...) if len(tags) == 0 { @@ -91,7 +91,7 @@ func (c *ZorkianConsumer) All(timestamp uint64, buildInfo component.BuildInfo, t } // ConsumeAPMStats implements metrics.APMStatsConsumer. 
-func (c *ZorkianConsumer) ConsumeAPMStats(s pb.ClientStatsPayload) { +func (c *ZorkianConsumer) ConsumeAPMStats(s *pb.ClientStatsPayload) { c.as = append(c.as, s) } diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter/logs_exporter.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter/logs_exporter.go index 94881e9bb..ae775a78e 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter/logs_exporter.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter/logs_exporter.go @@ -22,6 +22,9 @@ import ( "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter/internal/scrub" ) +// otelTag specifies a tag to be added to all logs sent from the Datadog exporter +const otelTag = "otel_source:datadog_exporter" + type logsExporter struct { params exporter.CreateSettings cfg *Config @@ -101,7 +104,7 @@ func (exp *logsExporter) consumeLogs(_ context.Context, ld plog.Logs) (err error } rsl := ld.ResourceLogs() - var payload []datadogV2.HTTPLogItem + var payloads []datadogV2.HTTPLogItem // Iterate over resource logs for i := 0; i < rsl.Len(); i++ { rl := rsl.At(i) @@ -113,9 +116,16 @@ func (exp *logsExporter) consumeLogs(_ context.Context, ld plog.Logs) (err error // iterate over Logs for k := 0; k < lsl.Len(); k++ { log := lsl.At(k) - payload = append(payload, logsmapping.Transform(log, res, exp.params.Logger)) + payload := logsmapping.Transform(log, res, exp.params.Logger) + ddtags := payload.GetDdtags() + if ddtags != "" { + payload.SetDdtags(ddtags + "," + otelTag) + } else { + payload.SetDdtags(otelTag) + } + payloads = append(payloads, payload) } } } - return exp.sender.SubmitLogs(exp.ctx, payload) + return exp.sender.SubmitLogs(exp.ctx, payloads) } diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter/metadata.yaml b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter/metadata.yaml index 7d9fad584..474bdcade 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter/metadata.yaml +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter/metadata.yaml @@ -7,4 +7,4 @@ status: beta: [traces, metrics] distributions: [contrib, aws, observiq] codeowners: - active: [mx-psi, gbbr, dineshg13, liustanley, songy23] + active: [mx-psi, gbbr, dineshg13, liustanley, songy23, mackjmr] diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter/metrics_exporter.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter/metrics_exporter.go index 67a2c1dde..8f2445e51 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter/metrics_exporter.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter/metrics_exporter.go @@ -11,8 +11,8 @@ import ( "sync" "time" + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" "github.com/DataDog/datadog-agent/pkg/trace/api" - "github.com/DataDog/datadog-agent/pkg/trace/pb" "github.com/DataDog/datadog-api-client-go/v2/api/datadogV2" "github.com/DataDog/opentelemetry-mapping-go/pkg/inframetadata" "github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes/source" @@ -219,7 +219,7 @@ func (exp *metricsExporter) PushMetricsData(ctx 
context.Context, md pmetric.Metr } var sl sketches.SketchSeriesList - var sp []pb.ClientStatsPayload + var sp []*pb.ClientStatsPayload if isMetricExportV2Enabled() { var ms []datadogV2.MetricSeries ms, sl, sp = consumer.(*metrics.Consumer).All(exp.getPushTime(), exp.params.BuildInfo, tags, metadata) diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/dynatraceexporter/internal/serialization/serialization.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/dynatraceexporter/internal/serialization/serialization.go index 582b33048..a3f3b1b90 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/dynatraceexporter/internal/serialization/serialization.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/dynatraceexporter/internal/serialization/serialization.go @@ -19,7 +19,7 @@ func SerializeMetric(logger *zap.Logger, prefix string, metric pmetric.Metric, d ce := logger.Check(zap.DebugLevel, "SerializeMetric") var points int - + //exhaustive:enforce switch metric.Type() { case pmetric.MetricTypeGauge: metricLines = serializeGauge(logger, prefix, metric, defaultDimensions, staticDimensions, metricLines) diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/README.md b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/README.md index b9612e896..d2a9d40a1 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/README.md +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/README.md @@ -86,8 +86,8 @@ The following settings can be optionally configured: - `requests_per_second` is the average number of requests per seconds. - `producer` - `max_message_bytes` (default = 1000000) the maximum permitted size of a message in bytes - - `required_acks` (default = 1) controls when a message is regarded as transmitted. https://pkg.go.dev/github.com/Shopify/sarama@v1.30.0#RequiredAcks - - `compression` (default = 'none') the compression used when producing messages to kafka. The options are: `none`, `gzip`, `snappy`, `lz4`, and `zstd` https://pkg.go.dev/github.com/Shopify/sarama@v1.30.0#CompressionCodec + - `required_acks` (default = 1) controls when a message is regarded as transmitted. https://pkg.go.dev/github.com/IBM/sarama@v1.30.0#RequiredAcks + - `compression` (default = 'none') the compression used when producing messages to kafka. The options are: `none`, `gzip`, `snappy`, `lz4`, and `zstd` https://pkg.go.dev/github.com/IBM/sarama@v1.30.0#CompressionCodec - `flush_max_messages` (default = 0) The maximum number of messages the producer will send in a single broker request. 
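The `required_acks` setting documented above maps onto sarama's RequiredAcks constants, now imported from the IBM fork of the module. A small sketch (the WaitForAll choice is illustrative, not a recommended default):

```go
package main

import (
	"fmt"

	"github.com/IBM/sarama"
)

func main() {
	// required_acks maps onto sarama.RequiredAcks:
	//   0 = NoResponse, 1 = WaitForLocal (the exporter default), -1 = WaitForAll.
	cfg := sarama.NewConfig()
	cfg.Producer.RequiredAcks = sarama.WaitForAll // all in-sync replicas must ack
	fmt.Println(sarama.NoResponse, sarama.WaitForLocal, sarama.WaitForAll) // 0 1 -1
}
```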
Example configuration: diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/authentication.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/authentication.go index b4ea9540b..51861f026 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/authentication.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/authentication.go @@ -8,7 +8,7 @@ import ( "crypto/sha512" "fmt" - "github.com/Shopify/sarama" + "github.com/IBM/sarama" "go.opentelemetry.io/collector/config/configtls" "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/internal/awsmsk" diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/config.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/config.go index 67851a148..4ddffe6d9 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/config.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/config.go @@ -7,7 +7,7 @@ import ( "fmt" "time" - "github.com/Shopify/sarama" + "github.com/IBM/sarama" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/exporter/exporterhelper" ) @@ -59,7 +59,7 @@ type Producer struct { MaxMessageBytes int `mapstructure:"max_message_bytes"` // RequiredAcks Number of acknowledgements required to assume that a message has been sent. - // https://pkg.go.dev/github.com/Shopify/sarama@v1.30.0#RequiredAcks + // https://pkg.go.dev/github.com/IBM/sarama@v1.30.0#RequiredAcks // The options are: // 0 -> NoResponse. doesn't send any response // 1 -> WaitForLocal. 
waits for only the local commit to succeed before responding ( default ) @@ -67,7 +67,7 @@ type Producer struct { RequiredAcks sarama.RequiredAcks `mapstructure:"required_acks"` // Compression Codec used to produce messages - // https://pkg.go.dev/github.com/Shopify/sarama@v1.30.0#CompressionCodec + // https://pkg.go.dev/github.com/IBM/sarama@v1.30.0#CompressionCodec // The options are: 'none', 'gzip', 'snappy', 'lz4', and 'zstd' Compression string `mapstructure:"compression"` @@ -104,6 +104,10 @@ func (cfg *Config) Validate() error { } func validateSASLConfig(c *SASLConfig) error { + if c == nil { + return nil + } + if c.Username == "" { return fmt.Errorf("auth.sasl.username is required") } diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/factory.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/factory.go index 61d79c413..15d317652 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/factory.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/factory.go @@ -7,7 +7,7 @@ import ( "context" "time" - "github.com/Shopify/sarama" + "github.com/IBM/sarama" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/exporter" diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/internal/awsmsk/iam_scram_client.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/internal/awsmsk/iam_scram_client.go index b0642e26d..0f17daebd 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/internal/awsmsk/iam_scram_client.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/internal/awsmsk/iam_scram_client.go @@ -10,7 +10,7 @@ import ( "strings" "time" - "github.com/Shopify/sarama" + "github.com/IBM/sarama" "github.com/aws/aws-sdk-go/aws/credentials" sign "github.com/aws/aws-sdk-go/aws/signer/v4" "go.uber.org/multierr" diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/jaeger_marshaler.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/jaeger_marshaler.go index 5c4e951e5..abc73c22f 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/jaeger_marshaler.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/jaeger_marshaler.go @@ -6,7 +6,7 @@ package kafkaexporter // import "github.com/open-telemetry/opentelemetry-collect import ( "bytes" - "github.com/Shopify/sarama" + "github.com/IBM/sarama" "github.com/gogo/protobuf/jsonpb" jaegerproto "github.com/jaegertracing/jaeger/model" "go.opentelemetry.io/collector/pdata/ptrace" diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/kafka_exporter.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/kafka_exporter.go index c1ffa2163..638dfc439 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/kafka_exporter.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/kafka_exporter.go @@ -8,7 +8,7 @@ import ( "errors" "fmt" - "github.com/Shopify/sarama" + "github.com/IBM/sarama" 
"go.opentelemetry.io/collector/consumer/consumererror" "go.opentelemetry.io/collector/exporter" "go.opentelemetry.io/collector/pdata/plog" diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/marshaler.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/marshaler.go index b2df7084f..38525f9fb 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/marshaler.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/marshaler.go @@ -4,7 +4,7 @@ package kafkaexporter // import "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter" import ( - "github.com/Shopify/sarama" + "github.com/IBM/sarama" "go.opentelemetry.io/collector/pdata/plog" "go.opentelemetry.io/collector/pdata/pmetric" "go.opentelemetry.io/collector/pdata/ptrace" diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/pdata_marshaler.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/pdata_marshaler.go index 42387fe0d..d4511946b 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/pdata_marshaler.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/pdata_marshaler.go @@ -4,7 +4,7 @@ package kafkaexporter // import "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter" import ( - "github.com/Shopify/sarama" + "github.com/IBM/sarama" "go.opentelemetry.io/collector/pdata/plog" "go.opentelemetry.io/collector/pdata/pmetric" "go.opentelemetry.io/collector/pdata/ptrace" diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/raw_marshaler.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/raw_marshaler.go index 8fc978dc5..4f5371410 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/raw_marshaler.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/raw_marshaler.go @@ -7,7 +7,7 @@ import ( "encoding/json" "errors" - "github.com/Shopify/sarama" + "github.com/IBM/sarama" "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/plog" ) diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/scram_client.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/scram_client.go index 03bc161ea..b5c421de8 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/scram_client.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/scram_client.go @@ -4,7 +4,7 @@ package kafkaexporter // import "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter" import ( - "github.com/Shopify/sarama" + "github.com/IBM/sarama" "github.com/xdg-go/scram" ) diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/loadbalancingexporter/resolver_dns.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/loadbalancingexporter/resolver_dns.go index e4ab5b452..97d111d3d 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/loadbalancingexporter/resolver_dns.go +++ 
b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/loadbalancingexporter/resolver_dns.go @@ -131,8 +131,8 @@ func (r *dnsResolver) resolve(ctx context.Context) ([]string, error) { _ = stats.RecordWithTags(ctx, resolverSuccessTrueMutators, mNumResolutions.M(1)) - var backends []string - for _, ip := range addrs { + backends := make([]string, len(addrs)) + for i, ip := range addrs { var backend string if ip.IP.To4() != nil { backend = ip.String() @@ -146,7 +146,7 @@ func (r *dnsResolver) resolve(ctx context.Context) ([]string, error) { backend = fmt.Sprintf("%s:%s", backend, r.port) } - backends = append(backends, backend) + backends[i] = backend } // keep it always in the same order diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/logzioexporter/logziospan.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/logzioexporter/logziospan.go index 90dddff56..58c82b68e 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/logzioexporter/logziospan.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/logzioexporter/logziospan.go @@ -35,9 +35,9 @@ type logzioSpan struct { } func getTagsValues(tags []model.KeyValue) []string { - var values []string + values := make([]string, len(tags)) for i := range tags { - values = append(values, tags[i].VStr) + values[i] = tags[i].VStr } return values } diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusexporter/collector.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusexporter/collector.go index 957cf044d..eeaced19f 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusexporter/collector.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusexporter/collector.go @@ -272,7 +272,6 @@ func (c *collector) convertDoubleHistogram(metric pmetric.Metric, resourceAttrs } func (c *collector) createTargetInfoMetrics(resourceAttrs []pcommon.Map) ([]prometheus.Metric, error) { - var metrics []prometheus.Metric var lastErr error // deduplicate resourceAttrs by job and instance @@ -289,6 +288,7 @@ func (c *collector) createTargetInfoMetrics(resourceAttrs []pcommon.Map) ([]prom } } + metrics := make([]prometheus.Metric, 0, len(deduplicatedResourceAttrs)) for _, rAttributes := range deduplicatedResourceAttrs { // map ensures no duplicate label name labels := make(map[string]string, rAttributes.Len()+2) // +2 for job and instance labels. 
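The resolver_dns.go, logziospan.go, and collector.go hunks above all make the same micro-optimization: when the final length (or capacity) is known, allocate the slice once and write by index instead of growing a nil slice through repeated append calls. A minimal sketch of the before and after:

```go
package main

import "fmt"

func main() {
	tags := []string{"a", "b", "c"}

	// Before: start from nil and let append grow the backing array.
	var before []string
	for _, t := range tags {
		before = append(before, t)
	}

	// After (the pattern these hunks adopt): one allocation, indexed writes.
	after := make([]string, len(tags))
	for i, t := range tags {
		after[i] = t
	}

	fmt.Println(before, after) // [a b c] [a b c]
}
```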
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/signalfxexporter/internal/dimensions/dimclient.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/signalfxexporter/internal/dimensions/dimclient.go index 0be633880..8010013b2 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/signalfxexporter/internal/dimensions/dimclient.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/signalfxexporter/internal/dimensions/dimclient.go @@ -317,7 +317,8 @@ func (dc *DimensionClient) makePatchRequest(dim *DimensionUpdate) (*http.Request return nil, err } - req, err := http.NewRequest( + req, err := http.NewRequestWithContext( + context.Background(), "PATCH", strings.TrimRight(url.String(), "/")+"/_/sfxagent", bytes.NewReader(json)) diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/signalfxexporter/internal/translation/constants.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/signalfxexporter/internal/translation/constants.go index 51cc26ccd..87b1a7959 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/signalfxexporter/internal/translation/constants.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/signalfxexporter/internal/translation/constants.go @@ -395,6 +395,14 @@ translation_rules: major: vmpage_io.swap.out minor: vmpage_io.memory.out +# convert from bytes to pages +- action: divide_int + scale_factors_int: + vmpage_io.swap.in: 4096 + vmpage_io.swap.out: 4096 + vmpage_io.memory.in: 4096 + vmpage_io.memory.out: 4096 + # process metric - action: copy_metrics mapping: diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/signalfxexporter/internal/translation/dpfilters/filterset.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/signalfxexporter/internal/translation/dpfilters/filterset.go index 6c835897d..853f0445e 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/signalfxexporter/internal/translation/dpfilters/filterset.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/signalfxexporter/internal/translation/dpfilters/filterset.go @@ -48,8 +48,8 @@ func NewFilterSet(excludes []MetricFilter, includes []MetricFilter) (*FilterSet, } func getDataPointFilters(metricFilters []MetricFilter) ([]*dataPointFilter, error) { - var out []*dataPointFilter - for _, f := range metricFilters { + out := make([]*dataPointFilter, len(metricFilters)) + for i, f := range metricFilters { dimSet, err := f.normalize() if err != nil { return nil, err @@ -60,7 +60,7 @@ func getDataPointFilters(metricFilters []MetricFilter) ([]*dataPointFilter, erro return nil, err } - out = append(out, dpf) + out[i] = dpf } return out, nil } diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/ecsobserver/README.md b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/ecsobserver/README.md index 5c0a33435..73bb1119e 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/ecsobserver/README.md +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/ecsobserver/README.md @@ -6,7 +6,7 @@ | Stability | [beta] | | Distributions | [contrib], [aws], [splunk], [sumo] | | Issues | [![Open 
issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aopen%20label%3Aextension%2Fecsobserver%20&label=open&color=orange&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aopen+is%3Aissue+label%3Aextension%2Fecsobserver) [![Closed issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aclosed%20label%3Aextension%2Fecsobserver%20&label=closed&color=blue&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aclosed+is%3Aissue+label%3Aextension%2Fecsobserver) | -| [Code Owners](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/CONTRIBUTING.md#becoming-a-code-owner) | [@dmitryax @rmfitzpatrick](https://www.github.com/dmitryax @rmfitzpatrick) | +| [Code Owners](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/CONTRIBUTING.md#becoming-a-code-owner) | [@dmitryax](https://www.github.com/dmitryax), [@rmfitzpatrick](https://www.github.com/rmfitzpatrick) | [beta]: https://github.com/open-telemetry/opentelemetry-collector#beta [contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/ecsobserver/metadata.yaml b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/ecsobserver/metadata.yaml index 3781b4420..9f1e59fb8 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/ecsobserver/metadata.yaml +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/ecsobserver/metadata.yaml @@ -6,4 +6,4 @@ status: beta: [extension] distributions: [contrib, aws, splunk, sumo] codeowners: - active: [dmitryax @rmfitzpatrick] + active: [dmitryax, rmfitzpatrick] diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/metrics/metric_calculator.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/metrics/metric_calculator.go index 0f9866cb8..205ae5d39 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/metrics/metric_calculator.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/metrics/metric_calculator.go @@ -4,6 +4,7 @@ package metrics // import "github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/metrics" import ( + "errors" "sync" "time" @@ -34,6 +35,7 @@ func calculateDelta(prev *MetricValue, val interface{}, _ time.Time) (interface{ } // MetricCalculator is a calculator used to adjust metric values based on its previous record. +// Shutdown() must be called to clean up goroutines before program exit. type MetricCalculator struct { // lock on write lock sync.Mutex @@ -43,6 +45,7 @@ type MetricCalculator struct { calculateFunc CalculateFunc } +// NewMetricCalculator Creates a metric calculator that enforces a five-minute time to live on cache entries. func NewMetricCalculator(calculateFunc CalculateFunc) MetricCalculator { return MetricCalculator{ cache: NewMapWithExpiry(cleanInterval), @@ -63,6 +66,11 @@ func (rm *MetricCalculator) Calculate(mKey Key, value interface{}, timestamp tim rm.lock.Lock() defer rm.lock.Unlock() + // need to also lock cache to avoid the cleanup from removing entries while they are being processed. 
+ // This is only likely to happen when data points come in close to the expiration date. + rm.cache.Lock() + defer rm.cache.Unlock() + prev, exists := cacheStore.Get(mKey) result, done = rm.calculateFunc(prev, value, timestamp) if !exists || done { @@ -74,6 +82,10 @@ func (rm *MetricCalculator) Calculate(mKey Key, value interface{}, timestamp tim return result, done } +func (rm *MetricCalculator) Shutdown() error { + return rm.cache.Shutdown() +} + type Key struct { MetricMetadata interface{} MetricLabels attribute.Distinct @@ -99,15 +111,21 @@ type MetricValue struct { Timestamp time.Time } -// MapWithExpiry act like a map which provide a method to clean up expired entries +// MapWithExpiry acts like a map which provides a method to clean up expired entries. +// MapWithExpiry is not thread-safe; locks must be managed by the owner of the map through the use of Lock() and Unlock() type MapWithExpiry struct { - lock *sync.Mutex - ttl time.Duration - entries map[interface{}]*MetricValue + lock *sync.Mutex + ttl time.Duration + entries map[interface{}]*MetricValue + doneChan chan struct{} } +// NewMapWithExpiry automatically starts a sweeper goroutine to enforce the map's TTL. Shutdown() must be called to ensure +// that these goroutines are properly cleaned up. func NewMapWithExpiry(ttl time.Duration) *MapWithExpiry { - return &MapWithExpiry{lock: &sync.Mutex{}, ttl: ttl, entries: make(map[interface{}]*MetricValue)} + m := &MapWithExpiry{lock: &sync.Mutex{}, ttl: ttl, entries: make(map[interface{}]*MetricValue), doneChan: make(chan struct{})} + go m.sweep(m.CleanUp) + return m } func (m *MapWithExpiry) Get(key Key) (*MetricValue, bool) { @@ -119,6 +137,32 @@ func (m *MapWithExpiry) Set(key Key, value MetricValue) { m.entries[key] = &value } +func (m *MapWithExpiry) sweep(removeFunc func(time2 time.Time)) { + ticker := time.NewTicker(m.ttl) + for { + select { + case currentTime := <-ticker.C: + m.lock.Lock() + removeFunc(currentTime) + m.lock.Unlock() + case <-m.doneChan: + ticker.Stop() + return + } + } +} + +func (m *MapWithExpiry) Shutdown() error { + select { + case <-m.doneChan: + return errors.New("shutdown called on an already closed channel") + default: + close(m.doneChan) + } + return nil +} + func (m *MapWithExpiry) CleanUp(now time.Time) { for k, v := range m.entries { if now.Sub(v.Timestamp) >= m.ttl {
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/metadataproviders/system/metadata.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/metadataproviders/system/metadata.go index 8568ed521..284b85b32 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/metadataproviders/system/metadata.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/metadataproviders/system/metadata.go @@ -13,6 +13,7 @@ import ( "github.com/Showmax/go-fqdn" conventions "go.opentelemetry.io/collector/semconv/v1.6.1" + "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/sdk/resource" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/metadataproviders/internal" @@ -45,6 +46,9 @@ type Provider interface { // FQDN returns the fully qualified domain name FQDN() (string, error) + // OSDescription returns a human-readable description of the OS.
+ OSDescription(ctx context.Context) (string, error) + // OSType returns the host operating system OSType() (string, error) @@ -123,29 +127,33 @@ func (p systemMetadataProvider) reverseLookup(ipAddresses []string) (string, err return "", fmt.Errorf("reverseLookup failed to convert IP addresses to name: %w", err) } -func (p systemMetadataProvider) HostID(ctx context.Context) (string, error) { - res, err := p.newResource(ctx, - resource.WithHostID(), - ) - +func (p systemMetadataProvider) fromOption(ctx context.Context, opt resource.Option, semconv string) (string, error) { + res, err := p.newResource(ctx, opt) if err != nil { - return "", fmt.Errorf("failed to obtain host id: %w", err) + return "", fmt.Errorf("failed to obtain %q: %w", semconv, err) } iter := res.Iter() - for iter.Next() { - if iter.Attribute().Key == conventions.AttributeHostID { + if iter.Attribute().Key == attribute.Key(semconv) { v := iter.Attribute().Value.Emit() if v == "" { - return "", fmt.Errorf("empty host id") + return "", fmt.Errorf("empty %q", semconv) } return v, nil } } - return "", fmt.Errorf("failed to obtain host id") + return "", fmt.Errorf("failed to obtain %q", semconv) +} + +func (p systemMetadataProvider) HostID(ctx context.Context) (string, error) { + return p.fromOption(ctx, resource.WithHostID(), conventions.AttributeHostID) +} + +func (p systemMetadataProvider) OSDescription(ctx context.Context) (string, error) { + return p.fromOption(ctx, resource.WithOSDescription(), conventions.AttributeOSDescription) } func (systemMetadataProvider) HostArch() (string, error) { diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/README.md b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/README.md index 03e7ccdb1..d8f953db1 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/README.md +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/README.md @@ -172,9 +172,18 @@ When defining an OTTL function, if the function needs to take an Enum then the f Math Expressions represent arithmetic calculations. They support `+`, `-`, `*`, and `/`, along with `()` for grouping. -Math Expressions currently only support `int64` and `float64`. +Math Expressions currently support `int64`, `float64`, `time.Time` and `time.Duration`. +For `time.Time` and `time.Duration`, only `+` and `-` are supported with the following rules: + - A `time.Time` `-` a `time.Time` yields a `time.Duration`. + - A `time.Duration` `+` a `time.Time` yields a `time.Time`. + - A `time.Time` `+` a `time.Duration` yields a `time.Time`. + - A `time.Time` `-` a `time.Duration` yields a `time.Time`. + - A `time.Duration` `+` a `time.Duration` yields a `time.Duration`. + - A `time.Duration` `-` a `time.Duration` yields a `time.Duration`. + Math Expressions support `Paths` and `Editors` that return supported types. Note that `*` and `/` take precedence over `+` and `-`. +Also note that `time.Time` and `time.Duration` can only be used with `+` and `-`. Operations that share the same level of precedence will be executed in the order that they appear in the Math Expression. Math Expressions can be grouped with parentheses to override evaluation precedence. Math Expressions that mix `int64` and `float64` will result in an error. 
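As a quick sanity check on the arithmetic rules documented above, here is a small, self-contained Go sketch (illustrative only, not part of this patch) that exercises the same `time.Time`/`time.Duration` combinations the OTTL math grammar now accepts; the timestamps are made up for the example.

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	start := time.Date(2023, 8, 15, 8, 51, 55, 0, time.UTC)
	end := start.Add(90 * time.Minute)

	elapsed := end.Sub(start)             // time.Time - time.Time => time.Duration
	later := start.Add(time.Hour)         // time.Time + time.Duration => time.Time
	earlier := end.Add(-30 * time.Minute) // time.Time - time.Duration => time.Time
	total := elapsed + 15*time.Minute     // time.Duration + time.Duration => time.Duration
	left := total - elapsed               // time.Duration - time.Duration => time.Duration

	fmt.Println(elapsed, later, earlier, total, left)
}
```

Multiplication and division are intentionally absent: as the README text above notes, `*` and `/` remain errors for these two types.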
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/expression.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/expression.go index cf615cfef..82e9b0407 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/expression.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/expression.go @@ -7,6 +7,7 @@ import ( "context" "encoding/hex" "fmt" + "reflect" "strconv" jsoniter "github.com/json-iterator/go" @@ -245,6 +246,56 @@ func (g StandardFloatGetter[K]) Get(ctx context.Context, tCtx K) (float64, error } } +// FunctionGetter uses a function factory to return an instantiated function as an Expr. +type FunctionGetter[K any] interface { + Get(args Arguments) (Expr[K], error) +} + +// StandardFunctionGetter is a basic implementation of FunctionGetter. +type StandardFunctionGetter[K any] struct { + fCtx FunctionContext + fact Factory[K] +} + +// Get takes an Arguments struct containing arguments the caller wants passed to the +// function and instantiates the function with those arguments. +// If there is a mismatch between the function's signature and the arguments the caller +// wants to pass to the function, an error is returned. +func (g StandardFunctionGetter[K]) Get(args Arguments) (Expr[K], error) { + if g.fact == nil { + return Expr[K]{}, fmt.Errorf("undefined function") + } + fArgs := g.fact.CreateDefaultArguments() + if reflect.TypeOf(fArgs).Kind() != reflect.Pointer { + return Expr[K]{}, fmt.Errorf("factory for %q must return a pointer to an Arguments value in its CreateDefaultArguments method", g.fact.Name()) + } + if reflect.TypeOf(args).Kind() != reflect.Pointer { + return Expr[K]{}, fmt.Errorf("%q must be pointer to an Arguments value", reflect.TypeOf(args).Kind()) + } + fArgsVal := reflect.ValueOf(fArgs).Elem() + argsVal := reflect.ValueOf(args).Elem() + if fArgsVal.NumField() != argsVal.NumField() { + return Expr[K]{}, fmt.Errorf("incorrect number of arguments. Expected: %d Received: %d", fArgsVal.NumField(), argsVal.NumField()) + } + for i := 0; i < fArgsVal.NumField(); i++ { + field := argsVal.Field(i) + argIndex, err := getArgumentIndex(i, argsVal) + if err != nil { + return Expr[K]{}, err + } + fArgIndex, err := getArgumentIndex(argIndex, fArgsVal) + if err != nil { + return Expr[K]{}, err + } + fArgsVal.Field(fArgIndex).Set(field) + } + fn, err := g.fact.CreateFunction(g.fCtx, fArgs) + if err != nil { + return Expr[K]{}, fmt.Errorf("couldn't create function: %w", err) + } + return Expr[K]{exprFunc: fn}, nil +} + // PMapGetter is a Getter that must return a pcommon.Map. type PMapGetter[K any] interface { // Get retrieves a pcommon.Map value. @@ -399,7 +450,7 @@ func (g StandardFloatLikeGetter[K]) Get(ctx context.Context, tCtx K) (*float64, return &result, nil } -// IntLikeGetter is a Getter that returns an int by converting the underlying value to an int if necessary. +// IntLikeGetter is a Getter that returns an int by converting the underlying value to an int if necessary type IntLikeGetter[K any] interface { // Get retrieves an int value. // Unlike `IntGetter`, the expectation is that the underlying value is converted to an int if possible. 
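The reflection-driven copying in `StandardFunctionGetter.Get` above leans on the positional `ottlarg` struct tags that `getArgumentIndex` (introduced in functions.go below) resolves. The following self-contained sketch, with a hypothetical `dummyArgs` struct standing in for a real OTTL `Arguments` type, mimics that tag lookup to show how a field's declaration order can differ from its argument position.

```go
package main

import (
	"fmt"
	"reflect"
	"strconv"
)

// dummyArgs is a stand-in for an OTTL Arguments struct: each field carries an
// `ottlarg` tag naming its positional index in the function call, which need
// not match the field's declaration order.
type dummyArgs struct {
	Pattern string `ottlarg:"1"`
	Target  string `ottlarg:"0"`
}

// argumentIndex mirrors the core of getArgumentIndex: read field i's ottlarg
// tag and validate that it is a usable positional index.
func argumentIndex(i int, args reflect.Value) (int, error) {
	field := args.Type().Field(i)
	tag, ok := field.Tag.Lookup("ottlarg")
	if !ok {
		return 0, fmt.Errorf("no `ottlarg` struct tag on field %q", field.Name)
	}
	n, err := strconv.Atoi(tag)
	if err != nil {
		return 0, fmt.Errorf("ottlarg tag on field %q is not an integer: %w", field.Name, err)
	}
	if n < 0 || n >= args.NumField() {
		return 0, fmt.Errorf("ottlarg tag on field %q is out of range", field.Name)
	}
	return n, nil
}

func main() {
	v := reflect.ValueOf(dummyArgs{}) // the zero value is enough for tag inspection
	for i := 0; i < v.NumField(); i++ {
		idx, err := argumentIndex(i, v)
		if err != nil {
			fmt.Println("error:", err)
			continue
		}
		fmt.Printf("field %s -> positional argument %d\n", v.Type().Field(i).Name, idx)
	}
}
```

Copying field by field through these indices is what lets the caller's Arguments struct and the factory's Arguments struct declare their fields in different orders, so long as both tag the same positions.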
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/functions.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/functions.go index 5cf65df79..967f80453 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/functions.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/functions.go @@ -47,42 +47,57 @@ func (p *Parser[K]) newFunctionCall(ed editor) (Expr[K], error) { return Expr[K]{exprFunc: fn}, err } +func getArgumentIndex(index int, args reflect.Value) (int, error) { + argsType := args.Type() + fieldTag, ok := argsType.Field(index).Tag.Lookup("ottlarg") + if !ok { + return 0, fmt.Errorf("no `ottlarg` struct tag on Arguments field %q", argsType.Field(index).Name) + } + argNum, err := strconv.Atoi(fieldTag) + if err != nil { + return 0, fmt.Errorf("ottlarg struct tag on field %q is not a valid integer: %w", argsType.Field(index).Name, err) + } + if argNum < 0 || argNum >= args.NumField() { + return 0, fmt.Errorf("ottlarg struct tag on field %q has value %d, but must be between 0 and %d", argsType.Field(index).Name, argNum, args.NumField()) + } + return argNum, nil +} + func (p *Parser[K]) buildArgs(ed editor, argsVal reflect.Value) error { if len(ed.Arguments) != argsVal.NumField() { return fmt.Errorf("incorrect number of arguments. Expected: %d Received: %d", argsVal.NumField(), len(ed.Arguments)) } - argsType := argsVal.Type() - for i := 0; i < argsVal.NumField(); i++ { field := argsVal.Field(i) fieldType := field.Type() - - fieldTag, ok := argsType.Field(i).Tag.Lookup("ottlarg") - - if !ok { - return fmt.Errorf("no `ottlarg` struct tag on Arguments field %q", argsType.Field(i).Name) - } - - argNum, err := strconv.Atoi(fieldTag) - + argNum, err := getArgumentIndex(i, argsVal) if err != nil { - return fmt.Errorf("ottlarg struct tag on field %q is not a valid integer: %w", argsType.Field(i).Name, err) + return err } - - if argNum < 0 || argNum >= len(ed.Arguments) { - return fmt.Errorf("ottlarg struct tag on field %q has value %d, but must be between 0 and %d", argsType.Field(i).Name, argNum, len(ed.Arguments)) - } - argVal := ed.Arguments[argNum] - var val any - if fieldType.Kind() == reflect.Slice { + switch { + case strings.HasPrefix(fieldType.Name(), "FunctionGetter"): + var name string + switch { + case argVal.Enum != nil: + name = string(*argVal.Enum) + case argVal.FunctionName != nil: + name = *argVal.FunctionName + default: + return fmt.Errorf("invalid function name given") + } + f, ok := p.functions[name] + if !ok { + return fmt.Errorf("undefined function %s", name) + } + val = StandardFunctionGetter[K]{fCtx: FunctionContext{Set: p.telemetrySettings}, fact: f} + case fieldType.Kind() == reflect.Slice: val, err = p.buildSliceArg(argVal, fieldType) - } else { + default: val, err = p.buildArg(argVal, fieldType) } - if err != nil { return fmt.Errorf("invalid argument at position %v: %w", i, err) } diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/grammar.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/grammar.go index 3ddb1e1f8..73934c9b7 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/grammar.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/grammar.go @@ -225,7 +225,8 @@ type value struct { Bytes *byteSlice `parser:"| @Bytes"` String *string `parser:"| @String"` Bool *boolean `parser:"| @Boolean"` - Enum *EnumSymbol 
`parser:"| @Uppercase"` + Enum *EnumSymbol `parser:"| @Uppercase (?! Lowercase)"` + FunctionName *string `parser:"| @(Uppercase(Uppercase | Lowercase)*)"` List *list `parser:"| @@)"` } diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/math.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/math.go index e26c298ec..897ed22c3 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/math.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/math.go @@ -6,6 +6,7 @@ package ottl // import "github.com/open-telemetry/opentelemetry-collector-contri import ( "context" "fmt" + "time" ) func (p *Parser[K]) evaluateMathExpression(expr *mathExpression) (Getter[K], error) { @@ -98,14 +99,68 @@ func attemptMathOperation[K any](lhs Getter[K], op mathOp, rhs Getter[K]) Getter default: return nil, fmt.Errorf("%v must be int64 or float64", y) } + case time.Time: + return performOpTime(newX, y, op) + case time.Duration: + return performOpDuration(newX, y, op) default: - return nil, fmt.Errorf("%v must be int64 or float64", x) + return nil, fmt.Errorf("%v must be int64, float64, time.Time or time.Duration", x) } }, }, } } +func performOpTime(x time.Time, y any, op mathOp) (any, error) { + switch op { + case ADD: + switch newY := y.(type) { + case time.Duration: + result := x.Add(newY) + return result, nil + default: + return nil, fmt.Errorf("time.Time must be added to time.Duration; found %v instead", y) + } + case SUB: + switch newY := y.(type) { + case time.Time: + result := x.Sub(newY) + return result, nil + case time.Duration: + result := x.Add(-1 * newY) + return result, nil + default: + return nil, fmt.Errorf("time.Time or time.Duration must be subtracted from time.Time; found %v instead", y) + } + } + return nil, fmt.Errorf("only addition and subtraction supported for time.Time and time.Duration") +} + +func performOpDuration(x time.Duration, y any, op mathOp) (any, error) { + switch op { + case ADD: + switch newY := y.(type) { + case time.Duration: + result := x + newY + return result, nil + case time.Time: + result := newY.Add(x) + return result, nil + default: + return nil, fmt.Errorf("time.Duration must be added to time.Duration or time.Time; found %v instead", y) + } + case SUB: + switch newY := y.(type) { + case time.Duration: + result := x - newY + return result, nil + default: + return nil, fmt.Errorf("time.Duration must be subtracted from time.Duration; found %v instead", y) + } + } + return nil, fmt.Errorf("only addition and subtraction supported for time.Time and time.Duration") +} + func performOp[N int64 | float64](x N, y N, op mathOp) (N, error) { switch op { case ADD: diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/parser.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/parser.go index d60626180..1b6335cbd 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/parser.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/parser.go @@ -102,7 +102,7 @@ func WithEnumParser[K any](parser EnumParser) Option[K] { // If parsing fails, returns an empty slice with a multierr error containing // an error per failed statement. 
func (p *Parser[K]) ParseStatements(statements []string) ([]*Statement[K], error) { - var parsedStatements []*Statement[K] + parsedStatements := make([]*Statement[K], 0, len(statements)) var parseErr error for _, statement := range statements { diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger/jaegerproto_to_traces.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger/jaegerproto_to_traces.go index 0eda60834..1ac848cfa 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger/jaegerproto_to_traces.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger/jaegerproto_to_traces.go @@ -73,7 +73,7 @@ func regroup(batches []*model.Batch) []*model.Batch { } } - var result []*model.Batch + result := make([]*model.Batch, 0, len(registry)) for _, v := range registry { result = append(result, v) } diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus/normalize_name.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus/normalize_name.go index dfb965e97..3b1647aab 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus/normalize_name.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus/normalize_name.go @@ -37,11 +37,6 @@ var unitMap = map[string]string{ "MBy": "megabytes", "GBy": "gigabytes", "TBy": "terabytes", - "B": "bytes", - "KB": "kilobytes", - "MB": "megabytes", - "GB": "gigabytes", - "TB": "terabytes", // SI "m": "meters", @@ -56,7 +51,6 @@ var unitMap = map[string]string{ "Hz": "hertz", "1": "", "%": "percent", - "$": "dollars", } // The map that translates the "per" unit diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/signalfx/from_metrics.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/signalfx/from_metrics.go index e4e82c38c..c0b7714e8 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/signalfx/from_metrics.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/signalfx/from_metrics.go @@ -274,10 +274,10 @@ type dpsBuilder struct { pos int } -func newDpsBuilder(cap int) dpsBuilder { +func newDpsBuilder(capacity int) dpsBuilder { return dpsBuilder{ - baseOut: make([]sfxpb.DataPoint, cap), - out: make([]*sfxpb.DataPoint, 0, cap), + baseOut: make([]sfxpb.DataPoint, capacity), + out: make([]*sfxpb.DataPoint, 0, capacity), } } diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin/zipkinv2/to_translator.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin/zipkinv2/to_translator.go index c34f80343..ed11b1297 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin/zipkinv2/to_translator.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin/zipkinv2/to_translator.go @@ -464,4 +464,9 @@ var statusCodeValue = map[string]int32{ "STATUS_CODE_UNSET": 0, "STATUS_CODE_OK": 1, "STATUS_CODE_ERROR": 2, + // As reported in https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/14965 + // The Zipkin exporter used a different set of names when serializing span state. 
+ "Unset": 0, + "Ok": 1, + "Error": 2, } diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/attributesprocessor/attributes_metric.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/attributesprocessor/attributes_metric.go index c913d1f85..72a4d9d03 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/attributesprocessor/attributes_metric.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/attributesprocessor/attributes_metric.go @@ -65,6 +65,7 @@ func (a *metricAttributesProcessor) processMetricAttributes(ctx context.Context, // This is a lot of repeated code, but since there is no single parent superclass // between metric data types, we can't use polymorphism. + //exhaustive:enforce switch m.Type() { case pmetric.MetricTypeGauge: dps := m.Gauge().DataPoints() diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/README.md b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/README.md index 09889fa71..9cc02b693 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/README.md +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/README.md @@ -82,6 +82,37 @@ processors: - 'severity_number < SEVERITY_NUMBER_WARN' ``` +#### Dropping non-HTTP spans +```yaml +processors: + filter/httponly: + error_mode: ignore + traces: + span: + - attributes["http.request.method"] == nil +``` + +#### Dropping HTTP spans +```yaml +processors: + filter/drophttp: + error_mode: ignore + traces: + span: + - attributes["http.request.method"] != nil +``` + +#### Dropping metrics with invalid type +```yaml +processors: + filter/dropempty: + error_mode: ignore + metrics: + metric: + - type == METRIC_DATA_TYPE_NONE +``` + + ### OTTL Functions The filter processor has access to all [OTTL Converter functions](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/pkg/ottl/ottlfuncs#converters) diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/config.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/config.go index a60f67242..3f517382f 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/config.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/config.go @@ -50,7 +50,7 @@ type MetricFilters struct { // If both Include and Exclude are specified, Include filtering occurs first. Exclude *filterconfig.MetricMatchProperties `mapstructure:"exclude"` - // RegexpConfig specifies options for the Regexp match type + // RegexpConfig specifies options for the regexp match type RegexpConfig *regexp.Config `mapstructure:"regexp"` // MetricConditions is a list of OTTL conditions for an ottlmetric context. @@ -100,8 +100,8 @@ type LogMatchType string // These are the MatchTypes that users can specify for filtering // `plog.Log`s. 
const ( - Strict = LogMatchType(filterset.Strict) - Regexp = LogMatchType(filterset.Regexp) + strictType = LogMatchType(filterset.Strict) + regexpType = LogMatchType(filterset.Regexp) ) var severityToNumber = map[string]plog.SeverityNumber{ diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/metrics.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/metrics.go index 51fbb857c..31fae0dfa 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/metrics.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/metrics.go @@ -133,6 +133,7 @@ func (fmp *filterMetricProcessor) processMetrics(ctx context.Context, md pmetric } } if fmp.skipDataPointExpr != nil { + //exhaustive:enforce switch metric.Type() { case pmetric.MetricTypeSum: errors = multierr.Append(errors, fmp.handleNumberDataPoints(ctx, metric.Sum().DataPoints(), metric, smetrics.Metrics(), scope, resource)) diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/groupbytraceprocessor/storage_memory.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/groupbytraceprocessor/storage_memory.go index 487277ba6..909164d12 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/groupbytraceprocessor/storage_memory.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/groupbytraceprocessor/storage_memory.go @@ -54,11 +54,11 @@ func (st *memoryStorage) get(traceID pcommon.TraceID) ([]ptrace.ResourceSpans, e return nil, nil } - var result []ptrace.ResourceSpans - for _, rs := range rss { + result := make([]ptrace.ResourceSpans, len(rss)) + for i, rs := range rss { newRS := ptrace.NewResourceSpans() rs.CopyTo(newRS) - result = append(result, newRS) + result[i] = newRS } return result, nil diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sattributesprocessor/config.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sattributesprocessor/config.go index c6052c103..ce194a13a 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sattributesprocessor/config.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sattributesprocessor/config.go @@ -66,6 +66,7 @@ type ExtractConfig struct { // k8s.statefulset.name, k8s.statefulset.uid, // k8s.container.name, container.image.name, // container.image.tag, container.id + // k8s.cluster.uid // // Specifying anything other than these values will result in an error. 
 	// By default, the following fields are extracted and added to spans, metrics and logs as attributes:
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sattributesprocessor/internal/kube/client.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sattributesprocessor/internal/kube/client.go
index 6fc2519b0..019105eb2 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sattributesprocessor/internal/kube/client.go
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sattributesprocessor/internal/kube/client.go
@@ -114,7 +114,15 @@ func New(logger *zap.Logger, apiCfg k8sconfig.APIConfig, rules ExtractionRules,
 	}
 
 	if newNamespaceInformer == nil {
-		newNamespaceInformer = newNamespaceSharedInformer
+		// If rules to extract namespace metadata are configured, use the namespace shared informer, which watches
+		// all namespaces, including kube-system, whose UID supplies the cluster uid information (kube-system-uid).
+		if c.extractNamespaceLabelsAnnotations() {
+			newNamespaceInformer = newNamespaceSharedInformer
+		} else {
+			// Otherwise use the kube-system shared informer, which watches only the kube-system namespace,
+			// reducing the overhead of watching all the namespaces.
+			newNamespaceInformer = newKubeSystemSharedInformer
+		}
 	}
 
 	c.informer = newInformer(c.kc, c.Filters.Namespace, labelSelector, fieldSelector)
@@ -132,11 +140,7 @@ func New(logger *zap.Logger, apiCfg k8sconfig.APIConfig, rules ExtractionRules,
 		return nil, err
 	}
 
-	if c.extractNamespaceLabelsAnnotations() {
-		c.namespaceInformer = newNamespaceInformer(c.kc)
-	} else {
-		c.namespaceInformer = NewNoOpInformer(c.kc)
-	}
+	c.namespaceInformer = newNamespaceInformer(c.kc)
 
 	if rules.DeploymentName || rules.DeploymentUID {
 		if newReplicaSetInformer == nil {
@@ -212,13 +216,13 @@ func (c *WatchClient) handlePodAdd(obj interface{}) {
 	observability.RecordPodTableSize(int64(podTableSize))
 }
 
-func (c *WatchClient) handlePodUpdate(_, new interface{}) {
+func (c *WatchClient) handlePodUpdate(_, newPod interface{}) {
 	observability.RecordPodUpdated()
-	if pod, ok := new.(*api_v1.Pod); ok {
+	if pod, ok := newPod.(*api_v1.Pod); ok {
 		// TODO: update or remove based on whether container is ready/unready?.
c.addOrUpdatePod(pod) } else { - c.logger.Error("object received was not of type api_v1.Pod", zap.Any("received", new)) + c.logger.Error("object received was not of type api_v1.Pod", zap.Any("received", newPod)) } podTableSize := len(c.Pods) observability.RecordPodTableSize(int64(podTableSize)) @@ -244,12 +248,12 @@ func (c *WatchClient) handleNamespaceAdd(obj interface{}) { } } -func (c *WatchClient) handleNamespaceUpdate(_, new interface{}) { +func (c *WatchClient) handleNamespaceUpdate(_, newNamespace interface{}) { observability.RecordNamespaceUpdated() - if namespace, ok := new.(*api_v1.Namespace); ok { + if namespace, ok := newNamespace.(*api_v1.Namespace); ok { c.addOrUpdateNamespace(namespace) } else { - c.logger.Error("object received was not of type api_v1.Namespace", zap.Any("received", new)) + c.logger.Error("object received was not of type api_v1.Namespace", zap.Any("received", newNamespace)) } } @@ -433,6 +437,14 @@ func (c *WatchClient) extractPodAttributes(pod *api_v1.Pod) map[string]string { tags[tagNodeName] = pod.Spec.NodeName } + if c.Rules.ClusterUID { + if val, ok := c.Namespaces["kube-system"]; ok { + tags[tagClusterUID] = val.NamespaceUID + } else { + c.logger.Debug("unable to find kube-system namespace, cluster uid will not be available") + } + } + for _, r := range c.Rules.Labels { r.extractFromPodMetadata(pod.Labels, tags, "k8s.pod.labels.%s") } @@ -836,12 +848,12 @@ func (c *WatchClient) handleReplicaSetAdd(obj interface{}) { } } -func (c *WatchClient) handleReplicaSetUpdate(_, new interface{}) { +func (c *WatchClient) handleReplicaSetUpdate(_, newRS interface{}) { observability.RecordReplicaSetUpdated() - if replicaset, ok := new.(*apps_v1.ReplicaSet); ok { + if replicaset, ok := newRS.(*apps_v1.ReplicaSet); ok { c.addOrUpdateReplicaSet(replicaset) } else { - c.logger.Error("object received was not of type apps_v1.ReplicaSet", zap.Any("received", new)) + c.logger.Error("object received was not of type apps_v1.ReplicaSet", zap.Any("received", newRS)) } } diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sattributesprocessor/internal/kube/informer.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sattributesprocessor/internal/kube/informer.go index a8aaba82b..db5cf2208 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sattributesprocessor/internal/kube/informer.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sattributesprocessor/internal/kube/informer.go @@ -17,6 +17,8 @@ import ( "k8s.io/client-go/tools/cache" ) +const kubeSystemNamespace = "kube-system" + // InformerProvider defines a function type that returns a new SharedInformer. It is used to // allow passing custom shared informers to the watch client. 
type InformerProvider func( @@ -73,6 +75,27 @@ func informerWatchFuncWithSelectors(client kubernetes.Interface, namespace strin } } +// newKubeSystemSharedInformer watches only kube-system namespace +func newKubeSystemSharedInformer( + client kubernetes.Interface, +) cache.SharedInformer { + informer := cache.NewSharedInformer( + &cache.ListWatch{ + ListFunc: func(opts metav1.ListOptions) (runtime.Object, error) { + opts.FieldSelector = fields.OneTermEqualSelector("metadata.name", kubeSystemNamespace).String() + return client.CoreV1().Namespaces().List(context.Background(), opts) + }, + WatchFunc: func(opts metav1.ListOptions) (watch.Interface, error) { + opts.FieldSelector = fields.OneTermEqualSelector("metadata.name", kubeSystemNamespace).String() + return client.CoreV1().Namespaces().Watch(context.Background(), opts) + }, + }, + &api_v1.Namespace{}, + watchSyncPeriod, + ) + return informer +} + func newNamespaceSharedInformer( client kubernetes.Interface, ) cache.SharedInformer { diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sattributesprocessor/internal/kube/kube.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sattributesprocessor/internal/kube/kube.go index 202d8ea36..6b046b599 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sattributesprocessor/internal/kube/kube.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sattributesprocessor/internal/kube/kube.go @@ -22,6 +22,7 @@ const ( tagNodeName = "k8s.node.name" tagStartTime = "k8s.pod.start_time" tagHostName = "k8s.pod.hostname" + tagClusterUID = "k8s.cluster.uid" // MetadataFromPod is used to specify to extract metadata/labels/annotations from pod MetadataFromPod = "pod" // MetadataFromNamespace is used to specify to extract metadata/labels/annotations from namespace @@ -203,6 +204,7 @@ type ExtractionRules struct { ContainerID bool ContainerImageName bool ContainerImageTag bool + ClusterUID bool Annotations []FieldExtractionRule Labels []FieldExtractionRule diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sattributesprocessor/internal/metadata/generated_config.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sattributesprocessor/internal/metadata/generated_config.go index a34a0aa67..5a56421b3 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sattributesprocessor/internal/metadata/generated_config.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sattributesprocessor/internal/metadata/generated_config.go @@ -12,6 +12,7 @@ type ResourceAttributesConfig struct { ContainerID ResourceAttributeConfig `mapstructure:"container.id"` ContainerImageName ResourceAttributeConfig `mapstructure:"container.image.name"` ContainerImageTag ResourceAttributeConfig `mapstructure:"container.image.tag"` + K8sClusterUID ResourceAttributeConfig `mapstructure:"k8s.cluster.uid"` K8sContainerName ResourceAttributeConfig `mapstructure:"k8s.container.name"` K8sCronjobName ResourceAttributeConfig `mapstructure:"k8s.cronjob.name"` K8sDaemonsetName ResourceAttributeConfig `mapstructure:"k8s.daemonset.name"` @@ -43,6 +44,9 @@ func DefaultResourceAttributesConfig() ResourceAttributesConfig { ContainerImageTag: ResourceAttributeConfig{ Enabled: true, }, + K8sClusterUID: ResourceAttributeConfig{ + Enabled: false, + }, K8sContainerName: ResourceAttributeConfig{ Enabled: 
false,
 	},
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sattributesprocessor/internal/metadata/generated_resource.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sattributesprocessor/internal/metadata/generated_resource.go
index d8981673d..2029665c5 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sattributesprocessor/internal/metadata/generated_resource.go
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sattributesprocessor/internal/metadata/generated_resource.go
@@ -42,6 +42,13 @@ func (rb *ResourceBuilder) SetContainerImageTag(val string) {
 	}
 }
 
+// SetK8sClusterUID sets provided value as "k8s.cluster.uid" attribute.
+func (rb *ResourceBuilder) SetK8sClusterUID(val string) {
+	if rb.config.K8sClusterUID.Enabled {
+		rb.res.Attributes().PutStr("k8s.cluster.uid", val)
+	}
+}
+
 // SetK8sContainerName sets provided value as "k8s.container.name" attribute.
 func (rb *ResourceBuilder) SetK8sContainerName(val string) {
 	if rb.config.K8sContainerName.Enabled {
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sattributesprocessor/metadata.yaml b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sattributesprocessor/metadata.yaml
index 58c9b5d52..33afa9a2f 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sattributesprocessor/metadata.yaml
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sattributesprocessor/metadata.yaml
@@ -9,6 +9,10 @@ status:
     active: [dmitryax, rmfitzpatrick, fatsheep9146]
 # resource attributes are exposed through a different configuration interface (extract::metadata).
 resource_attributes:
+  k8s.cluster.uid:
+    description: The cluster UID, identified by the UID of the kube-system namespace
+    type: string
+    enabled: false
   k8s.namespace.name:
     description: The name of the namespace that the pod is running in.
     type: string
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sattributesprocessor/options.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sattributesprocessor/options.go
index 75abb9b7d..1135897c9 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sattributesprocessor/options.go
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sattributesprocessor/options.go
@@ -23,6 +23,8 @@ const (
 	filterOPDoesNotExist = "does-not-exist"
 	metadataPodStartTime = "k8s.pod.start_time"
 	specPodHostName      = "k8s.pod.hostname"
+	// TODO: use k8s.cluster.uid from semconv when available, and replace clusterUID with conventions.AttributeClusterUid
+	clusterUID = "k8s.cluster.uid"
 )
 
 // option represents a configuration option that can be passes.
@@ -50,6 +52,9 @@ func withPassthrough() option {
 
 // enabledAttributes returns the list of resource attributes enabled by default.
func enabledAttributes() (attributes []string) { defaultConfig := metadata.DefaultResourceAttributesConfig() + if defaultConfig.K8sClusterUID.Enabled { + attributes = append(attributes, clusterUID) + } if defaultConfig.ContainerID.Enabled { attributes = append(attributes, conventions.AttributeContainerID) } @@ -167,6 +172,8 @@ func withExtractMetadata(fields ...string) option { p.rules.ContainerImageName = true case conventions.AttributeContainerImageTag: p.rules.ContainerImageTag = true + case clusterUID: + p.rules.ClusterUID = true default: return fmt.Errorf("\"%s\" is not a supported metadata field", field) } diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/metricstransformprocessor/config.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/metricstransformprocessor/config.go index 18983b248..b462a54a2 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/metricstransformprocessor/config.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/metricstransformprocessor/config.go @@ -4,49 +4,49 @@ package metricstransformprocessor // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/metricstransformprocessor" const ( - // IncludeFieldName is the mapstructure field name for Include field - IncludeFieldName = "include" + // includeFieldName is the mapstructure field name for Include field + includeFieldName = "include" - // MatchTypeFieldName is the mapstructure field name for MatchType field - MatchTypeFieldName = "match_type" + // matchTypeFieldName is the mapstructure field name for matchType field + matchTypeFieldName = "match_type" - // ActionFieldName is the mapstructure field name for Action field - ActionFieldName = "action" + // actionFieldName is the mapstructure field name for Action field + actionFieldName = "action" - // NewNameFieldName is the mapstructure field name for NewName field - NewNameFieldName = "new_name" + // newNameFieldName is the mapstructure field name for NewName field + newNameFieldName = "new_name" - // GroupResourceLabelsFieldName is the mapstructure field name for GroupResouceLabels field - GroupResourceLabelsFieldName = "group_resource_labels" + // groupResourceLabelsFieldName is the mapstructure field name for GroupResouceLabels field + groupResourceLabelsFieldName = "group_resource_labels" - // AggregationTypeFieldName is the mapstructure field name for AggregationType field - AggregationTypeFieldName = "aggregation_type" + // aggregationTypeFieldName is the mapstructure field name for aggregationType field + aggregationTypeFieldName = "aggregation_type" - // LabelFieldName is the mapstructure field name for Label field - LabelFieldName = "label" + // labelFieldName is the mapstructure field name for Label field + labelFieldName = "label" - // NewLabelFieldName is the mapstructure field name for NewLabel field - NewLabelFieldName = "new_label" + // newLabelFieldName is the mapstructure field name for NewLabel field + newLabelFieldName = "new_label" - // NewValueFieldName is the mapstructure field name for NewValue field - NewValueFieldName = "new_value" + // newValueFieldName is the mapstructure field name for NewValue field + newValueFieldName = "new_value" - // ScaleFieldName is the mapstructure field name for Scale field - ScaleFieldName = "experimental_scale" + // scaleFieldName is the mapstructure field name for Scale field + scaleFieldName = "experimental_scale" - // SubmatchCaseFieldName is the 
mapstructure field name for SubmatchCase field
-	SubmatchCaseFieldName = "submatch_case"
+	// submatchCaseFieldName is the mapstructure field name for submatchCase field
+	submatchCaseFieldName = "submatch_case"
 )
 
 // Config defines configuration for Resource processor.
 type Config struct {
-	// Transform specifies a list of transforms on metrics with each transform focusing on one metric.
-	Transforms []Transform `mapstructure:"transforms"`
+	// Transforms specifies a list of transforms on metrics, with each transform focusing on one metric.
+	Transforms []transform `mapstructure:"transforms"`
 }
 
-// Transform defines the transformation applied to the specific metric
-type Transform struct {
+// transform defines the transformation applied to the specific metric
+type transform struct {
 
 	// --- SPECIFY WHICH METRIC(S) TO MATCH ---
 
@@ -75,10 +75,10 @@ type Transform struct {
 
 	// AggregationType specifies how to aggregate.
 	// REQUIRED only if Action is COMBINE.
-	AggregationType AggregationType `mapstructure:"aggregation_type"`
+	AggregationType aggregationType `mapstructure:"aggregation_type"`
 
 	// SubmatchCase specifies what case to use for label values created from regexp submatches.
-	SubmatchCase SubmatchCase `mapstructure:"submatch_case"`
+	SubmatchCase submatchCase `mapstructure:"submatch_case"`
 
 	// Operations contains a list of operations that will be performed on the resulting metric(s).
 	Operations []Operation `mapstructure:"operations"`
@@ -89,7 +89,7 @@ type FilterConfig struct {
 	Include string `mapstructure:"include"`
 
 	// MatchType determines how the Include string is matched: .
-	MatchType MatchType `mapstructure:"match_type"`
+	MatchType matchType `mapstructure:"match_type"`
 
 	// MatchLabels specifies the label set against which the metric filter will work.
 	// This field is optional.
@@ -100,7 +100,7 @@ type FilterConfig struct {
 type Operation struct {
 	// Action specifies the action performed for this operation.
 	// REQUIRED
-	Action OperationAction `mapstructure:"action"`
+	Action operationAction `mapstructure:"action"`
 
 	// Label identifies the exact label to operate on.
 	Label string `mapstructure:"label"`
@@ -112,12 +112,12 @@ type Operation struct {
 	LabelSet []string `mapstructure:"label_set"`
 
 	// AggregationType specifies how to aggregate.
-	AggregationType AggregationType `mapstructure:"aggregation_type"`
+	AggregationType aggregationType `mapstructure:"aggregation_type"`
 
 	// AggregatedValues is a list of label values to aggregate away.
 	AggregatedValues []string `mapstructure:"aggregated_values"`
 
-	// NewValue is used to set a new label value either when the operation is `AggregatedValues` or `AddLabel`.
+	// NewValue is used to set a new label value either when the operation is `AggregatedValues` or `addLabel`.
 	NewValue string `mapstructure:"new_value"`
 
 	// ValueActions is a list of renaming actions for label values.
@@ -168,45 +168,45 @@ func (ca ConfigAction) isValid() bool {
 	return false
 }
 
-// OperationAction is the enum to capture the thress types of actions to perform for an operation.
-type OperationAction string
+// operationAction is the enum to capture the types of actions to perform for an operation.
+type operationAction string
 
 const (
-	// AddLabel adds a new label to an existing metric.
+	// addLabel adds a new label to an existing metric.
 	// Metric has to match the FilterConfig with all its data points if used with Update ConfigAction,
 	// otherwise the operation will be ignored.
- AddLabel OperationAction = "add_label" + addLabel operationAction = "add_label" - // UpdateLabel applies name changes to label and/or label values. - UpdateLabel OperationAction = "update_label" + // updateLabel applies name changes to label and/or label values. + updateLabel operationAction = "update_label" - // DeleteLabelValue deletes a label value by also removing all the points associated with this label value + // deleteLabelValue deletes a label value by also removing all the points associated with this label value // Metric has to match the FilterConfig with all its data points if used with Update ConfigAction, // otherwise the operation will be ignored. - DeleteLabelValue OperationAction = "delete_label_value" + deleteLabelValue operationAction = "delete_label_value" - // ToggleScalarDataType changes the data type from int64 to double, or vice-versa - ToggleScalarDataType OperationAction = "toggle_scalar_data_type" + // toggleScalarDataType changes the data type from int64 to double, or vice-versa + toggleScalarDataType operationAction = "toggle_scalar_data_type" - // ScaleValue multiplies the value by a constant scalar - ScaleValue OperationAction = "experimental_scale_value" + // scaleValue multiplies the value by a constant scalar + scaleValue operationAction = "experimental_scale_value" - // AggregateLabels aggregates away all labels other than the ones in Operation.LabelSet + // aggregateLabels aggregates away all labels other than the ones in Operation.LabelSet // by the method indicated by Operation.AggregationType. // Metric has to match the FilterConfig with all its data points if used with Update ConfigAction, // otherwise the operation will be ignored. - AggregateLabels OperationAction = "aggregate_labels" + aggregateLabels operationAction = "aggregate_labels" - // AggregateLabelValues aggregates away the values in Operation.AggregatedValues + // aggregateLabelValues aggregates away the values in Operation.AggregatedValues // by the method indicated by Operation.AggregationType. // Metric has to match the FilterConfig with all its data points if used with Update ConfigAction, // otherwise the operation will be ignored. - AggregateLabelValues OperationAction = "aggregate_label_values" + aggregateLabelValues operationAction = "aggregate_label_values" ) -var operationActions = []OperationAction{AddLabel, UpdateLabel, DeleteLabelValue, ToggleScalarDataType, ScaleValue, AggregateLabels, AggregateLabelValues} +var operationActions = []operationAction{addLabel, updateLabel, deleteLabelValue, toggleScalarDataType, scaleValue, aggregateLabels, aggregateLabelValues} -func (oa OperationAction) isValid() bool { +func (oa operationAction) isValid() bool { for _, operationAction := range operationActions { if oa == operationAction { return true @@ -216,26 +216,26 @@ func (oa OperationAction) isValid() bool { return false } -// AggregationType is the enum to capture the three types of aggregation for the aggregation operation. -type AggregationType string +// aggregationType is the enum to capture the three types of aggregation for the aggregation operation. +type aggregationType string const ( - // Sum indicates taking the sum of the aggregated data. - Sum AggregationType = "sum" + // sum indicates taking the sum of the aggregated data. + sum aggregationType = "sum" - // Mean indicates taking the mean of the aggregated data. - Mean AggregationType = "mean" + // mean indicates taking the mean of the aggregated data. 
+ mean aggregationType = "mean" - // Min indicates taking the minimum of the aggregated data. - Min AggregationType = "min" + // min indicates taking the minimum of the aggregated data. + min aggregationType = "min" - // Max indicates taking the max of the aggregated data. - Max AggregationType = "max" + // max indicates taking the max of the aggregated data. + max aggregationType = "max" ) -var aggregationTypes = []AggregationType{Sum, Mean, Min, Max} +var aggregationTypes = []aggregationType{sum, mean, min, max} -func (at AggregationType) isValid() bool { +func (at aggregationType) isValid() bool { for _, aggregationType := range aggregationTypes { if at == aggregationType { return true @@ -245,20 +245,20 @@ func (at AggregationType) isValid() bool { return false } -// MatchType is the enum to capture the two types of matching metric(s) that should have operations applied to them. -type MatchType string +// matchType is the enum to capture the two types of matching metric(s) that should have operations applied to them. +type matchType string const ( - // StrictMatchType is the FilterType for filtering by exact string matches. - StrictMatchType MatchType = "strict" + // strictMatchType is the FilterType for filtering by exact string matches. + strictMatchType matchType = "strict" - // RegexpMatchType is the FilterType for filtering by regexp string matches. - RegexpMatchType MatchType = "regexp" + // regexpMatchType is the FilterType for filtering by regexp string matches. + regexpMatchType matchType = "regexp" ) -var matchTypes = []MatchType{StrictMatchType, RegexpMatchType} +var matchTypes = []matchType{strictMatchType, regexpMatchType} -func (mt MatchType) isValid() bool { +func (mt matchType) isValid() bool { for _, matchType := range matchTypes { if mt == matchType { return true @@ -268,20 +268,20 @@ func (mt MatchType) isValid() bool { return false } -// SubmatchCase is the enum to capture the two types of case changes to apply to submatches. -type SubmatchCase string +// submatchCase is the enum to capture the two types of case changes to apply to submatches. +type submatchCase string const ( - // Lower is the SubmatchCase for lower casing the submatch. - Lower SubmatchCase = "lower" + // lower is the submatchCase for lower casing the submatch. + lower submatchCase = "lower" - // Upper is the SubmatchCase for upper casing the submatch. - Upper SubmatchCase = "upper" + // upper is the submatchCase for upper casing the submatch. + upper submatchCase = "upper" ) -var submatchCases = []SubmatchCase{Lower, Upper} +var submatchCases = []submatchCase{lower, upper} -func (sc SubmatchCase) isValid() bool { +func (sc submatchCase) isValid() bool { for _, submatchCase := range submatchCases { if sc == submatchCase { return true diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/metricstransformprocessor/factory.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/metricstransformprocessor/factory.go index dcd9e3d20..94c5681c1 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/metricstransformprocessor/factory.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/metricstransformprocessor/factory.go @@ -19,7 +19,7 @@ import ( var consumerCapabilities = consumer.Capabilities{MutatesData: true} -// NewFactory returns a new factory for the Metrics Transform processor. +// NewFactory returns a new factory for the Metrics transform processor. 
func NewFactory() processor.Factory { return processor.NewFactory( metadata.Type, @@ -62,60 +62,60 @@ func createMetricsProcessor( func validateConfiguration(config *Config) error { for _, transform := range config.Transforms { if transform.MetricIncludeFilter.Include == "" { - return fmt.Errorf("missing required field %q", IncludeFieldName) + return fmt.Errorf("missing required field %q", includeFieldName) } if transform.MetricIncludeFilter.MatchType != "" && !transform.MetricIncludeFilter.MatchType.isValid() { - return fmt.Errorf("%q must be in %q", MatchTypeFieldName, matchTypes) + return fmt.Errorf("%q must be in %q", matchTypeFieldName, matchTypes) } - if transform.MetricIncludeFilter.MatchType == RegexpMatchType { + if transform.MetricIncludeFilter.MatchType == regexpMatchType { _, err := regexp.Compile(transform.MetricIncludeFilter.Include) if err != nil { - return fmt.Errorf("%q, %w", IncludeFieldName, err) + return fmt.Errorf("%q, %w", includeFieldName, err) } } if !transform.Action.isValid() { - return fmt.Errorf("%q must be in %q", ActionFieldName, actions) + return fmt.Errorf("%q must be in %q", actionFieldName, actions) } if transform.Action == Insert && transform.NewName == "" { - return fmt.Errorf("missing required field %q while %q is %v", NewNameFieldName, ActionFieldName, Insert) + return fmt.Errorf("missing required field %q while %q is %v", newNameFieldName, actionFieldName, Insert) } if transform.Action == Group && transform.GroupResourceLabels == nil { - return fmt.Errorf("missing required field %q while %q is %v", GroupResourceLabelsFieldName, ActionFieldName, Group) + return fmt.Errorf("missing required field %q while %q is %v", groupResourceLabelsFieldName, actionFieldName, Group) } if transform.AggregationType != "" && !transform.AggregationType.isValid() { - return fmt.Errorf("%q must be in %q", AggregationTypeFieldName, aggregationTypes) + return fmt.Errorf("%q must be in %q", aggregationTypeFieldName, aggregationTypes) } if transform.SubmatchCase != "" && !transform.SubmatchCase.isValid() { - return fmt.Errorf("%q must be in %q", SubmatchCaseFieldName, submatchCases) + return fmt.Errorf("%q must be in %q", submatchCaseFieldName, submatchCases) } for i, op := range transform.Operations { if !op.Action.isValid() { - return fmt.Errorf("operation %v: %q must be in %q", i+1, ActionFieldName, operationActions) + return fmt.Errorf("operation %v: %q must be in %q", i+1, actionFieldName, operationActions) } - if op.Action == UpdateLabel && op.Label == "" { - return fmt.Errorf("operation %v: missing required field %q while %q is %v", i+1, LabelFieldName, ActionFieldName, UpdateLabel) + if op.Action == updateLabel && op.Label == "" { + return fmt.Errorf("operation %v: missing required field %q while %q is %v", i+1, labelFieldName, actionFieldName, updateLabel) } - if op.Action == AddLabel && op.NewLabel == "" { - return fmt.Errorf("operation %v: missing required field %q while %q is %v", i+1, NewLabelFieldName, ActionFieldName, AddLabel) + if op.Action == addLabel && op.NewLabel == "" { + return fmt.Errorf("operation %v: missing required field %q while %q is %v", i+1, newLabelFieldName, actionFieldName, addLabel) } - if op.Action == AddLabel && op.NewValue == "" { - return fmt.Errorf("operation %v: missing required field %q while %q is %v", i+1, NewValueFieldName, ActionFieldName, AddLabel) + if op.Action == addLabel && op.NewValue == "" { + return fmt.Errorf("operation %v: missing required field %q while %q is %v", i+1, newValueFieldName, actionFieldName, addLabel) } - 
if op.Action == ScaleValue && op.Scale == 0 { - return fmt.Errorf("operation %v: missing required field %q while %q is %v", i+1, ScaleFieldName, ActionFieldName, ScaleValue) + if op.Action == scaleValue && op.Scale == 0 { + return fmt.Errorf("operation %v: missing required field %q while %q is %v", i+1, scaleFieldName, actionFieldName, scaleValue) } if op.AggregationType != "" && !op.AggregationType.isValid() { - return fmt.Errorf("operation %v: %q must be in %q", i+1, AggregationTypeFieldName, aggregationTypes) + return fmt.Errorf("operation %v: %q must be in %q", i+1, aggregationTypeFieldName, aggregationTypes) } } } @@ -128,7 +128,7 @@ func buildHelperConfig(config *Config, version string) ([]internalTransform, err for i, t := range config.Transforms { if t.MetricIncludeFilter.MatchType == "" { - t.MetricIncludeFilter.MatchType = StrictMatchType + t.MetricIncludeFilter.MatchType = strictMatchType } filter, err := createFilter(t.MetricIncludeFilter) @@ -154,9 +154,9 @@ func buildHelperConfig(config *Config, version string) ([]internalTransform, err if len(op.ValueActions) > 0 { mtpOp.valueActionsMapping = createLabelValueMapping(op.ValueActions, version) } - if op.Action == AggregateLabels { + if op.Action == aggregateLabels { mtpOp.labelSetMap = sliceToSet(op.LabelSet) - } else if op.Action == AggregateLabelValues { + } else if op.Action == aggregateLabelValues { mtpOp.aggregatedValuesSet = sliceToSet(op.AggregatedValues) } helperT.Operations[j] = mtpOp @@ -168,13 +168,13 @@ func buildHelperConfig(config *Config, version string) ([]internalTransform, err func createFilter(filterConfig FilterConfig) (internalFilter, error) { switch filterConfig.MatchType { - case StrictMatchType: + case strictMatchType: matchers, err := getMatcherMap(filterConfig.MatchLabels, func(str string) (StringMatcher, error) { return strictMatcher(str), nil }) if err != nil { return nil, err } return internalFilterStrict{include: filterConfig.Include, attrMatchers: matchers}, nil - case RegexpMatchType: + case regexpMatchType: matchers, err := getMatcherMap(filterConfig.MatchLabels, func(str string) (StringMatcher, error) { return regexp.Compile(str) }) if err != nil { return nil, err diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/metricstransformprocessor/metrics_transform_processor.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/metricstransformprocessor/metrics_transform_processor.go index a9efeecee..d39042340 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/metricstransformprocessor/metrics_transform_processor.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/metricstransformprocessor/metrics_transform_processor.go @@ -23,8 +23,8 @@ type internalTransform struct { Action ConfigAction NewName string GroupResourceLabels map[string]string - AggregationType AggregationType - SubmatchCase SubmatchCase + AggregationType aggregationType + SubmatchCase submatchCase Operations []internalOperation } @@ -79,11 +79,11 @@ func newMetricsTransformProcessor(logger *zap.Logger, internalTransforms []inter } } -func replaceCaseOfSubmatch(replacement SubmatchCase, submatch string) string { +func replaceCaseOfSubmatch(replacement submatchCase, submatch string) string { switch replacement { - case Lower: + case lower: return strings.ToLower(submatch) - case Upper: + case upper: return strings.ToUpper(submatch) } diff --git 
a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/metricstransformprocessor/metrics_transform_processor_otlp.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/metricstransformprocessor/metrics_transform_processor_otlp.go index cf93a5263..3a6e11166 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/metricstransformprocessor/metrics_transform_processor_otlp.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/metricstransformprocessor/metrics_transform_processor_otlp.go @@ -537,25 +537,25 @@ func transformMetric(metric pmetric.Metric, transform internalTransform) bool { for _, op := range transform.Operations { switch op.configOperation.Action { - case UpdateLabel: + case updateLabel: updateLabelOp(metric, op, transform.MetricIncludeFilter) - case AggregateLabels: + case aggregateLabels: if canChangeMetric { aggregateLabelsOp(metric, op) } - case AggregateLabelValues: + case aggregateLabelValues: if canChangeMetric { aggregateLabelValuesOp(metric, op) } - case ToggleScalarDataType: + case toggleScalarDataType: toggleScalarDataTypeOp(metric, transform.MetricIncludeFilter) - case ScaleValue: + case scaleValue: scaleValueOp(metric, op, transform.MetricIncludeFilter) - case AddLabel: + case addLabel: if canChangeMetric { addLabelOp(metric, op) } - case DeleteLabelValue: + case deleteLabelValue: if canChangeMetric { deleteLabelValueOp(metric, op) } diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/metricstransformprocessor/operation_aggregate_labels.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/metricstransformprocessor/operation_aggregate_labels.go index 97f418795..dccc9ec86 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/metricstransformprocessor/operation_aggregate_labels.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/metricstransformprocessor/operation_aggregate_labels.go @@ -31,7 +31,7 @@ func aggregateLabelsOp(metric pmetric.Metric, mtpOp internalOperation) { // groupMetrics groups all the provided timeseries that will be aggregated together based on all the label values. // Returns a map of grouped timeseries and the corresponding selected labels // canBeCombined must be callled before. 
-func groupMetrics(metrics pmetric.MetricSlice, aggType AggregationType, to pmetric.Metric) { +func groupMetrics(metrics pmetric.MetricSlice, aggType aggregationType, to pmetric.Metric) { var ag aggGroups for i := 0; i < metrics.Len(); i++ { ag = groupDataPoints(metrics.At(i), ag) @@ -68,7 +68,7 @@ func groupDataPoints(metric pmetric.Metric, ag aggGroups) aggGroups { return ag } -func mergeDataPoints(to pmetric.Metric, aggType AggregationType, ag aggGroups) { +func mergeDataPoints(to pmetric.Metric, aggType aggregationType, ag aggGroups) { switch to.Type() { case pmetric.MetricTypeGauge: mergeNumberDataPoints(ag.gauge, aggType, to.Gauge().DataPoints()) @@ -153,7 +153,7 @@ func dataPointHashKey(atts pcommon.Map, ts pcommon.Timestamp, other ...interface return string(jsonStr) } -func mergeNumberDataPoints(dpsMap map[string]pmetric.NumberDataPointSlice, agg AggregationType, to pmetric.NumberDataPointSlice) { +func mergeNumberDataPoints(dpsMap map[string]pmetric.NumberDataPointSlice, agg aggregationType, to pmetric.NumberDataPointSlice) { for _, dps := range dpsMap { dp := to.AppendEmpty() dps.At(0).MoveTo(dp) @@ -161,30 +161,30 @@ func mergeNumberDataPoints(dpsMap map[string]pmetric.NumberDataPointSlice, agg A case pmetric.NumberDataPointValueTypeDouble: for i := 1; i < dps.Len(); i++ { switch agg { - case Sum, Mean: + case sum, mean: dp.SetDoubleValue(dp.DoubleValue() + doubleVal(dps.At(i))) - case Max: + case max: dp.SetDoubleValue(math.Max(dp.DoubleValue(), doubleVal(dps.At(i)))) - case Min: + case min: dp.SetDoubleValue(math.Min(dp.DoubleValue(), doubleVal(dps.At(i)))) } if dps.At(i).StartTimestamp() < dp.StartTimestamp() { dp.SetStartTimestamp(dps.At(i).StartTimestamp()) } } - if agg == Mean { + if agg == mean { dp.SetDoubleValue(dp.DoubleValue() / float64(dps.Len())) } case pmetric.NumberDataPointValueTypeInt: for i := 1; i < dps.Len(); i++ { switch agg { - case Sum, Mean: + case sum, mean: dp.SetIntValue(dp.IntValue() + dps.At(i).IntValue()) - case Max: + case max: if dp.IntValue() < intVal(dps.At(i)) { dp.SetIntValue(intVal(dps.At(i))) } - case Min: + case min: if dp.IntValue() > intVal(dps.At(i)) { dp.SetIntValue(intVal(dps.At(i))) } @@ -193,7 +193,7 @@ func mergeNumberDataPoints(dpsMap map[string]pmetric.NumberDataPointSlice, agg A dp.SetStartTimestamp(dps.At(i).StartTimestamp()) } } - if agg == Mean { + if agg == mean { dp.SetIntValue(dp.IntValue() / int64(dps.Len())) } } diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor/README.md b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor/README.md index a34a49072..7fc00ffd5 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor/README.md +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor/README.md @@ -48,6 +48,7 @@ Queries the host machine to retrieve the following resource attributes: * host.arch * host.name * host.id + * os.description * os.type By default `host.name` is being set to FQDN if possible, and a hostname provided by OS used as fallback. @@ -163,6 +164,8 @@ processors: * host.id (instance id) * host.name (instance name) * host.type (machine type) + * (optional) gcp.gce.instance.hostname + * (optional) gcp.gce.instance.name #### GKE Metadata @@ -180,7 +183,7 @@ able to determine `host.name`. 
In that case, users are encouraged to set `host.n - `node.name` through the downward API with the `env` detector - obtaining the Kubernetes node name from the Kubernetes API (with `k8s.io/client-go`) -#### Google Cloud Run Metadata +#### Google Cloud Run Services Metadata * cloud.provider ("gcp") * cloud.platform ("gcp_cloud_run") @@ -190,6 +193,17 @@ able to determine `host.name`. In that case, users are encouraged to set `host.n * faas.name (service name) * faas.version (service revision) +#### Cloud Run Jobs Metadata + + * cloud.provider ("gcp") + * cloud.platform ("gcp_cloud_run") + * cloud.account.id (project id) + * cloud.region (e.g. "us-central1") + * faas.id (instance id) + * faas.name (service name) + * gcp.cloud_run.job.execution ("my-service-ajg89") + * gcp.cloud_run.job.task_index ("0") + #### Google Cloud Functions Metadata * cloud.provider ("gcp") @@ -435,7 +449,7 @@ metadata: name: otel-collector rules: - apiGroups: ["config.openshift.io"] - resources: ["infrastructures"] + resources: ["infrastructures", "infrastructures/status"] verbs: ["get", "watch", "list"] ``` diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor/internal/gcp/gcp.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor/internal/gcp/gcp.go index 6b9e0ac74..c1e7d7418 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor/internal/gcp/gcp.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor/internal/gcp/gcp.go @@ -116,6 +116,8 @@ func (d *detector) Detect(context.Context) (resource pcommon.Resource, schemaURL d.rb.SetFromCallable(d.rb.SetHostType, d.detector.GCEHostType), d.rb.SetFromCallable(d.rb.SetHostID, d.detector.GCEHostID), d.rb.SetFromCallable(d.rb.SetHostName, d.detector.GCEHostName), + d.rb.SetFromCallable(d.rb.SetGcpGceInstanceHostname, d.detector.GCEInstanceHostname), + d.rb.SetFromCallable(d.rb.SetGcpGceInstanceName, d.detector.GCEInstanceName), ) default: // We don't support this platform yet, so just return with what we have diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor/internal/gcp/internal/metadata/generated_config.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor/internal/gcp/internal/metadata/generated_config.go index f87d7deb1..732dff567 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor/internal/gcp/internal/metadata/generated_config.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor/internal/gcp/internal/metadata/generated_config.go @@ -19,6 +19,8 @@ type ResourceAttributesConfig struct { FaasVersion ResourceAttributeConfig `mapstructure:"faas.version"` GcpCloudRunJobExecution ResourceAttributeConfig `mapstructure:"gcp.cloud_run.job.execution"` GcpCloudRunJobTaskIndex ResourceAttributeConfig `mapstructure:"gcp.cloud_run.job.task_index"` + GcpGceInstanceHostname ResourceAttributeConfig `mapstructure:"gcp.gce.instance.hostname"` + GcpGceInstanceName ResourceAttributeConfig `mapstructure:"gcp.gce.instance.name"` HostID ResourceAttributeConfig `mapstructure:"host.id"` HostName ResourceAttributeConfig `mapstructure:"host.name"` HostType ResourceAttributeConfig `mapstructure:"host.type"` @@ -57,6 +59,12 @@ func 
DefaultResourceAttributesConfig() ResourceAttributesConfig { GcpCloudRunJobTaskIndex: ResourceAttributeConfig{ Enabled: true, }, + GcpGceInstanceHostname: ResourceAttributeConfig{ + Enabled: false, + }, + GcpGceInstanceName: ResourceAttributeConfig{ + Enabled: false, + }, HostID: ResourceAttributeConfig{ Enabled: true, }, diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor/internal/gcp/internal/metadata/generated_resource.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor/internal/gcp/internal/metadata/generated_resource.go index a1ebeade5..6059deb79 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor/internal/gcp/internal/metadata/generated_resource.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor/internal/gcp/internal/metadata/generated_resource.go @@ -91,6 +91,20 @@ func (rb *ResourceBuilder) SetGcpCloudRunJobTaskIndex(val string) { } } +// SetGcpGceInstanceHostname sets provided value as "gcp.gce.instance.hostname" attribute. +func (rb *ResourceBuilder) SetGcpGceInstanceHostname(val string) { + if rb.config.GcpGceInstanceHostname.Enabled { + rb.res.Attributes().PutStr("gcp.gce.instance.hostname", val) + } +} + +// SetGcpGceInstanceName sets provided value as "gcp.gce.instance.name" attribute. +func (rb *ResourceBuilder) SetGcpGceInstanceName(val string) { + if rb.config.GcpGceInstanceName.Enabled { + rb.res.Attributes().PutStr("gcp.gce.instance.name", val) + } +} + // SetHostID sets provided value as "host.id" attribute. func (rb *ResourceBuilder) SetHostID(val string) { if rb.config.HostID.Enabled { diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor/internal/gcp/metadata.yaml b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor/internal/gcp/metadata.yaml index 764438932..4cfe81b2b 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor/internal/gcp/metadata.yaml +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor/internal/gcp/metadata.yaml @@ -58,4 +58,12 @@ resource_attributes: gcp.cloud_run.job.task_index: description: The Job execution task index type: string - enabled: true \ No newline at end of file + enabled: true + gcp.gce.instance.name: + description: The name of the GCE instance. + type: string + enabled: false + gcp.gce.instance.hostname: + description: The hostname of the GCE instance. 
+ type: string + enabled: false diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor/internal/gcp/types.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor/internal/gcp/types.go index 5982425c1..a4bcb4e2b 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor/internal/gcp/types.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor/internal/gcp/types.go @@ -31,4 +31,6 @@ type gcpDetector interface { GCEHostName() (string, error) CloudRunJobExecution() (string, error) CloudRunJobTaskIndex() (string, error) + GCEInstanceHostname() (string, error) + GCEInstanceName() (string, error) } diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor/internal/heroku/heroku.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor/internal/heroku/heroku.go index a2ba9f4de..968c18dd0 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor/internal/heroku/heroku.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor/internal/heroku/heroku.go @@ -37,16 +37,29 @@ type detector struct { // Detect detects heroku metadata and returns a resource with the available ones func (d *detector) Detect(_ context.Context) (resource pcommon.Resource, schemaURL string, err error) { - dynoID, ok := os.LookupEnv("HEROKU_DYNO_ID") - if !ok { - d.logger.Debug("heroku metadata unavailable", zap.Error(err)) - return pcommon.NewResource(), "", nil + dynoIDMissing := false + if dynoID, ok := os.LookupEnv("HEROKU_DYNO_ID"); ok { + d.rb.SetServiceInstanceID(dynoID) + } else { + dynoIDMissing = true } - d.rb.SetCloudProvider("heroku") - d.rb.SetServiceInstanceID(dynoID) + herokuAppIDMissing := false if v, ok := os.LookupEnv("HEROKU_APP_ID"); ok { d.rb.SetHerokuAppID(v) + } else { + herokuAppIDMissing = true + } + if dynoIDMissing { + if herokuAppIDMissing { + d.logger.Debug("Heroku metadata is missing. Please check metadata is enabled.") + } else { + // some heroku deployments will enable some of the metadata. + d.logger.Debug("Partial Heroku metadata is missing. Please check metadata is supported.") + } + } + if !herokuAppIDMissing { + d.rb.SetCloudProvider("heroku") } if v, ok := os.LookupEnv("HEROKU_APP_NAME"); ok { d.rb.SetServiceName(v) diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor/internal/system/internal/metadata/generated_config.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor/internal/system/internal/metadata/generated_config.go index 1b35a1578..3abbed5b3 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor/internal/system/internal/metadata/generated_config.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor/internal/system/internal/metadata/generated_config.go @@ -9,10 +9,11 @@ type ResourceAttributeConfig struct { // ResourceAttributesConfig provides config for resourcedetectionprocessor/system resource attributes. 
type ResourceAttributesConfig struct { - HostArch ResourceAttributeConfig `mapstructure:"host.arch"` - HostID ResourceAttributeConfig `mapstructure:"host.id"` - HostName ResourceAttributeConfig `mapstructure:"host.name"` - OsType ResourceAttributeConfig `mapstructure:"os.type"` + HostArch ResourceAttributeConfig `mapstructure:"host.arch"` + HostID ResourceAttributeConfig `mapstructure:"host.id"` + HostName ResourceAttributeConfig `mapstructure:"host.name"` + OsDescription ResourceAttributeConfig `mapstructure:"os.description"` + OsType ResourceAttributeConfig `mapstructure:"os.type"` } func DefaultResourceAttributesConfig() ResourceAttributesConfig { @@ -26,6 +27,9 @@ func DefaultResourceAttributesConfig() ResourceAttributesConfig { HostName: ResourceAttributeConfig{ Enabled: true, }, + OsDescription: ResourceAttributeConfig{ + Enabled: false, + }, OsType: ResourceAttributeConfig{ Enabled: true, }, diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor/internal/system/internal/metadata/generated_resource.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor/internal/system/internal/metadata/generated_resource.go index 4c3acdf9e..b28dc2be6 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor/internal/system/internal/metadata/generated_resource.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor/internal/system/internal/metadata/generated_resource.go @@ -42,6 +42,13 @@ func (rb *ResourceBuilder) SetHostName(val string) { } } +// SetOsDescription sets provided value as "os.description" attribute. +func (rb *ResourceBuilder) SetOsDescription(val string) { + if rb.config.OsDescription.Enabled { + rb.res.Attributes().PutStr("os.description", val) + } +} + // SetOsType sets provided value as "os.type" attribute. func (rb *ResourceBuilder) SetOsType(val string) { if rb.config.OsType.Enabled { diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor/internal/system/metadata.yaml b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor/internal/system/metadata.yaml index 943beea7f..f34204318 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor/internal/system/metadata.yaml +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor/internal/system/metadata.yaml @@ -11,6 +11,10 @@ resource_attributes: description: The host.id type: string enabled: false + os.description: + description: Human readable OS version information. 
+ type: string + enabled: false os.type: description: The os.type type: string diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor/internal/system/system.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor/internal/system/system.go index 60f698f55..0366b6288 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor/internal/system/system.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor/internal/system/system.go @@ -34,10 +34,10 @@ var _ internal.Detector = (*Detector)(nil) // Detector is a system metadata detector type Detector struct { - provider system.Provider - logger *zap.Logger - hostnameSources []string - rb *metadata.ResourceBuilder + provider system.Provider + logger *zap.Logger + cfg Config + rb *metadata.ResourceBuilder } // NewDetector creates a new system metadata detector @@ -48,10 +48,10 @@ func NewDetector(p processor.CreateSettings, dcfg internal.DetectorConfig) (inte } return &Detector{ - provider: system.NewProvider(), - logger: p.Logger, - hostnameSources: cfg.HostnameSources, - rb: metadata.NewResourceBuilder(cfg.ResourceAttributes), + provider: system.NewProvider(), + logger: p.Logger, + cfg: cfg, + rb: metadata.NewResourceBuilder(cfg.ResourceAttributes), }, nil } @@ -64,24 +64,31 @@ func (d *Detector) Detect(ctx context.Context) (resource pcommon.Resource, schem return pcommon.NewResource(), "", fmt.Errorf("failed getting OS type: %w", err) } - hostID, err := d.provider.HostID(ctx) + hostArch, err := d.provider.HostArch() if err != nil { - return pcommon.NewResource(), "", fmt.Errorf("failed getting host ID: %w", err) + return pcommon.NewResource(), "", fmt.Errorf("failed getting host architecture: %w", err) } - hostArch, err := d.provider.HostArch() + osDescription, err := d.provider.OSDescription(ctx) if err != nil { - return pcommon.NewResource(), "", fmt.Errorf("failed getting host architecture: %w", err) + return pcommon.NewResource(), "", fmt.Errorf("failed getting OS description: %w", err) } - for _, source := range d.hostnameSources { + for _, source := range d.cfg.HostnameSources { getHostFromSource := hostnameSourcesMap[source] hostname, err = getHostFromSource(d) if err == nil { d.rb.SetHostName(hostname) d.rb.SetOsType(osType) - d.rb.SetHostID(hostID) + if d.cfg.ResourceAttributes.HostID.Enabled { + if hostID, hostIDErr := d.provider.HostID(ctx); hostIDErr == nil { + d.rb.SetHostID(hostID) + } else { + d.logger.Warn("failed to get host ID", zap.Error(hostIDErr)) + } + } d.rb.SetHostArch(hostArch) + d.rb.SetOsDescription(osDescription) return d.rb.Emit(), conventions.SchemaURL, nil } d.logger.Debug(err.Error()) diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor/and_helper.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor/and_helper.go index 5afaaea5f..4577c8c49 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor/and_helper.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor/and_helper.go @@ -10,14 +10,14 @@ import ( ) func getNewAndPolicy(settings component.TelemetrySettings, config *AndCfg) (sampling.PolicyEvaluator, error) { - var subPolicyEvaluators []sampling.PolicyEvaluator + subPolicyEvaluators := 
make([]sampling.PolicyEvaluator, len(config.SubPolicyCfg)) for i := range config.SubPolicyCfg { policyCfg := &config.SubPolicyCfg[i] policy, err := getAndSubPolicyEvaluator(settings, policyCfg) if err != nil { return nil, err } - subPolicyEvaluators = append(subPolicyEvaluators, policy) + subPolicyEvaluators[i] = policy } return sampling.NewAnd(settings.Logger, subPolicyEvaluators), nil } diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor/composite_helper.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor/composite_helper.go index cff0e3e9f..769bfcf95 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor/composite_helper.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor/composite_helper.go @@ -10,7 +10,7 @@ import ( ) func getNewCompositePolicy(settings component.TelemetrySettings, config *CompositeCfg) (sampling.PolicyEvaluator, error) { - var subPolicyEvalParams []sampling.SubPolicyEvalParams + subPolicyEvalParams := make([]sampling.SubPolicyEvalParams, len(config.SubPolicyCfg)) rateAllocationsMap := getRateAllocationMap(config) for i := range config.SubPolicyCfg { policyCfg := &config.SubPolicyCfg[i] @@ -23,7 +23,7 @@ func getNewCompositePolicy(settings component.TelemetrySettings, config *Composi Evaluator: policy, MaxSpansPerSecond: int64(rateAllocationsMap[policyCfg.Name]), } - subPolicyEvalParams = append(subPolicyEvalParams, evalParams) + subPolicyEvalParams[i] = evalParams } return sampling.NewComposite(settings.Logger, config.MaxTotalSpansPerSecond, subPolicyEvalParams, sampling.MonotonicClock{}), nil } diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor/processor.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor/processor.go index f2c55f956..8d22af8b6 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor/processor.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor/processor.go @@ -69,7 +69,7 @@ func newTracesProcessor(ctx context.Context, settings component.TelemetrySetting return nil, err } - var policies []*policy + policies := make([]*policy, len(cfg.PolicyCfgs)) for i := range cfg.PolicyCfgs { policyCfg := &cfg.PolicyCfgs[i] policyCtx, err := tag.New(ctx, tag.Upsert(tagPolicyKey, policyCfg.Name), tag.Upsert(tagSourceFormat, sourceFormat)) @@ -85,7 +85,7 @@ func newTracesProcessor(ctx context.Context, settings component.TelemetrySetting evaluator: eval, ctx: policyCtx, } - policies = append(policies, p) + policies[i] = p } tsp := &tailSamplingSpanProcessor{ diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver/internal/cadvisor/cadvisor_linux.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver/internal/cadvisor/cadvisor_linux.go index 43471e84b..7411bf6cd 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver/internal/cadvisor/cadvisor_linux.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver/internal/cadvisor/cadvisor_linux.go @@ -110,6 +110,7 @@ type EcsInfo interface { type Decorator interface { 
Decorate(*extractors.CAdvisorMetric) *extractors.CAdvisorMetric + Shutdown() error } type Cadvisor struct { @@ -164,6 +165,18 @@ func GetMetricsExtractors() []extractors.MetricExtractor { return metricsExtractors } +func (c *Cadvisor) Shutdown() error { + var errs error + for _, ext := range metricsExtractors { + errs = errors.Join(errs, ext.Shutdown()) + } + + if c.k8sDecorator != nil { + errs = errors.Join(errs, c.k8sDecorator.Shutdown()) + } + return errs +} + func (c *Cadvisor) addEbsVolumeInfo(tags map[string]string, ebsVolumeIdsUsedAsPV map[string]string) { deviceName, ok := tags[ci.DiskDev] if !ok { diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver/internal/cadvisor/cadvisor_nolinux.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver/internal/cadvisor/cadvisor_nolinux.go index 224df6287..4e7871dbb 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver/internal/cadvisor/cadvisor_nolinux.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver/internal/cadvisor/cadvisor_nolinux.go @@ -27,6 +27,7 @@ type Cadvisor struct { type Decorator interface { Decorate(*extractors.CAdvisorMetric) *extractors.CAdvisorMetric + Shutdown() error } // Option is a function that can be used to configure Cadvisor struct @@ -54,3 +55,7 @@ func New(_ string, _ HostInfo, _ *zap.Logger, _ ...Option) (*Cadvisor, error) { func (c *Cadvisor) GetMetrics() []pmetric.Metrics { return []pmetric.Metrics{} } + +func (c *Cadvisor) Shutdown() error { + return nil +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver/internal/cadvisor/extractors/cpu_extractor.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver/internal/cadvisor/extractors/cpu_extractor.go index 7b2381bb0..e2b8851d6 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver/internal/cadvisor/extractors/cpu_extractor.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver/internal/cadvisor/extractors/cpu_extractor.go @@ -53,6 +53,10 @@ func (c *CPUMetricExtractor) GetValue(info *cInfo.ContainerInfo, mInfo CPUMemInf return metrics } +func (c *CPUMetricExtractor) Shutdown() error { + return c.rateCalculator.Shutdown() +} + func NewCPUMetricExtractor(logger *zap.Logger) *CPUMetricExtractor { return &CPUMetricExtractor{ logger: logger, diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver/internal/cadvisor/extractors/diskio_extractor.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver/internal/cadvisor/extractors/diskio_extractor.go index 9bb0d1e9c..384acf0e5 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver/internal/cadvisor/extractors/diskio_extractor.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver/internal/cadvisor/extractors/diskio_extractor.go @@ -56,6 +56,10 @@ func (d *DiskIOMetricExtractor) extractIoMetrics(curStatsSet []cInfo.PerDiskStat return metrics } +func (d *DiskIOMetricExtractor) Shutdown() error { + return d.rateCalculator.Shutdown() +} + func 
ioMetricName(prefix, key string) string { return prefix + strings.ToLower(key) } diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver/internal/cadvisor/extractors/extractor.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver/internal/cadvisor/extractors/extractor.go index b80d67e06..559ee5481 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver/internal/cadvisor/extractors/extractor.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver/internal/cadvisor/extractors/extractor.go @@ -30,6 +30,7 @@ type CPUMemInfoProvider interface { type MetricExtractor interface { HasValue(*cinfo.ContainerInfo) bool GetValue(info *cinfo.ContainerInfo, mInfo CPUMemInfoProvider, containerType string) []*CAdvisorMetric + Shutdown() error } type CAdvisorMetric struct { @@ -136,7 +137,7 @@ func assignRateValueToField(rateCalculator *awsmetrics.MetricCalculator, fields // MergeMetrics merges an array of cadvisor metrics based on common metric keys func MergeMetrics(metrics []*CAdvisorMetric) []*CAdvisorMetric { - var result []*CAdvisorMetric + result := make([]*CAdvisorMetric, 0, len(metrics)) metricMap := make(map[string]*CAdvisorMetric) for _, metric := range metrics { if metricKey := getMetricKey(metric); metricKey != "" { @@ -159,7 +160,7 @@ func MergeMetrics(metrics []*CAdvisorMetric) []*CAdvisorMetric { // return MetricKey for merge-able metrics func getMetricKey(metric *CAdvisorMetric) string { metricType := metric.GetMetricType() - metricKey := "" + var metricKey string switch metricType { case ci.TypeInstance: // merge cpu, memory, net metric for type Instance diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver/internal/cadvisor/extractors/fs_extractor.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver/internal/cadvisor/extractors/fs_extractor.go index 9ff3cfe52..6e2f888b4 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver/internal/cadvisor/extractors/fs_extractor.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver/internal/cadvisor/extractors/fs_extractor.go @@ -24,13 +24,13 @@ func (f *FileSystemMetricExtractor) HasValue(info *cinfo.ContainerInfo) bool { } func (f *FileSystemMetricExtractor) GetValue(info *cinfo.ContainerInfo, _ CPUMemInfoProvider, containerType string) []*CAdvisorMetric { - var metrics []*CAdvisorMetric if containerType == ci.TypePod || containerType == ci.TypeInfraContainer { - return metrics + return nil } containerType = getFSMetricType(containerType, f.logger) stats := GetStats(info) + metrics := make([]*CAdvisorMetric, 0, len(stats.Filesystem)) for _, v := range stats.Filesystem { metric := newCadvisorMetric(containerType, f.logger) @@ -63,6 +63,10 @@ func (f *FileSystemMetricExtractor) GetValue(info *cinfo.ContainerInfo, _ CPUMem return metrics } +func (f *FileSystemMetricExtractor) Shutdown() error { + return nil +} + func NewFileSystemMetricExtractor(logger *zap.Logger) *FileSystemMetricExtractor { fse := &FileSystemMetricExtractor{ logger: logger, diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver/internal/cadvisor/extractors/mem_extractor.go 
b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver/internal/cadvisor/extractors/mem_extractor.go index 40751481a..e5205c3cc 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver/internal/cadvisor/extractors/mem_extractor.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver/internal/cadvisor/extractors/mem_extractor.go @@ -64,6 +64,10 @@ func (m *MemMetricExtractor) GetValue(info *cinfo.ContainerInfo, mInfo CPUMemInf return metrics } +func (m *MemMetricExtractor) Shutdown() error { + return m.rateCalculator.Shutdown() +} + func NewMemMetricExtractor(logger *zap.Logger) *MemMetricExtractor { return &MemMetricExtractor{ logger: logger, diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver/internal/cadvisor/extractors/net_extractor.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver/internal/cadvisor/extractors/net_extractor.go index 50308b96e..35df5aea6 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver/internal/cadvisor/extractors/net_extractor.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver/internal/cadvisor/extractors/net_extractor.go @@ -31,11 +31,10 @@ func (n *NetMetricExtractor) HasValue(info *cinfo.ContainerInfo) bool { } func (n *NetMetricExtractor) GetValue(info *cinfo.ContainerInfo, _ CPUMemInfoProvider, containerType string) []*CAdvisorMetric { - var metrics []*CAdvisorMetric // Just a protection here, there is no Container level Net metrics if containerType == ci.TypePod || containerType == ci.TypeContainer { - return metrics + return nil } // Rename type to pod so the metric name prefix is pod_ @@ -47,9 +46,10 @@ func (n *NetMetricExtractor) GetValue(info *cinfo.ContainerInfo, _ CPUMemInfoPro curIfceStats := getInterfacesStats(curStats) // used for aggregation - var netIfceMetrics []map[string]interface{} + netIfceMetrics := make([]map[string]interface{}, len(curIfceStats)) + metrics := make([]*CAdvisorMetric, len(curIfceStats)) - for _, cur := range curIfceStats { + for i, cur := range curIfceStats { mType := getNetMetricType(containerType, n.logger) netIfceMetric := make(map[string]interface{}) @@ -68,7 +68,7 @@ func (n *NetMetricExtractor) GetValue(info *cinfo.ContainerInfo, _ CPUMemInfoPro netIfceMetric[ci.NetTotalBytes] = netIfceMetric[ci.NetRxBytes].(float64) + netIfceMetric[ci.NetTxBytes].(float64) } - netIfceMetrics = append(netIfceMetrics, netIfceMetric) + netIfceMetrics[i] = netIfceMetric metric := newCadvisorMetric(mType, n.logger) metric.tags[ci.NetIfce] = cur.Name @@ -76,7 +76,7 @@ func (n *NetMetricExtractor) GetValue(info *cinfo.ContainerInfo, _ CPUMemInfoPro metric.fields[ci.MetricName(mType, k)] = v } - metrics = append(metrics, metric) + metrics[i] = metric } aggregatedFields := ci.SumFields(netIfceMetrics) @@ -91,6 +91,10 @@ func (n *NetMetricExtractor) GetValue(info *cinfo.ContainerInfo, _ CPUMemInfoPro return metrics } +func (n *NetMetricExtractor) Shutdown() error { + return n.rateCalculator.Shutdown() +} + func NewNetMetricExtractor(logger *zap.Logger) *NetMetricExtractor { return &NetMetricExtractor{ logger: logger, diff --git 
a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver/internal/k8sapiserver/k8sapiserver.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver/internal/k8sapiserver/k8sapiserver.go index 007c76101..6462c5cc5 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver/internal/k8sapiserver/k8sapiserver.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver/internal/k8sapiserver/k8sapiserver.go @@ -238,10 +238,11 @@ func (k *K8sAPIServer) init() error { } // Shutdown stops the k8sApiServer -func (k *K8sAPIServer) Shutdown() { +func (k *K8sAPIServer) Shutdown() error { if k.cancel != nil { k.cancel() } + return nil } func (k *K8sAPIServer) startLeaderElection(ctx context.Context, lock resourcelock.Interface) { diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver/internal/stores/podstore.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver/internal/stores/podstore.go index 53c410ac4..9c587f956 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver/internal/stores/podstore.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver/internal/stores/podstore.go @@ -59,6 +59,8 @@ type mapWithExpiry struct { } func (m *mapWithExpiry) Get(key string) (interface{}, bool) { + m.MapWithExpiry.Lock() + defer m.MapWithExpiry.Unlock() if val, ok := m.MapWithExpiry.Get(awsmetrics.NewKey(key, nil)); ok { return val.RawValue, ok } @@ -67,6 +69,8 @@ func (m *mapWithExpiry) Get(key string) (interface{}, bool) { } func (m *mapWithExpiry) Set(key string, content interface{}) { + m.MapWithExpiry.Lock() + defer m.MapWithExpiry.Unlock() val := awsmetrics.MetricValue{ RawValue: content, Timestamp: time.Now(), @@ -131,6 +135,17 @@ func NewPodStore(hostIP string, prefFullPodName bool, addFullPodNameMetricLabel return podStore, nil } +func (p *PodStore) Shutdown() error { + var errs error + errs = p.cache.Shutdown() + for _, maps := range p.prevMeasurements { + if prevMeasErr := maps.Shutdown(); prevMeasErr != nil { + errs = errors.Join(errs, prevMeasErr) + } + } + return errs +} + func (p *PodStore) getPrevMeasurement(metricType, metricKey string) (interface{}, bool) { prevMeasurement, ok := p.prevMeasurements[metricType] if !ok { @@ -164,8 +179,6 @@ func (p *PodStore) RefreshTick(ctx context.Context) { now := time.Now() if now.Sub(p.lastRefreshed) >= refreshInterval { p.refresh(ctx, now) - // call cleanup every refresh cycle - p.cleanup(now) p.lastRefreshed = now } } @@ -239,16 +252,6 @@ func (p *PodStore) refresh(ctx context.Context, now time.Time) { p.refreshInternal(now, podList) } -func (p *PodStore) cleanup(now time.Time) { - for _, prevMeasurement := range p.prevMeasurements { - prevMeasurement.CleanUp(now) - } - - p.Lock() - defer p.Unlock() - p.cache.CleanUp(now) -} - func (p *PodStore) refreshInternal(now time.Time, podList []corev1.Pod) { var podCount int var containerCount int diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver/internal/stores/store.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver/internal/stores/store.go index 890d07e0a..2e4754660 100644 
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver/internal/stores/store.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver/internal/stores/store.go @@ -40,6 +40,8 @@ type K8sDecorator struct { // The K8sStore (e.g. podstore) does network request in Decorate function, thus needs to take a context // object for canceling the request ctx context.Context + // the pod store needs to be saved here because it is stateful and needs to be shut down. + podStore *PodStore } func NewK8sDecorator(ctx context.Context, tagService bool, prefFullPodName bool, addFullPodNameMetricLabel bool, logger *zap.Logger) (*K8sDecorator, error) { @@ -53,9 +55,11 @@ func NewK8sDecorator(ctx context.Context, tagService bool, prefFullPodName bool, } podstore, err := NewPodStore(hostIP, prefFullPodName, addFullPodNameMetricLabel, logger) + if err != nil { return nil, err } + k.podStore = podstore k.stores = append(k.stores, podstore) if tagService { @@ -97,3 +101,7 @@ func (k *K8sDecorator) Decorate(metric *extractors.CAdvisorMetric) *extractors.C TagMetricSource(metric) return metric } + +func (k *K8sDecorator) Shutdown() error { + return k.podStore.Shutdown() +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver/receiver.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver/receiver.go index 0c3b01e3a..d192977a1 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver/receiver.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver/receiver.go @@ -26,6 +26,7 @@ var _ receiver.Metrics = (*awsContainerInsightReceiver)(nil) type metricsProvider interface { GetMetrics() []pmetric.Metrics + Shutdown() error } // awsContainerInsightReceiver implements the receiver.Metrics @@ -125,7 +126,18 @@ func (acir *awsContainerInsightReceiver) Shutdown(context.Context) error { return nil } acir.cancel() - return nil + + var errs error + + if acir.k8sapiserver != nil { + errs = errors.Join(errs, acir.k8sapiserver.Shutdown()) + } + if acir.cadvisor != nil { + errs = errors.Join(errs, acir.cadvisor.Shutdown()) + } + + return errs + } // collectData collects container stats from Amazon ECS Task Metadata Endpoint diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awsxrayreceiver/receiver.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awsxrayreceiver/receiver.go index d815c2e54..458e81c1d 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awsxrayreceiver/receiver.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awsxrayreceiver/receiver.go @@ -5,13 +5,13 @@ package awsxrayreceiver // import "github.com/open-telemetry/opentelemetry-colle import ( "context" + "errors" "fmt" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/obsreport" "go.opentelemetry.io/collector/receiver" - "go.uber.org/multierr" "go.uber.org/zap" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/proxy" @@ -102,7 +102,7 @@ func (x *xrayReceiver) Shutdown(ctx context.Context) error { } if proxyErr := x.server.Shutdown(ctx); proxyErr != nil { - err = multierr.Append(err, fmt.Errorf("failed to close proxy: %w", 
proxyErr)) + err = errors.Join(err, fmt.Errorf("failed to close proxy: %w", proxyErr)) } return err } diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver/kafka_receiver.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver/kafka_receiver.go index 5f76e5295..064588803 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver/kafka_receiver.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver/kafka_receiver.go @@ -9,7 +9,7 @@ import ( "strings" "sync" - "github.com/Shopify/sarama" + "github.com/IBM/sarama" "go.opencensus.io/stats" "go.opencensus.io/tag" "go.opentelemetry.io/collector/component" @@ -498,7 +498,7 @@ func (c *tracesConsumerGroupHandler) ConsumeClaim(session sarama.ConsumerGroupSe // Should return when `session.Context()` is done. // If not, will raise `ErrRebalanceInProgress` or `read tcp :: i/o timeout` when kafka rebalance. see: - // https://github.com/Shopify/sarama/issues/1192 + // https://github.com/IBM/sarama/issues/1192 case <-session.Context().Done(): return nil } @@ -573,7 +573,7 @@ func (c *metricsConsumerGroupHandler) ConsumeClaim(session sarama.ConsumerGroupS // Should return when `session.Context()` is done. // If not, will raise `ErrRebalanceInProgress` or `read tcp :: i/o timeout` when kafka rebalance. see: - // https://github.com/Shopify/sarama/issues/1192 + // https://github.com/IBM/sarama/issues/1192 case <-session.Context().Done(): return nil } @@ -653,7 +653,7 @@ func (c *logsConsumerGroupHandler) ConsumeClaim(session sarama.ConsumerGroupSess // Should return when `session.Context()` is done. // If not, will raise `ErrRebalanceInProgress` or `read tcp :: i/o timeout` when kafka rebalance. 
see: - // https://github.com/Shopify/sarama/issues/1192 + // https://github.com/IBM/sarama/issues/1192 case <-session.Context().Done(): return nil } diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver/README.md b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver/README.md index 5a3e59535..bc183ef99 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver/README.md +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver/README.md @@ -72,6 +72,8 @@ prometheus --config.file=prom.yaml "--feature-gates=receiver.prometheusreceiver.UseCreatedMetric" ``` +- `report_extra_scrape_metrics`: Extra Prometheus scrape metrics can be reported by setting this parameter to `true` + You can copy and paste that same configuration under: ```yaml diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver/config.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver/config.go index 699452d8f..28f224b7b 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver/config.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver/config.go @@ -43,6 +43,9 @@ type Config struct { UseStartTimeMetric bool `mapstructure:"use_start_time_metric"` StartTimeMetricRegex string `mapstructure:"start_time_metric_regex"` + // ReportExtraScrapeMetrics - enables reporting of additional metrics for Prometheus client like scrape_body_size_bytes + ReportExtraScrapeMetrics bool `mapstructure:"report_extra_scrape_metrics"` + TargetAllocator *targetAllocator `mapstructure:"target_allocator"` // ConfigPlaceholder is just an entry to make the configuration pass a check diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver/internal/metricfamily.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver/internal/metricfamily.go index 71ebcaef8..85c764cb4 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver/internal/metricfamily.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver/internal/metricfamily.go @@ -87,27 +87,28 @@ func (mg *metricGroup) sortPoints() { } func (mg *metricGroup) toDistributionPoint(dest pmetric.HistogramDataPointSlice) { - if !mg.hasCount || len(mg.complexValue) == 0 { + if !mg.hasCount { return } mg.sortPoints() + bucketCount := len(mg.complexValue) + 1 + // if the final bucket is +Inf, we ignore it + if bucketCount > 1 && mg.complexValue[bucketCount-2].boundary == math.Inf(1) { + bucketCount-- + } + // for OTLP the bounds won't include +inf - bounds := make([]float64, len(mg.complexValue)-1) - bucketCounts := make([]uint64, len(mg.complexValue)) + bounds := make([]float64, bucketCount-1) + bucketCounts := make([]uint64, bucketCount) + var adjustedCount float64 pointIsStale := value.IsStaleNaN(mg.sum) || value.IsStaleNaN(mg.count) + for i := 0; i < bucketCount-1; i++ { + bounds[i] = mg.complexValue[i].boundary + adjustedCount = mg.complexValue[i].value - for i := 0; i < len(mg.complexValue); i++ { - if i != len(mg.complexValue)-1 { - // not need to add +inf as OTLP assumes it - bounds[i] = mg.complexValue[i].boundary - } else if mg.complexValue[i].boundary != 
math.Inf(1) { - // This histogram is missing the +Inf bucket, and isn't a complete prometheus histogram. - return - } - adjustedCount := mg.complexValue[i].value // Buckets still need to be sent to know to set them as stale, // but a staleness NaN converted to uint64 would be an extremely large number. // Setting to 0 instead. @@ -119,6 +120,15 @@ func (mg *metricGroup) toDistributionPoint(dest pmetric.HistogramDataPointSlice) bucketCounts[i] = uint64(adjustedCount) } + // Add the final bucket based on the total count + adjustedCount = mg.count + if pointIsStale { + adjustedCount = 0 + } else if bucketCount > 1 { + adjustedCount -= mg.complexValue[bucketCount-2].value + } + bucketCounts[bucketCount-1] = uint64(adjustedCount) + point := dest.AppendEmpty() if pointIsStale { diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver/metrics_receiver.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver/metrics_receiver.go index 70b1c6af8..4ff129637 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver/metrics_receiver.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver/metrics_receiver.go @@ -265,8 +265,10 @@ func (r *pReceiver) initPrometheusComponents(ctx context.Context, host component if err != nil { return err } + r.scrapeManager = scrape.NewManager(&scrape.Options{ PassMetadataInContext: true, + ExtraMetrics: r.cfg.ReportExtraScrapeMetrics, HTTPClientOptions: []commonconfig.HTTPClientOption{ commonconfig.WithUserAgent(r.settings.BuildInfo.Command + "/" + r.settings.BuildInfo.Version), }, diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/statsdreceiver/config.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/statsdreceiver/config.go index 920e812c6..67a92b493 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/statsdreceiver/config.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/statsdreceiver/config.go @@ -11,7 +11,7 @@ import ( "go.opentelemetry.io/collector/config/confignet" "go.uber.org/multierr" - "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/statsdreceiver/protocol" + "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/statsdreceiver/internal/protocol" ) // Config defines configuration for StatsD receiver. 
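For reference, a minimal sketch of a collector configuration exercising the `report_extra_scrape_metrics` option wired up in the prometheusreceiver changes above; the job name, scrape interval, and target below are illustrative placeholders, not taken from this patch:

```yaml
receivers:
  prometheus:
    # New top-level option added in config.go above; when true, the scrape
    # manager reports extra scrape metrics such as scrape_body_size_bytes.
    report_extra_scrape_metrics: true
    config:
      scrape_configs:
        # Illustrative job; any valid Prometheus scrape_config works here.
        - job_name: otel-collector
          scrape_interval: 10s
          static_configs:
            - targets: ["0.0.0.0:8888"]
```
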
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/statsdreceiver/factory.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/statsdreceiver/factory.go index f851ed426..a83cc2255 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/statsdreceiver/factory.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/statsdreceiver/factory.go @@ -13,7 +13,7 @@ import ( "go.opentelemetry.io/collector/receiver" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/statsdreceiver/internal/metadata" - "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/statsdreceiver/protocol" + "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/statsdreceiver/internal/protocol" ) const ( diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/statsdreceiver/protocol/metric_translator.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/statsdreceiver/internal/protocol/metric_translator.go similarity index 98% rename from vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/statsdreceiver/protocol/metric_translator.go rename to vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/statsdreceiver/internal/protocol/metric_translator.go index 4bf28ef19..9dc7e7573 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/statsdreceiver/protocol/metric_translator.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/statsdreceiver/internal/protocol/metric_translator.go @@ -1,7 +1,7 @@ // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 -package protocol // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/statsdreceiver/protocol" +package protocol // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/statsdreceiver/internal/protocol" import ( "sort" diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/statsdreceiver/protocol/parser.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/statsdreceiver/internal/protocol/parser.go similarity index 90% rename from vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/statsdreceiver/protocol/parser.go rename to vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/statsdreceiver/internal/protocol/parser.go index f5e38d9fe..abfea560c 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/statsdreceiver/protocol/parser.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/statsdreceiver/internal/protocol/parser.go @@ -1,7 +1,7 @@ // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 -package protocol // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/statsdreceiver/protocol" +package protocol // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/statsdreceiver/internal/protocol" import ( "net" diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/statsdreceiver/protocol/statsd_parser.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/statsdreceiver/internal/protocol/statsd_parser.go similarity index 99% rename from 
vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/statsdreceiver/protocol/statsd_parser.go rename to vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/statsdreceiver/internal/protocol/statsd_parser.go index def7b3243..6969de362 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/statsdreceiver/protocol/statsd_parser.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/statsdreceiver/internal/protocol/statsd_parser.go @@ -1,7 +1,7 @@ // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 -package protocol // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/statsdreceiver/protocol" +package protocol // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/statsdreceiver/internal/protocol" import ( "errors" diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/statsdreceiver/transport/mock_reporter.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/statsdreceiver/internal/transport/mock_reporter.go similarity index 94% rename from vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/statsdreceiver/transport/mock_reporter.go rename to vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/statsdreceiver/internal/transport/mock_reporter.go index 23eaf197c..33d8ac8ce 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/statsdreceiver/transport/mock_reporter.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/statsdreceiver/internal/transport/mock_reporter.go @@ -1,7 +1,7 @@ // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 -package transport // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/statsdreceiver/transport" +package transport // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/statsdreceiver/internal/transport" import ( "context" diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/statsdreceiver/transport/server.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/statsdreceiver/internal/transport/server.go similarity index 95% rename from vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/statsdreceiver/transport/server.go rename to vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/statsdreceiver/internal/transport/server.go index ef334aecb..7b3cdab60 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/statsdreceiver/transport/server.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/statsdreceiver/internal/transport/server.go @@ -1,7 +1,7 @@ // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 -package transport // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/statsdreceiver/transport" +package transport // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/statsdreceiver/internal/transport" import ( "context" @@ -10,7 +10,7 @@ import ( "go.opentelemetry.io/collector/consumer" - "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/statsdreceiver/protocol" + "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/statsdreceiver/internal/protocol" ) var errNilListenAndServeParameters = 
errors.New("no parameter of ListenAndServe can be nil") diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/statsdreceiver/transport/udp_server.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/statsdreceiver/internal/transport/udp_server.go similarity index 94% rename from vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/statsdreceiver/transport/udp_server.go rename to vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/statsdreceiver/internal/transport/udp_server.go index aec9576e1..7a483ce61 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/statsdreceiver/transport/udp_server.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/statsdreceiver/internal/transport/udp_server.go @@ -1,7 +1,7 @@ // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 -package transport // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/statsdreceiver/transport" +package transport // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/statsdreceiver/internal/transport" import ( "bytes" @@ -12,7 +12,7 @@ import ( "go.opentelemetry.io/collector/consumer" - "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/statsdreceiver/protocol" + "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/statsdreceiver/internal/protocol" ) type udpServer struct { diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/statsdreceiver/receiver.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/statsdreceiver/receiver.go index a28c4683d..feac277f7 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/statsdreceiver/receiver.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/statsdreceiver/receiver.go @@ -18,8 +18,8 @@ import ( "go.opentelemetry.io/collector/receiver" "go.uber.org/zap" - "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/statsdreceiver/protocol" - "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/statsdreceiver/transport" + "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/statsdreceiver/internal/protocol" + "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/statsdreceiver/internal/transport" ) var _ receiver.Metrics = (*statsdReceiver)(nil) diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/statsdreceiver/reporter.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/statsdreceiver/reporter.go index d205649d6..ebbb94f16 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/statsdreceiver/reporter.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/statsdreceiver/reporter.go @@ -11,7 +11,7 @@ import ( "go.opentelemetry.io/collector/receiver" "go.uber.org/zap" - "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/statsdreceiver/transport" + "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/statsdreceiver/internal/transport" ) // reporter struct implements the transport.Reporter interface to give consistent diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/annotations.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/annotations.go index 6f9e6fd3a..581cf7cdf 100644 
--- a/vendor/github.com/opencontainers/image-spec/specs-go/v1/annotations.go +++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/annotations.go @@ -59,13 +59,4 @@ const ( // AnnotationBaseImageName is the annotation key for the image reference of the image's base image. AnnotationBaseImageName = "org.opencontainers.image.base.name" - - // AnnotationArtifactCreated is the annotation key for the date and time on which the artifact was built, conforming to RFC 3339. - AnnotationArtifactCreated = "org.opencontainers.artifact.created" - - // AnnotationArtifactDescription is the annotation key for the human readable description for the artifact. - AnnotationArtifactDescription = "org.opencontainers.artifact.description" - - // AnnotationReferrersFiltersApplied is the annotation key for the comma separated list of filters applied by the registry in the referrers listing. - AnnotationReferrersFiltersApplied = "org.opencontainers.referrers.filtersApplied" ) diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/artifact.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/artifact.go deleted file mode 100644 index 03d76ce43..000000000 --- a/vendor/github.com/opencontainers/image-spec/specs-go/v1/artifact.go +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright 2022 The Linux Foundation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package v1 - -// Artifact describes an artifact manifest. -// This structure provides `application/vnd.oci.artifact.manifest.v1+json` mediatype when marshalled to JSON. -type Artifact struct { - // MediaType is the media type of the object this schema refers to. - MediaType string `json:"mediaType"` - - // ArtifactType is the IANA media type of the artifact this schema refers to. - ArtifactType string `json:"artifactType"` - - // Blobs is a collection of blobs referenced by this manifest. - Blobs []Descriptor `json:"blobs,omitempty"` - - // Subject (reference) is an optional link from the artifact to another manifest forming an association between the artifact and the other manifest. - Subject *Descriptor `json:"subject,omitempty"` - - // Annotations contains arbitrary metadata for the artifact manifest. - Annotations map[string]string `json:"annotations,omitempty"` -} diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/config.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/config.go index ffff4b6d1..36b0aeb8f 100644 --- a/vendor/github.com/opencontainers/image-spec/specs-go/v1/config.go +++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/config.go @@ -48,6 +48,17 @@ type ImageConfig struct { // StopSignal contains the system call signal that will be sent to the container to exit. StopSignal string `json:"StopSignal,omitempty"` + + // ArgsEscaped + // + // Deprecated: This field is present only for legacy compatibility with + // Docker and should not be used by new image builders. 
It is used by Docker + // for Windows images to indicate that `Entrypoint` or `Cmd` (or both) + // contains only a single-element array that is pre-escaped and combined + // into a single string `CommandLine`. If `true`, the value in `Entrypoint` or + // `Cmd` should be used as-is to avoid double escaping. + // https://github.com/opencontainers/image-spec/pull/892 + ArgsEscaped bool `json:"ArgsEscaped,omitempty"` } // RootFS describes the layer content addresses @@ -86,22 +97,8 @@ type Image struct { // Author defines the name and/or email address of the person or entity which created and is responsible for maintaining the image. Author string `json:"author,omitempty"` - // Architecture is the CPU architecture which the binaries in this image are built to run on. - Architecture string `json:"architecture"` - - // Variant is the variant of the specified CPU architecture which image binaries are intended to run on. - Variant string `json:"variant,omitempty"` - - // OS is the name of the operating system which the image is built to run on. - OS string `json:"os"` - - // OSVersion is an optional field specifying the operating system - // version, for example on Windows `10.0.14393.1066`. - OSVersion string `json:"os.version,omitempty"` - - // OSFeatures is an optional field specifying an array of strings, - // each listing a required OS feature (for example on Windows `win32k`). - OSFeatures []string `json:"os.features,omitempty"` + // Platform describes the platform which the image in the manifest runs on. + Platform // Config defines the execution parameters which should be used as a base when running a container using the image. Config ImageConfig `json:"config,omitempty"` diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/descriptor.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/descriptor.go index 9654aa5af..3004e9a40 100644 --- a/vendor/github.com/opencontainers/image-spec/specs-go/v1/descriptor.go +++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/descriptor.go @@ -52,7 +52,7 @@ type Descriptor struct { // Platform describes the platform which the image in the manifest runs on. type Platform struct { // Architecture field specifies the CPU architecture, for example - // `amd64` or `ppc64`. + // `amd64` or `ppc64le`. Architecture string `json:"architecture"` // OS specifies the operating system, for example `linux` or `windows`. @@ -70,3 +70,11 @@ type Platform struct { // example `v7` to specify ARMv7 when architecture is `arm`. Variant string `json:"variant,omitempty"` } + +// DescriptorEmptyJSON is the descriptor of a blob with content of `{}`. +var DescriptorEmptyJSON = Descriptor{ + MediaType: MediaTypeEmptyJSON, + Digest: `sha256:44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a`, + Size: 2, + Data: []byte(`{}`), +} diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/index.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/index.go index ed4a56e59..e2bed9d4e 100644 --- a/vendor/github.com/opencontainers/image-spec/specs-go/v1/index.go +++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/index.go @@ -24,9 +24,15 @@ type Index struct { // MediaType specifies the type of this document data structure e.g. `application/vnd.oci.image.index.v1+json` MediaType string `json:"mediaType,omitempty"` + // ArtifactType specifies the IANA media type of artifact when the manifest is used for an artifact.
+ ArtifactType string `json:"artifactType,omitempty"` + // Manifests references platform specific manifests. Manifests []Descriptor `json:"manifests"` + // Subject is an optional link from the image manifest to another manifest forming an association between the image manifest and the other manifest. + Subject *Descriptor `json:"subject,omitempty"` + // Annotations contains arbitrary metadata for the image index. Annotations map[string]string `json:"annotations,omitempty"` } diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/manifest.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/manifest.go index 730a09359..26fec52a6 100644 --- a/vendor/github.com/opencontainers/image-spec/specs-go/v1/manifest.go +++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/manifest.go @@ -23,6 +23,9 @@ type Manifest struct { // MediaType specifies the type of this document data structure e.g. `application/vnd.oci.image.manifest.v1+json` MediaType string `json:"mediaType,omitempty"` + // ArtifactType specifies the IANA media type of artifact when the manifest is used for an artifact. + ArtifactType string `json:"artifactType,omitempty"` + // Config references a configuration object for a container, by digest. // The referenced configuration object is a JSON blob that the runtime uses to set up the container. Config Descriptor `json:"config"` diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go index 935b481e3..892ba3de9 100644 --- a/vendor/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go +++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go @@ -40,21 +40,36 @@ const ( // MediaTypeImageLayerNonDistributable is the media type for layers referenced by // the manifest but with distribution restrictions. + // + // Deprecated: Non-distributable layers are deprecated, and not recommended + // for future use. Implementations SHOULD NOT produce new non-distributable + // layers. + // https://github.com/opencontainers/image-spec/pull/965 MediaTypeImageLayerNonDistributable = "application/vnd.oci.image.layer.nondistributable.v1.tar" // MediaTypeImageLayerNonDistributableGzip is the media type for // gzipped layers referenced by the manifest but with distribution // restrictions. + // + // Deprecated: Non-distributable layers are deprecated, and not recommended + // for future use. Implementations SHOULD NOT produce new non-distributable + // layers. + // https://github.com/opencontainers/image-spec/pull/965 MediaTypeImageLayerNonDistributableGzip = "application/vnd.oci.image.layer.nondistributable.v1.tar+gzip" // MediaTypeImageLayerNonDistributableZstd is the media type for zstd // compressed layers referenced by the manifest but with distribution // restrictions. + // + // Deprecated: Non-distributable layers are deprecated, and not recommended + // for future use. Implementations SHOULD NOT produce new non-distributable + // layers. + // https://github.com/opencontainers/image-spec/pull/965 MediaTypeImageLayerNonDistributableZstd = "application/vnd.oci.image.layer.nondistributable.v1.tar+zstd" // MediaTypeImageConfig specifies the media type for the image configuration. MediaTypeImageConfig = "application/vnd.oci.image.config.v1+json" - // MediaTypeArtifactManifest specifies the media type for a content descriptor. 
- MediaTypeArtifactManifest = "application/vnd.oci.artifact.manifest.v1+json" + // MediaTypeEmptyJSON specifies the media type for an unused blob containing the value `{}` + MediaTypeEmptyJSON = "application/vnd.oci.empty.v1+json" ) diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/version.go b/vendor/github.com/opencontainers/image-spec/specs-go/version.go index d27903579..e3b7ac03a 100644 --- a/vendor/github.com/opencontainers/image-spec/specs-go/version.go +++ b/vendor/github.com/opencontainers/image-spec/specs-go/version.go @@ -25,7 +25,7 @@ const ( VersionPatch = 0 // VersionDev indicates development branch. Releases will be empty string. - VersionDev = "-rc2" + VersionDev = "-rc.4" ) // Version is the specification version that the package types support. diff --git a/vendor/github.com/opencontainers/runtime-spec/specs-go/config.go b/vendor/github.com/opencontainers/runtime-spec/specs-go/config.go index 068edd052..4e7717d53 100644 --- a/vendor/github.com/opencontainers/runtime-spec/specs-go/config.go +++ b/vendor/github.com/opencontainers/runtime-spec/specs-go/config.go @@ -33,6 +33,34 @@ type Spec struct { ZOS *ZOS `json:"zos,omitempty" platform:"zos"` } +// Scheduler represents the scheduling attributes for a process. It is based on +// the Linux sched_setattr(2) syscall. +type Scheduler struct { + // Policy represents the scheduling policy (e.g., SCHED_FIFO, SCHED_RR, SCHED_OTHER). + Policy LinuxSchedulerPolicy `json:"policy"` + + // Nice is the nice value for the process, which affects its priority. + Nice int32 `json:"nice,omitempty"` + + // Priority represents the static priority of the process. + Priority int32 `json:"priority,omitempty"` + + // Flags is an array of scheduling flags. + Flags []LinuxSchedulerFlag `json:"flags,omitempty"` + + // The following ones are used by the DEADLINE scheduler. + + // Runtime is the amount of time in nanoseconds during which the process + // is allowed to run in a given period. + Runtime uint64 `json:"runtime,omitempty"` + + // Deadline is the absolute deadline for the process to complete its execution. + Deadline uint64 `json:"deadline,omitempty"` + + // Period is the length of the period in nanoseconds used for determining the process runtime. + Period uint64 `json:"period,omitempty"` +} + // Process contains information to start a specific application inside the container. type Process struct { // Terminal creates an interactive terminal for the container. @@ -60,8 +88,12 @@ type Process struct { ApparmorProfile string `json:"apparmorProfile,omitempty" platform:"linux"` // Specify an oom_score_adj for the container. OOMScoreAdj *int `json:"oomScoreAdj,omitempty" platform:"linux"` + // Scheduler specifies the scheduling attributes for a process + Scheduler *Scheduler `json:"scheduler,omitempty" platform:"linux"` // SelinuxLabel specifies the selinux context that the container process is run as. SelinuxLabel string `json:"selinuxLabel,omitempty" platform:"linux"` + // IOPriority contains the I/O priority settings for the cgroup. + IOPriority *LinuxIOPriority `json:"ioPriority,omitempty" platform:"linux"` } // LinuxCapabilities specifies the list of allowed capabilities that are kept for a process. @@ -79,6 +111,22 @@ type LinuxCapabilities struct { Ambient []string `json:"ambient,omitempty" platform:"linux"` } +// LinuxIOPriority represents I/O priority settings for the container's processes within the process group.
+type LinuxIOPriority struct { + Class IOPriorityClass `json:"class"` + Priority int `json:"priority"` +} + +// IOPriorityClass represents an I/O scheduling class. +type IOPriorityClass string + +// Possible values for IOPriorityClass. +const ( + IOPRIO_CLASS_RT IOPriorityClass = "IOPRIO_CLASS_RT" + IOPRIO_CLASS_BE IOPriorityClass = "IOPRIO_CLASS_BE" + IOPRIO_CLASS_IDLE IOPriorityClass = "IOPRIO_CLASS_IDLE" +) + // Box specifies dimensions of a rectangle. Used for specifying the size of a console. type Box struct { // Height is the vertical dimension of a box. @@ -191,6 +239,8 @@ type Linux struct { IntelRdt *LinuxIntelRdt `json:"intelRdt,omitempty"` // Personality contains configuration for the Linux personality syscall Personality *LinuxPersonality `json:"personality,omitempty"` + // TimeOffsets specifies the offset for supporting time namespaces. + TimeOffsets map[string]LinuxTimeOffset `json:"timeOffsets,omitempty"` } // LinuxNamespace is the configuration for a Linux namespace @@ -220,6 +270,8 @@ const ( UserNamespace LinuxNamespaceType = "user" // CgroupNamespace for isolating cgroup hierarchies CgroupNamespace LinuxNamespaceType = "cgroup" + // TimeNamespace for isolating the clocks + TimeNamespace LinuxNamespaceType = "time" ) // LinuxIDMapping specifies UID/GID mappings @@ -232,6 +284,14 @@ type LinuxIDMapping struct { Size uint32 `json:"size"` } +// LinuxTimeOffset specifies the offset for Time Namespace +type LinuxTimeOffset struct { + // Secs is the offset of clock (in secs) in the container + Secs int64 `json:"secs,omitempty"` + // Nanosecs is the additional offset for Secs (in nanosecs) + Nanosecs uint32 `json:"nanosecs,omitempty"` +} + // POSIXRlimit type and restrictions type POSIXRlimit struct { // Type of the rlimit to set @@ -242,12 +302,13 @@ type POSIXRlimit struct { Soft uint64 `json:"soft"` } -// LinuxHugepageLimit structure corresponds to limiting kernel hugepages +// LinuxHugepageLimit structure corresponds to limiting kernel hugepages. +// Default to reservation limits if supported. Otherwise fall back to page fault limits. type LinuxHugepageLimit struct { - // Pagesize is the hugepage size - // Format: "<size><unit-prefix>B" (e.g. 64KB, 2MB, 1GB, etc.) + // Pagesize is the hugepage size. + // Format: "<size><unit-prefix>B" (e.g. 64KB, 2MB, 1GB, etc.). Pagesize string `json:"pageSize"` - // Limit is the limit of "hugepagesize" hugetlb usage + // Limit is the limit of "hugepagesize" hugetlb reservations (if supported) or usage. Limit uint64 `json:"limit"` } @@ -331,6 +392,9 @@ type LinuxCPU struct { Shares *uint64 `json:"shares,omitempty"` // CPU hardcap limit (in usecs). Allowed cpu time in a given period. Quota *int64 `json:"quota,omitempty"` + // CPU hardcap burst limit (in usecs). Allowed accumulated cpu time additionally for burst in a + // given period. + Burst *uint64 `json:"burst,omitempty"` // CPU period to be used for hardcapping (in usecs). Period *uint64 `json:"period,omitempty"` // How much time realtime scheduling may use (in usecs). @@ -379,7 +443,7 @@ type LinuxResources struct { Pids *LinuxPids `json:"pids,omitempty"` // BlockIO restriction configuration BlockIO *LinuxBlockIO `json:"blockIO,omitempty"` - // Hugetlb limit (in bytes) + // Hugetlb limits (in bytes). Default to reservation limits if supported. HugepageLimits []LinuxHugepageLimit `json:"hugepageLimits,omitempty"` // Network restriction configuration Network *LinuxNetwork `json:"network,omitempty"` @@ -773,3 +837,43 @@ type ZOSDevice struct { // Gid of the device.
GID *uint32 `json:"gid,omitempty"` } + +// LinuxSchedulerPolicy represents different scheduling policies used with the Linux Scheduler +type LinuxSchedulerPolicy string + +const ( + // SchedOther is the default scheduling policy + SchedOther LinuxSchedulerPolicy = "SCHED_OTHER" + // SchedFIFO is the First-In-First-Out scheduling policy + SchedFIFO LinuxSchedulerPolicy = "SCHED_FIFO" + // SchedRR is the Round-Robin scheduling policy + SchedRR LinuxSchedulerPolicy = "SCHED_RR" + // SchedBatch is the Batch scheduling policy + SchedBatch LinuxSchedulerPolicy = "SCHED_BATCH" + // SchedISO is the Isolation scheduling policy + SchedISO LinuxSchedulerPolicy = "SCHED_ISO" + // SchedIdle is the Idle scheduling policy + SchedIdle LinuxSchedulerPolicy = "SCHED_IDLE" + // SchedDeadline is the Deadline scheduling policy + SchedDeadline LinuxSchedulerPolicy = "SCHED_DEADLINE" +) + +// LinuxSchedulerFlag represents the flags used by the Linux Scheduler. +type LinuxSchedulerFlag string + +const ( + // SchedFlagResetOnFork represents the reset on fork scheduling flag + SchedFlagResetOnFork LinuxSchedulerFlag = "SCHED_FLAG_RESET_ON_FORK" + // SchedFlagReclaim represents the reclaim scheduling flag + SchedFlagReclaim LinuxSchedulerFlag = "SCHED_FLAG_RECLAIM" + // SchedFlagDLOverrun represents the deadline overrun scheduling flag + SchedFlagDLOverrun LinuxSchedulerFlag = "SCHED_FLAG_DL_OVERRUN" + // SchedFlagKeepPolicy represents the keep policy scheduling flag + SchedFlagKeepPolicy LinuxSchedulerFlag = "SCHED_FLAG_KEEP_POLICY" + // SchedFlagKeepParams represents the keep parameters scheduling flag + SchedFlagKeepParams LinuxSchedulerFlag = "SCHED_FLAG_KEEP_PARAMS" + // SchedFlagUtilClampMin represents the utilization clamp minimum scheduling flag + SchedFlagUtilClampMin LinuxSchedulerFlag = "SCHED_FLAG_UTIL_CLAMP_MIN" + // SchedFlagUtilClampMax represents the utilization clamp maximum scheduling flag + SchedFlagUtilClampMax LinuxSchedulerFlag = "SCHED_FLAG_UTIL_CLAMP_MAX" +) diff --git a/vendor/github.com/opencontainers/runtime-spec/specs-go/version.go b/vendor/github.com/opencontainers/runtime-spec/specs-go/version.go index 596af0c2f..41933fb17 100644 --- a/vendor/github.com/opencontainers/runtime-spec/specs-go/version.go +++ b/vendor/github.com/opencontainers/runtime-spec/specs-go/version.go @@ -6,12 +6,12 @@ const ( // VersionMajor is for an API incompatible changes VersionMajor = 1 // VersionMinor is for functionality in a backwards-compatible manner - VersionMinor = 0 + VersionMinor = 1 // VersionPatch is for backwards-compatible bug fixes - VersionPatch = 2 + VersionPatch = 0 // VersionDev indicates development branch. Releases will be empty string. - VersionDev = "-dev" + VersionDev = "-rc.3" ) // Version is the specification version that the package types support. diff --git a/vendor/github.com/ovh/go-ovh/LICENSE b/vendor/github.com/ovh/go-ovh/LICENSE index d847c8afb..e84ce8a65 100644 --- a/vendor/github.com/ovh/go-ovh/LICENSE +++ b/vendor/github.com/ovh/go-ovh/LICENSE @@ -1,26 +1,28 @@ -Copyright (c) 2015-2017, OVH SAS. -All rights reserved. +BSD 3-Clause License + +Copyright (c) 2015-2023, OVH SAS Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - * Neither the name of OVH SAS nor the - names of its contributors may be used to endorse or promote products - derived from this software without specific prior written permission. +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. -THIS SOFTWARE IS PROVIDED BY OVH SAS AND CONTRIBUTORS ``AS IS'' AND ANY -EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL OVH SAS AND CONTRIBUTORS BE LIABLE FOR ANY -DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +3. Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/ovh/go-ovh/ovh/configuration.go b/vendor/github.com/ovh/go-ovh/ovh/configuration.go index 4cc624dd3..b648c0f9b 100644 --- a/vendor/github.com/ovh/go-ovh/ovh/configuration.go +++ b/vendor/github.com/ovh/go-ovh/ovh/configuration.go @@ -4,43 +4,71 @@ import ( "fmt" "os" "os/user" - "path/filepath" "strings" "gopkg.in/ini.v1" ) -// Use variables for easier test overload -var ( - systemConfigPath = "/etc/ovh.conf" - userConfigPath = "/.ovh.conf" // prefixed with homeDir - localConfigPath = "./ovh.conf" -) +var configPaths = []string{ + // System wide configuration + "/etc/ovh.conf", + // Configuration in user's home + "~/.ovh.conf", + // Configuration in local folder + "./ovh.conf", +} -// currentUserHome attempts to get current user's home directory +// currentUserHome attempts to get current user's home directory.
func currentUserHome() (string, error) { - userHome := "" usr, err := user.Current() if err != nil { // Fallback by trying to read $HOME - userHome = os.Getenv("HOME") - if userHome != "" { - err = nil + if userHome := os.Getenv("HOME"); userHome != "" { + return userHome, nil } - } else { - userHome = usr.HomeDir + return "", err } - return userHome, nil + + return usr.HomeDir, nil } -// appendConfigurationFile only if it exists. We need to do this because -// ini package will fail to load configuration at all if a configuration -// file is missing. This is racy, but better than always failing. -func appendConfigurationFile(cfg *ini.File, path string) { - if file, err := os.Open(path); err == nil { - file.Close() - cfg.Append(path) +// expandConfigPaths returns configPaths, with the ~/ prefix expanded. +func expandConfigPaths() []interface{} { + paths := []interface{}{} + + // Will be initialized on first use + var home string + var homeErr error + + for _, path := range configPaths { + if strings.HasPrefix(path, "~/") { + // Find home if needed + if home == "" && homeErr == nil { + home, homeErr = currentUserHome() + } + // Ignore file in HOME if we cannot find it + if homeErr != nil { + continue + } + + path = home + path[1:] + } + + paths = append(paths, path) } + + return paths +} + +// loadINI builds an ini.File from the configuration paths provided in configPaths. +// It's a helper for loadConfig. +func loadINI() (*ini.File, error) { + paths := expandConfigPaths() + if len(paths) == 0 { + return ini.Empty(), nil + } + + return ini.LooseLoad(paths[0], paths[1:]...) } // loadConfig loads client configuration from params, environments or configuration @@ -57,17 +85,17 @@ func appendConfigurationFile(cfg *ini.File, path string) { // - ./ovh.conf // - $HOME/.ovh.conf // - /etc/ovh.conf -- func (c *Client) loadConfig(endpointName string) error { + if strings.HasSuffix(endpointName, "/") { + return fmt.Errorf("endpoint name cannot have a trailing slash") + } + // Load configuration files by order of increasing priority. All configuration // files are optional. Only load file from user home if home could be resolved - cfg := ini.Empty() - appendConfigurationFile(cfg, systemConfigPath) - if home, err := currentUserHome(); err == nil { - userConfigFullPath := filepath.Join(home, userConfigPath) - appendConfigurationFile(cfg, userConfigFullPath) + cfg, err := loadINI() + if err != nil { + return fmt.Errorf("cannot load configuration: %w", err) } - appendConfigurationFile(cfg, localConfigPath) // Canonicalize configuration if endpointName == "" { @@ -107,7 +135,7 @@ func (c *Client) loadConfig(endpointName string) error { return nil } -// getConfigValue returns the value of OVH_ or ``name`` value from ``section``. If +// getConfigValue returns the value of OVH_ or "name" value from "section".
If // the value could not be read from either env or any configuration files, return 'def' func getConfigValue(cfg *ini.File, section, name, def string) string { // Attempt to load from environment diff --git a/vendor/github.com/ovh/go-ovh/ovh/consumer_key.go b/vendor/github.com/ovh/go-ovh/ovh/consumer_key.go index 716375511..b32b80109 100644 --- a/vendor/github.com/ovh/go-ovh/ovh/consumer_key.go +++ b/vendor/github.com/ovh/go-ovh/ovh/consumer_key.go @@ -83,7 +83,6 @@ func (ck *CkRequest) AddRules(methods []string, path string) { for _, method := range methods { ck.AddRule(method, path) } - } // AddRecursiveRules adds grant requests on "path" and "path/*", for all diff --git a/vendor/github.com/ovh/go-ovh/ovh/error.go b/vendor/github.com/ovh/go-ovh/ovh/error.go index 7beb39241..fd4b95513 100644 --- a/vendor/github.com/ovh/go-ovh/ovh/error.go +++ b/vendor/github.com/ovh/go-ovh/ovh/error.go @@ -1,6 +1,9 @@ package ovh -import "fmt" +import ( + "fmt" + "strings" +) // APIError represents an error that can occur while calling the API. type APIError struct { @@ -16,10 +19,33 @@ type APIError struct { QueryID string } -func (err *APIError) Error() string { - if err.Class == "" { - return fmt.Sprintf("HTTP Error %d: %q", err.Code, err.Message) +// Let's make sure that APIError always satisfies the fmt.Stringer and error interfaces +var _ fmt.Stringer = APIError{} +var _ error = APIError{} + +func (err APIError) Error() string { + var sb strings.Builder + sb.Grow(128) + + // Base message + fmt.Fprint(&sb, "OVHcloud API error (status code ", err.Code, "): ") + + // Append class if any + if err.Class != "" { + fmt.Fprint(&sb, err.Class, ": ") + } + + // Real error message, quoted + fmt.Fprintf(&sb, "%q", err.Message) + + // QueryID if any + if err.QueryID != "" { + fmt.Fprint(&sb, " (X-OVH-Query-Id: ", err.QueryID, ")") } - return fmt.Sprintf("HTTP Error %d: %s: %q (X-OVH-Query-Id: %s)", err.Code, err.Class, err.Message, err.QueryID) + return sb.String() +} + +func (err APIError) String() string { + return err.Error() } diff --git a/vendor/github.com/ovh/go-ovh/ovh/ovh.go b/vendor/github.com/ovh/go-ovh/ovh/ovh.go index d06991a93..9c47e436c 100644 --- a/vendor/github.com/ovh/go-ovh/ovh/ovh.go +++ b/vendor/github.com/ovh/go-ovh/ovh/ovh.go @@ -11,10 +11,15 @@ import ( "io/ioutil" "net/http" "strconv" + "strings" "sync/atomic" "time" ) +// getLocalTime is a function to be overwritten during the tests, it returns the time +// on the local machine +var getLocalTime = time.Now + // DefaultTimeout api requests after 180s const DefaultTimeout = 180 * time.Second @@ -86,7 +91,7 @@ func NewClient(endpoint, appKey, appSecret, consumerKey string) (*Client, error) AppSecret: appSecret, ConsumerKey: consumerKey, Client: &http.Client{}, - Timeout: time.Duration(DefaultTimeout), + Timeout: DefaultTimeout, } // Get and check the configuration @@ -227,7 +232,7 @@ func (c *Client) getTimeDelta() (time.Duration, error) { return 0, err } - d = time.Since(*ovhTime) + d = getLocalTime().Sub(*ovhTime) c.timeDelta.Store(d) return d, nil @@ -246,16 +251,15 @@ func (c *Client) getTime() (*time.Time, error) { return &serverTime, nil } -// getLocalTime is a function to be overwritten during the tests, it return the time -// on the the local machine -var getLocalTime = func() time.Time { - return time.Now() -} +// getTarget returns the URL to target given an endpoint and a path. +// If the path starts with `/v1` or `/v2`, then remove the trailing `/1.0` from the endpoint.
+func getTarget(endpoint, path string) string { + // /1.0 + /v1/ or /1.0 + /v2/ + if strings.HasSuffix(endpoint, "/1.0") && (strings.HasPrefix(path, "/v1/") || strings.HasPrefix(path, "/v2/")) { + return endpoint[:len(endpoint)-4] + path + } -// getEndpointForSignature is a function to be overwritten during the tests, it returns a -// the endpoint -var getEndpointForSignature = func(c *Client) string { - return c.endpoint + return endpoint + path } // NewRequest returns a new HTTP request @@ -270,7 +274,7 @@ func (c *Client) NewRequest(method, path string, reqBody interface{}, needAuth b } } - target := fmt.Sprintf("%s%s", c.endpoint, path) + target := getTarget(c.endpoint, path) req, err := http.NewRequest(method, target, bytes.NewReader(body)) if err != nil { return nil, err @@ -297,12 +301,11 @@ func (c *Client) NewRequest(method, path string, reqBody interface{}, needAuth b req.Header.Add("X-Ovh-Consumer", c.ConsumerKey) h := sha1.New() - h.Write([]byte(fmt.Sprintf("%s+%s+%s+%s%s+%s+%d", + h.Write([]byte(fmt.Sprintf("%s+%s+%s+%s+%s+%d", c.AppSecret, c.ConsumerKey, method, - getEndpointForSignature(c), - path, + target, body, timestamp, ))) @@ -369,7 +372,7 @@ func (c *Client) CallAPI(method, path string, reqBody, resType interface{}, need // - full serialized request body // - server current time (takes time delta into account) // -// Context is used by http.Client to handle context cancelation +// Context is used by http.Client to handle context cancelation. // // Call will automatically assemble the target url from the endpoint // configured in the client instance and the path argument. If the reqBody diff --git a/vendor/github.com/prometheus/prometheus/config/config.go b/vendor/github.com/prometheus/prometheus/config/config.go index a29c98eed..5c51d5a0d 100644 --- a/vendor/github.com/prometheus/prometheus/config/config.go +++ b/vendor/github.com/prometheus/prometheus/config/config.go @@ -173,16 +173,16 @@ var ( // DefaultQueueConfig is the default remote queue configuration. DefaultQueueConfig = QueueConfig{ - // With a maximum of 200 shards, assuming an average of 100ms remote write - // time and 500 samples per batch, we will be able to push 1M samples/s. - MaxShards: 200, + // With a maximum of 50 shards, assuming an average of 100ms remote write + // time and 2000 samples per batch, we will be able to push 1M samples/s. + MaxShards: 50, MinShards: 1, - MaxSamplesPerSend: 500, + MaxSamplesPerSend: 2000, - // Each shard will have a max of 2500 samples pending in its channel, plus the pending - // samples that have been enqueued. Theoretically we should only ever have about 3000 samples - // per shard pending. At 200 shards that's 600k. - Capacity: 2500, + // Each shard will have a max of 10,000 samples pending in its channel, plus the pending + // samples that have been enqueued. Theoretically we should only ever have about 12,000 samples + // per shard pending. At 50 shards that's 600k. + Capacity: 10000, BatchSendDeadline: model.Duration(5 * time.Second), // Backoff times for retrying a batch of samples on recoverable errors. @@ -194,7 +194,7 @@ var ( DefaultMetadataConfig = MetadataConfig{ Send: true, SendInterval: model.Duration(1 * time.Minute), - MaxSamplesPerSend: 500, + MaxSamplesPerSend: 2000, } // DefaultRemoteReadConfig is the default remote read configuration. 
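A quick sanity check of the arithmetic in the retuned DefaultQueueConfig comments above. This sketch is illustrative only: the constants are copied from the patched defaults, and the 100ms write time is the average round trip the upstream comment assumes.

```go
package main

import "fmt"

func main() {
	const (
		maxShards      = 50    // new DefaultQueueConfig.MaxShards
		samplesPerSend = 2000  // new DefaultQueueConfig.MaxSamplesPerSend
		capacity       = 10000 // new DefaultQueueConfig.Capacity
		writeTime      = 0.100 // assumed average remote-write round trip, in seconds
	)

	// Throughput ceiling: every shard ships one full batch per write round trip.
	fmt.Printf("max throughput: %.0f samples/s\n", maxShards*samplesPerSend/writeTime) // 1000000

	// Pending ceiling: channel capacity plus roughly one enqueued batch per shard.
	fmt.Printf("max pending: %d samples\n", maxShards*(capacity+samplesPerSend)) // 600000
}
```

The ceiling is unchanged from the old defaults (200 shards × 500 samples per 100ms is also 1M samples/s); the new defaults simply reach it with fewer, larger batches.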
diff --git a/vendor/github.com/prometheus/prometheus/discovery/aws/ec2.go b/vendor/github.com/prometheus/prometheus/discovery/aws/ec2.go index ca9921159..86d76627e 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/aws/ec2.go +++ b/vendor/github.com/prometheus/prometheus/discovery/aws/ec2.go @@ -164,7 +164,7 @@ func NewEC2Discovery(conf *EC2SDConfig, logger log.Logger) *EC2Discovery { return d } -func (d *EC2Discovery) ec2Client(ctx context.Context) (*ec2.EC2, error) { +func (d *EC2Discovery) ec2Client(context.Context) (*ec2.EC2, error) { if d.ec2 != nil { return d.ec2, nil } diff --git a/vendor/github.com/prometheus/prometheus/discovery/dns/dns.go b/vendor/github.com/prometheus/prometheus/discovery/dns/dns.go index 2b11c242a..96e07254f 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/dns/dns.go +++ b/vendor/github.com/prometheus/prometheus/discovery/dns/dns.go @@ -285,21 +285,22 @@ func lookupWithSearchPath(name string, qtype uint16, logger log.Logger) (*dns.Ms for _, lname := range conf.NameList(name) { response, err := lookupFromAnyServer(lname, qtype, conf, logger) - if err != nil { + switch { + case err != nil: // We can't go home yet, because a later name // may give us a valid, successful answer. However // we can no longer say "this name definitely doesn't // exist", because we did not get that answer for // at least one name. allResponsesValid = false - } else if response.Rcode == dns.RcodeSuccess { + case response.Rcode == dns.RcodeSuccess: // Outcome 1: GOLD! return response, nil } } if allResponsesValid { - // Outcome 2: everyone says NXDOMAIN, that's good enough for me + // Outcome 2: everyone says NXDOMAIN, that's good enough for me. return &dns.Msg{}, nil } // Outcome 3: boned. diff --git a/vendor/github.com/prometheus/prometheus/discovery/hetzner/hcloud.go b/vendor/github.com/prometheus/prometheus/discovery/hetzner/hcloud.go index aa406a1a7..50afdc1ec 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/hetzner/hcloud.go +++ b/vendor/github.com/prometheus/prometheus/discovery/hetzner/hcloud.go @@ -59,7 +59,7 @@ type hcloudDiscovery struct { } // newHcloudDiscovery returns a new hcloudDiscovery which periodically refreshes its targets. -func newHcloudDiscovery(conf *SDConfig, logger log.Logger) (*hcloudDiscovery, error) { +func newHcloudDiscovery(conf *SDConfig, _ log.Logger) (*hcloudDiscovery, error) { d := &hcloudDiscovery{ port: conf.Port, } diff --git a/vendor/github.com/prometheus/prometheus/discovery/hetzner/robot.go b/vendor/github.com/prometheus/prometheus/discovery/hetzner/robot.go index 4b7abaf77..496088028 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/hetzner/robot.go +++ b/vendor/github.com/prometheus/prometheus/discovery/hetzner/robot.go @@ -51,7 +51,7 @@ type robotDiscovery struct { } // newRobotDiscovery returns a new robotDiscovery which periodically refreshes its targets. 
-func newRobotDiscovery(conf *SDConfig, logger log.Logger) (*robotDiscovery, error) { +func newRobotDiscovery(conf *SDConfig, _ log.Logger) (*robotDiscovery, error) { d := &robotDiscovery{ port: conf.Port, endpoint: conf.robotEndpoint, @@ -69,7 +69,7 @@ func newRobotDiscovery(conf *SDConfig, logger log.Logger) (*robotDiscovery, erro return d, nil } -func (d *robotDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) { +func (d *robotDiscovery) refresh(context.Context) ([]*targetgroup.Group, error) { req, err := http.NewRequest("GET", d.endpoint+"/server", nil) if err != nil { return nil, err diff --git a/vendor/github.com/prometheus/prometheus/discovery/ionos/server.go b/vendor/github.com/prometheus/prometheus/discovery/ionos/server.go index 8ac363970..a850fbbfb 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/ionos/server.go +++ b/vendor/github.com/prometheus/prometheus/discovery/ionos/server.go @@ -60,7 +60,7 @@ type serverDiscovery struct { datacenterID string } -func newServerDiscovery(conf *SDConfig, logger log.Logger) (*serverDiscovery, error) { +func newServerDiscovery(conf *SDConfig, _ log.Logger) (*serverDiscovery, error) { d := &serverDiscovery{ port: conf.Port, datacenterID: conf.DatacenterID, diff --git a/vendor/github.com/prometheus/prometheus/discovery/kubernetes/client_metrics.go b/vendor/github.com/prometheus/prometheus/discovery/kubernetes/client_metrics.go index 3a33e3e8d..b316f7d88 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/kubernetes/client_metrics.go +++ b/vendor/github.com/prometheus/prometheus/discovery/kubernetes/client_metrics.go @@ -122,11 +122,11 @@ func (f *clientGoRequestMetricAdapter) Register(registerer prometheus.Registerer ) } -func (clientGoRequestMetricAdapter) Increment(ctx context.Context, code, method, host string) { +func (clientGoRequestMetricAdapter) Increment(_ context.Context, code, _, _ string) { clientGoRequestResultMetricVec.WithLabelValues(code).Inc() } -func (clientGoRequestMetricAdapter) Observe(ctx context.Context, verb string, u url.URL, latency time.Duration) { +func (clientGoRequestMetricAdapter) Observe(_ context.Context, _ string, u url.URL, latency time.Duration) { clientGoRequestLatencyMetricVec.WithLabelValues(u.EscapedPath()).Observe(latency.Seconds()) } @@ -169,7 +169,7 @@ func (f *clientGoWorkqueueMetricsProvider) NewLongestRunningProcessorSecondsMetr return clientGoWorkqueueLongestRunningProcessorMetricVec.WithLabelValues(name) } -func (clientGoWorkqueueMetricsProvider) NewRetriesMetric(name string) workqueue.CounterMetric { +func (clientGoWorkqueueMetricsProvider) NewRetriesMetric(string) workqueue.CounterMetric { // Retries are not used so the metric is omitted. return noopMetric{} } diff --git a/vendor/github.com/prometheus/prometheus/discovery/kubernetes/endpoints.go b/vendor/github.com/prometheus/prometheus/discovery/kubernetes/endpoints.go index 039daf4fa..27742ab46 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/kubernetes/endpoints.go +++ b/vendor/github.com/prometheus/prometheus/discovery/kubernetes/endpoints.go @@ -11,6 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +// nolint:revive // Many legitimately empty blocks in this file. 
package kubernetes import ( diff --git a/vendor/github.com/prometheus/prometheus/discovery/kubernetes/endpointslice.go b/vendor/github.com/prometheus/prometheus/discovery/kubernetes/endpointslice.go index 135735154..841b7d4f6 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/kubernetes/endpointslice.go +++ b/vendor/github.com/prometheus/prometheus/discovery/kubernetes/endpointslice.go @@ -190,7 +190,7 @@ func (e *EndpointSlice) Run(ctx context.Context, ch chan<- []*targetgroup.Group) } go func() { - for e.process(ctx, ch) { + for e.process(ctx, ch) { // nolint:revive } }() @@ -300,7 +300,7 @@ func (e *EndpointSlice) buildEndpointSlice(eps endpointSliceAdaptor) *targetgrou } if port.protocol() != nil { - target[endpointSlicePortProtocolLabel] = lv(string(*port.protocol())) + target[endpointSlicePortProtocolLabel] = lv(*port.protocol()) } if port.port() != nil { diff --git a/vendor/github.com/prometheus/prometheus/discovery/kubernetes/ingress.go b/vendor/github.com/prometheus/prometheus/discovery/kubernetes/ingress.go index 8c9249f54..ad47c341a 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/kubernetes/ingress.go +++ b/vendor/github.com/prometheus/prometheus/discovery/kubernetes/ingress.go @@ -89,7 +89,7 @@ func (i *Ingress) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { } go func() { - for i.process(ctx, ch) { + for i.process(ctx, ch) { // nolint:revive } }() diff --git a/vendor/github.com/prometheus/prometheus/discovery/kubernetes/kubernetes.go b/vendor/github.com/prometheus/prometheus/discovery/kubernetes/kubernetes.go index 0f03e2cdb..a44bd513c 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/kubernetes/kubernetes.go +++ b/vendor/github.com/prometheus/prometheus/discovery/kubernetes/kubernetes.go @@ -299,12 +299,13 @@ func New(l log.Logger, conf *SDConfig) (*Discovery, error) { err error ownNamespace string ) - if conf.KubeConfig != "" { + switch { + case conf.KubeConfig != "": kcfg, err = clientcmd.BuildConfigFromFlags("", conf.KubeConfig) if err != nil { return nil, err } - } else if conf.APIServer.URL == nil { + case conf.APIServer.URL == nil: // Use the Kubernetes provided pod service account // as described in https://kubernetes.io/docs/admin/service-accounts-admin/ kcfg, err = rest.InClusterConfig() @@ -324,7 +325,7 @@ func New(l log.Logger, conf *SDConfig) (*Discovery, error) { } level.Info(l).Log("msg", "Using pod service account via in-cluster config") - } else { + default: rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "kubernetes_sd") if err != nil { return nil, err diff --git a/vendor/github.com/prometheus/prometheus/discovery/kubernetes/node.go b/vendor/github.com/prometheus/prometheus/discovery/kubernetes/node.go index 93adf7825..16a06e7a0 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/kubernetes/node.go +++ b/vendor/github.com/prometheus/prometheus/discovery/kubernetes/node.go @@ -96,7 +96,7 @@ func (n *Node) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { } go func() { - for n.process(ctx, ch) { + for n.process(ctx, ch) { // nolint:revive } }() diff --git a/vendor/github.com/prometheus/prometheus/discovery/kubernetes/pod.go b/vendor/github.com/prometheus/prometheus/discovery/kubernetes/pod.go index 396720c22..732cf52ad 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/kubernetes/pod.go +++ b/vendor/github.com/prometheus/prometheus/discovery/kubernetes/pod.go @@ -132,7 +132,7 @@ func (p *Pod) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { } go 
func() { - for p.process(ctx, ch) { + for p.process(ctx, ch) { // nolint:revive } }() diff --git a/vendor/github.com/prometheus/prometheus/discovery/kubernetes/service.go b/vendor/github.com/prometheus/prometheus/discovery/kubernetes/service.go index a19f06e7d..40e17679e 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/kubernetes/service.go +++ b/vendor/github.com/prometheus/prometheus/discovery/kubernetes/service.go @@ -92,7 +92,7 @@ func (s *Service) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { } go func() { - for s.process(ctx, ch) { + for s.process(ctx, ch) { // nolint:revive } }() diff --git a/vendor/github.com/prometheus/prometheus/discovery/linode/linode.go b/vendor/github.com/prometheus/prometheus/discovery/linode/linode.go index 0fd0a2c37..12b957514 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/linode/linode.go +++ b/vendor/github.com/prometheus/prometheus/discovery/linode/linode.go @@ -249,20 +249,20 @@ func (d *Discovery) refreshData(ctx context.Context) ([]*targetgroup.Group, erro if detailedIP.Address != ip.String() { continue } - - if detailedIP.Public && publicIPv4 == "" { + switch { + case detailedIP.Public && publicIPv4 == "": publicIPv4 = detailedIP.Address if detailedIP.RDNS != "" && detailedIP.RDNS != "null" { publicIPv4RDNS = detailedIP.RDNS } - } else if !detailedIP.Public && privateIPv4 == "" { + case !detailedIP.Public && privateIPv4 == "": privateIPv4 = detailedIP.Address if detailedIP.RDNS != "" && detailedIP.RDNS != "null" { privateIPv4RDNS = detailedIP.RDNS } - } else { + default: extraIPs = append(extraIPs, detailedIP.Address) } } diff --git a/vendor/github.com/prometheus/prometheus/discovery/marathon/marathon.go b/vendor/github.com/prometheus/prometheus/discovery/marathon/marathon.go index 079f93ad0..cfd3e2c08 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/marathon/marathon.go +++ b/vendor/github.com/prometheus/prometheus/discovery/marathon/marathon.go @@ -136,9 +136,10 @@ func NewDiscovery(conf SDConfig, logger log.Logger) (*Discovery, error) { return nil, err } - if len(conf.AuthToken) > 0 { + switch { + case len(conf.AuthToken) > 0: rt, err = newAuthTokenRoundTripper(conf.AuthToken, rt) - } else if len(conf.AuthTokenFile) > 0 { + case len(conf.AuthTokenFile) > 0: rt, err = newAuthTokenFileRoundTripper(conf.AuthTokenFile, rt) } if err != nil { @@ -400,19 +401,20 @@ func targetsForApp(app *app) []model.LabelSet { var labels []map[string]string var prefix string - if len(app.Container.PortMappings) != 0 { + switch { + case len(app.Container.PortMappings) != 0: // In Marathon 1.5.x the "container.docker.portMappings" object was moved // to "container.portMappings". ports, labels = extractPortMapping(app.Container.PortMappings, app.isContainerNet()) prefix = portMappingLabelPrefix - } else if len(app.Container.Docker.PortMappings) != 0 { + case len(app.Container.Docker.PortMappings) != 0: // Prior to Marathon 1.5 the port mappings could be found at the path // "container.docker.portMappings". ports, labels = extractPortMapping(app.Container.Docker.PortMappings, app.isContainerNet()) prefix = portMappingLabelPrefix - } else if len(app.PortDefinitions) != 0 { + case len(app.PortDefinitions) != 0: // PortDefinitions deprecates the "ports" array and can be used to specify // a list of ports with metadata in case a mapping is not required. 
ports = make([]uint32, len(app.PortDefinitions)) diff --git a/vendor/github.com/prometheus/prometheus/discovery/nomad/nomad.go b/vendor/github.com/prometheus/prometheus/discovery/nomad/nomad.go index c8d513039..7013f0737 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/nomad/nomad.go +++ b/vendor/github.com/prometheus/prometheus/discovery/nomad/nomad.go @@ -161,7 +161,7 @@ func NewDiscovery(conf *SDConfig, logger log.Logger) (*Discovery, error) { return d, nil } -func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) { +func (d *Discovery) refresh(context.Context) ([]*targetgroup.Group, error) { opts := &nomad.QueryOptions{ AllowStale: d.allowStale, } diff --git a/vendor/github.com/prometheus/prometheus/discovery/ovhcloud/dedicated_server.go b/vendor/github.com/prometheus/prometheus/discovery/ovhcloud/dedicated_server.go index aeb4eccbb..bb5dadcd7 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/ovhcloud/dedicated_server.go +++ b/vendor/github.com/prometheus/prometheus/discovery/ovhcloud/dedicated_server.go @@ -102,7 +102,7 @@ func (d *dedicatedServerDiscovery) getSource() string { return fmt.Sprintf("%s_%s", d.config.Name(), d.getService()) } -func (d *dedicatedServerDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) { +func (d *dedicatedServerDiscovery) refresh(context.Context) ([]*targetgroup.Group, error) { client, err := createClient(d.config) if err != nil { return nil, err diff --git a/vendor/github.com/prometheus/prometheus/discovery/ovhcloud/vps.go b/vendor/github.com/prometheus/prometheus/discovery/ovhcloud/vps.go index 705b42b65..e2d1dee36 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/ovhcloud/vps.go +++ b/vendor/github.com/prometheus/prometheus/discovery/ovhcloud/vps.go @@ -117,7 +117,7 @@ func (d *vpsDiscovery) getSource() string { return fmt.Sprintf("%s_%s", d.config.Name(), d.getService()) } -func (d *vpsDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) { +func (d *vpsDiscovery) refresh(context.Context) ([]*targetgroup.Group, error) { client, err := createClient(d.config) if err != nil { return nil, err diff --git a/vendor/github.com/prometheus/prometheus/discovery/registry.go b/vendor/github.com/prometheus/prometheus/discovery/registry.go index 8274628c2..13168a07a 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/registry.go +++ b/vendor/github.com/prometheus/prometheus/discovery/registry.go @@ -253,7 +253,7 @@ func replaceYAMLTypeError(err error, oldTyp, newTyp reflect.Type) error { oldStr := oldTyp.String() newStr := newTyp.String() for i, s := range e.Errors { - e.Errors[i] = strings.Replace(s, oldStr, newStr, -1) + e.Errors[i] = strings.ReplaceAll(s, oldStr, newStr) } } return err diff --git a/vendor/github.com/prometheus/prometheus/discovery/vultr/vultr.go b/vendor/github.com/prometheus/prometheus/discovery/vultr/vultr.go index 2f489e7d4..42881d3c1 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/vultr/vultr.go +++ b/vendor/github.com/prometheus/prometheus/discovery/vultr/vultr.go @@ -202,10 +202,8 @@ func (d *Discovery) listInstances(ctx context.Context) ([]govultr.Instance, erro if meta.Links.Next == "" { break - } else { - listOptions.Cursor = meta.Links.Next - continue } + listOptions.Cursor = meta.Links.Next } return instances, nil diff --git a/vendor/github.com/prometheus/prometheus/discovery/zookeeper/zookeeper.go b/vendor/github.com/prometheus/prometheus/discovery/zookeeper/zookeeper.go index 308d63a5f..cadff5fd2 100644 --- 
a/vendor/github.com/prometheus/prometheus/discovery/zookeeper/zookeeper.go +++ b/vendor/github.com/prometheus/prometheus/discovery/zookeeper/zookeeper.go @@ -193,7 +193,7 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { } for _, pathUpdate := range d.pathUpdates { // Drain event channel in case the treecache leaks goroutines otherwise. - for range pathUpdate { + for range pathUpdate { // nolint:revive } } d.conn.Close() diff --git a/vendor/github.com/prometheus/prometheus/model/histogram/float_histogram.go b/vendor/github.com/prometheus/prometheus/model/histogram/float_histogram.go index 256679a8c..f95f0051c 100644 --- a/vendor/github.com/prometheus/prometheus/model/histogram/float_histogram.go +++ b/vendor/github.com/prometheus/prometheus/model/histogram/float_histogram.go @@ -192,6 +192,30 @@ func (h *FloatHistogram) Scale(factor float64) *FloatHistogram { // // This method returns a pointer to the receiving histogram for convenience. func (h *FloatHistogram) Add(other *FloatHistogram) *FloatHistogram { + switch { + case other.CounterResetHint == h.CounterResetHint: + // Adding apples to apples, all good. No need to change anything. + case h.CounterResetHint == GaugeType: + // Adding something else to a gauge. That's probably OK. Outcome is a gauge. + // Nothing to do since the receiver is already marked as gauge. + case other.CounterResetHint == GaugeType: + // Similar to before, but this time the receiver is "something else" and we have to change it to gauge. + h.CounterResetHint = GaugeType + case h.CounterResetHint == UnknownCounterReset: + // With the receiver's CounterResetHint being "unknown", this could still be legitimate + // if the caller knows what they are doing. Outcome is then again "unknown". + // No need to do anything since the receiver's CounterResetHint is already "unknown". + case other.CounterResetHint == UnknownCounterReset: + // Similar to before, but now we have to set the receiver's CounterResetHint to "unknown". + h.CounterResetHint = UnknownCounterReset + default: + // All other cases shouldn't actually happen. + // They are a direct collision of CounterReset and NotCounterReset. + // Conservatively set the CounterResetHint to "unknown" and issue a warning. + h.CounterResetHint = UnknownCounterReset + // TODO(trevorwhitney): Actually issue the warning as soon as the plumbing for it is in place + } + otherZeroCount := h.reconcileZeroBuckets(other) h.ZeroCount += otherZeroCount h.Count += other.Count @@ -414,6 +438,10 @@ func (h *FloatHistogram) Compact(maxEmptyBuckets int) *FloatHistogram { // of observations, but NOT the sum of observations) is smaller in the receiving // histogram compared to the previous histogram. Otherwise, it returns false. // +// This method will shortcut to true if a CounterReset is detected, and shortcut +// to false if NotCounterReset is detected. Otherwise it will do the work to detect +// a reset. +// // Special behavior in case the Schema or the ZeroThreshold are not the same in // both histograms: // @@ -432,12 +460,23 @@ func (h *FloatHistogram) Compact(maxEmptyBuckets int) *FloatHistogram { // - Upon a decrease of the Schema, the buckets of the previous histogram are // merged so that they match the new, lower-resolution schema (again without // mutating the provided previous histogram). -// -// Note that this kind of reset detection is quite expensive.
Ideally, resets -are detected at ingest time and stored in the TSDB, so that the reset -information can be read directly from there rather than be detected each time -again. func (h *FloatHistogram) DetectReset(previous *FloatHistogram) bool { + if h.CounterResetHint == CounterReset { + return true + } + if h.CounterResetHint == NotCounterReset { + return false + } + // In all other cases of CounterResetHint (UnknownCounterReset and GaugeType), + // we go on as we would otherwise, for reasons explained below. + // + // If the CounterResetHint is UnknownCounterReset, we do not know yet if this histogram comes + // with a counter reset. Therefore, we have to do all the detailed work to find out if there + // is a counter reset or not. + // We do the same if the CounterResetHint is GaugeType, which should not happen, but PromQL still + // allows the user to apply functions to gauge histograms that are only meant for counter histograms. + // In this case, we treat the gauge histograms as counter histograms + // (and we plan to return a warning about it to the user). if h.Count < previous.Count { return true } @@ -785,10 +824,11 @@ mergeLoop: // Merge together all buckets from the original schema that fall into origIdx += span.Offset } currIdx := i.targetIdx(origIdx) - if firstPass { + switch { + case firstPass: i.currIdx = currIdx firstPass = false - } else if currIdx != i.currIdx { + case currIdx != i.currIdx: // Reached next bucket in targetSchema. // Do not actually forward to the next bucket, but break out. break mergeLoop diff --git a/vendor/github.com/prometheus/prometheus/model/labels/labels.go b/vendor/github.com/prometheus/prometheus/model/labels/labels.go index 2622f7941..9ac0e5b53 100644 --- a/vendor/github.com/prometheus/prometheus/model/labels/labels.go +++ b/vendor/github.com/prometheus/prometheus/model/labels/labels.go @@ -169,11 +169,12 @@ func (ls Labels) HashForLabels(b []byte, names ...string) (uint64, []byte) { b = b[:0] i, j := 0, 0 for i < len(ls) && j < len(names) { - if names[j] < ls[i].Name { + switch { + case names[j] < ls[i].Name: j++ - } else if ls[i].Name < names[j] { + case ls[i].Name < names[j]: i++ - } else { + default: b = append(b, ls[i].Name...) b = append(b, seps[0]) b = append(b, ls[i].Value...) @@ -213,11 +214,12 @@ func (ls Labels) BytesWithLabels(buf []byte, names ...string) []byte { b.WriteByte(labelSep) i, j := 0, 0 for i < len(ls) && j < len(names) { - if names[j] < ls[i].Name { + switch { + case names[j] < ls[i].Name: j++ - } else if ls[i].Name < names[j] { + case ls[i].Name < names[j]: i++ - } else { + default: if b.Len() > 1 { b.WriteByte(seps[0]) } @@ -546,8 +548,8 @@ func (b *Builder) Get(n string) string { // Range calls f on each label in the Builder. func (b *Builder) Range(f func(l Label)) { // Stack-based arrays to avoid heap allocation in most cases. - var addStack [1024]Label - var delStack [1024]string + var addStack [128]Label + var delStack [128]string // Take a copy of add and del, so they are unaffected by calls to Set() or Del(). origAdd, origDel := append(addStack[:0], b.add...), append(delStack[:0], b.del...) b.base.Range(func(l Label) { @@ -569,24 +571,18 @@ func contains(s []Label, n string) bool { return false } -// Labels returns the labels from the builder, adding them to res if non-nil. -// Argument res can be the same as b.base, if caller wants to overwrite that slice. +// Labels returns the labels from the builder. // If no modifications were made, the original labels are returned.
-func (b *Builder) Labels(res Labels) Labels { +func (b *Builder) Labels() Labels { if len(b.del) == 0 && len(b.add) == 0 { return b.base } - if res == nil { - // In the general case, labels are removed, modified or moved - // rather than added. - res = make(Labels, 0, len(b.base)) - } else { - res = res[:0] + expectedSize := len(b.base) + len(b.add) - len(b.del) + if expectedSize < 1 { + expectedSize = 1 } - // Justification that res can be the same slice as base: in this loop - // we move forward through base, and either skip an element or assign - // it to res at its current position or an earlier position. + res := make(Labels, 0, expectedSize) for _, l := range b.base { if slices.Contains(b.del, l.Name) || contains(b.add, l.Name) { continue @@ -636,3 +632,9 @@ func (b *ScratchBuilder) Labels() Labels { // Copy the slice, so the next use of ScratchBuilder doesn't overwrite. return append([]Label{}, b.add...) } + +// Write the newly-built Labels out to ls. +// Callers must ensure that there are no other references to ls, or any strings fetched from it. +func (b *ScratchBuilder) Overwrite(ls *Labels) { + *ls = append((*ls)[:0], b.add...) +} diff --git a/vendor/github.com/prometheus/prometheus/model/labels/labels_string.go b/vendor/github.com/prometheus/prometheus/model/labels/labels_string.go index db8c981e0..6d54e98ab 100644 --- a/vendor/github.com/prometheus/prometheus/model/labels/labels_string.go +++ b/vendor/github.com/prometheus/prometheus/model/labels/labels_string.go @@ -56,8 +56,14 @@ func (ls labelSlice) Swap(i, j int) { ls[i], ls[j] = ls[j], ls[i] } func (ls labelSlice) Less(i, j int) bool { return ls[i].Name < ls[j].Name } func decodeSize(data string, index int) (int, int) { - var size int - for shift := uint(0); ; shift += 7 { + // Fast-path for common case of a single byte, value 0..127. + b := data[index] + index++ + if b < 0x80 { + return int(b), index + } + size := int(b & 0x7F) + for shift := uint(7); ; shift += 7 { // Just panic if we go of the end of data, since all Labels strings are constructed internally and // malformed data indicates a bug, or memory corruption. b := data[index] @@ -158,7 +164,7 @@ func (ls Labels) MatchLabels(on bool, names ...string) Labels { b.Del(MetricName) b.Del(names...) } - return b.Labels(EmptyLabels()) + return b.Labels() } // Hash returns a hash value for the label set. @@ -602,8 +608,8 @@ func (b *Builder) Get(n string) string { // Range calls f on each label in the Builder. func (b *Builder) Range(f func(l Label)) { // Stack-based arrays to avoid heap allocation in most cases. - var addStack [1024]Label - var delStack [1024]string + var addStack [128]Label + var delStack [128]string // Take a copy of add and del, so they are unaffected by calls to Set() or Del(). origAdd, origDel := append(addStack[:0], b.add...), append(delStack[:0], b.del...) b.base.Range(func(l Label) { @@ -625,10 +631,9 @@ func contains(s []Label, n string) bool { return false } -// Labels returns the labels from the builder, adding them to res if non-nil. -// Argument res can be the same as b.base, if caller wants to overwrite that slice. +// Labels returns the labels from the builder. // If no modifications were made, the original labels are returned. 
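Two things happen in the string-backed implementation around here. Above, decodeSize gains a one-byte fast path: name/value lengths are stored as standard varints, so any size up to 127 is a single byte and never enters the shift loop. It is the same encoding encoding/binary reads; a quick sanity check, runnable as-is:

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// 5 fits in one varint byte; 150 needs two (0x96 0x01): low 7 bits
	// first, continuation bit set on every byte but the last.
	v1, n1 := binary.Uvarint([]byte{0x05})
	v2, n2 := binary.Uvarint([]byte{0x96, 0x01})
	fmt.Println(v1, n1, v2, n2) // 5 1 150 2
}

Below, the string-backed Builder.Labels mirrors the signature change already made in the slice implementation.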
-func (b *Builder) Labels(res Labels) Labels { +func (b *Builder) Labels() Labels { if len(b.del) == 0 && len(b.add) == 0 { return b.base } @@ -638,7 +643,7 @@ func (b *Builder) Labels(res Labels) Labels { a, d := 0, 0 bufSize := len(b.base.data) + labelsSize(b.add) - buf := make([]byte, 0, bufSize) // TODO: see if we can re-use the buffer from res. + buf := make([]byte, 0, bufSize) for pos := 0; pos < len(b.base.data); { oldPos := pos var lName string @@ -813,7 +818,7 @@ func (b *ScratchBuilder) Labels() Labels { } // Write the newly-built Labels out to ls, reusing an internal buffer. -// Callers must ensure that there are no other references to ls. +// Callers must ensure that there are no other references to ls, or any strings fetched from it. func (b *ScratchBuilder) Overwrite(ls *Labels) { size := labelsSize(b.add) if size <= cap(b.overwriteBuffer) { diff --git a/vendor/github.com/prometheus/prometheus/model/relabel/relabel.go b/vendor/github.com/prometheus/prometheus/model/relabel/relabel.go index 5ef79b4a7..5027c3963 100644 --- a/vendor/github.com/prometheus/prometheus/model/relabel/relabel.go +++ b/vendor/github.com/prometheus/prometheus/model/relabel/relabel.go @@ -211,7 +211,7 @@ func Process(lbls labels.Labels, cfgs ...*Config) (ret labels.Labels, keep bool) if !ProcessBuilder(lb, cfgs...) { return labels.EmptyLabels(), false } - return lb.Labels(lbls), true + return lb.Labels(), true } // ProcessBuilder is like Process, but the caller passes a labels.Builder diff --git a/vendor/github.com/prometheus/prometheus/model/textparse/promparse.go b/vendor/github.com/prometheus/prometheus/model/textparse/promparse.go index 2c981f050..94338a666 100644 --- a/vendor/github.com/prometheus/prometheus/model/textparse/promparse.go +++ b/vendor/github.com/prometheus/prometheus/model/textparse/promparse.go @@ -238,9 +238,10 @@ func (p *PromParser) Metric(l *labels.Labels) string { return s } -// Exemplar writes the exemplar of the current sample into the passed -// exemplar. It returns if an exemplar exists. -func (p *PromParser) Exemplar(e *exemplar.Exemplar) bool { +// Exemplar implements the Parser interface. However, since the classic +// Prometheus text format does not support exemplars, this implementation simply +// returns false and does nothing else. 
+func (p *PromParser) Exemplar(*exemplar.Exemplar) bool { return false } diff --git a/vendor/github.com/prometheus/prometheus/prompb/custom.go b/vendor/github.com/prometheus/prometheus/prompb/custom.go index 4b07187bd..13d6e0f0c 100644 --- a/vendor/github.com/prometheus/prometheus/prompb/custom.go +++ b/vendor/github.com/prometheus/prometheus/prompb/custom.go @@ -20,6 +20,11 @@ import ( func (m Sample) T() int64 { return m.Timestamp } func (m Sample) V() float64 { return m.Value } +func (h Histogram) IsFloatHistogram() bool { + _, ok := h.GetCount().(*Histogram_CountFloat) + return ok +} + func (r *ChunkedReadResponse) PooledMarshal(p *sync.Pool) ([]byte, error) { size := r.Size() data, ok := p.Get().(*[]byte) diff --git a/vendor/github.com/prometheus/prometheus/prompb/types.pb.go b/vendor/github.com/prometheus/prometheus/prompb/types.pb.go index e78e48809..125f868e9 100644 --- a/vendor/github.com/prometheus/prometheus/prompb/types.pb.go +++ b/vendor/github.com/prometheus/prometheus/prompb/types.pb.go @@ -134,21 +134,24 @@ func (LabelMatcher_Type) EnumDescriptor() ([]byte, []int) { type Chunk_Encoding int32 const ( - Chunk_UNKNOWN Chunk_Encoding = 0 - Chunk_XOR Chunk_Encoding = 1 - Chunk_HISTOGRAM Chunk_Encoding = 2 + Chunk_UNKNOWN Chunk_Encoding = 0 + Chunk_XOR Chunk_Encoding = 1 + Chunk_HISTOGRAM Chunk_Encoding = 2 + Chunk_FLOAT_HISTOGRAM Chunk_Encoding = 3 ) var Chunk_Encoding_name = map[int32]string{ 0: "UNKNOWN", 1: "XOR", 2: "HISTOGRAM", + 3: "FLOAT_HISTOGRAM", } var Chunk_Encoding_value = map[string]int32{ - "UNKNOWN": 0, - "XOR": 1, - "HISTOGRAM": 2, + "UNKNOWN": 0, + "XOR": 1, + "HISTOGRAM": 2, + "FLOAT_HISTOGRAM": 3, } func (x Chunk_Encoding) String() string { @@ -1143,75 +1146,76 @@ func init() { func init() { proto.RegisterFile("types.proto", fileDescriptor_d938547f84707355) } var fileDescriptor_d938547f84707355 = []byte{ - // 1081 bytes of a gzipped FileDescriptorProto + // 1092 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x56, 0xdb, 0x6e, 0xdb, 0x46, - 0x13, 0x36, 0x49, 0x89, 0x12, 0x47, 0x87, 0xd0, 0x0b, 0x27, 0x3f, 0xff, 0xa0, 0x71, 0x54, 0x02, - 0x69, 0x85, 0xa2, 0x90, 0x91, 0xb4, 0x17, 0x0d, 0x1a, 0x14, 0xb0, 0x5d, 0xf9, 0x80, 0x46, 0x12, - 0xb2, 0x92, 0xd1, 0xa6, 0x37, 0xc2, 0x5a, 0x5a, 0x4b, 0x44, 0x78, 0x2a, 0x77, 0x15, 0x58, 0x7d, - 0x8f, 0xde, 0xf5, 0x25, 0x7a, 0xdf, 0x07, 0x08, 0xd0, 0x9b, 0x3e, 0x41, 0x51, 0xf8, 0xaa, 0x8f, - 0x51, 0xec, 0x90, 0x14, 0xa9, 0x38, 0x05, 0x9a, 0xde, 0xed, 0x7c, 0xf3, 0xcd, 0xec, 0xc7, 0xdd, - 0x99, 0x59, 0x42, 0x43, 0xae, 0x63, 0x2e, 0x7a, 0x71, 0x12, 0xc9, 0x88, 0x40, 0x9c, 0x44, 0x01, - 0x97, 0x4b, 0xbe, 0x12, 0xf7, 0xf7, 0x16, 0xd1, 0x22, 0x42, 0xf8, 0x40, 0xad, 0x52, 0x86, 0xfb, - 0xb3, 0x0e, 0xed, 0x01, 0x97, 0x89, 0x37, 0x1b, 0x70, 0xc9, 0xe6, 0x4c, 0x32, 0xf2, 0x14, 0x2a, - 0x2a, 0x87, 0xa3, 0x75, 0xb4, 0x6e, 0xfb, 0xc9, 0xa3, 0x5e, 0x91, 0xa3, 0xb7, 0xcd, 0xcc, 0xcc, - 0xc9, 0x3a, 0xe6, 0x14, 0x43, 0xc8, 0xa7, 0x40, 0x02, 0xc4, 0xa6, 0x57, 0x2c, 0xf0, 0xfc, 0xf5, - 0x34, 0x64, 0x01, 0x77, 0xf4, 0x8e, 0xd6, 0xb5, 0xa8, 0x9d, 0x7a, 0x4e, 0xd0, 0x31, 0x64, 0x01, - 0x27, 0x04, 0x2a, 0x4b, 0xee, 0xc7, 0x4e, 0x05, 0xfd, 0xb8, 0x56, 0xd8, 0x2a, 0xf4, 0xa4, 0x53, - 0x4d, 0x31, 0xb5, 0x76, 0xd7, 0x00, 0xc5, 0x4e, 0xa4, 0x01, 0xb5, 0x8b, 0xe1, 0x37, 0xc3, 0xd1, - 0xb7, 0x43, 0x7b, 0x47, 0x19, 0xc7, 0xa3, 0x8b, 0xe1, 0xa4, 0x4f, 0x6d, 0x8d, 0x58, 0x50, 0x3d, - 0x3d, 0xbc, 0x38, 0xed, 0xdb, 0x3a, 0x69, 0x81, 0x75, 0x76, 0x3e, 0x9e, 0x8c, 0x4e, 0xe9, 0xe1, - 0xc0, 0x36, 0x08, 
0x81, 0x36, 0x7a, 0x0a, 0xac, 0xa2, 0x42, 0xc7, 0x17, 0x83, 0xc1, 0x21, 0x7d, - 0x69, 0x57, 0x49, 0x1d, 0x2a, 0xe7, 0xc3, 0x93, 0x91, 0x6d, 0x92, 0x26, 0xd4, 0xc7, 0x93, 0xc3, - 0x49, 0x7f, 0xdc, 0x9f, 0xd8, 0x35, 0xf7, 0x19, 0x98, 0x63, 0x16, 0xc4, 0x3e, 0x27, 0x7b, 0x50, - 0x7d, 0xcd, 0xfc, 0x55, 0x7a, 0x2c, 0x1a, 0x4d, 0x0d, 0xf2, 0x01, 0x58, 0xd2, 0x0b, 0xb8, 0x90, - 0x2c, 0x88, 0xf1, 0x3b, 0x0d, 0x5a, 0x00, 0x6e, 0x04, 0xf5, 0xfe, 0x35, 0x0f, 0x62, 0x9f, 0x25, - 0xe4, 0x00, 0x4c, 0x9f, 0x5d, 0x72, 0x5f, 0x38, 0x5a, 0xc7, 0xe8, 0x36, 0x9e, 0xec, 0x96, 0xcf, - 0xf5, 0xb9, 0xf2, 0x1c, 0x55, 0xde, 0xfc, 0xf1, 0x70, 0x87, 0x66, 0xb4, 0x62, 0x43, 0xfd, 0x1f, - 0x37, 0x34, 0xde, 0xde, 0xf0, 0xb7, 0x2a, 0x58, 0x67, 0x9e, 0x90, 0xd1, 0x22, 0x61, 0x01, 0x79, - 0x00, 0xd6, 0x2c, 0x5a, 0x85, 0x72, 0xea, 0x85, 0x12, 0x65, 0x57, 0xce, 0x76, 0x68, 0x1d, 0xa1, - 0xf3, 0x50, 0x92, 0x0f, 0xa1, 0x91, 0xba, 0xaf, 0xfc, 0x88, 0xc9, 0x74, 0x9b, 0xb3, 0x1d, 0x0a, - 0x08, 0x9e, 0x28, 0x8c, 0xd8, 0x60, 0x88, 0x55, 0x80, 0xfb, 0x68, 0x54, 0x2d, 0xc9, 0x3d, 0x30, - 0xc5, 0x6c, 0xc9, 0x03, 0x86, 0xb7, 0xb6, 0x4b, 0x33, 0x8b, 0x3c, 0x82, 0xf6, 0x8f, 0x3c, 0x89, - 0xa6, 0x72, 0x99, 0x70, 0xb1, 0x8c, 0xfc, 0x39, 0xde, 0xa0, 0x46, 0x5b, 0x0a, 0x9d, 0xe4, 0x20, - 0xf9, 0x28, 0xa3, 0x15, 0xba, 0x4c, 0xd4, 0xa5, 0xd1, 0xa6, 0xc2, 0x8f, 0x73, 0x6d, 0x9f, 0x80, - 0x5d, 0xe2, 0xa5, 0x02, 0x6b, 0x28, 0x50, 0xa3, 0xed, 0x0d, 0x33, 0x15, 0x79, 0x0c, 0xed, 0x90, - 0x2f, 0x98, 0xf4, 0x5e, 0xf3, 0xa9, 0x88, 0x59, 0x28, 0x9c, 0x3a, 0x9e, 0xf0, 0xbd, 0xf2, 0x09, - 0x1f, 0xad, 0x66, 0xaf, 0xb8, 0x1c, 0xc7, 0x2c, 0xcc, 0x8e, 0xb9, 0x95, 0xc7, 0x28, 0x4c, 0x90, - 0x8f, 0xe1, 0xce, 0x26, 0xc9, 0x9c, 0xfb, 0x92, 0x09, 0xc7, 0xea, 0x18, 0x5d, 0x42, 0x37, 0xb9, - 0xbf, 0x46, 0x74, 0x8b, 0x88, 0xea, 0x84, 0x03, 0x1d, 0xa3, 0xab, 0x15, 0x44, 0x94, 0x26, 0x94, - 0xac, 0x38, 0x12, 0x5e, 0x49, 0x56, 0xe3, 0xdf, 0xc8, 0xca, 0x63, 0x36, 0xb2, 0x36, 0x49, 0x32, - 0x59, 0xcd, 0x54, 0x56, 0x0e, 0x17, 0xb2, 0x36, 0xc4, 0x4c, 0x56, 0x2b, 0x95, 0x95, 0xc3, 0x99, - 0xac, 0xaf, 0x00, 0x12, 0x2e, 0xb8, 0x9c, 0x2e, 0xd5, 0xe9, 0xb7, 0xb1, 0xc7, 0x1f, 0x96, 0x25, - 0x6d, 0xea, 0xa7, 0x47, 0x15, 0xef, 0xcc, 0x0b, 0x25, 0xb5, 0x92, 0x7c, 0xb9, 0x5d, 0x80, 0x77, - 0xde, 0x2e, 0xc0, 0xcf, 0xc1, 0xda, 0x44, 0x6d, 0x77, 0x6a, 0x0d, 0x8c, 0x97, 0xfd, 0xb1, 0xad, - 0x11, 0x13, 0xf4, 0xe1, 0xc8, 0xd6, 0x8b, 0x6e, 0x35, 0x8e, 0x6a, 0x50, 0x45, 0xcd, 0x47, 0x4d, - 0x80, 0xe2, 0xda, 0xdd, 0x67, 0x00, 0xc5, 0xf9, 0xa8, 0xca, 0x8b, 0xae, 0xae, 0x04, 0x4f, 0x4b, - 0x79, 0x97, 0x66, 0x96, 0xc2, 0x7d, 0x1e, 0x2e, 0xe4, 0x12, 0x2b, 0xb8, 0x45, 0x33, 0xcb, 0xfd, - 0x4b, 0x03, 0x98, 0x78, 0x01, 0x1f, 0xf3, 0xc4, 0xe3, 0xe2, 0xfd, 0xfb, 0xef, 0x09, 0xd4, 0x04, - 0xb6, 0xbe, 0x70, 0x74, 0x8c, 0x20, 0xe5, 0x88, 0x74, 0x2a, 0x64, 0x21, 0x39, 0x91, 0x7c, 0x01, - 0x16, 0xcf, 0x1a, 0x5e, 0x38, 0x06, 0x46, 0xed, 0x95, 0xa3, 0xf2, 0x69, 0x90, 0xc5, 0x15, 0x64, - 0xf2, 0x25, 0xc0, 0x32, 0x3f, 0x78, 0xe1, 0x54, 0x30, 0xf4, 0xee, 0x3b, 0xaf, 0x25, 0x8b, 0x2d, - 0xd1, 0xdd, 0xc7, 0x50, 0xc5, 0x2f, 0x50, 0xd3, 0x13, 0x27, 0xae, 0x96, 0x4e, 0x4f, 0xb5, 0xde, - 0x9e, 0x23, 0x56, 0x36, 0x47, 0xdc, 0xa7, 0x60, 0x3e, 0x4f, 0xbf, 0xf3, 0x7d, 0x0f, 0xc6, 0xfd, - 0x49, 0x83, 0x26, 0xe2, 0x03, 0x26, 0x67, 0x4b, 0x9e, 0x90, 0xc7, 0x5b, 0x0f, 0xc6, 0x83, 0x5b, - 0xf1, 0x19, 0xaf, 0x57, 0x7a, 0x28, 0x72, 0xa1, 0xfa, 0xbb, 0x84, 0x1a, 0x65, 0xa1, 0x5d, 0xa8, - 0xe0, 0xd8, 0x37, 0x41, 0xef, 0xbf, 0x48, 0xeb, 0x68, 0xd8, 0x7f, 0x91, 0xd6, 0x11, 0x55, 0xa3, - 0x5e, 0x01, 0xb4, 0x6f, 0x1b, 0xee, 0x2f, 
0x9a, 0x2a, 0x3e, 0x36, 0x57, 0xb5, 0x27, 0xc8, 0xff, - 0xa0, 0x26, 0x24, 0x8f, 0xa7, 0x81, 0x40, 0x5d, 0x06, 0x35, 0x95, 0x39, 0x10, 0x6a, 0xeb, 0xab, - 0x55, 0x38, 0xcb, 0xb7, 0x56, 0x6b, 0xf2, 0x7f, 0xa8, 0x0b, 0xc9, 0x12, 0xa9, 0xd8, 0xe9, 0x50, - 0xad, 0xa1, 0x3d, 0x10, 0xe4, 0x2e, 0x98, 0x3c, 0x9c, 0x4f, 0xf1, 0x52, 0x94, 0xa3, 0xca, 0xc3, - 0xf9, 0x40, 0x90, 0xfb, 0x50, 0x5f, 0x24, 0xd1, 0x2a, 0xf6, 0xc2, 0x85, 0x53, 0xed, 0x18, 0x5d, - 0x8b, 0x6e, 0x6c, 0xd2, 0x06, 0xfd, 0x72, 0x8d, 0x83, 0xad, 0x4e, 0xf5, 0xcb, 0xb5, 0xca, 0x9e, - 0xb0, 0x70, 0xc1, 0x55, 0x92, 0x5a, 0x9a, 0x1d, 0xed, 0x81, 0x70, 0x7f, 0xd5, 0xa0, 0x7a, 0xbc, - 0x5c, 0x85, 0xaf, 0xc8, 0x3e, 0x34, 0x02, 0x2f, 0x9c, 0xaa, 0x56, 0x2a, 0x34, 0x5b, 0x81, 0x17, - 0xaa, 0x1a, 0x1e, 0x08, 0xf4, 0xb3, 0xeb, 0x8d, 0x3f, 0x7b, 0x6b, 0x02, 0x76, 0x9d, 0xf9, 0x7b, - 0xd9, 0x25, 0x18, 0x78, 0x09, 0xf7, 0xcb, 0x97, 0x80, 0x1b, 0xf4, 0xfa, 0xe1, 0x2c, 0x9a, 0x7b, - 0xe1, 0xa2, 0xb8, 0x01, 0xf5, 0x86, 0xe3, 0x57, 0x35, 0x29, 0xae, 0xdd, 0x03, 0xa8, 0xe7, 0xac, - 0x5b, 0xcd, 0xfb, 0xdd, 0x48, 0x3d, 0xb1, 0x5b, 0xef, 0xaa, 0xee, 0xfe, 0x00, 0x2d, 0x4c, 0xce, - 0xe7, 0xff, 0xb5, 0xcb, 0x0e, 0xc0, 0x9c, 0xa9, 0x0c, 0x79, 0x93, 0xed, 0xde, 0x12, 0x9e, 0x07, - 0xa4, 0xb4, 0xa3, 0xbd, 0x37, 0x37, 0xfb, 0xda, 0xef, 0x37, 0xfb, 0xda, 0x9f, 0x37, 0xfb, 0xda, - 0xf7, 0xa6, 0x62, 0xc7, 0x97, 0x97, 0x26, 0xfe, 0xcd, 0x7c, 0xf6, 0x77, 0x00, 0x00, 0x00, 0xff, - 0xff, 0x53, 0x09, 0xe5, 0x37, 0xfe, 0x08, 0x00, 0x00, + 0x13, 0x36, 0x49, 0x89, 0x12, 0x47, 0x87, 0xd0, 0xfb, 0x3b, 0xf9, 0x59, 0xa3, 0x71, 0x54, 0x02, + 0x69, 0x85, 0xa2, 0x90, 0x11, 0xb7, 0x17, 0x0d, 0x1a, 0x14, 0xb0, 0x1d, 0xf9, 0x80, 0x5a, 0x12, + 0xb2, 0x92, 0xd1, 0xa6, 0x37, 0xc2, 0x5a, 0x5a, 0x4b, 0x44, 0xc4, 0x43, 0xb9, 0xab, 0xc0, 0xea, + 0x7b, 0xf4, 0xae, 0x2f, 0xd1, 0xb7, 0x08, 0xd0, 0x9b, 0xf6, 0x05, 0x8a, 0xc2, 0x57, 0x7d, 0x8c, + 0x62, 0x87, 0xa4, 0x48, 0xc5, 0x29, 0xd0, 0xf4, 0x6e, 0xe7, 0x9b, 0x6f, 0x76, 0x3e, 0xee, 0xce, + 0xcc, 0x12, 0x6a, 0x72, 0x15, 0x71, 0xd1, 0x89, 0xe2, 0x50, 0x86, 0x04, 0xa2, 0x38, 0xf4, 0xb9, + 0x9c, 0xf3, 0xa5, 0xd8, 0xdd, 0x99, 0x85, 0xb3, 0x10, 0xe1, 0x7d, 0xb5, 0x4a, 0x18, 0xee, 0xcf, + 0x3a, 0x34, 0x7b, 0x5c, 0xc6, 0xde, 0xa4, 0xc7, 0x25, 0x9b, 0x32, 0xc9, 0xc8, 0x53, 0x28, 0xa9, + 0x3d, 0x1c, 0xad, 0xa5, 0xb5, 0x9b, 0x07, 0x8f, 0x3b, 0xf9, 0x1e, 0x9d, 0x4d, 0x66, 0x6a, 0x8e, + 0x56, 0x11, 0xa7, 0x18, 0x42, 0x3e, 0x03, 0xe2, 0x23, 0x36, 0xbe, 0x66, 0xbe, 0xb7, 0x58, 0x8d, + 0x03, 0xe6, 0x73, 0x47, 0x6f, 0x69, 0x6d, 0x8b, 0xda, 0x89, 0xe7, 0x04, 0x1d, 0x7d, 0xe6, 0x73, + 0x42, 0xa0, 0x34, 0xe7, 0x8b, 0xc8, 0x29, 0xa1, 0x1f, 0xd7, 0x0a, 0x5b, 0x06, 0x9e, 0x74, 0xca, + 0x09, 0xa6, 0xd6, 0xee, 0x0a, 0x20, 0xcf, 0x44, 0x6a, 0x50, 0xb9, 0xec, 0x7f, 0xd3, 0x1f, 0x7c, + 0xdb, 0xb7, 0xb7, 0x94, 0x71, 0x3c, 0xb8, 0xec, 0x8f, 0xba, 0xd4, 0xd6, 0x88, 0x05, 0xe5, 0xd3, + 0xc3, 0xcb, 0xd3, 0xae, 0xad, 0x93, 0x06, 0x58, 0x67, 0xe7, 0xc3, 0xd1, 0xe0, 0x94, 0x1e, 0xf6, + 0x6c, 0x83, 0x10, 0x68, 0xa2, 0x27, 0xc7, 0x4a, 0x2a, 0x74, 0x78, 0xd9, 0xeb, 0x1d, 0xd2, 0x97, + 0x76, 0x99, 0x54, 0xa1, 0x74, 0xde, 0x3f, 0x19, 0xd8, 0x26, 0xa9, 0x43, 0x75, 0x38, 0x3a, 0x1c, + 0x75, 0x87, 0xdd, 0x91, 0x5d, 0x71, 0x9f, 0x81, 0x39, 0x64, 0x7e, 0xb4, 0xe0, 0x64, 0x07, 0xca, + 0xaf, 0xd9, 0x62, 0x99, 0x1c, 0x8b, 0x46, 0x13, 0x83, 0x7c, 0x08, 0x96, 0xf4, 0x7c, 0x2e, 0x24, + 0xf3, 0x23, 0xfc, 0x4e, 0x83, 0xe6, 0x80, 0x1b, 0x42, 0xb5, 0x7b, 0xc3, 0xfd, 0x68, 0xc1, 0x62, + 0xb2, 0x0f, 0xe6, 0x82, 0x5d, 0xf1, 0x85, 0x70, 0xb4, 0x96, 0xd1, 0xae, 0x1d, 0x6c, 0x17, 0xcf, + 0xf5, 0x42, 
0x79, 0x8e, 0x4a, 0x6f, 0xfe, 0x78, 0xb4, 0x45, 0x53, 0x5a, 0x9e, 0x50, 0xff, 0xc7, + 0x84, 0xc6, 0xdb, 0x09, 0x7f, 0x2d, 0x83, 0x75, 0xe6, 0x09, 0x19, 0xce, 0x62, 0xe6, 0x93, 0x87, + 0x60, 0x4d, 0xc2, 0x65, 0x20, 0xc7, 0x5e, 0x20, 0x51, 0x76, 0xe9, 0x6c, 0x8b, 0x56, 0x11, 0x3a, + 0x0f, 0x24, 0xf9, 0x08, 0x6a, 0x89, 0xfb, 0x7a, 0x11, 0x32, 0x99, 0xa4, 0x39, 0xdb, 0xa2, 0x80, + 0xe0, 0x89, 0xc2, 0x88, 0x0d, 0x86, 0x58, 0xfa, 0x98, 0x47, 0xa3, 0x6a, 0x49, 0x1e, 0x80, 0x29, + 0x26, 0x73, 0xee, 0x33, 0xbc, 0xb5, 0x6d, 0x9a, 0x5a, 0xe4, 0x31, 0x34, 0x7f, 0xe4, 0x71, 0x38, + 0x96, 0xf3, 0x98, 0x8b, 0x79, 0xb8, 0x98, 0xe2, 0x0d, 0x6a, 0xb4, 0xa1, 0xd0, 0x51, 0x06, 0x92, + 0x8f, 0x53, 0x5a, 0xae, 0xcb, 0x44, 0x5d, 0x1a, 0xad, 0x2b, 0xfc, 0x38, 0xd3, 0xf6, 0x29, 0xd8, + 0x05, 0x5e, 0x22, 0xb0, 0x82, 0x02, 0x35, 0xda, 0x5c, 0x33, 0x13, 0x91, 0xc7, 0xd0, 0x0c, 0xf8, + 0x8c, 0x49, 0xef, 0x35, 0x1f, 0x8b, 0x88, 0x05, 0xc2, 0xa9, 0xe2, 0x09, 0x3f, 0x28, 0x9e, 0xf0, + 0xd1, 0x72, 0xf2, 0x8a, 0xcb, 0x61, 0xc4, 0x82, 0xf4, 0x98, 0x1b, 0x59, 0x8c, 0xc2, 0x04, 0xf9, + 0x04, 0xee, 0xad, 0x37, 0x99, 0xf2, 0x85, 0x64, 0xc2, 0xb1, 0x5a, 0x46, 0x9b, 0xd0, 0xf5, 0xde, + 0xcf, 0x11, 0xdd, 0x20, 0xa2, 0x3a, 0xe1, 0x40, 0xcb, 0x68, 0x6b, 0x39, 0x11, 0xa5, 0x09, 0x25, + 0x2b, 0x0a, 0x85, 0x57, 0x90, 0x55, 0xfb, 0x37, 0xb2, 0xb2, 0x98, 0xb5, 0xac, 0xf5, 0x26, 0xa9, + 0xac, 0x7a, 0x22, 0x2b, 0x83, 0x73, 0x59, 0x6b, 0x62, 0x2a, 0xab, 0x91, 0xc8, 0xca, 0xe0, 0x54, + 0xd6, 0xd7, 0x00, 0x31, 0x17, 0x5c, 0x8e, 0xe7, 0xea, 0xf4, 0x9b, 0xd8, 0xe3, 0x8f, 0x8a, 0x92, + 0xd6, 0xf5, 0xd3, 0xa1, 0x8a, 0x77, 0xe6, 0x05, 0x92, 0x5a, 0x71, 0xb6, 0xdc, 0x2c, 0xc0, 0x7b, + 0x6f, 0x17, 0xe0, 0x17, 0x60, 0xad, 0xa3, 0x36, 0x3b, 0xb5, 0x02, 0xc6, 0xcb, 0xee, 0xd0, 0xd6, + 0x88, 0x09, 0x7a, 0x7f, 0x60, 0xeb, 0x79, 0xb7, 0x1a, 0x47, 0x15, 0x28, 0xa3, 0xe6, 0xa3, 0x3a, + 0x40, 0x7e, 0xed, 0xee, 0x33, 0x80, 0xfc, 0x7c, 0x54, 0xe5, 0x85, 0xd7, 0xd7, 0x82, 0x27, 0xa5, + 0xbc, 0x4d, 0x53, 0x4b, 0xe1, 0x0b, 0x1e, 0xcc, 0xe4, 0x1c, 0x2b, 0xb8, 0x41, 0x53, 0xcb, 0xfd, + 0x4b, 0x03, 0x18, 0x79, 0x3e, 0x1f, 0xf2, 0xd8, 0xe3, 0xe2, 0xfd, 0xfb, 0xef, 0x00, 0x2a, 0x02, + 0x5b, 0x5f, 0x38, 0x3a, 0x46, 0x90, 0x62, 0x44, 0x32, 0x15, 0xd2, 0x90, 0x8c, 0x48, 0xbe, 0x04, + 0x8b, 0xa7, 0x0d, 0x2f, 0x1c, 0x03, 0xa3, 0x76, 0x8a, 0x51, 0xd9, 0x34, 0x48, 0xe3, 0x72, 0x32, + 0xf9, 0x0a, 0x60, 0x9e, 0x1d, 0xbc, 0x70, 0x4a, 0x18, 0x7a, 0xff, 0x9d, 0xd7, 0x92, 0xc6, 0x16, + 0xe8, 0xee, 0x13, 0x28, 0xe3, 0x17, 0xa8, 0xe9, 0x89, 0x13, 0x57, 0x4b, 0xa6, 0xa7, 0x5a, 0x6f, + 0xce, 0x11, 0x2b, 0x9d, 0x23, 0xee, 0x53, 0x30, 0x2f, 0x92, 0xef, 0x7c, 0xdf, 0x83, 0x71, 0x7f, + 0xd2, 0xa0, 0x8e, 0x78, 0x8f, 0xc9, 0xc9, 0x9c, 0xc7, 0xe4, 0xc9, 0xc6, 0x83, 0xf1, 0xf0, 0x4e, + 0x7c, 0xca, 0xeb, 0x14, 0x1e, 0x8a, 0x4c, 0xa8, 0xfe, 0x2e, 0xa1, 0x46, 0x51, 0x68, 0x1b, 0x4a, + 0x38, 0xf6, 0x4d, 0xd0, 0xbb, 0x2f, 0x92, 0x3a, 0xea, 0x77, 0x5f, 0x24, 0x75, 0x44, 0xd5, 0xa8, + 0x57, 0x00, 0xed, 0xda, 0x86, 0xfb, 0x8b, 0xa6, 0x8a, 0x8f, 0x4d, 0x55, 0xed, 0x09, 0xf2, 0x7f, + 0xa8, 0x08, 0xc9, 0xa3, 0xb1, 0x2f, 0x50, 0x97, 0x41, 0x4d, 0x65, 0xf6, 0x84, 0x4a, 0x7d, 0xbd, + 0x0c, 0x26, 0x59, 0x6a, 0xb5, 0x26, 0x1f, 0x40, 0x55, 0x48, 0x16, 0x4b, 0xc5, 0x4e, 0x86, 0x6a, + 0x05, 0xed, 0x9e, 0x20, 0xf7, 0xc1, 0xe4, 0xc1, 0x74, 0x8c, 0x97, 0xa2, 0x1c, 0x65, 0x1e, 0x4c, + 0x7b, 0x82, 0xec, 0x42, 0x75, 0x16, 0x87, 0xcb, 0xc8, 0x0b, 0x66, 0x4e, 0xb9, 0x65, 0xb4, 0x2d, + 0xba, 0xb6, 0x49, 0x13, 0xf4, 0xab, 0x15, 0x0e, 0xb6, 0x2a, 0xd5, 0xaf, 0x56, 0x6a, 0xf7, 0x98, + 0x05, 0x33, 0xae, 0x36, 0xa9, 0x24, 
0xbb, 0xa3, 0xdd, 0x13, 0xee, 0xef, 0x1a, 0x94, 0x8f, 0xe7, + 0xcb, 0xe0, 0x15, 0xd9, 0x83, 0x9a, 0xef, 0x05, 0x63, 0xd5, 0x4a, 0xb9, 0x66, 0xcb, 0xf7, 0x02, + 0x55, 0xc3, 0x3d, 0x81, 0x7e, 0x76, 0xb3, 0xf6, 0xa7, 0x6f, 0x8d, 0xcf, 0x6e, 0x52, 0x7f, 0x27, + 0xbd, 0x04, 0x03, 0x2f, 0x61, 0xb7, 0x78, 0x09, 0x98, 0xa0, 0xd3, 0x0d, 0x26, 0xe1, 0xd4, 0x0b, + 0x66, 0xf9, 0x0d, 0xa8, 0x37, 0x1c, 0xbf, 0xaa, 0x4e, 0x71, 0xed, 0x3e, 0x87, 0x6a, 0xc6, 0xba, + 0xd3, 0xbc, 0xdf, 0x0d, 0xd4, 0x13, 0xbb, 0xf1, 0xae, 0xea, 0xe4, 0x7f, 0x70, 0xef, 0xe4, 0x62, + 0x70, 0x38, 0x1a, 0x17, 0x1e, 0x5b, 0xf7, 0x07, 0x68, 0x60, 0x46, 0x3e, 0xfd, 0xaf, 0xad, 0xb7, + 0x0f, 0xe6, 0x44, 0xed, 0x90, 0x75, 0xde, 0xf6, 0x9d, 0xaf, 0xc9, 0x02, 0x12, 0xda, 0xd1, 0xce, + 0x9b, 0xdb, 0x3d, 0xed, 0xb7, 0xdb, 0x3d, 0xed, 0xcf, 0xdb, 0x3d, 0xed, 0x7b, 0x53, 0xb1, 0xa3, + 0xab, 0x2b, 0x13, 0x7f, 0x71, 0x3e, 0xff, 0x3b, 0x00, 0x00, 0xff, 0xff, 0xfb, 0x5f, 0xf2, 0x4d, + 0x13, 0x09, 0x00, 0x00, } func (m *MetricMetadata) Marshal() (dAtA []byte, err error) { diff --git a/vendor/github.com/prometheus/prometheus/prompb/types.proto b/vendor/github.com/prometheus/prometheus/prompb/types.proto index 57216b81d..aa322515c 100644 --- a/vendor/github.com/prometheus/prometheus/prompb/types.proto +++ b/vendor/github.com/prometheus/prometheus/prompb/types.proto @@ -169,9 +169,10 @@ message Chunk { // We require this to match chunkenc.Encoding. enum Encoding { - UNKNOWN = 0; - XOR = 1; - HISTOGRAM = 2; + UNKNOWN = 0; + XOR = 1; + HISTOGRAM = 2; + FLOAT_HISTOGRAM = 3; } Encoding type = 3; bytes data = 4; diff --git a/vendor/github.com/prometheus/prometheus/scrape/manager.go b/vendor/github.com/prometheus/prometheus/scrape/manager.go index 69a0eaa1f..d75fe30cf 100644 --- a/vendor/github.com/prometheus/prometheus/scrape/manager.go +++ b/vendor/github.com/prometheus/prometheus/scrape/manager.go @@ -288,10 +288,11 @@ func (m *Manager) ApplyConfig(cfg *config.Config) error { // Cleanup and reload pool if the configuration has changed. var failed bool for name, sp := range m.scrapePools { - if cfg, ok := m.scrapeConfigs[name]; !ok { + switch cfg, ok := m.scrapeConfigs[name]; { + case !ok: sp.stop() delete(m.scrapePools, name) - } else if !reflect.DeepEqual(sp.config, cfg) { + case !reflect.DeepEqual(sp.config, cfg): err := sp.reload(cfg) if err != nil { level.Error(m.logger).Log("msg", "error reloading scrape pool", "err", err, "scrape_pool", name) diff --git a/vendor/github.com/prometheus/prometheus/scrape/scrape.go b/vendor/github.com/prometheus/prometheus/scrape/scrape.go index 3fce6f9dd..5c649e729 100644 --- a/vendor/github.com/prometheus/prometheus/scrape/scrape.go +++ b/vendor/github.com/prometheus/prometheus/scrape/scrape.go @@ -500,9 +500,13 @@ func (sp *scrapePool) Sync(tgs []*targetgroup.Group) { } targetSyncFailed.WithLabelValues(sp.config.JobName).Add(float64(len(failures))) for _, t := range targets { - if !t.Labels().IsEmpty() { + // Replicate .Labels().IsEmpty() with a loop here to avoid generating garbage. 
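Context for the scrape.Sync hunk in progress: Target.Labels() builds a fresh label set on every call, so invoking it merely to test emptiness allocated once per target per sync. The replacement visits labels through a callback instead. The same trick as a free-standing helper (hypothetical name; it relies on the LabelsRange method this patch adds in target.go further down):

package sketch

import (
	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/scrape"
)

// hasPublicLabels reports whether the target exposes at least one public
// label without materializing a labels.Labels value.
func hasPublicLabels(t *scrape.Target) bool {
	nonEmpty := false
	t.LabelsRange(func(labels.Label) { nonEmpty = true })
	return nonEmpty
}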
+ nonEmpty := false + t.LabelsRange(func(l labels.Label) { nonEmpty = true }) + switch { + case nonEmpty: all = append(all, t) - } else if !t.DiscoveredLabels().IsEmpty() { + case !t.discoveredLabels.IsEmpty(): sp.droppedTargets = append(sp.droppedTargets, t) } } @@ -637,7 +641,7 @@ func verifyLabelLimits(lset labels.Labels, limits *labelLimits) error { met := lset.Get(labels.MetricName) if limits.labelLimit > 0 { nbLabels := lset.Len() - if nbLabels > int(limits.labelLimit) { + if nbLabels > limits.labelLimit { return fmt.Errorf("label_limit exceeded (metric: %.50s, number of labels: %d, limit: %d)", met, nbLabels, limits.labelLimit) } } @@ -649,14 +653,14 @@ func verifyLabelLimits(lset labels.Labels, limits *labelLimits) error { return lset.Validate(func(l labels.Label) error { if limits.labelNameLengthLimit > 0 { nameLength := len(l.Name) - if nameLength > int(limits.labelNameLengthLimit) { + if nameLength > limits.labelNameLengthLimit { return fmt.Errorf("label_name_length_limit exceeded (metric: %.50s, label name: %.50s, length: %d, limit: %d)", met, l.Name, nameLength, limits.labelNameLengthLimit) } } if limits.labelValueLengthLimit > 0 { valueLength := len(l.Value) - if valueLength > int(limits.labelValueLengthLimit) { + if valueLength > limits.labelValueLengthLimit { return fmt.Errorf("label_value_length_limit exceeded (metric: %.50s, label name: %.50s, value: %.50q, length: %d, limit: %d)", met, l.Name, l.Value, valueLength, limits.labelValueLengthLimit) } } @@ -666,17 +670,16 @@ func verifyLabelLimits(lset labels.Labels, limits *labelLimits) error { func mutateSampleLabels(lset labels.Labels, target *Target, honor bool, rc []*relabel.Config) labels.Labels { lb := labels.NewBuilder(lset) - targetLabels := target.Labels() if honor { - targetLabels.Range(func(l labels.Label) { + target.LabelsRange(func(l labels.Label) { if !lset.Has(l.Name) { lb.Set(l.Name, l.Value) } }) } else { var conflictingExposedLabels []labels.Label - targetLabels.Range(func(l labels.Label) { + target.LabelsRange(func(l labels.Label) { existingValue := lset.Get(l.Name) if existingValue != "" { conflictingExposedLabels = append(conflictingExposedLabels, labels.Label{Name: l.Name, Value: existingValue}) @@ -686,11 +689,11 @@ func mutateSampleLabels(lset labels.Labels, target *Target, honor bool, rc []*re }) if len(conflictingExposedLabels) > 0 { - resolveConflictingExposedLabels(lb, lset, targetLabels, conflictingExposedLabels) + resolveConflictingExposedLabels(lb, conflictingExposedLabels) } } - res := lb.Labels(labels.EmptyLabels()) + res := lb.Labels() if len(rc) > 0 { res, _ = relabel.Process(res, rc...) 
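The next hunk finishes mutateSampleLabels and then simplifies resolveConflictingExposedLabels: rather than tracking the exposed, target, and already-renamed label sets separately, it asks the builder directly whether a candidate name is taken. Per the documented honor_labels: false behavior the outcome is unchanged: a scraped job="payments" colliding with the target's job="node" still ends up as job="node" plus exported_job="payments", and if the scrape also exposed exported_job, that one is pushed on to exported_exported_job. The renaming loop in isolation (helper name is ours):

package sketch

import (
	"github.com/prometheus/common/model"
	"github.com/prometheus/prometheus/model/labels"
)

// freeName mirrors the simplified loop: keep prepending "exported_" until
// the candidate name is unused in the builder.
func freeName(lb *labels.Builder, name string) string {
	for {
		name = model.ExportedLabelPrefix + name
		if lb.Get(name) == "" {
			return name
		}
	}
}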
@@ -699,47 +702,32 @@ func mutateSampleLabels(lset labels.Labels, target *Target, honor bool, rc []*re return res } -func resolveConflictingExposedLabels(lb *labels.Builder, exposedLabels, targetLabels labels.Labels, conflictingExposedLabels []labels.Label) { +func resolveConflictingExposedLabels(lb *labels.Builder, conflictingExposedLabels []labels.Label) { sort.SliceStable(conflictingExposedLabels, func(i, j int) bool { return len(conflictingExposedLabels[i].Name) < len(conflictingExposedLabels[j].Name) }) - for i, l := range conflictingExposedLabels { + for _, l := range conflictingExposedLabels { newName := l.Name for { newName = model.ExportedLabelPrefix + newName - if !exposedLabels.Has(newName) && - !targetLabels.Has(newName) && - !labelSliceHas(conflictingExposedLabels[:i], newName) { - conflictingExposedLabels[i].Name = newName + if lb.Get(newName) == "" { + lb.Set(newName, l.Value) break } } } - - for _, l := range conflictingExposedLabels { - lb.Set(l.Name, l.Value) - } -} - -func labelSliceHas(lbls []labels.Label, name string) bool { - for _, l := range lbls { - if l.Name == name { - return true - } - } - return false } func mutateReportSampleLabels(lset labels.Labels, target *Target) labels.Labels { lb := labels.NewBuilder(lset) - target.Labels().Range(func(l labels.Label) { + target.LabelsRange(func(l labels.Label) { lb.Set(model.ExportedLabelPrefix+l.Name, lset.Get(l.Name)) lb.Set(l.Name, l.Value) }) - return lb.Labels(labels.EmptyLabels()) + return lb.Labels() } // appender returns an appender for ingested samples from the target. @@ -959,9 +947,10 @@ func (c *scrapeCache) iterDone(flushCache bool) { count := len(c.series) + len(c.droppedSeries) + len(c.metadata) c.metaMtx.Unlock() - if flushCache { + switch { + case flushCache: c.successfulCount = count - } else if count > c.successfulCount*2+1000 { + case count > c.successfulCount*2+1000: // If a target had varying labels in scrapes that ultimately failed, // the caches would grow indefinitely. Force a flush when this happens. // We use the heuristic that this is a doubling of the cache size diff --git a/vendor/github.com/prometheus/prometheus/scrape/target.go b/vendor/github.com/prometheus/prometheus/scrape/target.go index ae952b420..6c4703118 100644 --- a/vendor/github.com/prometheus/prometheus/scrape/target.go +++ b/vendor/github.com/prometheus/prometheus/scrape/target.go @@ -181,6 +181,15 @@ func (t *Target) Labels() labels.Labels { return b.Labels() } +// LabelsRange calls f on each public label of the target. +func (t *Target) LabelsRange(f func(l labels.Label)) { + t.labels.Range(func(l labels.Label) { + if !strings.HasPrefix(l.Name, model.ReservedLabelPrefix) { + f(l) + } + }) +} + // DiscoveredLabels returns a copy of the target's labels before any processing. func (t *Target) DiscoveredLabels() labels.Labels { t.mtx.Lock() @@ -371,7 +380,7 @@ func PopulateLabels(lb *labels.Builder, cfg *config.ScrapeConfig, noDefaultPort } } - preRelabelLabels := lb.Labels(labels.EmptyLabels()) + preRelabelLabels := lb.Labels() keep := relabel.ProcessBuilder(lb, cfg.RelabelConfigs...) // Check if the target was dropped. @@ -404,9 +413,9 @@ func PopulateLabels(lb *labels.Builder, cfg *config.ScrapeConfig, noDefaultPort // Addresses reaching this point are already wrapped in [] if necessary. 
switch scheme { case "http", "": - addr = addr + ":80" + addr += ":80" case "https": - addr = addr + ":443" + addr += ":443" default: return labels.EmptyLabels(), labels.EmptyLabels(), errors.Errorf("invalid scheme: %q", cfg.Scheme) } @@ -467,7 +476,7 @@ func PopulateLabels(lb *labels.Builder, cfg *config.ScrapeConfig, noDefaultPort lb.Set(model.InstanceLabel, addr) } - res = lb.Labels(labels.EmptyLabels()) + res = lb.Labels() err = res.Validate(func(l labels.Label) error { // Check label values are valid, drop the target if not. if !model.LabelValue(l.Value).IsValid() { diff --git a/vendor/github.com/prometheus/prometheus/storage/buffer.go b/vendor/github.com/prometheus/prometheus/storage/buffer.go index 92767cdd7..38f559103 100644 --- a/vendor/github.com/prometheus/prometheus/storage/buffer.go +++ b/vendor/github.com/prometheus/prometheus/storage/buffer.go @@ -19,6 +19,7 @@ import ( "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/tsdb/chunkenc" + "github.com/prometheus/prometheus/tsdb/tsdbutil" ) // BufferedSeriesIterator wraps an iterator with a look-back buffer. @@ -43,7 +44,7 @@ func NewBuffer(delta int64) *BufferedSeriesIterator { func NewBufferIterator(it chunkenc.Iterator, delta int64) *BufferedSeriesIterator { // TODO(codesome): based on encoding, allocate different buffer. bit := &BufferedSeriesIterator{ - buf: newSampleRing(delta, 16), + buf: newSampleRing(delta, 0, chunkenc.ValNone), delta: delta, } bit.Reset(it) @@ -68,11 +69,8 @@ func (b *BufferedSeriesIterator) ReduceDelta(delta int64) bool { // PeekBack returns the nth previous element of the iterator. If there is none buffered, // ok is false. -func (b *BufferedSeriesIterator) PeekBack(n int) ( - t int64, v float64, h *histogram.Histogram, fh *histogram.FloatHistogram, ok bool, -) { - s, ok := b.buf.nthLast(n) - return s.t, s.v, s.h, s.fh, ok +func (b *BufferedSeriesIterator) PeekBack(n int) (sample tsdbutil.Sample, ok bool) { + return b.buf.nthLast(n) } // Buffer returns an iterator over the buffered data. Invalidates previously @@ -122,14 +120,14 @@ func (b *BufferedSeriesIterator) Next() chunkenc.ValueType { case chunkenc.ValNone: return chunkenc.ValNone case chunkenc.ValFloat: - t, v := b.it.At() - b.buf.add(sample{t: t, v: v}) + t, f := b.it.At() + b.buf.addF(fSample{t: t, f: f}) case chunkenc.ValHistogram: t, h := b.it.AtHistogram() - b.buf.add(sample{t: t, h: h}) + b.buf.addH(hSample{t: t, h: h}) case chunkenc.ValFloatHistogram: t, fh := b.it.AtFloatHistogram() - b.buf.add(sample{t: t, fh: fh}) + b.buf.addFH(fhSample{t: t, fh: fh}) default: panic(fmt.Errorf("BufferedSeriesIterator: unknown value type %v", b.valueType)) } @@ -166,56 +164,133 @@ func (b *BufferedSeriesIterator) Err() error { return b.it.Err() } -// TODO(beorn7): Consider having different sample types for different value types. 
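The removed TODO above is exactly what the rest of this buffer.go rewrite resolves: the catch-all sample struct (a float plus two pointers, at most one of which was ever set) becomes three concrete types, fSample, hSample, and fhSample, each implementing tsdbutil.Sample. Type() is now a constant per type, and calling a mismatched accessor panics instead of silently returning a zero value. Consumer-side dispatch looks like this (sketch; the helper is ours):

package sketch

import (
	"fmt"

	"github.com/prometheus/prometheus/tsdb/chunkenc"
	"github.com/prometheus/prometheus/tsdb/tsdbutil"
)

// describe dispatches on the sample's value type; calling the wrong
// accessor (e.g. F() on an hSample) panics under the new scheme.
func describe(s tsdbutil.Sample) string {
	switch s.Type() {
	case chunkenc.ValHistogram:
		return fmt.Sprintf("histogram@%d: %v", s.T(), s.H())
	case chunkenc.ValFloatHistogram:
		return fmt.Sprintf("float histogram@%d: %v", s.T(), s.FH())
	default:
		return fmt.Sprintf("float@%d: %g", s.T(), s.F())
	}
}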
-type sample struct { +type fSample struct { + t int64 + f float64 +} + +func (s fSample) T() int64 { + return s.t +} + +func (s fSample) F() float64 { + return s.f +} + +func (s fSample) H() *histogram.Histogram { + panic("H() called for fSample") +} + +func (s fSample) FH() *histogram.FloatHistogram { + panic("FH() called for fSample") +} + +func (s fSample) Type() chunkenc.ValueType { + return chunkenc.ValFloat +} + +type hSample struct { + t int64 + h *histogram.Histogram +} + +func (s hSample) T() int64 { + return s.t +} + +func (s hSample) F() float64 { + panic("F() called for hSample") +} + +func (s hSample) H() *histogram.Histogram { + return s.h +} + +func (s hSample) FH() *histogram.FloatHistogram { + return s.h.ToFloat() +} + +func (s hSample) Type() chunkenc.ValueType { + return chunkenc.ValHistogram +} + +type fhSample struct { t int64 - v float64 - h *histogram.Histogram fh *histogram.FloatHistogram } -func (s sample) T() int64 { +func (s fhSample) T() int64 { return s.t } -func (s sample) V() float64 { - return s.v +func (s fhSample) F() float64 { + panic("F() called for fhSample") } -func (s sample) H() *histogram.Histogram { - return s.h +func (s fhSample) H() *histogram.Histogram { + panic("H() called for fhSample") } -func (s sample) FH() *histogram.FloatHistogram { +func (s fhSample) FH() *histogram.FloatHistogram { return s.fh } -func (s sample) Type() chunkenc.ValueType { - switch { - case s.h != nil: - return chunkenc.ValHistogram - case s.fh != nil: - return chunkenc.ValFloatHistogram - default: - return chunkenc.ValFloat - } +func (s fhSample) Type() chunkenc.ValueType { + return chunkenc.ValFloatHistogram } type sampleRing struct { delta int64 - buf []sample // lookback buffer - i int // position of most recent element in ring buffer - f int // position of first element in ring buffer - l int // number of elements in buffer + // Lookback buffers. We use iBuf for mixed samples, but one of the three + // concrete ones for homogenous samples. (Only one of the four bufs is + // allowed to be populated!) This avoids the overhead of the interface + // wrapper for the happy (and by far most common) case of homogenous + // samples. + iBuf []tsdbutil.Sample + fBuf []fSample + hBuf []hSample + fhBuf []fhSample + bufInUse bufType + + i int // Position of most recent element in ring buffer. + f int // Position of first element in ring buffer. + l int // Number of elements in buffer. it sampleRingIterator } -func newSampleRing(delta int64, sz int) *sampleRing { - r := &sampleRing{delta: delta, buf: make([]sample, sz)} - r.reset() +type bufType int +const ( + noBuf bufType = iota // Nothing yet stored in sampleRing. + iBuf + fBuf + hBuf + fhBuf +) + +// newSampleRing creates a new sampleRing. If you do not know the preferred +// value type yet, use a size of 0 (in which case the provided typ doesn't +// matter). On the first add, a buffer of size 16 will be allocated with the +// preferred type being the type of the first added sample. +func newSampleRing(delta int64, size int, typ chunkenc.ValueType) *sampleRing { + r := &sampleRing{delta: delta} + r.reset() + if size <= 0 { + // Will initialize on first add.
+ return r + } + switch typ { + case chunkenc.ValFloat: + r.fBuf = make([]fSample, size) + case chunkenc.ValHistogram: + r.hBuf = make([]hSample, size) + case chunkenc.ValFloatHistogram: + r.fhBuf = make([]fhSample, size) + default: + r.iBuf = make([]tsdbutil.Sample, size) + } return r } @@ -223,6 +298,7 @@ func (r *sampleRing) reset() { r.l = 0 r.i = -1 r.f = 0 + r.bufInUse = noBuf } // Returns the current iterator. Invalidates previously returned iterators. @@ -236,7 +312,7 @@ type sampleRingIterator struct { r *sampleRing i int t int64 - v float64 + f float64 h *histogram.Histogram fh *histogram.FloatHistogram } @@ -246,17 +322,36 @@ func (it *sampleRingIterator) Next() chunkenc.ValueType { if it.i >= it.r.l { return chunkenc.ValNone } - s := it.r.at(it.i) - it.t = s.t - switch { - case s.h != nil: + switch it.r.bufInUse { + case fBuf: + s := it.r.atF(it.i) + it.t = s.t + it.f = s.f + return chunkenc.ValFloat + case hBuf: + s := it.r.atH(it.i) + it.t = s.t it.h = s.h return chunkenc.ValHistogram - case s.fh != nil: + case fhBuf: + s := it.r.atFH(it.i) + it.t = s.t it.fh = s.fh return chunkenc.ValFloatHistogram + } + s := it.r.at(it.i) + it.t = s.T() + switch s.Type() { + case chunkenc.ValHistogram: + it.h = s.H() + it.fh = nil + return chunkenc.ValHistogram + case chunkenc.ValFloatHistogram: + it.fh = s.FH() + it.h = nil + return chunkenc.ValFloatHistogram default: - it.v = s.v + it.f = s.F() return chunkenc.ValFloat } } @@ -270,7 +365,7 @@ func (it *sampleRingIterator) Err() error { } func (it *sampleRingIterator) At() (int64, float64) { - return it.t, it.v + return it.t, it.f } func (it *sampleRingIterator) AtHistogram() (int64, *histogram.Histogram) { @@ -288,22 +383,321 @@ func (it *sampleRingIterator) AtT() int64 { return it.t } -func (r *sampleRing) at(i int) sample { - j := (r.f + i) % len(r.buf) - return r.buf[j] +func (r *sampleRing) at(i int) tsdbutil.Sample { + j := (r.f + i) % len(r.iBuf) + return r.iBuf[j] +} + +func (r *sampleRing) atF(i int) fSample { + j := (r.f + i) % len(r.fBuf) + return r.fBuf[j] +} + +func (r *sampleRing) atH(i int) hSample { + j := (r.f + i) % len(r.hBuf) + return r.hBuf[j] +} + +func (r *sampleRing) atFH(i int) fhSample { + j := (r.f + i) % len(r.fhBuf) + return r.fhBuf[j] +} + +// add adds a sample to the ring buffer and frees all samples that fall out of +// the delta range. Note that this method works for any sample +// implementation. If you know you are dealing with one of the implementations +// from this package (fSample, hSample, fhSample), call one of the specialized +// methods addF, addH, or addFH for better performance. +func (r *sampleRing) add(s tsdbutil.Sample) { + if r.bufInUse == noBuf { + // First sample. + switch s := s.(type) { + case fSample: + r.bufInUse = fBuf + r.fBuf = addF(s, r.fBuf, r) + case hSample: + r.bufInUse = hBuf + r.hBuf = addH(s, r.hBuf, r) + case fhSample: + r.bufInUse = fhBuf + r.fhBuf = addFH(s, r.fhBuf, r) + } + return + } + if r.bufInUse != iBuf { + // Nothing added to the interface buf yet. Let's check if we can + // stay specialized. + switch s := s.(type) { + case fSample: + if r.bufInUse == fBuf { + r.fBuf = addF(s, r.fBuf, r) + return + } + case hSample: + if r.bufInUse == hBuf { + r.hBuf = addH(s, r.hBuf, r) + return + } + case fhSample: + if r.bufInUse == fhBuf { + r.fhBuf = addFH(s, r.fhBuf, r) + return + } + } + // The new sample isn't a fit for the already existing + // ones. Copy the latter into the interface buffer where needed. 
+ switch r.bufInUse { + case fBuf: + for _, s := range r.fBuf { + r.iBuf = append(r.iBuf, s) + } + r.fBuf = nil + case hBuf: + for _, s := range r.hBuf { + r.iBuf = append(r.iBuf, s) + } + r.hBuf = nil + case fhBuf: + for _, s := range r.fhBuf { + r.iBuf = append(r.iBuf, s) + } + r.fhBuf = nil + } + r.bufInUse = iBuf + } + r.iBuf = addSample(s, r.iBuf, r) +} + +// addF is a version of the add method specialized for fSample. +func (r *sampleRing) addF(s fSample) { + switch r.bufInUse { + case fBuf: // Add to existing fSamples. + r.fBuf = addF(s, r.fBuf, r) + case noBuf: // Add first sample. + r.fBuf = addF(s, r.fBuf, r) + r.bufInUse = fBuf + case iBuf: // Already have interface samples. Add to the interface buf. + r.iBuf = addSample(s, r.iBuf, r) + default: + // Already have specialized samples that are not fSamples. + // Need to call the checked add method for conversion. + r.add(s) + } +} + +// addH is a version of the add method specialized for hSample. +func (r *sampleRing) addH(s hSample) { + switch r.bufInUse { + case hBuf: // Add to existing hSamples. + r.hBuf = addH(s, r.hBuf, r) + case noBuf: // Add first sample. + r.hBuf = addH(s, r.hBuf, r) + r.bufInUse = hBuf + case iBuf: // Already have interface samples. Add to the interface buf. + r.iBuf = addSample(s, r.iBuf, r) + default: + // Already have specialized samples that are not hSamples. + // Need to call the checked add method for conversion. + r.add(s) + } +} + +// addFH is a version of the add method specialized for fhSample. +func (r *sampleRing) addFH(s fhSample) { + switch r.bufInUse { + case fhBuf: // Add to existing fhSamples. + r.fhBuf = addFH(s, r.fhBuf, r) + case noBuf: // Add first sample. + r.fhBuf = addFH(s, r.fhBuf, r) + r.bufInUse = fhBuf + case iBuf: // Already have interface samples. Add to the interface buf. + r.iBuf = addSample(s, r.iBuf, r) + default: + // Already have specialized samples that are not fhSamples. + // Need to call the checked add method for conversion. + r.add(s) + } +} + +// genericAdd is a generic implementation of adding a tsdbutil.Sample +// implementation to a buffer of a sample ring. However, the Go compiler +// currently (go1.20) decides to not expand the code during compile time, but +// creates dynamic code to handle the different types. That has a significant +// overhead during runtime, noticeable in PromQL benchmarks. For example, the +// "RangeQuery/expr=rate(a_hundred[1d]),steps=.*" benchmarks show about 7% +// longer runtime, 9% higher allocation size, and 10% more allocations. +// Therefore, genericAdd has been manually implemented for all the types +// (addSample, addF, addH, addFH) below. +// +// func genericAdd[T tsdbutil.Sample](s T, buf []T, r *sampleRing) []T { +// l := len(buf) +// // Grow the ring buffer if it fits no more elements. +// if l == 0 { +// buf = make([]T, 16) +// l = 16 +// } +// if l == r.l { +// newBuf := make([]T, 2*l) +// copy(newBuf[l+r.f:], buf[r.f:]) +// copy(newBuf, buf[:r.f]) +// +// buf = newBuf +// r.i = r.f +// r.f += l +// l = 2 * l +// } else { +// r.i++ +// if r.i >= l { +// r.i -= l +// } +// } +// +// buf[r.i] = s +// r.l++ +// +// // Free head of the buffer of samples that just fell out of the range. +// tmin := s.T() - r.delta +// for buf[r.f].T() < tmin { +// r.f++ +// if r.f >= l { +// r.f -= l +// } +// r.l-- +// } +// return buf +// } + +// addSample is a handcoded specialization of genericAdd (see above). 
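Before the four handcoded specializations that start below (addSample, addF, addH, addFH): they all share the same grow-and-rotate arithmetic, which is easiest to see with small numbers. A toy trace of the full-ring growth path, with strings standing in for samples:

package main

import "fmt"

func main() {
	// Full ring of 4: f=3 points at the oldest element "s0", the newest
	// "s3" sits at index 2, so logical order is s0 s1 s2 s3.
	buf := []string{"s1", "s2", "s3", "s0"}
	l, f := len(buf), 3
	newBuf := make([]string, 2*l)
	copy(newBuf[l+f:], buf[f:]) // wrapped tail: "s0" lands at index 7
	copy(newBuf, buf[:f])       // head: "s1".."s3" stay at 0..2
	i := f                      // write index reuses the old f; f itself moves to f+l
	newBuf[i] = "s4"            // the append lands right after "s3"
	fmt.Printf("%q f=%d\n", newBuf, f+l)
	// ["s1" "s2" "s3" "s4" "" "" "" "s0"] f=7 — logical order s0 s1 s2 s3 s4
}

After doubling, the oldest element sits at f+l, the head stays at 0..f-1, and the write index reuses the old f, so the new sample lands immediately after the previous newest one.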
+func addSample(s tsdbutil.Sample, buf []tsdbutil.Sample, r *sampleRing) []tsdbutil.Sample { + l := len(buf) + // Grow the ring buffer if it fits no more elements. + if l == 0 { + buf = make([]tsdbutil.Sample, 16) + l = 16 + } + if l == r.l { + newBuf := make([]tsdbutil.Sample, 2*l) + copy(newBuf[l+r.f:], buf[r.f:]) + copy(newBuf, buf[:r.f]) + + buf = newBuf + r.i = r.f + r.f += l + l = 2 * l + } else { + r.i++ + if r.i >= l { + r.i -= l + } + } + + buf[r.i] = s + r.l++ + + // Free head of the buffer of samples that just fell out of the range. + tmin := s.T() - r.delta + for buf[r.f].T() < tmin { + r.f++ + if r.f >= l { + r.f -= l + } + r.l-- + } + return buf +} + +// addF is a handcoded specialization of genericAdd (see above). +func addF(s fSample, buf []fSample, r *sampleRing) []fSample { + l := len(buf) + // Grow the ring buffer if it fits no more elements. + if l == 0 { + buf = make([]fSample, 16) + l = 16 + } + if l == r.l { + newBuf := make([]fSample, 2*l) + copy(newBuf[l+r.f:], buf[r.f:]) + copy(newBuf, buf[:r.f]) + + buf = newBuf + r.i = r.f + r.f += l + l = 2 * l + } else { + r.i++ + if r.i >= l { + r.i -= l + } + } + + buf[r.i] = s + r.l++ + + // Free head of the buffer of samples that just fell out of the range. + tmin := s.T() - r.delta + for buf[r.f].T() < tmin { + r.f++ + if r.f >= l { + r.f -= l + } + r.l-- + } + return buf +} + +// addH is a handcoded specialization of genericAdd (see above). +func addH(s hSample, buf []hSample, r *sampleRing) []hSample { + l := len(buf) + // Grow the ring buffer if it fits no more elements. + if l == 0 { + buf = make([]hSample, 16) + l = 16 + } + if l == r.l { + newBuf := make([]hSample, 2*l) + copy(newBuf[l+r.f:], buf[r.f:]) + copy(newBuf, buf[:r.f]) + + buf = newBuf + r.i = r.f + r.f += l + l = 2 * l + } else { + r.i++ + if r.i >= l { + r.i -= l + } + } + + buf[r.i] = s + r.l++ + + // Free head of the buffer of samples that just fell out of the range. + tmin := s.T() - r.delta + for buf[r.f].T() < tmin { + r.f++ + if r.f >= l { + r.f -= l + } + r.l-- + } + return buf } -// add adds a sample to the ring buffer and frees all samples that fall -// out of the delta range. -func (r *sampleRing) add(s sample) { - l := len(r.buf) +// addFH is a handcoded specialization of genericAdd (see above). +func addFH(s fhSample, buf []fhSample, r *sampleRing) []fhSample { + l := len(buf) // Grow the ring buffer if it fits no more elements. + if l == 0 { + buf = make([]fhSample, 16) + l = 16 + } if l == r.l { - buf := make([]sample, 2*l) - copy(buf[l+r.f:], r.buf[r.f:]) - copy(buf, r.buf[:r.f]) + newBuf := make([]fhSample, 2*l) + copy(newBuf[l+r.f:], buf[r.f:]) + copy(newBuf, buf[:r.f]) - r.buf = buf + buf = newBuf r.i = r.f r.f += l l = 2 * l @@ -314,18 +708,19 @@ func (r *sampleRing) add(s sample) { } } - r.buf[r.i] = s + buf[r.i] = s r.l++ // Free head of the buffer of samples that just fell out of the range. 
- tmin := s.t - r.delta - for r.buf[r.f].t < tmin { + tmin := s.T() - r.delta + for buf[r.f].T() < tmin { r.f++ if r.f >= l { r.f -= l } r.l-- } + return buf } // reduceDelta lowers the buffered time delta, dropping any samples that are @@ -340,39 +735,98 @@ func (r *sampleRing) reduceDelta(delta int64) bool { return true } + switch r.bufInUse { + case fBuf: + genericReduceDelta(r.fBuf, r) + case hBuf: + genericReduceDelta(r.hBuf, r) + case fhBuf: + genericReduceDelta(r.fhBuf, r) + default: + genericReduceDelta(r.iBuf, r) + } + return true +} + +func genericReduceDelta[T tsdbutil.Sample](buf []T, r *sampleRing) { // Free head of the buffer of samples that just fell out of the range. - l := len(r.buf) - tmin := r.buf[r.i].t - delta - for r.buf[r.f].t < tmin { + l := len(buf) + tmin := buf[r.i].T() - r.delta + for buf[r.f].T() < tmin { r.f++ if r.f >= l { r.f -= l } r.l-- } - return true } // nthLast returns the nth most recent element added to the ring. -func (r *sampleRing) nthLast(n int) (sample, bool) { +func (r *sampleRing) nthLast(n int) (tsdbutil.Sample, bool) { if n > r.l { - return sample{}, false + return fSample{}, false + } + i := r.l - n + switch r.bufInUse { + case fBuf: + return r.atF(i), true + case hBuf: + return r.atH(i), true + case fhBuf: + return r.atFH(i), true + default: + return r.at(i), true } - return r.at(r.l - n), true } -func (r *sampleRing) samples() []sample { - res := make([]sample, r.l) +func (r *sampleRing) samples() []tsdbutil.Sample { + res := make([]tsdbutil.Sample, r.l) k := r.f + r.l var j int - if k > len(r.buf) { - k = len(r.buf) - j = r.l - k + r.f - } - n := copy(res, r.buf[r.f:k]) - copy(res[n:], r.buf[:j]) + switch r.bufInUse { + case iBuf: + if k > len(r.iBuf) { + k = len(r.iBuf) + j = r.l - k + r.f + } + n := copy(res, r.iBuf[r.f:k]) + copy(res[n:], r.iBuf[:j]) + case fBuf: + if k > len(r.fBuf) { + k = len(r.fBuf) + j = r.l - k + r.f + } + resF := make([]fSample, r.l) + n := copy(resF, r.fBuf[r.f:k]) + copy(resF[n:], r.fBuf[:j]) + for i, s := range resF { + res[i] = s + } + case hBuf: + if k > len(r.hBuf) { + k = len(r.hBuf) + j = r.l - k + r.f + } + resH := make([]hSample, r.l) + n := copy(resH, r.hBuf[r.f:k]) + copy(resH[n:], r.hBuf[:j]) + for i, s := range resH { + res[i] = s + } + case fhBuf: + if k > len(r.fhBuf) { + k = len(r.fhBuf) + j = r.l - k + r.f + } + resFH := make([]fhSample, r.l) + n := copy(resFH, r.fhBuf[r.f:k]) + copy(resFH[n:], r.fhBuf[:j]) + for i, s := range resFH { + res[i] = s + } + } return res } diff --git a/vendor/github.com/prometheus/prometheus/storage/fanout.go b/vendor/github.com/prometheus/prometheus/storage/fanout.go index 4f995afba..a9db4f628 100644 --- a/vendor/github.com/prometheus/prometheus/storage/fanout.go +++ b/vendor/github.com/prometheus/prometheus/storage/fanout.go @@ -222,9 +222,10 @@ func (f *fanoutAppender) Rollback() (err error) { for _, appender := range f.secondaries { rollbackErr := appender.Rollback() - if err == nil { + switch { + case err == nil: err = rollbackErr - } else if rollbackErr != nil { + case rollbackErr != nil: level.Error(f.logger).Log("msg", "Squashed rollback error on rollback", "err", rollbackErr) } } diff --git a/vendor/github.com/prometheus/prometheus/storage/interface.go b/vendor/github.com/prometheus/prometheus/storage/interface.go index 5cf70a351..b282f1fc6 100644 --- a/vendor/github.com/prometheus/prometheus/storage/interface.go +++ b/vendor/github.com/prometheus/prometheus/storage/interface.go @@ -99,7 +99,7 @@ type MockQueryable struct { MockQuerier Querier } -func (q 
*MockQueryable) Querier(ctx context.Context, mint, maxt int64) (Querier, error) { +func (q *MockQueryable) Querier(context.Context, int64, int64) (Querier, error) { return q.MockQuerier, nil } @@ -118,11 +118,11 @@ type MockQuerier struct { SelectMockFunction func(sortSeries bool, hints *SelectHints, matchers ...*labels.Matcher) SeriesSet } -func (q *MockQuerier) LabelValues(name string, matchers ...*labels.Matcher) ([]string, Warnings, error) { +func (q *MockQuerier) LabelValues(string, ...*labels.Matcher) ([]string, Warnings, error) { return nil, nil, nil } -func (q *MockQuerier) LabelNames(matchers ...*labels.Matcher) ([]string, Warnings, error) { +func (q *MockQuerier) LabelNames(...*labels.Matcher) ([]string, Warnings, error) { return nil, nil, nil } diff --git a/vendor/github.com/prometheus/prometheus/storage/merge.go b/vendor/github.com/prometheus/prometheus/storage/merge.go index 8db1f7ae8..c0665d720 100644 --- a/vendor/github.com/prometheus/prometheus/storage/merge.go +++ b/vendor/github.com/prometheus/prometheus/storage/merge.go @@ -197,13 +197,14 @@ func mergeStrings(a, b []string) []string { res := make([]string, 0, maxl*10/9) for len(a) > 0 && len(b) > 0 { - if a[0] == b[0] { + switch { + case a[0] == b[0]: res = append(res, a[0]) a, b = a[1:], b[1:] - } else if a[0] < b[0] { + case a[0] < b[0]: res = append(res, a[0]) a = a[1:] - } else { + default: res = append(res, b[0]) b = b[1:] } @@ -722,12 +723,11 @@ func (c *compactChunkIterator) Next() bool { break } - if next.MinTime == prev.MinTime && - next.MaxTime == prev.MaxTime && - bytes.Equal(next.Chunk.Bytes(), prev.Chunk.Bytes()) { - // 1:1 duplicates, skip it. - } else { - // We operate on same series, so labels does not matter here. + // Only do something if it is not a perfect duplicate. + if next.MinTime != prev.MinTime || + next.MaxTime != prev.MaxTime || + !bytes.Equal(next.Chunk.Bytes(), prev.Chunk.Bytes()) { + // We operate on same series, so labels do not matter here. overlapping = append(overlapping, newChunkToSeriesDecoder(labels.EmptyLabels(), next)) if next.MaxTime > oMaxTime { oMaxTime = next.MaxTime diff --git a/vendor/github.com/prometheus/prometheus/storage/series.go b/vendor/github.com/prometheus/prometheus/storage/series.go index dcb6dd82e..f609df3f0 100644 --- a/vendor/github.com/prometheus/prometheus/storage/series.go +++ b/vendor/github.com/prometheus/prometheus/storage/series.go @@ -109,7 +109,7 @@ func (it *listSeriesIterator) Reset(samples Samples) { func (it *listSeriesIterator) At() (int64, float64) { s := it.samples.Get(it.idx) - return s.T(), s.V() + return s.T(), s.F() } func (it *listSeriesIterator) AtHistogram() (int64, *histogram.Histogram) { @@ -376,10 +376,17 @@ func (e errChunksIterator) Err() error { return e.err } // ExpandSamples iterates over all samples in the iterator, buffering all in slice. // Optionally it takes samples constructor, useful when you want to compare sample slices with different // sample implementations. if nil, sample type from this package will be used. 
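The MockQueryable/MockQuerier hunks above only drop unused parameter names. The ExpandSamples hunk below is the test-helper side of the typed-sample split: the default (nil) constructor must now pick one of the three concrete types, and float values still get NaN substituted with -42 so slices compare equal. Typical use, sketched (helper name is ours):

package sketch

import (
	"fmt"

	"github.com/prometheus/prometheus/storage"
	"github.com/prometheus/prometheus/tsdb/chunkenc"
)

// dump expands an iterator with the default constructor, which now yields
// fSample/hSample/fhSample values behind the tsdbutil.Sample interface.
func dump(iter chunkenc.Iterator) error {
	samples, err := storage.ExpandSamples(iter, nil)
	if err != nil {
		return err
	}
	for _, s := range samples {
		fmt.Println(s.T(), s.Type())
	}
	return nil
}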
-func ExpandSamples(iter chunkenc.Iterator, newSampleFn func(t int64, v float64, h *histogram.Histogram, fh *histogram.FloatHistogram) tsdbutil.Sample) ([]tsdbutil.Sample, error) { +func ExpandSamples(iter chunkenc.Iterator, newSampleFn func(t int64, f float64, h *histogram.Histogram, fh *histogram.FloatHistogram) tsdbutil.Sample) ([]tsdbutil.Sample, error) { if newSampleFn == nil { - newSampleFn = func(t int64, v float64, h *histogram.Histogram, fh *histogram.FloatHistogram) tsdbutil.Sample { - return sample{t, v, h, fh} + newSampleFn = func(t int64, f float64, h *histogram.Histogram, fh *histogram.FloatHistogram) tsdbutil.Sample { + switch { + case h != nil: + return hSample{t, h} + case fh != nil: + return fhSample{t, fh} + default: + return fSample{t, f} + } } } @@ -389,12 +396,12 @@ func ExpandSamples(iter chunkenc.Iterator, newSampleFn func(t int64, v float64, case chunkenc.ValNone: return result, iter.Err() case chunkenc.ValFloat: - t, v := iter.At() + t, f := iter.At() // NaNs can't be compared normally, so substitute for another value. - if math.IsNaN(v) { - v = -42 + if math.IsNaN(f) { + f = -42 } - result = append(result, newSampleFn(t, v, nil, nil)) + result = append(result, newSampleFn(t, f, nil, nil)) case chunkenc.ValHistogram: t, h := iter.AtHistogram() result = append(result, newSampleFn(t, 0, h, nil)) diff --git a/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/bstream.go b/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/bstream.go index 60531023b..7b17f4686 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/bstream.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/bstream.go @@ -182,7 +182,7 @@ func (b *bstreamReader) readBits(nbits uint8) (uint64, error) { } bitmask = (uint64(1) << nbits) - 1 - v = v | ((b.buffer >> (b.valid - nbits)) & bitmask) + v |= ((b.buffer >> (b.valid - nbits)) & bitmask) b.valid -= nbits return v, nil @@ -242,13 +242,13 @@ func (b *bstreamReader) loadNextBuffer(nbits uint8) bool { if b.streamOffset+nbytes == len(b.stream) { // There can be concurrent writes happening on the very last byte // of the stream, so use the copy we took at initialization time. - buffer = buffer | uint64(b.last) + buffer |= uint64(b.last) // Read up to the byte before skip = 1 } for i := 0; i < nbytes-skip; i++ { - buffer = buffer | (uint64(b.stream[b.streamOffset+i]) << uint(8*(nbytes-i-1))) + buffer |= (uint64(b.stream[b.streamOffset+i]) << uint(8*(nbytes-i-1))) } b.buffer = buffer diff --git a/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/chunk.go b/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/chunk.go index b7d240123..1ebef3eb1 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/chunk.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/chunk.go @@ -47,20 +47,9 @@ func (e Encoding) String() string { return "" } -// Chunk encodings for out-of-order chunks. -// These encodings must be only used by the Head block for its internal bookkeeping. -const ( - OutOfOrderMask = 0b10000000 - EncOOOXOR = EncXOR | OutOfOrderMask -) - -func IsOutOfOrderChunk(e Encoding) bool { - return (e & OutOfOrderMask) != 0 -} - // IsValidEncoding returns true for supported encodings. func IsValidEncoding(e Encoding) bool { - return e == EncXOR || e == EncOOOXOR || e == EncHistogram || e == EncFloatHistogram + return e == EncXOR || e == EncHistogram || e == EncFloatHistogram } // Chunk holds a sequence of sample pairs that can be iterated over and appended to. 
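With the OutOfOrderMask/EncOOOXOR constants deleted above, chunk encodings are a plain enum again, and it lines up value-for-value with the prompb Chunk.Encoding extension earlier in this patch. The chunkenc ordering below is the upstream iota order, stated here as an assumption rather than quoted from this diff:

// Correspondence after this patch:
//
//	chunkenc.EncNone           0  <->  prompb.Chunk_UNKNOWN          0
//	chunkenc.EncXOR            1  <->  prompb.Chunk_XOR              1
//	chunkenc.EncHistogram      2  <->  prompb.Chunk_HISTOGRAM        2
//	chunkenc.EncFloatHistogram 3  <->  prompb.Chunk_FLOAT_HISTOGRAM  3

Out-of-order-ness now has to travel next to the chunk instead of inside its encoding byte; see the isOOO field added to chunkWriteJob at the end of this section.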
@@ -107,7 +96,7 @@ type Iterator interface { // timestamp equal or greater than t. If the current sample found by a // previous `Next` or `Seek` operation already has this property, Seek // has no effect. If a sample has been found, Seek returns the type of - // its value. Otherwise, it returns ValNone, after with the iterator is + // its value. Otherwise, it returns ValNone, after which the iterator is // exhausted. Seek(t int64) ValueType // At returns the current timestamp/value pair if the value is a float. @@ -262,7 +251,7 @@ func NewPool() Pool { func (p *pool) Get(e Encoding, b []byte) (Chunk, error) { switch e { - case EncXOR, EncOOOXOR: + case EncXOR: c := p.xor.Get().(*XORChunk) c.b.stream = b c.b.count = 0 @@ -283,7 +272,7 @@ func (p *pool) Get(e Encoding, b []byte) (Chunk, error) { func (p *pool) Put(c Chunk) error { switch c.Encoding() { - case EncXOR, EncOOOXOR: + case EncXOR: xc, ok := c.(*XORChunk) // This may happen often with wrapped chunks. Nothing we can really do about // it but returning an error would cause a lot of allocations again. Thus, @@ -327,7 +316,7 @@ func (p *pool) Put(c Chunk) error { // bytes. func FromData(e Encoding, d []byte) (Chunk, error) { switch e { - case EncXOR, EncOOOXOR: + case EncXOR: return &XORChunk{b: bstream{count: 0, stream: d}}, nil case EncHistogram: return &HistogramChunk{b: bstream{count: 0, stream: d}}, nil diff --git a/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/float_histogram.go b/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/float_histogram.go index b462c6d9f..0349de9ab 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/float_histogram.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/float_histogram.go @@ -107,7 +107,7 @@ func (c *FloatHistogramChunk) Appender() (Appender, error) { // To get an appender, we must know the state it would have if we had // appended all existing data from scratch. We iterate through the end // and populate via the iterator's state. - for it.Next() == ValFloatHistogram { + for it.Next() == ValFloatHistogram { // nolint:revive } if err := it.Err(); err != nil { return nil, err @@ -785,7 +785,7 @@ func (it *floatHistogramIterator) Next() ValueType { it.err = err return ValNone } - it.tDelta = it.tDelta + tDod + it.tDelta += tDod it.t += it.tDelta if ok := it.readXor(&it.cnt.value, &it.cnt.leading, &it.cnt.trailing); !ok { diff --git a/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/histogram.go b/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/histogram.go index 7b6a9cacb..f9a63d18f 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/histogram.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/histogram.go @@ -126,7 +126,7 @@ func (c *HistogramChunk) Appender() (Appender, error) { // To get an appender, we must know the state it would have if we had // appended all existing data from scratch. We iterate through the end // and populate via the iterator's state. 
- for it.Next() == ValHistogram { + for it.Next() == ValHistogram { // nolint:revive } if err := it.Err(); err != nil { return nil, err @@ -875,7 +875,7 @@ func (it *histogramIterator) Next() ValueType { it.err = err return ValNone } - it.tDelta = it.tDelta + tDod + it.tDelta += tDod it.t += it.tDelta cntDod, err := readVarbitInt(&it.br) @@ -883,7 +883,7 @@ func (it *histogramIterator) Next() ValueType { it.err = err return ValNone } - it.cntDelta = it.cntDelta + cntDod + it.cntDelta += cntDod it.cnt = uint64(int64(it.cnt) + it.cntDelta) zcntDod, err := readVarbitInt(&it.br) @@ -891,7 +891,7 @@ func (it *histogramIterator) Next() ValueType { it.err = err return ValNone } - it.zCntDelta = it.zCntDelta + zcntDod + it.zCntDelta += zcntDod it.zCnt = uint64(int64(it.zCnt) + it.zCntDelta) ok := it.readSum() diff --git a/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/varbit.go b/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/varbit.go index b3b14cf41..449f9fbac 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/varbit.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/varbit.go @@ -122,7 +122,7 @@ func readVarbitInt(b *bstreamReader) (int64, error) { } if bits > (1 << (sz - 1)) { // Or something. - bits = bits - (1 << sz) + bits -= (1 << sz) } val = int64(bits) } diff --git a/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/xor.go b/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/xor.go index 62e90cbaa..aa6b689a7 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/xor.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/xor.go @@ -99,7 +99,7 @@ func (c *XORChunk) Appender() (Appender, error) { // To get an appender we must know the state it would have if we had // appended all existing data from scratch. // We iterate through the end and populate via the iterator's state. 
- for it.Next() != ValNone { + for it.Next() != ValNone { // nolint:revive } if err := it.Err(); err != nil { return nil, err @@ -152,26 +152,25 @@ type xorAppender struct { trailing uint8 } -func (a *xorAppender) AppendHistogram(t int64, h *histogram.Histogram) { +func (a *xorAppender) AppendHistogram(int64, *histogram.Histogram) { panic("appended a histogram to an xor chunk") } -func (a *xorAppender) AppendFloatHistogram(t int64, h *histogram.FloatHistogram) { +func (a *xorAppender) AppendFloatHistogram(int64, *histogram.FloatHistogram) { panic("appended a float histogram to an xor chunk") } func (a *xorAppender) Append(t int64, v float64) { var tDelta uint64 num := binary.BigEndian.Uint16(a.b.bytes()) - - if num == 0 { + switch num { + case 0: buf := make([]byte, binary.MaxVarintLen64) for _, b := range buf[:binary.PutVarint(buf, t)] { a.b.writeByte(b) } a.b.writeBits(math.Float64bits(v), 64) - - } else if num == 1 { + case 1: tDelta = uint64(t - a.t) buf := make([]byte, binary.MaxVarintLen64) @@ -181,7 +180,7 @@ func (a *xorAppender) Append(t int64, v float64) { a.writeVDelta(v) - } else { + default: tDelta = uint64(t - a.t) dod := int64(tDelta - a.tDelta) @@ -321,7 +320,7 @@ func (it *xorIterator) Next() ValueType { return ValNone } it.tDelta = tDelta - it.t = it.t + int64(it.tDelta) + it.t += int64(it.tDelta) return it.readValue() } @@ -384,7 +383,7 @@ func (it *xorIterator) Next() ValueType { } it.tDelta = uint64(int64(it.tDelta) + dod) - it.t = it.t + int64(it.tDelta) + it.t += int64(it.tDelta) return it.readValue() } @@ -506,12 +505,3 @@ func xorRead(br *bstreamReader, value *float64, leading, trailing *uint8) error *value = math.Float64frombits(vbits) return nil } - -// OOOXORChunk holds a XORChunk and overrides the Encoding() method. -type OOOXORChunk struct { - *XORChunk -} - -func (c *OOOXORChunk) Encoding() Encoding { - return EncOOOXOR -} diff --git a/vendor/github.com/prometheus/prometheus/tsdb/chunks/chunk_write_queue.go b/vendor/github.com/prometheus/prometheus/tsdb/chunks/chunk_write_queue.go index ab34eb06c..6d2dc743b 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/chunks/chunk_write_queue.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/chunks/chunk_write_queue.go @@ -42,6 +42,7 @@ type chunkWriteJob struct { maxt int64 chk chunkenc.Chunk ref ChunkDiskMapperRef + isOOO bool callback func(error) } @@ -76,7 +77,7 @@ type chunkWriteQueue struct { } // writeChunkF is a function which writes chunks, it is dynamic to allow mocking in tests. 
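The switch introduced in Append above keeps the three historical paths: a verbatim first sample, a varint time delta for the second, and delta-of-delta from then on. A minimal sketch of the exported API that exercises all three paths:

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/tsdb/chunkenc"
)

func main() {
	c := chunkenc.NewXORChunk()
	app, err := c.Appender()
	if err != nil {
		panic(err)
	}

	// Timestamps must be appended in increasing order. The first call takes
	// the num==0 case, the second num==1, the third the default (DoD) case.
	app.Append(1000, 1.0)
	app.Append(2000, 2.5)
	app.Append(3000, 2.5)

	fmt.Println(c.NumSamples()) // 3
}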
-type writeChunkF func(HeadSeriesRef, int64, int64, chunkenc.Chunk, ChunkDiskMapperRef, bool) error +type writeChunkF func(HeadSeriesRef, int64, int64, chunkenc.Chunk, ChunkDiskMapperRef, bool, bool) error func newChunkWriteQueue(reg prometheus.Registerer, size int, writeChunk writeChunkF) *chunkWriteQueue { counters := prometheus.NewCounterVec( @@ -133,7 +134,7 @@ func (c *chunkWriteQueue) start() { } func (c *chunkWriteQueue) processJob(job chunkWriteJob) { - err := c.writeChunk(job.seriesRef, job.mint, job.maxt, job.chk, job.ref, job.cutFile) + err := c.writeChunk(job.seriesRef, job.mint, job.maxt, job.chk, job.ref, job.isOOO, job.cutFile) if job.callback != nil { job.callback(err) } diff --git a/vendor/github.com/prometheus/prometheus/tsdb/chunks/head_chunks.go b/vendor/github.com/prometheus/prometheus/tsdb/chunks/head_chunks.go index a0bd735b8..bcdab2125 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/chunks/head_chunks.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/chunks/head_chunks.go @@ -273,6 +273,26 @@ func NewChunkDiskMapper(reg prometheus.Registerer, dir string, pool chunkenc.Poo return m, m.openMMapFiles() } +// Chunk encodings for out-of-order chunks. +// These encodings must be only used by the Head block for its internal bookkeeping. +const ( + OutOfOrderMask = uint8(0b10000000) +) + +func (cdm *ChunkDiskMapper) ApplyOutOfOrderMask(sourceEncoding chunkenc.Encoding) chunkenc.Encoding { + enc := uint8(sourceEncoding) | OutOfOrderMask + return chunkenc.Encoding(enc) +} + +func (cdm *ChunkDiskMapper) IsOutOfOrderChunk(e chunkenc.Encoding) bool { + return (uint8(e) & OutOfOrderMask) != 0 +} + +func (cdm *ChunkDiskMapper) RemoveMasks(sourceEncoding chunkenc.Encoding) chunkenc.Encoding { + restored := uint8(sourceEncoding) & (^OutOfOrderMask) + return chunkenc.Encoding(restored) +} + // openMMapFiles opens all files within dir for mmapping. func (cdm *ChunkDiskMapper) openMMapFiles() (returnErr error) { cdm.mmappedChunkFiles = map[int]*mmappedChunkFile{} @@ -403,17 +423,17 @@ func repairLastChunkFile(files map[int]string) (_ map[int]string, returnErr erro // WriteChunk writes the chunk to the disk. // The returned chunk ref is the reference from where the chunk encoding starts for the chunk. -func (cdm *ChunkDiskMapper) WriteChunk(seriesRef HeadSeriesRef, mint, maxt int64, chk chunkenc.Chunk, callback func(err error)) (chkRef ChunkDiskMapperRef) { +func (cdm *ChunkDiskMapper) WriteChunk(seriesRef HeadSeriesRef, mint, maxt int64, chk chunkenc.Chunk, isOOO bool, callback func(err error)) (chkRef ChunkDiskMapperRef) { // cdm.evtlPosMtx must be held to serialize the calls to cdm.evtlPos.getNextChunkRef() and the writing of the chunk (either with or without queue). 
cdm.evtlPosMtx.Lock() defer cdm.evtlPosMtx.Unlock() ref, cutFile := cdm.evtlPos.getNextChunkRef(chk) if cdm.writeQueue != nil { - return cdm.writeChunkViaQueue(ref, cutFile, seriesRef, mint, maxt, chk, callback) + return cdm.writeChunkViaQueue(ref, isOOO, cutFile, seriesRef, mint, maxt, chk, callback) } - err := cdm.writeChunk(seriesRef, mint, maxt, chk, ref, cutFile) + err := cdm.writeChunk(seriesRef, mint, maxt, chk, ref, isOOO, cutFile) if callback != nil { callback(err) } @@ -421,7 +441,7 @@ func (cdm *ChunkDiskMapper) WriteChunk(seriesRef HeadSeriesRef, mint, maxt int64 return ref } -func (cdm *ChunkDiskMapper) writeChunkViaQueue(ref ChunkDiskMapperRef, cutFile bool, seriesRef HeadSeriesRef, mint, maxt int64, chk chunkenc.Chunk, callback func(err error)) (chkRef ChunkDiskMapperRef) { +func (cdm *ChunkDiskMapper) writeChunkViaQueue(ref ChunkDiskMapperRef, isOOO, cutFile bool, seriesRef HeadSeriesRef, mint, maxt int64, chk chunkenc.Chunk, callback func(err error)) (chkRef ChunkDiskMapperRef) { var err error if callback != nil { defer func() { @@ -438,13 +458,14 @@ func (cdm *ChunkDiskMapper) writeChunkViaQueue(ref ChunkDiskMapperRef, cutFile b maxt: maxt, chk: chk, ref: ref, + isOOO: isOOO, callback: callback, }) return ref } -func (cdm *ChunkDiskMapper) writeChunk(seriesRef HeadSeriesRef, mint, maxt int64, chk chunkenc.Chunk, ref ChunkDiskMapperRef, cutFile bool) (err error) { +func (cdm *ChunkDiskMapper) writeChunk(seriesRef HeadSeriesRef, mint, maxt int64, chk chunkenc.Chunk, ref ChunkDiskMapperRef, isOOO, cutFile bool) (err error) { cdm.writePathMtx.Lock() defer cdm.writePathMtx.Unlock() @@ -476,7 +497,11 @@ func (cdm *ChunkDiskMapper) writeChunk(seriesRef HeadSeriesRef, mint, maxt int64 bytesWritten += MintMaxtSize binary.BigEndian.PutUint64(cdm.byteBuf[bytesWritten:], uint64(maxt)) bytesWritten += MintMaxtSize - cdm.byteBuf[bytesWritten] = byte(chk.Encoding()) + enc := chk.Encoding() + if isOOO { + enc = cdm.ApplyOutOfOrderMask(enc) + } + cdm.byteBuf[bytesWritten] = byte(enc) bytesWritten += ChunkEncodingSize n := binary.PutUvarint(cdm.byteBuf[bytesWritten:], uint64(len(chk.Bytes()))) bytesWritten += n @@ -696,7 +721,9 @@ func (cdm *ChunkDiskMapper) Chunk(ref ChunkDiskMapperRef) (chunkenc.Chunk, error // Encoding. chkEnc := mmapFile.byteSlice.Range(chkStart, chkStart+ChunkEncodingSize)[0] - + sourceChkEnc := chunkenc.Encoding(chkEnc) + // Extract the encoding from the byte. ChunkDiskMapper uses only the last 7 bits for the encoding. + chkEnc = byte(cdm.RemoveMasks(sourceChkEnc)) // Data length. // With the minimum chunk length this should never cause us reading // over the end of the slice. @@ -762,7 +789,7 @@ func (cdm *ChunkDiskMapper) Chunk(ref ChunkDiskMapperRef) (chunkenc.Chunk, error // and runs the provided function with information about each chunk. It returns on the first error encountered. // NOTE: This method needs to be called at least once after creating ChunkDiskMapper // to set the maxt of all the file. 
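writeChunk above ORs the mask into the on-disk encoding byte for out-of-order chunks, and Chunk strips it again before the byte reaches chunkenc. A standalone sketch of that round trip in plain uint8 arithmetic (the constants mirror the vendored ones; chunkenc.EncXOR has the value 1):

package main

import "fmt"

const (
	encXOR         = uint8(1)          // value of chunkenc.EncXOR
	outOfOrderMask = uint8(0b10000000) // top bit marks an out-of-order chunk
)

func main() {
	// ApplyOutOfOrderMask: what writeChunk stores for an OOO XOR chunk.
	onDisk := encXOR | outOfOrderMask
	fmt.Printf("on disk: %08b\n", onDisk) // 10000001

	// IsOutOfOrderChunk: the top bit answers the OOO question.
	fmt.Println("ooo:", onDisk&outOfOrderMask != 0) // true

	// RemoveMasks: only the low 7 bits carry the real encoding.
	fmt.Println("restored:", onDisk&^outOfOrderMask == encXOR) // true
}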
-func (cdm *ChunkDiskMapper) IterateAllChunks(f func(seriesRef HeadSeriesRef, chunkRef ChunkDiskMapperRef, mint, maxt int64, numSamples uint16, encoding chunkenc.Encoding) error) (err error) { +func (cdm *ChunkDiskMapper) IterateAllChunks(f func(seriesRef HeadSeriesRef, chunkRef ChunkDiskMapperRef, mint, maxt int64, numSamples uint16, encoding chunkenc.Encoding, isOOO bool) error) (err error) { cdm.writePathMtx.Lock() defer cdm.writePathMtx.Unlock() @@ -860,8 +887,10 @@ func (cdm *ChunkDiskMapper) IterateAllChunks(f func(seriesRef HeadSeriesRef, chu if maxt > mmapFile.maxt { mmapFile.maxt = maxt } - - if err := f(seriesRef, chunkRef, mint, maxt, numSamples, chkEnc); err != nil { + isOOO := cdm.IsOutOfOrderChunk(chkEnc) + // Extract the encoding from the byte. ChunkDiskMapper uses only the last 7 bits for the encoding. + chkEnc = cdm.RemoveMasks(chkEnc) + if err := f(seriesRef, chunkRef, mint, maxt, numSamples, chkEnc, isOOO); err != nil { if cerr, ok := err.(*CorruptionErr); ok { cerr.Dir = cdm.dir.Name() cerr.FileIndex = segID @@ -970,9 +999,10 @@ func (cdm *ChunkDiskMapper) DeleteCorrupted(originalErr error) error { cdm.readPathMtx.RLock() lastSeq := 0 for seg := range cdm.mmappedChunkFiles { - if seg >= cerr.FileIndex { + switch { + case seg >= cerr.FileIndex: segs = append(segs, seg) - } else if seg > lastSeq { + case seg > lastSeq: lastSeq = seg } } diff --git a/vendor/github.com/prometheus/prometheus/tsdb/errors/errors.go b/vendor/github.com/prometheus/prometheus/tsdb/errors/errors.go index 607a7782a..aa0a4b1b3 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/errors/errors.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/errors/errors.go @@ -16,6 +16,7 @@ package errors import ( "bytes" + "errors" "fmt" "io" ) @@ -79,6 +80,19 @@ func (es nonNilMultiError) Error() string { return buf.String() } +// Is attempts to match the provided error against errors in the error list. +// +// This function allows errors.Is to traverse the values stored in the MultiError. +// It returns true if any of the errors in the list match the target. +func (es nonNilMultiError) Is(target error) bool { + for _, err := range es.errs { + if errors.Is(err, target) { + return true + } + } + return false +} + // CloseAll closes all given closers while recording error in MultiError. 
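The new Is method lets errors.Is see through a MultiError to its parts. A small sketch of what that enables, using the package's exported NewMulti constructor (visible in CloseAll below):

package main

import (
	"errors"
	"fmt"
	"os"

	tsdb_errors "github.com/prometheus/prometheus/tsdb/errors"
)

func main() {
	multi := tsdb_errors.NewMulti()
	multi.Add(errors.New("unrelated failure"))
	multi.Add(os.ErrNotExist)

	// Without the Is method above, errors.Is reports false here because the
	// combined error hides its parts; with it, the match succeeds.
	fmt.Println(errors.Is(multi.Err(), os.ErrNotExist)) // true
}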
func CloseAll(cs []io.Closer) error { errs := NewMulti() diff --git a/vendor/github.com/prometheus/prometheus/tsdb/tsdbutil/chunks.go b/vendor/github.com/prometheus/prometheus/tsdb/tsdbutil/chunks.go index f9981ffe1..02a7dd619 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/tsdbutil/chunks.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/tsdbutil/chunks.go @@ -28,7 +28,7 @@ type Samples interface { type Sample interface { T() int64 - V() float64 + F() float64 H() *histogram.Histogram FH() *histogram.FloatHistogram Type() chunkenc.ValueType @@ -69,7 +69,7 @@ func ChunkFromSamplesGeneric(s Samples) chunks.Meta { for i := 0; i < s.Len(); i++ { switch sampleType { case chunkenc.ValFloat: - ca.Append(s.Get(i).T(), s.Get(i).V()) + ca.Append(s.Get(i).T(), s.Get(i).F()) case chunkenc.ValHistogram: ca.AppendHistogram(s.Get(i).T(), s.Get(i).H()) case chunkenc.ValFloatHistogram: @@ -87,7 +87,7 @@ func ChunkFromSamplesGeneric(s Samples) chunks.Meta { type sample struct { t int64 - v float64 + f float64 h *histogram.Histogram fh *histogram.FloatHistogram } @@ -96,8 +96,8 @@ func (s sample) T() int64 { return s.t } -func (s sample) V() float64 { - return s.v +func (s sample) F() float64 { + return s.f } func (s sample) H() *histogram.Histogram { @@ -123,7 +123,7 @@ func (s sample) Type() chunkenc.ValueType { func PopulatedChunk(numSamples int, minTime int64) chunks.Meta { samples := make([]Sample, numSamples) for i := 0; i < numSamples; i++ { - samples[i] = sample{t: minTime + int64(i*1000), v: 1.0} + samples[i] = sample{t: minTime + int64(i*1000), f: 1.0} } return ChunkFromSamples(samples) } @@ -133,7 +133,7 @@ func GenerateSamples(start, numSamples int) []Sample { return generateSamples(start, numSamples, func(i int) Sample { return sample{ t: int64(i), - v: float64(i), + f: float64(i), } }) } diff --git a/vendor/github.com/prometheus/prometheus/util/testutil/context.go b/vendor/github.com/prometheus/prometheus/util/testutil/context.go index cf730421b..c1f4a831c 100644 --- a/vendor/github.com/prometheus/prometheus/util/testutil/context.go +++ b/vendor/github.com/prometheus/prometheus/util/testutil/context.go @@ -37,6 +37,6 @@ func (c *MockContext) Err() error { } // Value ignores the Value and always returns nil -func (c *MockContext) Value(key interface{}) interface{} { +func (c *MockContext) Value(interface{}) interface{} { return nil } diff --git a/vendor/github.com/prometheus/prometheus/util/testutil/roundtrip.go b/vendor/github.com/prometheus/prometheus/util/testutil/roundtrip.go index a93991a13..364e0c264 100644 --- a/vendor/github.com/prometheus/prometheus/util/testutil/roundtrip.go +++ b/vendor/github.com/prometheus/prometheus/util/testutil/roundtrip.go @@ -22,7 +22,7 @@ type roundTrip struct { theError error } -func (rt *roundTrip) RoundTrip(r *http.Request) (*http.Response, error) { +func (rt *roundTrip) RoundTrip(*http.Request) (*http.Response, error) { return rt.theResponse, rt.theError } diff --git a/vendor/github.com/prometheus/prometheus/util/treecache/treecache.go b/vendor/github.com/prometheus/prometheus/util/treecache/treecache.go index 7dd41dced..bece9d5c8 100644 --- a/vendor/github.com/prometheus/prometheus/util/treecache/treecache.go +++ b/vendor/github.com/prometheus/prometheus/util/treecache/treecache.go @@ -116,7 +116,7 @@ func (tc *ZookeeperTreeCache) Stop() { tc.stop <- struct{}{} go func() { // Drain tc.head.events so that go routines can make progress and exit. 
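The V-to-F rename below touches every Sample consumer; float values are now read through F(). A tiny sketch against the exported helpers of the vendored tsdbutil package:

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/tsdb/tsdbutil"
)

func main() {
	// Three float samples with t == f == 0, 1, 2.
	for _, s := range tsdbutil.GenerateSamples(0, 3) {
		fmt.Println(s.T(), s.F()) // F() is the accessor formerly named V()
	}
}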
- for range tc.head.events { + for range tc.head.events { // nolint:revive } }() go func() { @@ -176,11 +176,11 @@ func (tc *ZookeeperTreeCache) loop(path string) { node = childNode } - err := tc.recursiveNodeUpdate(ev.Path, node) - if err != nil { + switch err := tc.recursiveNodeUpdate(ev.Path, node); { + case err != nil: level.Error(tc.logger).Log("msg", "Error during processing of Zookeeper event", "err", err) failure() - } else if tc.head.data == nil { + case tc.head.data == nil: level.Error(tc.logger).Log("msg", "Error during processing of Zookeeper event", "err", "path no longer exists", "path", tc.prefix) failure() } @@ -214,13 +214,14 @@ func (tc *ZookeeperTreeCache) loop(path string) { func (tc *ZookeeperTreeCache) recursiveNodeUpdate(path string, node *zookeeperTreeCacheNode) error { data, _, dataWatcher, err := tc.conn.GetW(path) - if errors.Is(err, zk.ErrNoNode) { + switch { + case errors.Is(err, zk.ErrNoNode): tc.recursiveDelete(path, node) if node == tc.head { return fmt.Errorf("path %s does not exist", path) } return nil - } else if err != nil { + case err != nil: return err } @@ -230,10 +231,11 @@ func (tc *ZookeeperTreeCache) recursiveNodeUpdate(path string, node *zookeeperTr } children, _, childWatcher, err := tc.conn.ChildrenW(path) - if errors.Is(err, zk.ErrNoNode) { + switch { + case errors.Is(err, zk.ErrNoNode): tc.recursiveDelete(path, node) return nil - } else if err != nil { + case err != nil: return err } diff --git a/vendor/github.com/scaleway/scaleway-sdk-go/api/baremetal/v1/baremetal_sdk.go b/vendor/github.com/scaleway/scaleway-sdk-go/api/baremetal/v1/baremetal_sdk.go index 6b63350f5..968bf5428 100644 --- a/vendor/github.com/scaleway/scaleway-sdk-go/api/baremetal/v1/baremetal_sdk.go +++ b/vendor/github.com/scaleway/scaleway-sdk-go/api/baremetal/v1/baremetal_sdk.go @@ -39,7 +39,8 @@ var ( _ = namegenerator.GetRandomName ) -// API: this API allows to manage your Bare metal server. +// API: this API allows to manage your Elastic Metal server. +// Elastic Metal API. type API struct { client *scw.Client } @@ -557,9 +558,9 @@ type BMCAccess struct { type CPU struct { // Name: name of the CPU. Name string `json:"name"` - // CoreCount: number of cores of the CPU. + // CoreCount: number of CPU cores. CoreCount uint32 `json:"core_count"` - // ThreadCount: number of threads of the CPU. + // ThreadCount: number CPU threads. ThreadCount uint32 `json:"thread_count"` // Frequency: frequency of the CPU in MHz. Frequency uint32 `json:"frequency"` @@ -569,17 +570,17 @@ type CPU struct { // CreateServerRequestInstall: create server request. install. type CreateServerRequestInstall struct { - // OsID: ID of the OS to install on the server. + // OsID: ID of the OS to installation on the server. OsID string `json:"os_id"` // Hostname: hostname of the server. Hostname string `json:"hostname"` // SSHKeyIDs: SSH key IDs authorized on the server. SSHKeyIDs []string `json:"ssh_key_ids"` - // User: user used for the installation. + // User: user for the installation. User *string `json:"user"` - // Password: password used for the installation. + // Password: password for the installation. Password *string `json:"password"` - // ServiceUser: user used for the service to install. + // ServiceUser: regular user that runs the service to be installed on the server. ServiceUser *string `json:"service_user"` // ServicePassword: password used for the service to install. 
ServicePassword *string `json:"service_password"` @@ -595,7 +596,7 @@ type Disk struct { // GetServerMetricsResponse: get server metrics response. type GetServerMetricsResponse struct { - // Pings: timeseries of ping on the server. + // Pings: timeseries object representing pings on the server. Pings *scw.TimeSeries `json:"pings"` } @@ -613,7 +614,7 @@ type IP struct { // ReverseStatus: status of the reverse. // Default value: unknown ReverseStatus IPReverseStatus `json:"reverse_status"` - // ReverseStatusMessage: a message related to the reverse status, in case of an error for example. + // ReverseStatusMessage: a message related to the reverse status, e.g. in case of an error. ReverseStatusMessage string `json:"reverse_status_message"` } @@ -659,13 +660,13 @@ type ListServerPrivateNetworksResponse struct { type ListServersResponse struct { // TotalCount: total count of matching servers. TotalCount uint32 `json:"total_count"` - // Servers: servers that match filters. + // Servers: array of Elastic Metal server objects matching the filters in the request. Servers []*Server `json:"servers"` } // ListSettingsResponse: list settings response. type ListSettingsResponse struct { - // TotalCount: total count of matching sttings. + // TotalCount: total count of matching settings. TotalCount uint32 `json:"total_count"` // Settings: settings that match filters. Settings []*Setting `json:"settings"` @@ -691,17 +692,17 @@ type OS struct { Name string `json:"name"` // Version: version of the OS. Version string `json:"version"` - // LogoURL: URL of this os's logo. + // LogoURL: URL of this OS's logo. LogoURL string `json:"logo_url"` - // SSH: define the SSH requirements to install the OS. + // SSH: object defining the SSH requirements to install the OS. SSH *OSOSField `json:"ssh"` - // User: define the username requirements to install the OS. + // User: object defining the username requirements to install the OS. User *OSOSField `json:"user"` - // Password: define the password requirements to install the OS. + // Password: object defining the password requirements to install the OS. Password *OSOSField `json:"password"` - // ServiceUser: define the username requirements to install the service. + // ServiceUser: object defining the username requirements to install the service. ServiceUser *OSOSField `json:"service_user"` - // ServicePassword: define the password requirements to install the service. + // ServicePassword: object defining the password requirements to install the service. ServicePassword *OSOSField `json:"service_password"` // Enabled: state of OS. Enabled bool `json:"enabled"` @@ -726,17 +727,17 @@ type Offer struct { // Stock: stock level. // Default value: empty Stock OfferStock `json:"stock"` - // Bandwidth: public Bandwidth available in bits/s with the offer. + // Bandwidth: public bandwidth available (in bits/s) with the offer. Bandwidth uint64 `json:"bandwidth"` // CommercialRange: commercial range of the offer. CommercialRange string `json:"commercial_range"` // PricePerHour: price of the offer for the next 60 minutes (a server order at 11h32 will be payed until 12h32). PricePerHour *scw.Money `json:"price_per_hour"` - // PricePerMonth: price of the offer per months. + // PricePerMonth: monthly price of the offer, if subscribing on a monthly basis. PricePerMonth *scw.Money `json:"price_per_month"` // Disks: disks specifications of the offer. Disks []*Disk `json:"disks"` - // Enable: true if the offer is currently available. 
+ // Enable: defines whether the offer is currently available. Enable bool `json:"enable"` // CPUs: CPU specifications of the offer. CPUs []*CPU `json:"cpus"` @@ -748,20 +749,20 @@ type Offer struct { PersistentMemories []*PersistentMemory `json:"persistent_memories"` // RaidControllers: raid controller specifications of the offer. RaidControllers []*RaidController `json:"raid_controllers"` - // IncompatibleOsIDs: array of incompatible OS ids. + // IncompatibleOsIDs: array of OS images IDs incompatible with the server. IncompatibleOsIDs []string `json:"incompatible_os_ids"` // SubscriptionPeriod: period of subscription for the offer. // Default value: unknown_subscription_period SubscriptionPeriod OfferSubscriptionPeriod `json:"subscription_period"` // OperationPath: operation path of the service. OperationPath string `json:"operation_path"` - // Fee: fee to pay on order. + // Fee: one time fee invoiced by Scaleway for the setup and activation of the server. Fee *scw.Money `json:"fee"` - // Options: options available on offer. + // Options: available options for customization of the server. Options []*OfferOptionOffer `json:"options"` // PrivateBandwidth: private bandwidth available in bits/s with the offer. PrivateBandwidth uint64 `json:"private_bandwidth"` - // SharedBandwidth: the offer is shared or not. + // SharedBandwidth: defines whether the offer's bandwidth is shared or not. SharedBandwidth bool `json:"shared_bandwidth"` // Tags: array of tags attached to the offer. Tags []string `json:"tags"` @@ -773,7 +774,7 @@ type OfferOptionOffer struct { ID string `json:"id"` // Name: name of the option. Name string `json:"name"` - // Enabled: if true the option is enabled and included by default in the offer + // Enabled: if true the option is enabled and included by default in the offer. // If false the option is available for the offer but not included by default. Enabled bool `json:"enabled"` // SubscriptionPeriod: period of subscription for the offer. @@ -793,7 +794,7 @@ type Option struct { ID string `json:"id"` // Name: name of the option. Name string `json:"name"` - // Manageable: is false if the option could not be added or removed. + // Manageable: defines whether the option is manageable (could be added or removed). Manageable bool `json:"manageable"` } @@ -825,9 +826,9 @@ type Server struct { Name string `json:"name"` // Description: description of the server. Description string `json:"description"` - // UpdatedAt: date of last modification of the server. + // UpdatedAt: last modification date of the server. UpdatedAt *time.Time `json:"updated_at"` - // CreatedAt: date of creation of the server. + // CreatedAt: creation date of the server. CreatedAt *time.Time `json:"created_at"` // Status: status of the server. // Default value: unknown @@ -836,7 +837,7 @@ type Server struct { OfferID string `json:"offer_id"` // OfferName: offer name of the server. OfferName string `json:"offer_name"` - // Tags: array of customs tags attached to the server. + // Tags: array of custom tags attached to the server. Tags []string `json:"tags"` // IPs: array of IPs attached to the server. IPs []*IP `json:"ips"` @@ -845,14 +846,14 @@ type Server struct { // BootType: boot type of the server. // Default value: unknown_boot_type BootType ServerBootType `json:"boot_type"` - // Zone: the zone in which is the server. + // Zone: zone in which is the server located. Zone scw.Zone `json:"zone"` - // Install: configuration of installation. + // Install: configuration of the installation. 
Install *ServerInstall `json:"install"` - // PingStatus: server status of ping. + // PingStatus: status of server ping. // Default value: ping_status_unknown PingStatus ServerPingStatus `json:"ping_status"` - // Options: options enabled on server. + // Options: options enabled on the server. Options []*ServerOption `json:"options"` // RescueServer: configuration of rescue boot. RescueServer *ServerRescueServer `json:"rescue_server"` @@ -860,7 +861,7 @@ type Server struct { // ServerEvent: server event. type ServerEvent struct { - // ID: ID of the server for whom the action will be applied. + // ID: ID of the server to which the action will be applied. ID string `json:"id"` // Action: the action that will be applied to the server. Action string `json:"action"` @@ -874,18 +875,18 @@ type ServerEvent struct { type ServerInstall struct { // OsID: ID of the OS. OsID string `json:"os_id"` - // Hostname: host defined in the server install. + // Hostname: host defined during the server installation. Hostname string `json:"hostname"` - // SSHKeyIDs: SSH public key IDs defined in the server install. + // SSHKeyIDs: SSH public key IDs defined during server installation. SSHKeyIDs []string `json:"ssh_key_ids"` - // Status: status of the server install. + // Status: status of the server installation. // Default value: unknown Status ServerInstallStatus `json:"status"` - // User: user defined in the server install or the default one if none were specified. + // User: user defined in the server installation, or the default user if none were specified. User string `json:"user"` - // ServiceUser: service user defined in the server install or the default one if none were specified. + // ServiceUser: service user defined in the server installation, or the default user if none were specified. ServiceUser string `json:"service_user"` - // ServiceURL: the address of the installed service. + // ServiceURL: address of the installed service. ServiceURL string `json:"service_url"` } @@ -895,10 +896,10 @@ type ServerOption struct { ID string `json:"id"` // Name: name of the option. Name string `json:"name"` - // Status: status of the option. + // Status: status of the option on this server. // Default value: option_status_unknown Status ServerOptionOptionStatus `json:"status"` - // Manageable: is false if the option could not be added or removed. + // Manageable: defines whether the option can be managed (added or removed). Manageable bool `json:"manageable"` // ExpiresAt: auto expiration date for compatible options. ExpiresAt *time.Time `json:"expires_at"` @@ -906,22 +907,22 @@ type ServerOption struct { // ServerPrivateNetwork: server private network. type ServerPrivateNetwork struct { - // ID: the private network ID. + // ID: the Private Network ID. ID string `json:"id"` - // ProjectID: the private network project ID. + // ProjectID: the Private Network Project ID. ProjectID string `json:"project_id"` // ServerID: the server ID. ServerID string `json:"server_id"` - // PrivateNetworkID: the private network ID. + // PrivateNetworkID: the Private Network ID. PrivateNetworkID string `json:"private_network_id"` - // Vlan: the VLAN ID associated to the private network. + // Vlan: the VLAN ID associated to the Private Network. Vlan *uint32 `json:"vlan"` - // Status: the configuration status of the private network. + // Status: the configuration status of the Private Network. // Default value: unknown Status ServerPrivateNetworkStatus `json:"status"` - // CreatedAt: the private network creation date. 
+ // CreatedAt: the Private Network creation date. CreatedAt *time.Time `json:"created_at"` - // UpdatedAt: the date the private network was last modified. + // UpdatedAt: the date the Private Network was last modified. UpdatedAt *time.Time `json:"updated_at"` } @@ -946,7 +947,7 @@ type Setting struct { Type SettingType `json:"type"` // ProjectID: ID of the project ID. ProjectID string `json:"project_id"` - // Enabled: the setting is enable or disable. + // Enabled: defines whether the setting is enabled. Enabled bool `json:"enabled"` } @@ -962,26 +963,27 @@ type ListServersRequest struct { Zone scw.Zone `json:"-"` // Page: page number. Page *int32 `json:"-"` - // PageSize: number of server per page. + // PageSize: number of servers per page. PageSize *uint32 `json:"-"` // OrderBy: order of the servers. // Default value: created_at_asc OrderBy ListServersRequestOrderBy `json:"-"` - // Tags: filter by tags. + // Tags: tags to filter for. Tags []string `json:"-"` - // Status: filter by status. + // Status: status to filter for. Status []string `json:"-"` - // Name: filter by name. + // Name: names to filter for. Name *string `json:"-"` - // OrganizationID: filter by organization ID. + // OrganizationID: organization ID to filter for. OrganizationID *string `json:"-"` - // ProjectID: filter by project ID. + // ProjectID: project ID to filter for. ProjectID *string `json:"-"` - // OptionID: filter by option ID. + // OptionID: option ID to filter for. OptionID *string `json:"-"` } -// ListServers: list elastic metal servers for organization. +// ListServers: list Elastic Metal servers for an organization. +// List Elastic Metal servers for a specific organization. func (s *API) ListServers(req *ListServersRequest, opts ...scw.RequestOption) (*ListServersResponse, error) { var err error @@ -1033,7 +1035,8 @@ type GetServerRequest struct { ServerID string `json:"-"` } -// GetServer: get the server associated with the given ID. +// GetServer: get a specific Elastic Metal server. +// Get full details of an existing Elastic Metal server associated with the ID. func (s *API) GetServer(req *GetServerRequest, opts ...scw.RequestOption) (*Server, error) { var err error @@ -1078,17 +1081,18 @@ type CreateServerRequest struct { ProjectID *string `json:"project_id,omitempty"` // Name: name of the server (≠hostname). Name string `json:"name"` - // Description: description associated to the server, max 255 characters. + // Description: description associated with the server, max 255 characters. Description string `json:"description"` // Tags: tags to associate to the server. Tags []string `json:"tags"` - // Install: configuration of installation. + // Install: object describing the configuration details of the OS installation on the server. Install *CreateServerRequestInstall `json:"install"` // OptionIDs: iDs of options to enable on server. OptionIDs []string `json:"option_ids"` } -// CreateServer: create a new elastic metal server. Once the server is created, you probably want to install an OS. +// CreateServer: create an Elastic Metal server. +// Create a new Elastic Metal server. Once the server is created, proceed with the [installation of an OS](#post-3e949e). func (s *API) CreateServer(req *CreateServerRequest, opts ...scw.RequestOption) (*Server, error) { var err error @@ -1138,13 +1142,14 @@ type UpdateServerRequest struct { ServerID string `json:"-"` // Name: name of the server (≠hostname), not updated if null. 
Name *string `json:"name"` - // Description: description associated to the server, max 255 characters, not updated if null. + // Description: description associated with the server, max 255 characters, not updated if null. Description *string `json:"description"` - // Tags: tags associated to the server, not updated if null. + // Tags: tags associated with the server, not updated if null. Tags *[]string `json:"tags"` } -// UpdateServer: update the server associated with the given ID. +// UpdateServer: update an Elastic Metal server. +// Update the server associated with the ID. You can update parameters such as the server's name, tags and description. Any parameters left null in the request body are not updated. func (s *API) UpdateServer(req *UpdateServerRequest, opts ...scw.RequestOption) (*Server, error) { var err error @@ -1186,7 +1191,7 @@ type InstallServerRequest struct { Zone scw.Zone `json:"-"` // ServerID: server ID to install. ServerID string `json:"-"` - // OsID: ID of the OS to install on the server. + // OsID: ID of the OS to installation on the server. OsID string `json:"os_id"` // Hostname: hostname of the server. Hostname string `json:"hostname"` @@ -1202,7 +1207,8 @@ type InstallServerRequest struct { ServicePassword *string `json:"service_password"` } -// InstallServer: install an OS on the server associated with the given ID. +// InstallServer: install an Elastic Metal server. +// Install an Operating System (OS) on the Elastic Metal server with a specific ID. func (s *API) InstallServer(req *InstallServerRequest, opts ...scw.RequestOption) (*Server, error) { var err error @@ -1246,7 +1252,8 @@ type GetServerMetricsRequest struct { ServerID string `json:"-"` } -// GetServerMetrics: give the ping status on the server associated with the given ID. +// GetServerMetrics: return server metrics. +// Get the ping status of the server associated with the ID. func (s *API) GetServerMetrics(req *GetServerMetricsRequest, opts ...scw.RequestOption) (*GetServerMetricsResponse, error) { var err error @@ -1285,7 +1292,8 @@ type DeleteServerRequest struct { ServerID string `json:"-"` } -// DeleteServer: delete the server associated with the given ID. +// DeleteServer: delete an Elastic Metal server. +// Delete the server associated with the ID. func (s *API) DeleteServer(req *DeleteServerRequest, opts ...scw.RequestOption) (*Server, error) { var err error @@ -1327,7 +1335,8 @@ type RebootServerRequest struct { BootType ServerBootType `json:"boot_type"` } -// RebootServer: reboot the server associated with the given ID, use boot param to reboot in rescue. +// RebootServer: reboot an Elastic Metal server. +// Reboot the Elastic Metal server associated with the ID, use the `boot_type` `rescue` to reboot the server in rescue mode. func (s *API) RebootServer(req *RebootServerRequest, opts ...scw.RequestOption) (*Server, error) { var err error @@ -1374,7 +1383,8 @@ type StartServerRequest struct { BootType ServerBootType `json:"boot_type"` } -// StartServer: start the server associated with the given ID. +// StartServer: start an Elastic Metal server. +// Start the server associated with the ID. func (s *API) StartServer(req *StartServerRequest, opts ...scw.RequestOption) (*Server, error) { var err error @@ -1418,7 +1428,8 @@ type StopServerRequest struct { ServerID string `json:"-"` } -// StopServer: stop the server associated with the given ID. +// StopServer: stop an Elastic Metal server. +// Stop the server associated with the ID. 
The server remains allocated to your account and all data remains on the local storage of the server. func (s *API) StopServer(req *StopServerRequest, opts ...scw.RequestOption) (*Server, error) { var err error @@ -1469,7 +1480,8 @@ type ListServerEventsRequest struct { OrderBy ListServerEventsRequestOrderBy `json:"-"` } -// ListServerEvents: list events associated to the given server ID. +// ListServerEvents: list server events. +// List event (i.e. start/stop/reboot) associated to the server ID. func (s *API) ListServerEvents(req *ListServerEventsRequest, opts ...scw.RequestOption) (*ListServerEventsResponse, error) { var err error @@ -1517,14 +1529,15 @@ type StartBMCAccessRequest struct { Zone scw.Zone `json:"-"` // ServerID: ID of the server. ServerID string `json:"-"` - // IP: the IP authorized to connect to the given server. + // IP: the IP authorized to connect to the server. IP net.IP `json:"ip"` } -// StartBMCAccess: start BMC (Baseboard Management Controller) access associated with the given ID. +// StartBMCAccess: start BMC access. +// Start BMC (Baseboard Management Controller) access associated with the ID. // The BMC (Baseboard Management Controller) access is available one hour after the installation of the server. -// You need first to create an option Remote Access. You will find the ID and the price with a call to listOffers (https://developers.scaleway.com/en/products/baremetal/api/#get-78db92). Then you can add the option https://developers.scaleway.com/en/products/baremetal/api/#post-b14abd. Do not forget to delete the Option. -// After start BMC, you need to Get Remote Access to get the login/password https://developers.scaleway.com/en/products/baremetal/api/#get-cefc0f. +// You need first to create an option Remote Access. You will find the ID and the price with a call to listOffers (https://developers.scaleway.com/en/products/baremetal/api/#get-78db92). Then add the option https://developers.scaleway.com/en/products/baremetal/api/#post-b14abd. +// After adding the BMC option, you need to Get Remote Access to get the login/password https://developers.scaleway.com/en/products/baremetal/api/#get-cefc0f. Do not forget to delete the Option after use. func (s *API) StartBMCAccess(req *StartBMCAccessRequest, opts ...scw.RequestOption) (*BMCAccess, error) { var err error @@ -1568,7 +1581,8 @@ type GetBMCAccessRequest struct { ServerID string `json:"-"` } -// GetBMCAccess: get the BMC (Baseboard Management Controller) access associated with the given ID. +// GetBMCAccess: get BMC access. +// Get the BMC (Baseboard Management Controller) access associated with the ID, including the URL and login information needed to connect. func (s *API) GetBMCAccess(req *GetBMCAccessRequest, opts ...scw.RequestOption) (*BMCAccess, error) { var err error @@ -1607,7 +1621,8 @@ type StopBMCAccessRequest struct { ServerID string `json:"-"` } -// StopBMCAccess: stop BMC (Baseboard Management Controller) access associated with the given ID. +// StopBMCAccess: stop BMC access. +// Stop BMC (Baseboard Management Controller) access associated with the ID. func (s *API) StopBMCAccess(req *StopBMCAccessRequest, opts ...scw.RequestOption) error { var err error @@ -1648,7 +1663,8 @@ type UpdateIPRequest struct { Reverse *string `json:"reverse"` } -// UpdateIP: configure ip associated with the given server ID and ipID. You can use this method to set a reverse dns for an IP. +// UpdateIP: update IP. +// Configure the IP address associated with the server ID and IP ID. 
You can use this method to set a reverse DNS for an IP address. func (s *API) UpdateIP(req *UpdateIPRequest, opts ...scw.RequestOption) (*IP, error) { var err error @@ -1700,7 +1716,8 @@ type AddOptionServerRequest struct { ExpiresAt *time.Time `json:"expires_at"` } -// AddOptionServer: add an option to a specific server. +// AddOptionServer: add server option. +// Add an option, such as Private Networks, to a specific server. func (s *API) AddOptionServer(req *AddOptionServerRequest, opts ...scw.RequestOption) (*Server, error) { var err error @@ -1750,7 +1767,8 @@ type DeleteOptionServerRequest struct { OptionID string `json:"-"` } -// DeleteOptionServer: delete an option from a specific server. +// DeleteOptionServer: delete server option. +// Delete an option from a specific server. func (s *API) DeleteOptionServer(req *DeleteOptionServerRequest, opts ...scw.RequestOption) (*Server, error) { var err error @@ -1793,12 +1811,13 @@ type ListOffersRequest struct { Page *int32 `json:"-"` // PageSize: number of offers per page. PageSize *uint32 `json:"-"` - // SubscriptionPeriod: period of subscription to filter offers. + // SubscriptionPeriod: subscription period type to filter offers by. // Default value: unknown_subscription_period SubscriptionPeriod OfferSubscriptionPeriod `json:"-"` } -// ListOffers: list all available server offers. +// ListOffers: list offers. +// List all available Elastic Metal server configurations. func (s *API) ListOffers(req *ListOffersRequest, opts ...scw.RequestOption) (*ListOffersResponse, error) { var err error @@ -1844,7 +1863,8 @@ type GetOfferRequest struct { OfferID string `json:"-"` } -// GetOffer: return specific offer for the given ID. +// GetOffer: get offer. +// Get details of an offer identified by its offer ID. func (s *API) GetOffer(req *GetOfferRequest, opts ...scw.RequestOption) (*Offer, error) { var err error @@ -1883,7 +1903,8 @@ type GetOptionRequest struct { OptionID string `json:"-"` } -// GetOption: return specific option for the given ID. +// GetOption: get option. +// Return specific option for the ID. func (s *API) GetOption(req *GetOptionRequest, opts ...scw.RequestOption) (*Option, error) { var err error @@ -1922,13 +1943,14 @@ type ListOptionsRequest struct { Page *int32 `json:"-"` // PageSize: number of options per page. PageSize *uint32 `json:"-"` - // OfferID: filter options by offer_id. + // OfferID: offer ID to filter options for. OfferID *string `json:"-"` - // Name: filter options by name. + // Name: name to filter options for. Name *string `json:"-"` } -// ListOptions: list all options matching with filters. +// ListOptions: list options. +// List all options matching with filters. func (s *API) ListOptions(req *ListOptionsRequest, opts ...scw.RequestOption) (*ListOptionsResponse, error) { var err error @@ -1975,14 +1997,15 @@ type ListSettingsRequest struct { Page *int32 `json:"-"` // PageSize: set the maximum list size. PageSize *uint32 `json:"-"` - // OrderBy: order the response. + // OrderBy: sort order for items in the response. // Default value: created_at_asc OrderBy ListSettingsRequestOrderBy `json:"-"` // ProjectID: ID of the project. ProjectID *string `json:"-"` } -// ListSettings: return all settings for a project ID. +// ListSettings: list all settings. +// Return all settings for a project ID. 
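The StartBMCAccess documentation earlier in this file describes a multi-step workflow: order the Remote Access option, start BMC access for an authorized IP, then fetch the login with GetBMCAccess. A hedged sketch of the last two calls with the vendored SDK; the credentials, server ID, and IP below are placeholders:

package main

import (
	"fmt"
	"net"

	baremetal "github.com/scaleway/scaleway-sdk-go/api/baremetal/v1"
	"github.com/scaleway/scaleway-sdk-go/scw"
)

func main() {
	// Placeholder credentials; real code would read them from configuration.
	client, err := scw.NewClient(
		scw.WithAuth("SCW_ACCESS_KEY", "SCW_SECRET_KEY"),
		scw.WithDefaultZone(scw.ZoneFrPar1),
	)
	if err != nil {
		panic(err)
	}
	api := baremetal.NewAPI(client)

	serverID := "11111111-1111-1111-1111-111111111111" // placeholder

	// Authorize one IP to reach the BMC (zone falls back to the client default).
	if _, err := api.StartBMCAccess(&baremetal.StartBMCAccessRequest{
		ServerID: serverID,
		IP:       net.ParseIP("203.0.113.10"),
	}); err != nil {
		panic(err)
	}

	// Once provisioned, the response carries the connection URL and login.
	access, err := api.GetBMCAccess(&baremetal.GetBMCAccessRequest{ServerID: serverID})
	if err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", access)
}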
func (s *API) ListSettings(req *ListSettingsRequest, opts ...scw.RequestOption) (*ListSettingsResponse, error) { var err error @@ -2027,11 +2050,12 @@ type UpdateSettingRequest struct { Zone scw.Zone `json:"-"` // SettingID: ID of the setting. SettingID string `json:"-"` - // Enabled: enable/Disable the setting. + // Enabled: defines whether the setting is enabled. Enabled *bool `json:"enabled"` } -// UpdateSetting: update a setting for a project ID (enable or disable). +// UpdateSetting: update setting. +// Update a setting for a project ID (enable or disable). func (s *API) UpdateSetting(req *UpdateSettingRequest, opts ...scw.RequestOption) (*Setting, error) { var err error @@ -2075,11 +2099,12 @@ type ListOSRequest struct { Page *int32 `json:"-"` // PageSize: number of OS per page. PageSize *uint32 `json:"-"` - // OfferID: filter OS by offer ID. + // OfferID: offer IDs to filter OSes for. OfferID *string `json:"-"` } -// ListOS: list all available OS that can be install on an elastic metal server. +// ListOS: list available OSes. +// List all OSes that are available for installation on Elastic Metal servers. func (s *API) ListOS(req *ListOSRequest, opts ...scw.RequestOption) (*ListOSResponse, error) { var err error @@ -2125,7 +2150,8 @@ type GetOSRequest struct { OsID string `json:"-"` } -// GetOS: return specific OS for the given ID. +// GetOS: get an OS with an ID. +// Return specific OS for the ID. func (s *API) GetOS(req *GetOSRequest, opts ...scw.RequestOption) (*OS, error) { var err error @@ -2169,11 +2195,11 @@ type PrivateNetworkAPIAddServerPrivateNetworkRequest struct { Zone scw.Zone `json:"-"` // ServerID: the ID of the server. ServerID string `json:"-"` - // PrivateNetworkID: the ID of the private network. + // PrivateNetworkID: the ID of the Private Network. PrivateNetworkID string `json:"private_network_id"` } -// AddServerPrivateNetwork: add a server to a private network. +// AddServerPrivateNetwork: add a server to a Private Network. func (s *PrivateNetworkAPI) AddServerPrivateNetwork(req *PrivateNetworkAPIAddServerPrivateNetworkRequest, opts ...scw.RequestOption) (*ServerPrivateNetwork, error) { var err error @@ -2215,11 +2241,11 @@ type PrivateNetworkAPISetServerPrivateNetworksRequest struct { Zone scw.Zone `json:"-"` // ServerID: the ID of the server. ServerID string `json:"-"` - // PrivateNetworkIDs: the IDs of the private networks. + // PrivateNetworkIDs: the IDs of the Private Networks. PrivateNetworkIDs []string `json:"private_network_ids"` } -// SetServerPrivateNetworks: set multiple private networks on a server. +// SetServerPrivateNetworks: set multiple Private Networks on a server. func (s *PrivateNetworkAPI) SetServerPrivateNetworks(req *PrivateNetworkAPISetServerPrivateNetworksRequest, opts ...scw.RequestOption) (*SetServerPrivateNetworksResponse, error) { var err error @@ -2259,24 +2285,24 @@ func (s *PrivateNetworkAPI) SetServerPrivateNetworks(req *PrivateNetworkAPISetSe type PrivateNetworkAPIListServerPrivateNetworksRequest struct { // Zone: zone to target. If none is passed will use default zone from the config. Zone scw.Zone `json:"-"` - // OrderBy: the sort order for the returned private networks. + // OrderBy: the sort order for the returned Private Networks. // Default value: created_at_asc OrderBy ListServerPrivateNetworksRequestOrderBy `json:"-"` - // Page: the page number for the returned private networks. + // Page: the page number for the returned Private Networks. 
Page *int32 `json:"-"` - // PageSize: the maximum number of private networks per page. + // PageSize: the maximum number of Private Networks per page. PageSize *uint32 `json:"-"` - // ServerID: filter private networks by server ID. + // ServerID: filter Private Networks by server ID. ServerID *string `json:"-"` - // PrivateNetworkID: filter private networks by private network ID. + // PrivateNetworkID: filter Private Networks by Private Network ID. PrivateNetworkID *string `json:"-"` - // OrganizationID: filter private networks by organization ID. + // OrganizationID: filter Private Networks by Organization ID. OrganizationID *string `json:"-"` - // ProjectID: filter private networks by project ID. + // ProjectID: filter Private Networks by Project ID. ProjectID *string `json:"-"` } -// ListServerPrivateNetworks: list the private networks of a server. +// ListServerPrivateNetworks: list the Private Networks of a server. func (s *PrivateNetworkAPI) ListServerPrivateNetworks(req *PrivateNetworkAPIListServerPrivateNetworksRequest, opts ...scw.RequestOption) (*ListServerPrivateNetworksResponse, error) { var err error @@ -2324,11 +2350,11 @@ type PrivateNetworkAPIDeleteServerPrivateNetworkRequest struct { Zone scw.Zone `json:"-"` // ServerID: the ID of the server. ServerID string `json:"-"` - // PrivateNetworkID: the ID of the private network. + // PrivateNetworkID: the ID of the Private Network. PrivateNetworkID string `json:"-"` } -// DeleteServerPrivateNetwork: delete a private network. +// DeleteServerPrivateNetwork: delete a Private Network. func (s *PrivateNetworkAPI) DeleteServerPrivateNetwork(req *PrivateNetworkAPIDeleteServerPrivateNetworkRequest, opts ...scw.RequestOption) error { var err error diff --git a/vendor/github.com/scaleway/scaleway-sdk-go/api/instance/v1/instance_sdk.go b/vendor/github.com/scaleway/scaleway-sdk-go/api/instance/v1/instance_sdk.go index 462627409..0f4ddab9c 100644 --- a/vendor/github.com/scaleway/scaleway-sdk-go/api/instance/v1/instance_sdk.go +++ b/vendor/github.com/scaleway/scaleway-sdk-go/api/instance/v1/instance_sdk.go @@ -896,8 +896,7 @@ type GetServerTypesAvailabilityResponse struct { } type GetServerTypesAvailabilityResponseAvailability struct { - // Availability: - // Default value: available + // Availability: default value: available Availability ServerTypesAvailability `json:"availability"` } @@ -931,8 +930,7 @@ type Image struct { ID string `json:"id"` Name string `json:"name"` - // Arch: - // Default value: x86_64 + // Arch: default value: x86_64 Arch Arch `json:"arch"` CreationDate *time.Time `json:"creation_date"` @@ -950,8 +948,7 @@ type Image struct { Public bool `json:"public"` RootVolume *VolumeSummary `json:"root_volume"` - // State: - // Default value: available + // State: default value: available State ImageState `json:"state"` Project string `json:"project"` @@ -1161,14 +1158,11 @@ type SecurityGroup struct { type SecurityGroupRule struct { ID string `json:"id"` - // Protocol: - // Default value: TCP + // Protocol: default value: TCP Protocol SecurityGroupRuleProtocol `json:"protocol"` - // Direction: - // Default value: inbound + // Direction: default value: inbound Direction SecurityGroupRuleDirection `json:"direction"` - // Action: - // Default value: accept + // Action: default value: accept Action SecurityGroupRuleAction `json:"action"` IPRange scw.IPNet `json:"ip_range"` @@ -1263,7 +1257,8 @@ type Server struct { // ServerActionRequestVolumeBackupTemplate: server action request. volume backup template. 
type ServerActionRequestVolumeBackupTemplate struct { - // VolumeType: overrides the volume_type of the snapshot for this volume. + // VolumeType: the snapshot's volume type. + // Overrides the volume_type of the snapshot for this volume. // If omitted, the volume type of the original volume will be used. // Default value: unknown_volume_type VolumeType SnapshotVolumeType `json:"volume_type,omitempty"` @@ -1552,15 +1547,13 @@ type VolumeServer struct { Server *ServerSummary `json:"server"` Size scw.Size `json:"size"` - // VolumeType: - // Default value: l_ssd + // VolumeType: default value: l_ssd VolumeType VolumeServerVolumeType `json:"volume_type"` CreationDate *time.Time `json:"creation_date"` ModificationDate *time.Time `json:"modification_date"` - // State: - // Default value: available + // State: default value: available State VolumeServerState `json:"state"` Project string `json:"project"` @@ -1598,8 +1591,7 @@ type VolumeSummary struct { Name string `json:"name"` Size scw.Size `json:"size"` - // VolumeType: - // Default value: l_ssd + // VolumeType: default value: l_ssd VolumeType VolumeVolumeType `json:"volume_type"` } @@ -1681,7 +1673,8 @@ type GetServerTypesAvailabilityRequest struct { Page *int32 `json:"-"` } -// GetServerTypesAvailability: get availability for all server types. +// GetServerTypesAvailability: get availability. +// Get availability for all server types. func (s *API) GetServerTypesAvailability(req *GetServerTypesAvailabilityRequest, opts ...scw.RequestOption) (*GetServerTypesAvailabilityResponse, error) { var err error @@ -1728,7 +1721,8 @@ type ListServersTypesRequest struct { Page *int32 `json:"-"` } -// ListServersTypes: get server types technical details. +// ListServersTypes: list server types. +// Get server types technical details. func (s *API) ListServersTypes(req *ListServersTypesRequest, opts ...scw.RequestOption) (*ListServersTypesResponse, error) { var err error @@ -1775,7 +1769,8 @@ type ListVolumesTypesRequest struct { Page *int32 `json:"-"` } -// ListVolumesTypes: get volumes technical details. +// ListVolumesTypes: list volumes types. +// Get volumes technical details. func (s *API) ListVolumesTypes(req *ListVolumesTypesRequest, opts ...scw.RequestOption) (*ListVolumesTypesResponse, error) { var err error @@ -1931,7 +1926,8 @@ type CreateServerRequest struct { PlacementGroup *string `json:"placement_group,omitempty"` } -// createServer: the `volumes` key is a dictionary composed of the volume position as key and the volume parameters as value. +// createServer: create a server. +// The `volumes` key is a dictionary composed of the volume position as key and the volume parameters as value. // Depending of the volume parameters, you can achieve different behaviours : // // Create a volume from a snapshot of an image : @@ -2002,7 +1998,8 @@ type DeleteServerRequest struct { ServerID string `json:"-"` } -// DeleteServer: delete a server with the given ID. +// DeleteServer: delete a server. +// Delete a server with the given ID. func (s *API) DeleteServer(req *DeleteServerRequest, opts ...scw.RequestOption) error { var err error @@ -2039,7 +2036,8 @@ type GetServerRequest struct { ServerID string `json:"-"` } -// GetServer: get the details of a specified Server. +// GetServer: get a server. +// Get the details of a specified Server. 
func (s *API) GetServer(req *GetServerRequest, opts ...scw.RequestOption) (*GetServerResponse, error) { var err error @@ -2188,8 +2186,7 @@ type UpdateServerRequest struct { ServerID string `json:"-"` // Name: name of the server. Name *string `json:"name,omitempty"` - // BootType: - // Default value: local + // BootType: default value: local BootType *BootType `json:"boot_type,omitempty"` // Tags: tags of the server. Tags *[]string `json:"tags,omitempty"` @@ -2255,7 +2252,8 @@ type ListServerActionsRequest struct { ServerID string `json:"-"` } -// ListServerActions: list all actions that can currently be performed on a server. +// ListServerActions: list server actions. +// List all actions that can currently be performed on a server. func (s *API) ListServerActions(req *ListServerActionsRequest, opts ...scw.RequestOption) (*ListServerActionsResponse, error) { var err error @@ -2296,14 +2294,17 @@ type ServerActionRequest struct { // Default value: poweron Action ServerAction `json:"action"` // Name: the name of the backup you want to create. + // The name of the backup you want to create. // This field should only be specified when performing a backup action. Name *string `json:"name,omitempty"` // Volumes: for each volume UUID, the snapshot parameters of the volume. + // For each volume UUID, the snapshot parameters of the volume. // This field should only be specified when performing a backup action. Volumes map[string]*ServerActionRequestVolumeBackupTemplate `json:"volumes,omitempty"` } -// ServerAction: perform power related actions on a server. Be wary that when terminating a server, all the attached volumes (local *and* block storage) are deleted. So, if you want to keep your local volumes, you must use the `archive` action instead of `terminate`. And if you want to keep block-storage volumes, **you must** detach it beforehand you issue the `terminate` call. For more information, read the [Volumes](#volumes-7e8a39) documentation. +// ServerAction: perform action. +// Perform power related actions on a server. Be wary that when terminating a server, all the attached volumes (local *and* block storage) are deleted. So, if you want to keep your local volumes, you must use the `archive` action instead of `terminate`. And if you want to keep block-storage volumes, **you must** detach it beforehand you issue the `terminate` call. For more information, read the [Volumes](#volumes-7e8a39) documentation. func (s *API) ServerAction(req *ServerActionRequest, opts ...scw.RequestOption) (*ServerActionResponse, error) { var err error @@ -2347,7 +2348,8 @@ type ListServerUserDataRequest struct { ServerID string `json:"-"` } -// ListServerUserData: list all user data keys registered on a given server. +// ListServerUserData: list user data. +// List all user data keys registered on a given server. func (s *API) ListServerUserData(req *ListServerUserDataRequest, opts ...scw.RequestOption) (*ListServerUserDataResponse, error) { var err error @@ -2388,7 +2390,8 @@ type DeleteServerUserDataRequest struct { Key string `json:"-"` } -// DeleteServerUserData: delete the given key from a server user data. +// DeleteServerUserData: delete user data. +// Delete the given key from a server user data. func (s *API) DeleteServerUserData(req *DeleteServerUserDataRequest, opts ...scw.RequestOption) error { var err error @@ -2443,7 +2446,8 @@ type ListImagesRequest struct { Tags *string `json:"-"` } -// ListImages: list all images available in an account. +// ListImages: list instance images. 
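The ServerAction documentation above stresses that `terminate` deletes attached volumes. A hedged sketch of issuing the non-destructive `poweroff` action instead; the client setup and server ID are placeholders:

package main

import (
	"fmt"

	instance "github.com/scaleway/scaleway-sdk-go/api/instance/v1"
	"github.com/scaleway/scaleway-sdk-go/scw"
)

func main() {
	client, err := scw.NewClient(
		scw.WithAuth("SCW_ACCESS_KEY", "SCW_SECRET_KEY"), // placeholders
		scw.WithDefaultZone(scw.ZoneFrPar1),
	)
	if err != nil {
		panic(err)
	}
	api := instance.NewAPI(client)

	// poweroff stops the server but keeps its volumes; terminate would delete
	// local *and* block volumes unless they are detached or archived first.
	res, err := api.ServerAction(&instance.ServerActionRequest{
		ServerID: "11111111-1111-1111-1111-111111111111", // placeholder
		Action:   instance.ServerActionPoweroff,
	})
	if err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", res.Task)
}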
+// List all images available in an account. func (s *API) ListImages(req *ListImagesRequest, opts ...scw.RequestOption) (*ListImagesResponse, error) { var err error @@ -2494,7 +2498,8 @@ type GetImageRequest struct { ImageID string `json:"-"` } -// GetImage: get details of an image with the given ID. +// GetImage: get an instance image. +// Get details of an image with the given ID. func (s *API) GetImage(req *GetImageRequest, opts ...scw.RequestOption) (*GetImageResponse, error) { var err error @@ -2606,8 +2611,7 @@ type SetImageRequest struct { ID string `json:"-"` Name string `json:"name"` - // Arch: - // Default value: x86_64 + // Arch: default value: x86_64 Arch Arch `json:"arch"` CreationDate *time.Time `json:"creation_date"` @@ -2625,8 +2629,7 @@ type SetImageRequest struct { Public bool `json:"public"` RootVolume *VolumeSummary `json:"root_volume"` - // State: - // Default value: available + // State: default value: available State ImageState `json:"state"` Project string `json:"project"` @@ -2634,7 +2637,8 @@ type SetImageRequest struct { Tags *[]string `json:"tags"` } -// setImage: replace all image properties with an image message. +// setImage: update image. +// Replace all image properties with an image message. func (s *API) setImage(req *SetImageRequest, opts ...scw.RequestOption) (*setImageResponse, error) { var err error @@ -2688,7 +2692,8 @@ type DeleteImageRequest struct { ImageID string `json:"-"` } -// DeleteImage: delete the image with the given ID. +// DeleteImage: delete an instance image. +// Delete the image with the given ID. func (s *API) DeleteImage(req *DeleteImageRequest, opts ...scw.RequestOption) error { var err error @@ -2792,7 +2797,8 @@ type CreateSnapshotRequest struct { // Project: project ID of the snapshot. // Precisely one of Organization, Project must be set. Project *string `json:"project,omitempty"` - // VolumeType: overrides the volume_type of the snapshot. + // VolumeType: the volume type of the snapshot. + // Overrides the volume_type of the snapshot. // If omitted, the volume type of the original volume will be used. // Default value: unknown_volume_type VolumeType SnapshotVolumeType `json:"volume_type"` @@ -2858,7 +2864,8 @@ type GetSnapshotRequest struct { SnapshotID string `json:"-"` } -// GetSnapshot: get details of a snapshot with the given ID. +// GetSnapshot: get a snapshot. +// Get details of a snapshot with the given ID. func (s *API) GetSnapshot(req *GetSnapshotRequest, opts ...scw.RequestOption) (*GetSnapshotResponse, error) { var err error @@ -2901,13 +2908,11 @@ type setSnapshotRequest struct { Name string `json:"name"` Organization string `json:"organization"` - // VolumeType: - // Default value: l_ssd + // VolumeType: default value: l_ssd VolumeType VolumeVolumeType `json:"volume_type"` Size scw.Size `json:"size"` - // State: - // Default value: available + // State: default value: available State SnapshotState `json:"state"` BaseVolume *SnapshotBaseVolume `json:"base_volume"` @@ -2921,7 +2926,8 @@ type setSnapshotRequest struct { Tags *[]string `json:"tags"` } -// setSnapshot: replace all snapshot properties with a snapshot message. +// setSnapshot: update snapshot. +// Replace all snapshot properties with a snapshot message. func (s *API) setSnapshot(req *setSnapshotRequest, opts ...scw.RequestOption) (*setSnapshotResponse, error) { var err error @@ -2975,7 +2981,8 @@ type DeleteSnapshotRequest struct { SnapshotID string `json:"-"` } -// DeleteSnapshot: delete the snapshot with the given ID. 
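CreateSnapshot's VolumeType field above lets the snapshot be created as a different volume type than its source, while the get and delete endpoints keep the usual single-UUID shape. A short sketch with a placeholder snapshot UUID, reusing the api client from the first snippet:

snap, err := api.GetSnapshot(&instance.GetSnapshotRequest{
	SnapshotID: "88888888-8888-8888-8888-888888888888", // hypothetical
})
if err != nil {
	panic(err)
}
fmt.Println("snapshot:", snap.Snapshot.Name)

if err := api.DeleteSnapshot(&instance.DeleteSnapshotRequest{
	SnapshotID: snap.Snapshot.ID,
}); err != nil {
	panic(err)
}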
+// DeleteSnapshot: delete a snapshot. +// Delete the snapshot with the given ID. func (s *API) DeleteSnapshot(req *DeleteSnapshotRequest, opts ...scw.RequestOption) error { var err error @@ -3016,7 +3023,8 @@ type ExportSnapshotRequest struct { Key string `json:"key,omitempty"` } -// ExportSnapshot: export a snapshot to a given S3 bucket in the same region. +// ExportSnapshot: export a snapshot. +// Export a snapshot to a given S3 bucket in the same region. func (s *API) ExportSnapshot(req *ExportSnapshotRequest, opts ...scw.RequestOption) (*ExportSnapshotResponse, error) { var err error @@ -3200,7 +3208,8 @@ type GetVolumeRequest struct { VolumeID string `json:"-"` } -// GetVolume: get details of a volume with the given ID. +// GetVolume: get a volume. +// Get details of a volume with the given ID. func (s *API) GetVolume(req *GetVolumeRequest, opts ...scw.RequestOption) (*GetVolumeResponse, error) { var err error @@ -3245,7 +3254,8 @@ type UpdateVolumeRequest struct { Size *scw.Size `json:"size,omitempty"` } -// UpdateVolume: replace name and/or size properties of given ID volume with the given value(s). Any volume name can be changed while, for now, only `b_ssd` volume growing is supported. +// UpdateVolume: update a volume. +// Replace name and/or size properties of given ID volume with the given value(s). Any volume name can be changed while, for now, only `b_ssd` volume growing is supported. func (s *API) UpdateVolume(req *UpdateVolumeRequest, opts ...scw.RequestOption) (*UpdateVolumeResponse, error) { var err error @@ -3289,7 +3299,8 @@ type DeleteVolumeRequest struct { VolumeID string `json:"-"` } -// DeleteVolume: delete the volume with the given ID. +// DeleteVolume: delete a volume. +// Delete the volume with the given ID. func (s *API) DeleteVolume(req *DeleteVolumeRequest, opts ...scw.RequestOption) error { var err error @@ -3339,7 +3350,8 @@ type ListSecurityGroupsRequest struct { Page *int32 `json:"-"` } -// ListSecurityGroups: list all security groups available in an account. +// ListSecurityGroups: list security groups. +// List all security groups available in an account. func (s *API) ListSecurityGroups(req *ListSecurityGroupsRequest, opts ...scw.RequestOption) (*ListSecurityGroupsResponse, error) { var err error @@ -3474,7 +3486,8 @@ type GetSecurityGroupRequest struct { SecurityGroupID string `json:"-"` } -// GetSecurityGroup: get the details of a Security Group with the given ID. +// GetSecurityGroup: get a security group. +// Get the details of a Security Group with the given ID. func (s *API) GetSecurityGroup(req *GetSecurityGroupRequest, opts ...scw.RequestOption) (*GetSecurityGroupResponse, error) { var err error @@ -3580,7 +3593,8 @@ type setSecurityGroupRequest struct { Stateful bool `json:"stateful"` } -// setSecurityGroup: replace all security group properties with a security group message. +// setSecurityGroup: update a security group. +// Replace all security group properties with a security group message. func (s *API) setSecurityGroup(req *setSecurityGroupRequest, opts ...scw.RequestOption) (*setSecurityGroupResponse, error) { var err error @@ -3632,7 +3646,8 @@ type ListDefaultSecurityGroupRulesRequest struct { Zone scw.Zone `json:"-"` } -// ListDefaultSecurityGroupRules: lists the default rules applied to all the security groups. +// ListDefaultSecurityGroupRules: get default rules. +// Lists the default rules applied to all the security groups. 
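The UpdateVolume comment in this hunk notes that any volume can be renamed but, for now, only `b_ssd` volumes can be grown. A sketch of such a resize; the UUID is a placeholder and scw.GB is the SDK's decimal gigabyte unit.

newSize := 50 * scw.GB // scw.Size counts bytes
_, err := api.UpdateVolume(&instance.UpdateVolumeRequest{
	VolumeID: "44444444-4444-4444-4444-444444444444",
	Size:     &newSize,
})
if err != nil {
	panic(err)
}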
func (s *API) ListDefaultSecurityGroupRules(req *ListDefaultSecurityGroupRulesRequest, opts ...scw.RequestOption) (*ListSecurityGroupRulesResponse, error) { var err error @@ -3719,14 +3734,11 @@ type CreateSecurityGroupRuleRequest struct { Zone scw.Zone `json:"-"` // SecurityGroupID: UUID of the security group. SecurityGroupID string `json:"-"` - // Protocol: - // Default value: TCP + // Protocol: default value: TCP Protocol SecurityGroupRuleProtocol `json:"protocol"` - // Direction: - // Default value: inbound + // Direction: default value: inbound Direction SecurityGroupRuleDirection `json:"direction"` - // Action: - // Default value: accept + // Action: default value: accept Action SecurityGroupRuleAction `json:"action"` IPRange scw.IPNet `json:"ip_range,omitempty"` @@ -3786,7 +3798,8 @@ type SetSecurityGroupRulesRequest struct { Rules []*SetSecurityGroupRulesRequestRule `json:"rules"` } -// SetSecurityGroupRules: replaces the rules of the security group with the rules provided. This endpoint supports the update of existing rules, creation of new rules and deletion of existing rules when they are not passed in the request. +// SetSecurityGroupRules: update all the rules of a security group. +// Replaces the rules of the security group with the rules provided. This endpoint supports the update of existing rules, creation of new rules and deletion of existing rules when they are not passed in the request. func (s *API) SetSecurityGroupRules(req *SetSecurityGroupRulesRequest, opts ...scw.RequestOption) (*SetSecurityGroupRulesResponse, error) { var err error @@ -3832,7 +3845,8 @@ type DeleteSecurityGroupRuleRequest struct { SecurityGroupRuleID string `json:"-"` } -// DeleteSecurityGroupRule: delete a security group rule with the given ID. +// DeleteSecurityGroupRule: delete rule. +// Delete a security group rule with the given ID. func (s *API) DeleteSecurityGroupRule(req *DeleteSecurityGroupRuleRequest, opts ...scw.RequestOption) error { var err error @@ -3875,7 +3889,8 @@ type GetSecurityGroupRuleRequest struct { SecurityGroupRuleID string `json:"-"` } -// GetSecurityGroupRule: get details of a security group rule with the given ID. +// GetSecurityGroupRule: get rule. +// Get details of a security group rule with the given ID. func (s *API) GetSecurityGroupRule(req *GetSecurityGroupRuleRequest, opts ...scw.RequestOption) (*GetSecurityGroupRuleResponse, error) { var err error @@ -3920,14 +3935,11 @@ type setSecurityGroupRuleRequest struct { SecurityGroupRuleID string `json:"-"` ID string `json:"id"` - // Protocol: - // Default value: TCP + // Protocol: default value: TCP Protocol SecurityGroupRuleProtocol `json:"protocol"` - // Direction: - // Default value: inbound + // Direction: default value: inbound Direction SecurityGroupRuleDirection `json:"direction"` - // Action: - // Default value: accept + // Action: default value: accept Action SecurityGroupRuleAction `json:"action"` IPRange scw.IPNet `json:"ip_range"` @@ -4000,7 +4012,8 @@ type ListPlacementGroupsRequest struct { Name *string `json:"-"` } -// ListPlacementGroups: list all placement groups. +// ListPlacementGroups: list placement groups. +// List all placement groups. func (s *API) ListPlacementGroups(req *ListPlacementGroupsRequest, opts ...scw.RequestOption) (*ListPlacementGroupsResponse, error) { var err error @@ -4065,7 +4078,8 @@ type CreatePlacementGroupRequest struct { PolicyType PlacementGroupPolicyType `json:"policy_type"` } -// CreatePlacementGroup: create a new placement group. 
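The security-group rule defaults documented above (protocol TCP, direction inbound, action accept) mean a minimal create call only has to name the group and an IP range. A sketch; the create call itself does not appear in this hunk, the enum constant names follow the SDK's pattern and are assumptions, the CIDR is a placeholder, and "net" must be imported.

_, ipRange, err := net.ParseCIDR("203.0.113.0/24")
if err != nil {
	panic(err)
}
_, err = api.CreateSecurityGroupRule(&instance.CreateSecurityGroupRuleRequest{
	SecurityGroupID: "55555555-5555-5555-5555-555555555555",
	Protocol:        instance.SecurityGroupRuleProtocolTCP,      // default: TCP
	Direction:       instance.SecurityGroupRuleDirectionInbound, // default: inbound
	Action:          instance.SecurityGroupRuleActionAccept,     // default: accept
	IPRange:         scw.IPNet{IPNet: *ipRange},
})
if err != nil {
	panic(err)
}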
+// CreatePlacementGroup: create a placement group. +// Create a new placement group. func (s *API) CreatePlacementGroup(req *CreatePlacementGroupRequest, opts ...scw.RequestOption) (*CreatePlacementGroupResponse, error) { var err error @@ -4119,7 +4133,8 @@ type GetPlacementGroupRequest struct { PlacementGroupID string `json:"-"` } -// GetPlacementGroup: get the given placement group. +// GetPlacementGroup: get a placement group. +// Get the given placement group. func (s *API) GetPlacementGroup(req *GetPlacementGroupRequest, opts ...scw.RequestOption) (*GetPlacementGroupResponse, error) { var err error @@ -4160,11 +4175,9 @@ type SetPlacementGroupRequest struct { Name string `json:"name"` Organization string `json:"organization"` - // PolicyMode: - // Default value: optional + // PolicyMode: default value: optional PolicyMode PlacementGroupPolicyMode `json:"policy_mode"` - // PolicyType: - // Default value: max_availability + // PolicyType: default value: max_availability PolicyType PlacementGroupPolicyType `json:"policy_type"` Project string `json:"project"` @@ -4172,7 +4185,8 @@ type SetPlacementGroupRequest struct { Tags *[]string `json:"tags"` } -// SetPlacementGroup: set all parameters of the given placement group. +// SetPlacementGroup: set placement group. +// Set all parameters of the given placement group. func (s *API) SetPlacementGroup(req *SetPlacementGroupRequest, opts ...scw.RequestOption) (*SetPlacementGroupResponse, error) { var err error @@ -4236,7 +4250,8 @@ type UpdatePlacementGroupRequest struct { PolicyType *PlacementGroupPolicyType `json:"policy_type,omitempty"` } -// UpdatePlacementGroup: update one or more parameter of the given placement group. +// UpdatePlacementGroup: update a placement group. +// Update one or more parameter of the given placement group. func (s *API) UpdatePlacementGroup(req *UpdatePlacementGroupRequest, opts ...scw.RequestOption) (*UpdatePlacementGroupResponse, error) { var err error @@ -4317,7 +4332,8 @@ type GetPlacementGroupServersRequest struct { PlacementGroupID string `json:"-"` } -// GetPlacementGroupServers: get all servers belonging to the given placement group. +// GetPlacementGroupServers: get placement group servers. +// Get all servers belonging to the given placement group. func (s *API) GetPlacementGroupServers(req *GetPlacementGroupServersRequest, opts ...scw.RequestOption) (*GetPlacementGroupServersResponse, error) { var err error @@ -4358,7 +4374,8 @@ type SetPlacementGroupServersRequest struct { Servers []string `json:"servers"` } -// SetPlacementGroupServers: set all servers belonging to the given placement group. +// SetPlacementGroupServers: set placement group servers. +// Set all servers belonging to the given placement group. func (s *API) SetPlacementGroupServers(req *SetPlacementGroupServersRequest, opts ...scw.RequestOption) (*SetPlacementGroupServersResponse, error) { var err error @@ -4404,7 +4421,8 @@ type UpdatePlacementGroupServersRequest struct { Servers []string `json:"servers,omitempty"` } -// UpdatePlacementGroupServers: update all servers belonging to the given placement group. +// UpdatePlacementGroupServers: update placement group servers. +// Update all servers belonging to the given placement group. func (s *API) UpdatePlacementGroupServers(req *UpdatePlacementGroupServersRequest, opts ...scw.RequestOption) (*UpdatePlacementGroupServersResponse, error) { var err error @@ -4568,7 +4586,8 @@ type GetIPRequest struct { IP string `json:"-"` } -// GetIP: get details of an IP with the given ID or address. 
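SetPlacementGroupServers, renamed above, replaces the whole membership list of a placement group in one call. A sketch with placeholder UUIDs, using the same api client as before:

_, err := api.SetPlacementGroupServers(&instance.SetPlacementGroupServersRequest{
	PlacementGroupID: "66666666-6666-6666-6666-666666666666",
	Servers: []string{
		"11111111-1111-1111-1111-111111111111",
		"77777777-7777-7777-7777-777777777777",
	},
})
if err != nil {
	panic(err)
}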
+// GetIP: get a flexible IP. +// Get details of an IP with the given ID or address. func (s *API) GetIP(req *GetIPRequest, opts ...scw.RequestOption) (*GetIPResponse, error) { var err error @@ -4657,7 +4676,8 @@ type DeleteIPRequest struct { IP string `json:"-"` } -// DeleteIP: delete the IP with the given ID. +// DeleteIP: delete a flexible IP. +// Delete the IP with the given ID. func (s *API) DeleteIP(req *DeleteIPRequest, opts ...scw.RequestOption) error { var err error @@ -4701,7 +4721,8 @@ type ListPrivateNICsRequest struct { Page *int32 `json:"-"` } -// ListPrivateNICs: list all private NICs of a given server. +// ListPrivateNICs: list all private NICs. +// List all private NICs of a given server. func (s *API) ListPrivateNICs(req *ListPrivateNICsRequest, opts ...scw.RequestOption) (*ListPrivateNICsResponse, error) { var err error @@ -4803,7 +4824,8 @@ type GetPrivateNICRequest struct { PrivateNicID string `json:"-"` } -// GetPrivateNIC: get private NIC properties. +// GetPrivateNIC: get a private NIC. +// Get private NIC properties. func (s *API) GetPrivateNIC(req *GetPrivateNICRequest, opts ...scw.RequestOption) (*GetPrivateNICResponse, error) { var err error @@ -4850,7 +4872,8 @@ type UpdatePrivateNICRequest struct { Tags *[]string `json:"tags,omitempty"` } -// UpdatePrivateNIC: update one or more parameter/s to a given private NIC. +// UpdatePrivateNIC: update a private NIC. +// Update one or more parameter/s to a given private NIC. func (s *API) UpdatePrivateNIC(req *UpdatePrivateNICRequest, opts ...scw.RequestOption) (*PrivateNIC, error) { var err error @@ -5000,7 +5023,8 @@ type GetBootscriptRequest struct { BootscriptID string `json:"-"` } -// Deprecated: GetBootscript: get details of a bootscript with the given ID. +// Deprecated: GetBootscript: get bootscripts. +// Get details of a bootscript with the given ID. func (s *API) GetBootscript(req *GetBootscriptRequest, opts ...scw.RequestOption) (*GetBootscriptResponse, error) { var err error diff --git a/vendor/github.com/scaleway/scaleway-sdk-go/api/marketplace/v1/marketplace_sdk.go b/vendor/github.com/scaleway/scaleway-sdk-go/api/marketplace/v1/marketplace_sdk.go index 04abb9cf4..b7a2311f8 100644 --- a/vendor/github.com/scaleway/scaleway-sdk-go/api/marketplace/v1/marketplace_sdk.go +++ b/vendor/github.com/scaleway/scaleway-sdk-go/api/marketplace/v1/marketplace_sdk.go @@ -77,7 +77,8 @@ type Image struct { ModificationDate *time.Time `json:"modification_date"` // ValidUntil: expiration date of this image. ValidUntil *time.Time `json:"valid_until"` - // Label: typically an identifier for a distribution (ex. "ubuntu_focal"). + // Label: label of this image. + // Typically an identifier for a distribution (ex. "ubuntu_focal"). Label string `json:"label"` // Versions: list of versions of this image. Versions []*Version `json:"versions"` @@ -101,7 +102,8 @@ type ListVersionsResponse struct { // LocalImage: local image. type LocalImage struct { - // ID: version you will typically use to define an image in an API call. + // ID: UUID of this local image. + // Version you will typically use to define an image in an API call. ID string `json:"id"` // CompatibleCommercialTypes: list of all commercial types that are compatible with this local image. 
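The marketplace comments above describe the split: an image Label such as "ubuntu_focal" names a distribution, while a LocalImage ID is the UUID you actually pass to the instance API. GetLocalImageIDByLabel is the marketplace v1 helper commonly used to resolve one into the other; it is not part of this diff, so treat its exact shape here as an assumption.

// Assumed import: marketplace "github.com/scaleway/scaleway-sdk-go/api/marketplace/v1"
mkt := marketplace.NewAPI(client)
localImageID, err := mkt.GetLocalImageIDByLabel(&marketplace.GetLocalImageIDByLabelRequest{
	ImageLabel:     "ubuntu_focal",
	Zone:           scw.ZoneFrPar1,
	CommercialType: "DEV1-S",
})
if err != nil {
	panic(err)
}
fmt.Println("local image for DEV1-S:", localImageID)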
CompatibleCommercialTypes []string `json:"compatible_commercial_types"` diff --git a/vendor/github.com/scaleway/scaleway-sdk-go/scw/client.go b/vendor/github.com/scaleway/scaleway-sdk-go/scw/client.go index 0d7eaf750..6bff81eea 100644 --- a/vendor/github.com/scaleway/scaleway-sdk-go/scw/client.go +++ b/vendor/github.com/scaleway/scaleway-sdk-go/scw/client.go @@ -253,11 +253,11 @@ func (c *Client) do(req *ScalewayRequest, res interface{}) (sdkErr error) { // Handle instance API X-Total-Count header xTotalCountStr := httpResponse.Header.Get("X-Total-Count") if legacyLister, isLegacyLister := res.(legacyLister); isLegacyLister && xTotalCountStr != "" { - xTotalCount, err := strconv.Atoi(xTotalCountStr) + xTotalCount, err := strconv.ParseInt(xTotalCountStr, 10, 32) if err != nil { return errors.Wrap(err, "could not parse X-Total-Count header") } - legacyLister.UnsafeSetTotalCount(xTotalCount) + legacyLister.UnsafeSetTotalCount(int(xTotalCount)) } } diff --git a/vendor/github.com/scaleway/scaleway-sdk-go/scw/config.go b/vendor/github.com/scaleway/scaleway-sdk-go/scw/config.go index 1c38525bf..7277aa9d2 100644 --- a/vendor/github.com/scaleway/scaleway-sdk-go/scw/config.go +++ b/vendor/github.com/scaleway/scaleway-sdk-go/scw/config.go @@ -324,6 +324,7 @@ func MergeProfiles(original *Profile, others ...*Profile) *Profile { DefaultProjectID: original.DefaultProjectID, DefaultRegion: original.DefaultRegion, DefaultZone: original.DefaultZone, + SendTelemetry: original.SendTelemetry, } for _, other := range others { @@ -351,6 +352,9 @@ func MergeProfiles(original *Profile, others ...*Profile) *Profile { if other.DefaultZone != nil { np.DefaultZone = other.DefaultZone } + if other.SendTelemetry != nil { + np.SendTelemetry = other.SendTelemetry + } } return np diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process_darwin.go b/vendor/github.com/shirou/gopsutil/v3/process/process_darwin.go index 55c31962a..176661cbd 100644 --- a/vendor/github.com/shirou/gopsutil/v3/process/process_darwin.go +++ b/vendor/github.com/shirou/gopsutil/v3/process/process_darwin.go @@ -82,8 +82,6 @@ func (p *Process) NameWithContext(ctx context.Context) (string, error) { extendedName := filepath.Base(cmdName) if strings.HasPrefix(extendedName, p.name) { name = extendedName - } else { - name = cmdName } } } diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process_freebsd.go b/vendor/github.com/shirou/gopsutil/v3/process/process_freebsd.go index a123ccf9b..85134b7ee 100644 --- a/vendor/github.com/shirou/gopsutil/v3/process/process_freebsd.go +++ b/vendor/github.com/shirou/gopsutil/v3/process/process_freebsd.go @@ -55,8 +55,6 @@ func (p *Process) NameWithContext(ctx context.Context) (string, error) { extendedName := filepath.Base(cmdlineSlice[0]) if strings.HasPrefix(extendedName, p.name) { name = extendedName - } else { - name = cmdlineSlice[0] } } } diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process_linux.go b/vendor/github.com/shirou/gopsutil/v3/process/process_linux.go index 29c447390..37cb7ca44 100644 --- a/vendor/github.com/shirou/gopsutil/v3/process/process_linux.go +++ b/vendor/github.com/shirou/gopsutil/v3/process/process_linux.go @@ -845,8 +845,6 @@ func (p *Process) fillFromStatusWithContext(ctx context.Context) error { extendedName := filepath.Base(cmdlineSlice[0]) if strings.HasPrefix(extendedName, p.name) { p.name = extendedName - } else { - p.name = cmdlineSlice[0] } } } diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process_openbsd.go 
b/vendor/github.com/shirou/gopsutil/v3/process/process_openbsd.go index cbb1a77f6..a58c5eb11 100644 --- a/vendor/github.com/shirou/gopsutil/v3/process/process_openbsd.go +++ b/vendor/github.com/shirou/gopsutil/v3/process/process_openbsd.go @@ -60,8 +60,6 @@ func (p *Process) NameWithContext(ctx context.Context) (string, error) { extendedName := filepath.Base(cmdlineSlice[0]) if strings.HasPrefix(extendedName, p.name) { name = extendedName - } else { - name = cmdlineSlice[0] } } } diff --git a/vendor/go.opentelemetry.io/collector/extension/extension.go b/vendor/go.opentelemetry.io/collector/extension/extension.go index 3f3b087cc..6b8df571b 100644 --- a/vendor/go.opentelemetry.io/collector/extension/extension.go +++ b/vendor/go.opentelemetry.io/collector/extension/extension.go @@ -8,6 +8,7 @@ import ( "fmt" "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/confmap" ) // Extension is the interface for objects hosted by the OpenTelemetry Collector that @@ -32,6 +33,13 @@ type PipelineWatcher interface { NotReady() error } +// ConfigWatcher is an interface that should be implemented by an extension that +// wishes to be notified of the Collector's effective configuration. +type ConfigWatcher interface { + // NotifyConfig notifies the extension of the Collector's current effective configuration. + NotifyConfig(ctx context.Context, conf *confmap.Conf) error +} + // CreateSettings is passed to Factory.Create(...) function. type CreateSettings struct { // ID returns the ID of the component that will be created. diff --git a/vendor/go.opentelemetry.io/collector/otelcol/collector.go b/vendor/go.opentelemetry.io/collector/otelcol/collector.go index 788c022e9..557ff6f18 100644 --- a/vendor/go.opentelemetry.io/collector/otelcol/collector.go +++ b/vendor/go.opentelemetry.io/collector/otelcol/collector.go @@ -18,6 +18,7 @@ import ( "go.uber.org/zap" "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/confmap" "go.opentelemetry.io/collector/connector" "go.opentelemetry.io/collector/exporter" "go.opentelemetry.io/collector/extension" @@ -143,6 +144,17 @@ func (col *Collector) Shutdown() { func (col *Collector) setupConfigurationComponents(ctx context.Context) error { col.setCollectorState(StateStarting) + var conf *confmap.Conf + + if cp, ok := col.set.ConfigProvider.(ConfmapProvider); ok { + var err error + conf, err = cp.GetConfmap(ctx) + + if err != nil { + return fmt.Errorf("failed to resolve config: %w", err) + } + } + cfg, err := col.set.ConfigProvider.Get(ctx, col.set.Factories) if err != nil { return fmt.Errorf("failed to get config: %w", err) @@ -154,6 +166,7 @@ func (col *Collector) setupConfigurationComponents(ctx context.Context) error { col.service, err = service.New(ctx, service.Settings{ BuildInfo: col.set.BuildInfo, + CollectorConf: conf, Receivers: receiver.NewBuilder(cfg.Receivers, col.set.Factories.Receivers), Processors: processor.NewBuilder(cfg.Processors, col.set.Factories.Processors), Exporters: exporter.NewBuilder(cfg.Exporters, col.set.Factories.Exporters), @@ -174,6 +187,7 @@ func (col *Collector) setupConfigurationComponents(ctx context.Context) error { return multierr.Combine(err, col.service.Shutdown(ctx)) } col.setCollectorState(StateRunning) + return nil } diff --git a/vendor/go.opentelemetry.io/collector/otelcol/configprovider.go b/vendor/go.opentelemetry.io/collector/otelcol/configprovider.go index b50b47efe..c266c9a47 100644 --- a/vendor/go.opentelemetry.io/collector/otelcol/configprovider.go +++ 
b/vendor/go.opentelemetry.io/collector/otelcol/configprovider.go @@ -50,10 +50,26 @@ type ConfigProvider interface { Shutdown(ctx context.Context) error } +// ConfmapProvider is an optional interface to be implemented by ConfigProviders +// to provide confmap.Conf objects representing a marshaled version of the +// Collector's configuration. +// +// The purpose of this interface is that otelcol.ConfigProvider structs do not +// necessarily need to use confmap.Conf as their underlying config structure. +type ConfmapProvider interface { + // GetConfmap resolves the Collector's configuration and provides it as a confmap.Conf object. + // + // Should never be called concurrently with itself or any ConfigProvider method. + GetConfmap(ctx context.Context) (*confmap.Conf, error) +} + type configProvider struct { mapResolver *confmap.Resolver } +var _ ConfigProvider = &configProvider{} +var _ ConfmapProvider = &configProvider{} + // ConfigProviderSettings are the settings to configure the behavior of the ConfigProvider. type ConfigProviderSettings struct { // ResolverSettings are the settings to configure the behavior of the confmap.Resolver. @@ -106,6 +122,15 @@ func (cm *configProvider) Shutdown(ctx context.Context) error { return cm.mapResolver.Shutdown(ctx) } +func (cm *configProvider) GetConfmap(ctx context.Context) (*confmap.Conf, error) { + conf, err := cm.mapResolver.Resolve(ctx) + if err != nil { + return nil, fmt.Errorf("cannot resolve the configuration: %w", err) + } + + return conf, nil +} + func newDefaultConfigProviderSettings(uris []string) ConfigProviderSettings { return ConfigProviderSettings{ ResolverSettings: confmap.ResolverSettings{ diff --git a/vendor/go.opentelemetry.io/collector/service/extensions/extensions.go b/vendor/go.opentelemetry.io/collector/service/extensions/extensions.go index 76d71fb7a..5ff92cd58 100644 --- a/vendor/go.opentelemetry.io/collector/service/extensions/extensions.go +++ b/vendor/go.opentelemetry.io/collector/service/extensions/extensions.go @@ -12,6 +12,7 @@ import ( "go.uber.org/multierr" "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/confmap" "go.opentelemetry.io/collector/extension" "go.opentelemetry.io/collector/service/internal/components" "go.opentelemetry.io/collector/service/internal/zpages" @@ -72,6 +73,17 @@ func (bes *Extensions) NotifyPipelineNotReady() error { return errs } +func (bes *Extensions) NotifyConfig(ctx context.Context, conf *confmap.Conf) error { + var errs error + for _, ext := range bes.extMap { + if cw, ok := ext.(extension.ConfigWatcher); ok { + clonedConf := confmap.NewFromStringMap(conf.ToStringMap()) + errs = multierr.Append(errs, cw.NotifyConfig(ctx, clonedConf)) + } + } + return errs +} + func (bes *Extensions) GetExtensions() map[component.ID]component.Component { result := make(map[component.ID]component.Component, len(bes.extMap)) for extID, v := range bes.extMap { diff --git a/vendor/go.opentelemetry.io/collector/service/service.go b/vendor/go.opentelemetry.io/collector/service/service.go index 318f99a86..ed9540ec9 100644 --- a/vendor/go.opentelemetry.io/collector/service/service.go +++ b/vendor/go.opentelemetry.io/collector/service/service.go @@ -17,6 +17,7 @@ import ( "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/config/configtelemetry" + "go.opentelemetry.io/collector/confmap" "go.opentelemetry.io/collector/connector" "go.opentelemetry.io/collector/exporter" "go.opentelemetry.io/collector/extension" @@ -36,6 +37,9 @@ type Settings struct { // BuildInfo 
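Putting the collector-side pieces together: setupConfigurationComponents resolves a confmap.Conf through the new ConfmapProvider interface, hands it to the service, and Extensions.NotifyConfig above fans a clone out to every extension implementing extension.ConfigWatcher. A minimal sketch of such an extension; the package, type name, and output are illustrative, not part of this patch.

package configlogger

import (
	"context"
	"fmt"

	"go.opentelemetry.io/collector/component"
	"go.opentelemetry.io/collector/confmap"
	"go.opentelemetry.io/collector/extension"
)

// configLogger is a no-op extension that reports on the Collector's
// effective configuration when notified.
type configLogger struct{}

func (configLogger) Start(context.Context, component.Host) error { return nil }
func (configLogger) Shutdown(context.Context) error              { return nil }

// NotifyConfig receives a clone of the resolved configuration, so
// inspecting or mutating it cannot affect other extensions.
func (configLogger) NotifyConfig(_ context.Context, conf *confmap.Conf) error {
	fmt.Printf("effective config has %d keys\n", len(conf.AllKeys()))
	return nil
}

var (
	_ extension.Extension     = configLogger{}
	_ extension.ConfigWatcher = configLogger{}
)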
provides collector start information. BuildInfo component.BuildInfo + // CollectorConf contains the Collector's current configuration + CollectorConf *confmap.Conf + // Receivers builder for receivers. Receivers *receiver.Builder @@ -68,6 +72,7 @@ type Service struct { telemetrySettings component.TelemetrySettings host *serviceHost telemetryInitializer *telemetryInitializer + collectorConf *confmap.Conf } func New(ctx context.Context, set Settings, cfg Config) (*Service, error) { @@ -89,6 +94,7 @@ func New(ctx context.Context, set Settings, cfg Config) (*Service, error) { asyncErrorChannel: set.AsyncErrorChannel, }, telemetryInitializer: newColTelemetry(useOtel, disableHighCard, extendedConfig), + collectorConf: set.CollectorConf, } var err error srv.telemetry, err = telemetry.New(ctx, telemetry.Settings{ZapOptions: set.LoggingOptions}, cfg.Telemetry) @@ -138,6 +144,12 @@ func (srv *Service) Start(ctx context.Context) error { return fmt.Errorf("failed to start extensions: %w", err) } + if srv.collectorConf != nil { + if err := srv.host.serviceExtensions.NotifyConfig(ctx, srv.collectorConf); err != nil { + return err + } + } + if err := srv.host.pipelines.StartAll(ctx, srv.host); err != nil { return fmt.Errorf("cannot start pipelines: %w", err) } diff --git a/vendor/golang.org/x/oauth2/google/appengine_gen1.go b/vendor/golang.org/x/oauth2/google/appengine_gen1.go index 16c6c6b90..e61587945 100644 --- a/vendor/golang.org/x/oauth2/google/appengine_gen1.go +++ b/vendor/golang.org/x/oauth2/google/appengine_gen1.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build appengine -// +build appengine // This file applies to App Engine first generation runtimes (<= Go 1.9). diff --git a/vendor/golang.org/x/oauth2/google/appengine_gen2_flex.go b/vendor/golang.org/x/oauth2/google/appengine_gen2_flex.go index a7e27b3d2..9c79aa0a0 100644 --- a/vendor/golang.org/x/oauth2/google/appengine_gen2_flex.go +++ b/vendor/golang.org/x/oauth2/google/appengine_gen2_flex.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build !appengine -// +build !appengine // This file applies to App Engine second generation runtimes (>= Go 1.11) and App Engine flexible. diff --git a/vendor/golang.org/x/oauth2/internal/client_appengine.go b/vendor/golang.org/x/oauth2/internal/client_appengine.go index e1755d1d9..d28140f78 100644 --- a/vendor/golang.org/x/oauth2/internal/client_appengine.go +++ b/vendor/golang.org/x/oauth2/internal/client_appengine.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build appengine -// +build appengine package internal diff --git a/vendor/google.golang.org/api/compute/v1/compute-api.json b/vendor/google.golang.org/api/compute/v1/compute-api.json index 8b77f64d4..1abba41e1 100644 --- a/vendor/google.golang.org/api/compute/v1/compute-api.json +++ b/vendor/google.golang.org/api/compute/v1/compute-api.json @@ -26592,6 +26592,53 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, + "getNatIpInfo": { + "description": "Retrieves runtime NAT IP information.", + "flatPath": "projects/{project}/regions/{region}/routers/{router}/getNatIpInfo", + "httpMethod": "GET", + "id": "compute.routers.getNatIpInfo", + "parameterOrder": [ + "project", + "region", + "router" + ], + "parameters": { + "natName": { + "description": "Name of the nat service to filter the NAT IP information. If it is omitted, all nats for this router will be returned. 
Name should conform to RFC1035.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "router": { + "description": "Name of the Router resource to query for Nat IP information. The name should conform to RFC1035.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/regions/{region}/routers/{router}/getNatIpInfo", + "response": { + "$ref": "NatIpInfoResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, "getNatMappingInfo": { "description": "Retrieves runtime Nat mapping information of VM endpoints.", "flatPath": "projects/{project}/regions/{region}/routers/{router}/getNatMappingInfo", @@ -33788,9 +33835,32 @@ } } }, - "revision": "20230711", + "revision": "20230725", "rootUrl": "https://compute.googleapis.com/", "schemas": { + "AWSV4Signature": { + "description": "Contains the configurations necessary to generate a signature for access to private storage buckets that support Signature Version 4 for authentication. The service name for generating the authentication header will always default to 's3'.", + "id": "AWSV4Signature", + "properties": { + "accessKey": { + "description": "The access key used for s3 bucket authentication. Required for updating or creating a backend that uses AWS v4 signature authentication, but will not be returned as part of the configuration when queried with a REST API GET request. @InputOnly", + "type": "string" + }, + "accessKeyId": { + "description": "The identifier of an access key used for s3 bucket authentication.", + "type": "string" + }, + "accessKeyVersion": { + "description": "The optional version identifier for the access key. You can use this to keep track of different iterations of your access key.", + "type": "string" + }, + "originRegion": { + "description": "The name of the cloud region of your origin. This is a free-form field with the name of the region your cloud uses to host your origin. For example, \"us-east-1\" for AWS or \"us-ashburn-1\" for OCI.", + "type": "string" + } + }, + "type": "object" + }, "AcceleratorConfig": { "description": "A specification of the type and number of accelerator cards attached to the instance.", "id": "AcceleratorConfig", @@ -35334,7 +35404,7 @@ "type": "string" }, "replicaZones": { - "description": "Required for each regional disk associated with the instance. Specify the URLs of the zones where the disk should be replicated to. You must provide exactly two replica zones, and one zone must be the same as the instance zone. You can't use this option with boot disks.", + "description": "Required for each regional disk associated with the instance. Specify the URLs of the zones where the disk should be replicated to. You must provide exactly two replica zones, and one zone must be the same as the instance zone.", "items": { "type": "string" }, @@ -38034,6 +38104,10 @@ "description": "Per-instance properties to be set on individual instances. 
To be extended in the future.", "id": "BulkInsertInstanceResourcePerInstanceProperties", "properties": { + "hostname": { + "description": "Specifies the hostname of the instance. More details in: https://cloud.google.com/compute/docs/instances/custom-hostname-vm#naming_convention", + "type": "string" + }, "name": { "description": "This field is only temporary. It will be removed. Do not use it.", "type": "string" @@ -38263,9 +38337,11 @@ "description": "The type of commitment, which affects the discount rate and the eligible resources. Type MEMORY_OPTIMIZED specifies a commitment that will only apply to memory optimized machines. Type ACCELERATOR_OPTIMIZED specifies a commitment that will only apply to accelerator optimized machines.", "enum": [ "ACCELERATOR_OPTIMIZED", + "ACCELERATOR_OPTIMIZED_A3", "COMPUTE_OPTIMIZED", "COMPUTE_OPTIMIZED_C2D", "COMPUTE_OPTIMIZED_C3", + "COMPUTE_OPTIMIZED_H3", "GENERAL_PURPOSE", "GENERAL_PURPOSE_E2", "GENERAL_PURPOSE_N2", @@ -38289,6 +38365,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -41965,7 +42043,7 @@ "type": "string" }, "noAutomateDnsZone": { - "description": "This is used in PSC consumer ForwardingRule to control whether it should try to auto-generate a DNS zone or not. Non-PSC forwarding rules do not use this field.", + "description": "This is used in PSC consumer ForwardingRule to control whether it should try to auto-generate a DNS zone or not. Non-PSC forwarding rules do not use this field. Once set, this field is not mutable.", "type": "boolean" }, "portRange": { @@ -42039,7 +42117,7 @@ "type": "string" }, "target": { - "description": "The URL of the target resource to receive the matched traffic. For regional forwarding rules, this target must be in the same region as the forwarding rule. For global forwarding rules, this target must be a global load balancing resource. The forwarded traffic must be of a type appropriate to the target object. - For load balancers, see the \"Target\" column in [Port specifications](https://cloud.google.com/load-balancing/docs/forwarding-rule-concepts#ip_address_specifications). - For Private Service Connect forwarding rules that forward traffic to Google APIs, provide the name of a supported Google API bundle: - vpc-sc - APIs that support VPC Service Controls. - all-apis - All supported Google APIs. - For Private Service Connect forwarding rules that forward traffic to managed services, the target must be a service attachment. ", + "description": "The URL of the target resource to receive the matched traffic. For regional forwarding rules, this target must be in the same region as the forwarding rule. For global forwarding rules, this target must be a global load balancing resource. The forwarded traffic must be of a type appropriate to the target object. - For load balancers, see the \"Target\" column in [Port specifications](https://cloud.google.com/load-balancing/docs/forwarding-rule-concepts#ip_address_specifications). - For Private Service Connect forwarding rules that forward traffic to Google APIs, provide the name of a supported Google API bundle: - vpc-sc - APIs that support VPC Service Controls. - all-apis - All supported Google APIs. - For Private Service Connect forwarding rules that forward traffic to managed services, the target must be a service attachment. The target is not mutable once set as a service attachment. ", "type": "string" } }, @@ -52529,6 +52607,72 @@ }, "type": "object" }, + "NatIpInfo": { + "description": "Contains NAT IP information of a NAT config (i.e. 
usage status, mode).", + "id": "NatIpInfo", + "properties": { + "natIpInfoMappings": { + "description": "A list of all NAT IPs assigned to this NAT config.", + "items": { + "$ref": "NatIpInfoNatIpInfoMapping" + }, + "type": "array" + }, + "natName": { + "description": "Name of the NAT config which the NAT IP belongs to.", + "type": "string" + } + }, + "type": "object" + }, + "NatIpInfoNatIpInfoMapping": { + "description": "Contains information of a NAT IP.", + "id": "NatIpInfoNatIpInfoMapping", + "properties": { + "mode": { + "description": "Specifies whether NAT IP is auto or manual.", + "enum": [ + "AUTO", + "MANUAL" + ], + "enumDescriptions": [ + "", + "" + ], + "type": "string" + }, + "natIp": { + "description": "NAT IP address. For example: 203.0.113.11.", + "type": "string" + }, + "usage": { + "description": "Specifies whether NAT IP is currently serving at least one endpoint or not.", + "enum": [ + "IN_USE", + "UNUSED" + ], + "enumDescriptions": [ + "", + "" + ], + "type": "string" + } + }, + "type": "object" + }, + "NatIpInfoResponse": { + "id": "NatIpInfoResponse", + "properties": { + "result": { + "description": "[Output Only] A list of NAT IP information.", + "items": { + "$ref": "NatIpInfo" + }, + "type": "array" + } + }, + "type": "object" + }, "Network": { "description": "Represents a VPC Network resource. Networks connect resources to each other and to the internet. For more information, read Virtual Private Cloud (VPC) Network.", "id": "Network", @@ -57084,7 +57228,7 @@ "type": "object" }, "Operation": { - "description": "Represents an Operation resource. Google Compute Engine has three Operation resources: * [Global](/compute/docs/reference/rest/v1/globalOperations) * [Regional](/compute/docs/reference/rest/v1/regionOperations) * [Zonal](/compute/docs/reference/rest/v1/zoneOperations) You can use an operation resource to manage asynchronous API requests. For more information, read Handling API responses. Operations can be global, regional or zonal. - For global operations, use the `globalOperations` resource. - For regional operations, use the `regionOperations` resource. - For zonal operations, use the `zonalOperations` resource. For more information, read Global, Regional, and Zonal Resources.", + "description": "Represents an Operation resource. Google Compute Engine has three Operation resources: * [Global](/compute/docs/reference/rest/v1/globalOperations) * [Regional](/compute/docs/reference/rest/v1/regionOperations) * [Zonal](/compute/docs/reference/rest/v1/zoneOperations) You can use an operation resource to manage asynchronous API requests. For more information, read Handling API responses. Operations can be global, regional or zonal. - For global operations, use the `globalOperations` resource. - For regional operations, use the `regionOperations` resource. - For zonal operations, use the `zoneOperations` resource. For more information, read Global, Regional, and Zonal Resources.", "id": "Operation", "properties": { "clientOperationId": { @@ -57199,6 +57343,10 @@ "description": "[Output Only] Server-defined URL for the resource.", "type": "string" }, + "setCommonInstanceMetadataOperationMetadata": { + "$ref": "SetCommonInstanceMetadataOperationMetadata", + "description": "[Output Only] If the operation is for projects.setCommonInstanceMetadata, this field will contain information on all underlying zonal actions and their state." + }, "startTime": { "description": "[Output Only] The time that this operation was started by the server. 
This value is in RFC3339 text format.", "type": "string" @@ -57231,7 +57379,7 @@ "type": "string" }, "user": { - "description": "[Output Only] User who requested the operation, for example: `user@example.com`.", + "description": "[Output Only] User who requested the operation, for example: `user@example.com` or `alice_smith_identifier (global/workforcePools/example-com-us-employees)`.", "type": "string" }, "warnings": { @@ -65870,6 +66018,40 @@ "" ], "type": "string" + }, + "thresholdConfigs": { + "description": "Configuration options for layer7 adaptive protection for various customizable thresholds.", + "items": { + "$ref": "SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfigThresholdConfig" + }, + "type": "array" + } + }, + "type": "object" + }, + "SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfigThresholdConfig": { + "id": "SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfigThresholdConfig", + "properties": { + "autoDeployConfidenceThreshold": { + "format": "float", + "type": "number" + }, + "autoDeployExpirationSec": { + "format": "int32", + "type": "integer" + }, + "autoDeployImpactedBaselineThreshold": { + "format": "float", + "type": "number" + }, + "autoDeployLoadThreshold": { + "format": "float", + "type": "number" + }, + "name": { + "description": "The name must be 1-63 characters long, and comply with RFC1035. The name must be unique within the security policy.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "type": "string" } }, "type": "object" @@ -66444,6 +66626,10 @@ "description": "The authentication and authorization settings for a BackendService.", "id": "SecuritySettings", "properties": { + "awsV4Authentication": { + "$ref": "AWSV4Signature", + "description": "The configuration needed to generate a signature for access to private storage buckets that support AWS's Signature Version 4 for authentication. Allowed only for INTERNET_IP_PORT and INTERNET_FQDN_PORT NEG backends." + }, "clientTlsPolicy": { "description": "Optional. A URL referring to a networksecurity.ClientTlsPolicy resource that describes how clients should authenticate with this service's backends. clientTlsPolicy only applies to a global BackendService with the loadBalancingScheme set to INTERNAL_SELF_MANAGED. If left blank, communications are not encrypted.", "type": "string" @@ -67147,6 +67333,53 @@ }, "type": "object" }, + "SetCommonInstanceMetadataOperationMetadata": { + "id": "SetCommonInstanceMetadataOperationMetadata", + "properties": { + "clientOperationId": { + "description": "[Output Only] The client operation id.", + "type": "string" + }, + "perLocationOperations": { + "additionalProperties": { + "$ref": "SetCommonInstanceMetadataOperationMetadataPerLocationOperationInfo" + }, + "description": "[Output Only] Status information per location (location name is key). Example key: zones/us-central1-a", + "type": "object" + } + }, + "type": "object" + }, + "SetCommonInstanceMetadataOperationMetadataPerLocationOperationInfo": { + "id": "SetCommonInstanceMetadataOperationMetadataPerLocationOperationInfo", + "properties": { + "error": { + "$ref": "Status", + "description": "[Output Only] If state is `ABANDONED` or `FAILED`, this field is populated." 
+ }, + "state": { + "description": "[Output Only] Status of the action, which can be one of the following: `PROPAGATING`, `PROPAGATED`, `ABANDONED`, `FAILED`, or `DONE`.", + "enum": [ + "ABANDONED", + "DONE", + "FAILED", + "PROPAGATED", + "PROPAGATING", + "UNSPECIFIED" + ], + "enumDescriptions": [ + "Operation not tracked in this location e.g. zone is marked as DOWN.", + "Operation has completed successfully.", + "Operation is in an error state.", + "Operation is confirmed to be in the location.", + "Operation is not yet confirmed to have been created in the location.", + "" + ], + "type": "string" + } + }, + "type": "object" + }, "ShareSettings": { "description": "The share setting for reservations and sole tenancy node groups.", "id": "ShareSettings", @@ -69046,6 +69279,33 @@ }, "type": "object" }, + "Status": { + "description": "The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors).", + "id": "Status", + "properties": { + "code": { + "description": "The status code, which should be an enum value of google.rpc.Code.", + "format": "int32", + "type": "integer" + }, + "details": { + "description": "A list of messages that carry the error details. There is a common set of message types for APIs to use.", + "items": { + "additionalProperties": { + "description": "Properties of the object. Contains field @type with type URL.", + "type": "any" + }, + "type": "object" + }, + "type": "array" + }, + "message": { + "description": "A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.", + "type": "string" + } + }, + "type": "object" + }, "Subnetwork": { "description": "Represents a Subnetwork resource. A subnetwork (also known as a subnet) is a logical partition of a Virtual Private Cloud network with one primary IP range and zero or more secondary IP ranges. For more information, read Virtual Private Cloud (VPC) Network.", "id": "Subnetwork", diff --git a/vendor/google.golang.org/api/compute/v1/compute-gen.go b/vendor/google.golang.org/api/compute/v1/compute-gen.go index 811675da1..ccac09b69 100644 --- a/vendor/google.golang.org/api/compute/v1/compute-gen.go +++ b/vendor/google.golang.org/api/compute/v1/compute-gen.go @@ -1265,6 +1265,55 @@ type ZonesService struct { s *Service } +// AWSV4Signature: Contains the configurations necessary to generate a +// signature for access to private storage buckets that support +// Signature Version 4 for authentication. The service name for +// generating the authentication header will always default to 's3'. +type AWSV4Signature struct { + // AccessKey: The access key used for s3 bucket authentication. Required + // for updating or creating a backend that uses AWS v4 signature + // authentication, but will not be returned as part of the configuration + // when queried with a REST API GET request. @InputOnly + AccessKey string `json:"accessKey,omitempty"` + + // AccessKeyId: The identifier of an access key used for s3 bucket + // authentication. 
+ AccessKeyId string `json:"accessKeyId,omitempty"` + + // AccessKeyVersion: The optional version identifier for the access key. + // You can use this to keep track of different iterations of your access + // key. + AccessKeyVersion string `json:"accessKeyVersion,omitempty"` + + // OriginRegion: The name of the cloud region of your origin. This is a + // free-form field with the name of the region your cloud uses to host + // your origin. For example, "us-east-1" for AWS or "us-ashburn-1" for + // OCI. + OriginRegion string `json:"originRegion,omitempty"` + + // ForceSendFields is a list of field names (e.g. "AccessKey") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "AccessKey") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *AWSV4Signature) MarshalJSON() ([]byte, error) { + type NoMethod AWSV4Signature + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // AcceleratorConfig: A specification of the type and number of // accelerator cards attached to the instance. type AcceleratorConfig struct { @@ -3331,8 +3380,7 @@ type AttachedDiskInitializeParams struct { // ReplicaZones: Required for each regional disk associated with the // instance. Specify the URLs of the zones where the disk should be // replicated to. You must provide exactly two replica zones, and one - // zone must be the same as the instance zone. You can't use this option - // with boot disks. + // zone must be the same as the instance zone. ReplicaZones []string `json:"replicaZones,omitempty"` // ResourceManagerTags: Resource manager tags to be bound to the disk. @@ -7581,11 +7629,15 @@ func (s *BulkInsertInstanceResource) MarshalJSON() ([]byte, error) { // properties to be set on individual instances. To be extended in the // future. type BulkInsertInstanceResourcePerInstanceProperties struct { + // Hostname: Specifies the hostname of the instance. More details in: + // https://cloud.google.com/compute/docs/instances/custom-hostname-vm#naming_convention + Hostname string `json:"hostname,omitempty"` + // Name: This field is only temporary. It will be removed. Do not use // it. Name string `json:"name,omitempty"` - // ForceSendFields is a list of field names (e.g. "Name") to + // ForceSendFields is a list of field names (e.g. "Hostname") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -7593,8 +7645,8 @@ type BulkInsertInstanceResourcePerInstanceProperties struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. 
"Name") to include in API - // requests with the JSON null value. By default, fields with empty + // NullFields is a list of field names (e.g. "Hostname") to include in + // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. @@ -7879,9 +7931,11 @@ type Commitment struct { // // Possible values: // "ACCELERATOR_OPTIMIZED" + // "ACCELERATOR_OPTIMIZED_A3" // "COMPUTE_OPTIMIZED" // "COMPUTE_OPTIMIZED_C2D" // "COMPUTE_OPTIMIZED_C3" + // "COMPUTE_OPTIMIZED_H3" // "GENERAL_PURPOSE" // "GENERAL_PURPOSE_E2" // "GENERAL_PURPOSE_N2" @@ -13176,7 +13230,8 @@ type ForwardingRule struct { // NoAutomateDnsZone: This is used in PSC consumer ForwardingRule to // control whether it should try to auto-generate a DNS zone or not. - // Non-PSC forwarding rules do not use this field. + // Non-PSC forwarding rules do not use this field. Once set, this field + // is not mutable. NoAutomateDnsZone bool `json:"noAutomateDnsZone,omitempty"` // PortRange: This field can only be used: - If IPProtocol is one of @@ -13287,7 +13342,8 @@ type ForwardingRule struct { // vpc-sc - APIs that support VPC Service Controls. - all-apis - All // supported Google APIs. - For Private Service Connect forwarding rules // that forward traffic to managed services, the target must be a - // service attachment. + // service attachment. The target is not mutable once set as a service + // attachment. Target string `json:"target,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -28394,6 +28450,113 @@ func (s *NamedPort) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// NatIpInfo: Contains NAT IP information of a NAT config (i.e. usage +// status, mode). +type NatIpInfo struct { + // NatIpInfoMappings: A list of all NAT IPs assigned to this NAT config. + NatIpInfoMappings []*NatIpInfoNatIpInfoMapping `json:"natIpInfoMappings,omitempty"` + + // NatName: Name of the NAT config which the NAT IP belongs to. + NatName string `json:"natName,omitempty"` + + // ForceSendFields is a list of field names (e.g. "NatIpInfoMappings") + // to unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "NatIpInfoMappings") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *NatIpInfo) MarshalJSON() ([]byte, error) { + type NoMethod NatIpInfo + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// NatIpInfoNatIpInfoMapping: Contains information of a NAT IP. +type NatIpInfoNatIpInfoMapping struct { + // Mode: Specifies whether NAT IP is auto or manual. 
+ // + // Possible values: + // "AUTO" + // "MANUAL" + Mode string `json:"mode,omitempty"` + + // NatIp: NAT IP address. For example: 203.0.113.11. + NatIp string `json:"natIp,omitempty"` + + // Usage: Specifies whether NAT IP is currently serving at least one + // endpoint or not. + // + // Possible values: + // "IN_USE" + // "UNUSED" + Usage string `json:"usage,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Mode") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Mode") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *NatIpInfoNatIpInfoMapping) MarshalJSON() ([]byte, error) { + type NoMethod NatIpInfoNatIpInfoMapping + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type NatIpInfoResponse struct { + // Result: [Output Only] A list of NAT IP information. + Result []*NatIpInfo `json:"result,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Result") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Result") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *NatIpInfoResponse) MarshalJSON() ([]byte, error) { + type NoMethod NatIpInfoResponse + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // Network: Represents a VPC Network resource. Networks connect // resources to each other and to the internet. For more information, // read Virtual Private Cloud (VPC) Network. @@ -34433,7 +34596,7 @@ func (s *NotificationEndpointListWarningData) MarshalJSON() ([]byte, error) { // regional or zonal. - For global operations, use the // `globalOperations` resource. - For regional operations, use the // `regionOperations` resource. - For zonal operations, use the -// `zonalOperations` resource. For more information, read Global, +// `zoneOperations` resource. For more information, read Global, // Regional, and Zonal Resources. 
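For the new compute.routers.getNatIpInfo method whose request schema and NatIpInfo types appear above, the generated Go client conventionally surfaces a RoutersService call. A sketch under that assumption; the method and option names follow the generator's usual pattern, and the project, region, router, and NAT names are placeholders.

// Assumed imports: context, fmt, log, and
// compute "google.golang.org/api/compute/v1".
ctx := context.Background()
svc, err := compute.NewService(ctx) // uses Application Default Credentials
if err != nil {
	log.Fatal(err)
}
resp, err := svc.Routers.GetNatIpInfo("my-project", "us-central1", "my-router").
	NatName("my-nat"). // optional filter; omit to cover every NAT on the router
	Context(ctx).
	Do()
if err != nil {
	log.Fatal(err)
}
for _, info := range resp.Result {
	for _, m := range info.NatIpInfoMappings {
		fmt.Println(info.NatName, m.NatIp, m.Mode, m.Usage)
	}
}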
type Operation struct { // ClientOperationId: [Output Only] The value of `requestId` if you @@ -34503,6 +34666,11 @@ type Operation struct { // SelfLink: [Output Only] Server-defined URL for the resource. SelfLink string `json:"selfLink,omitempty"` + // SetCommonInstanceMetadataOperationMetadata: [Output Only] If the + // operation is for projects.setCommonInstanceMetadata, this field will + // contain information on all underlying zonal actions and their state. + SetCommonInstanceMetadataOperationMetadata *SetCommonInstanceMetadataOperationMetadata `json:"setCommonInstanceMetadataOperationMetadata,omitempty"` + // StartTime: [Output Only] The time that this operation was started by // the server. This value is in RFC3339 text format. StartTime string `json:"startTime,omitempty"` @@ -34530,7 +34698,8 @@ type Operation struct { TargetLink string `json:"targetLink,omitempty"` // User: [Output Only] User who requested the operation, for example: - // `user@example.com`. + // `user@example.com` or `alice_smith_identifier + // (global/workforcePools/example-com-us-employees)`. User string `json:"user,omitempty"` // Warnings: [Output Only] If warning messages are generated during @@ -46269,6 +46438,10 @@ type SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfig struct { // "STANDARD" RuleVisibility string `json:"ruleVisibility,omitempty"` + // ThresholdConfigs: Configuration options for layer7 adaptive + // protection for various customizable thresholds. + ThresholdConfigs []*SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfigThresholdConfig `json:"thresholdConfigs,omitempty"` + // ForceSendFields is a list of field names (e.g. "Enable") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any @@ -46292,6 +46465,62 @@ func (s *SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfig) MarshalJ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +type SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfigThresholdConfig struct { + AutoDeployConfidenceThreshold float64 `json:"autoDeployConfidenceThreshold,omitempty"` + + AutoDeployExpirationSec int64 `json:"autoDeployExpirationSec,omitempty"` + + AutoDeployImpactedBaselineThreshold float64 `json:"autoDeployImpactedBaselineThreshold,omitempty"` + + AutoDeployLoadThreshold float64 `json:"autoDeployLoadThreshold,omitempty"` + + // Name: The name must be 1-63 characters long, and comply with RFC1035. + // The name must be unique within the security policy. + Name string `json:"name,omitempty"` + + // ForceSendFields is a list of field names (e.g. + // "AutoDeployConfidenceThreshold") to unconditionally include in API + // requests. By default, fields with empty or default values are omitted + // from API requests. However, any non-pointer, non-interface field + // appearing in ForceSendFields will be sent to the server regardless of + // whether the field is empty or not. This may be used to include empty + // fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. + // "AutoDeployConfidenceThreshold") to include in API requests with the + // JSON null value. By default, fields with empty values are omitted + // from API requests. However, any field with an empty value appearing + // in NullFields will be sent to the server as null. It is an error if a + // field in this list has a non-empty value. This may be used to include + // null fields in Patch requests. 
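+	//
+	// Illustrative sketch (editor's addition, not generated code): to
+	// ask the server to clear a previously-set threshold via a Patch
+	// request, a caller could populate NullFields before marshaling:
+	//
+	//	cfg := &SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfigThresholdConfig{
+	//		Name:       "my-threshold",
+	//		NullFields: []string{"AutoDeployLoadThreshold"},
+	//	}
+	//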
+ NullFields []string `json:"-"` +} + +func (s *SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfigThresholdConfig) MarshalJSON() ([]byte, error) { + type NoMethod SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfigThresholdConfig + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +func (s *SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfigThresholdConfig) UnmarshalJSON(data []byte) error { + type NoMethod SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfigThresholdConfig + var s1 struct { + AutoDeployConfidenceThreshold gensupport.JSONFloat64 `json:"autoDeployConfidenceThreshold"` + AutoDeployImpactedBaselineThreshold gensupport.JSONFloat64 `json:"autoDeployImpactedBaselineThreshold"` + AutoDeployLoadThreshold gensupport.JSONFloat64 `json:"autoDeployLoadThreshold"` + *NoMethod + } + s1.NoMethod = (*NoMethod)(s) + if err := json.Unmarshal(data, &s1); err != nil { + return err + } + s.AutoDeployConfidenceThreshold = float64(s1.AutoDeployConfidenceThreshold) + s.AutoDeployImpactedBaselineThreshold = float64(s1.AutoDeployImpactedBaselineThreshold) + s.AutoDeployLoadThreshold = float64(s1.AutoDeployLoadThreshold) + return nil +} + type SecurityPolicyAdvancedOptionsConfig struct { // JsonCustomConfig: Custom configuration to apply the JSON parsing. // Only applicable when json_parsing is set to STANDARD. @@ -47235,6 +47464,12 @@ func (s *SecurityPolicyRuleRedirectOptions) MarshalJSON() ([]byte, error) { // SecuritySettings: The authentication and authorization settings for a // BackendService. type SecuritySettings struct { + // AwsV4Authentication: The configuration needed to generate a signature + // for access to private storage buckets that support AWS's Signature + // Version 4 for authentication. Allowed only for INTERNET_IP_PORT and + // INTERNET_FQDN_PORT NEG backends. + AwsV4Authentication *AWSV4Signature `json:"awsV4Authentication,omitempty"` + // ClientTlsPolicy: Optional. A URL referring to a // networksecurity.ClientTlsPolicy resource that describes how clients // should authenticate with this service's backends. clientTlsPolicy @@ -47258,15 +47493,15 @@ type SecuritySettings struct { // attached clientTlsPolicy with clientCertificate (mTLS mode). SubjectAltNames []string `json:"subjectAltNames,omitempty"` - // ForceSendFields is a list of field names (e.g. "ClientTlsPolicy") to - // unconditionally include in API requests. By default, fields with + // ForceSendFields is a list of field names (e.g. "AwsV4Authentication") + // to unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be // sent to the server regardless of whether the field is empty or not. // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "ClientTlsPolicy") to + // NullFields is a list of field names (e.g. "AwsV4Authentication") to // include in API requests with the JSON null value. By default, fields // with empty values are omitted from API requests. 
However, any field // with an empty value appearing in NullFields will be sent to the @@ -48188,6 +48423,81 @@ func (s *ServiceAttachmentsScopedListWarningData) MarshalJSON() ([]byte, error) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +type SetCommonInstanceMetadataOperationMetadata struct { + // ClientOperationId: [Output Only] The client operation id. + ClientOperationId string `json:"clientOperationId,omitempty"` + + // PerLocationOperations: [Output Only] Status information per location + // (location name is key). Example key: zones/us-central1-a + PerLocationOperations map[string]SetCommonInstanceMetadataOperationMetadataPerLocationOperationInfo `json:"perLocationOperations,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ClientOperationId") + // to unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ClientOperationId") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *SetCommonInstanceMetadataOperationMetadata) MarshalJSON() ([]byte, error) { + type NoMethod SetCommonInstanceMetadataOperationMetadata + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type SetCommonInstanceMetadataOperationMetadataPerLocationOperationInfo struct { + // Error: [Output Only] If state is `ABANDONED` or `FAILED`, this field + // is populated. + Error *Status `json:"error,omitempty"` + + // State: [Output Only] Status of the action, which can be one of the + // following: `PROPAGATING`, `PROPAGATED`, `ABANDONED`, `FAILED`, or + // `DONE`. + // + // Possible values: + // "ABANDONED" - Operation not tracked in this location e.g. zone is + // marked as DOWN. + // "DONE" - Operation has completed successfully. + // "FAILED" - Operation is in an error state. + // "PROPAGATED" - Operation is confirmed to be in the location. + // "PROPAGATING" - Operation is not yet confirmed to have been created + // in the location. + // "UNSPECIFIED" + State string `json:"state,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Error") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Error") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. 
+ // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *SetCommonInstanceMetadataOperationMetadataPerLocationOperationInfo) MarshalJSON() ([]byte, error) { + type NoMethod SetCommonInstanceMetadataOperationMetadataPerLocationOperationInfo + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // ShareSettings: The share setting for reservations and sole tenancy // node groups. type ShareSettings struct { @@ -50729,6 +51039,50 @@ func (s *StatefulPolicyPreservedStateDiskDevice) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// Status: The `Status` type defines a logical error model that is +// suitable for different programming environments, including REST APIs +// and RPC APIs. It is used by gRPC (https://github.com/grpc). Each +// `Status` message contains three pieces of data: error code, error +// message, and error details. You can find out more about this error +// model and how to work with it in the API Design Guide +// (https://cloud.google.com/apis/design/errors). +type Status struct { + // Code: The status code, which should be an enum value of + // google.rpc.Code. + Code int64 `json:"code,omitempty"` + + // Details: A list of messages that carry the error details. There is a + // common set of message types for APIs to use. + Details []googleapi.RawMessage `json:"details,omitempty"` + + // Message: A developer-facing error message, which should be in + // English. Any user-facing error message should be localized and sent + // in the google.rpc.Status.details field, or localized by the client. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Status) MarshalJSON() ([]byte, error) { + type NoMethod Status + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // Subnetwork: Represents a Subnetwork resource. A subnetwork (also // known as a subnet) is a logical partition of a Virtual Private Cloud // network with one primary IP range and zero or more secondary IP @@ -171967,6 +172321,191 @@ func (c *RoutersGetCall) Do(opts ...googleapi.CallOption) (*Router, error) { } +// method id "compute.routers.getNatIpInfo": + +type RoutersGetNatIpInfoCall struct { + s *Service + project string + region string + router string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// GetNatIpInfo: Retrieves runtime NAT IP information. +// +// - project: Project ID for this request. +// - region: Name of the region for this request. 
+// - router: Name of the Router resource to query for NAT IP
+// information. The name should conform to RFC1035.
+func (r *RoutersService) GetNatIpInfo(project string, region string, router string) *RoutersGetNatIpInfoCall {
+	c := &RoutersGetNatIpInfoCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+	c.project = project
+	c.region = region
+	c.router = router
+	return c
+}
+
+// NatName sets the optional parameter "natName": Name of the NAT
+// service to filter the NAT IP information. If it is omitted, all NATs
+// for this router will be returned. Name should conform to RFC1035.
+func (c *RoutersGetNatIpInfoCall) NatName(natName string) *RoutersGetNatIpInfoCall {
+	c.urlParams_.Set("natName", natName)
+	return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *RoutersGetNatIpInfoCall) Fields(s ...googleapi.Field) *RoutersGetNatIpInfoCall {
+	c.urlParams_.Set("fields", googleapi.CombineFields(s))
+	return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *RoutersGetNatIpInfoCall) IfNoneMatch(entityTag string) *RoutersGetNatIpInfoCall {
+	c.ifNoneMatch_ = entityTag
+	return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *RoutersGetNatIpInfoCall) Context(ctx context.Context) *RoutersGetNatIpInfoCall {
+	c.ctx_ = ctx
+	return c
+}
+
+// Header returns an http.Header that can be modified by the caller to
+// add HTTP headers to the request.
+func (c *RoutersGetNatIpInfoCall) Header() http.Header {
+	if c.header_ == nil {
+		c.header_ = make(http.Header)
+	}
+	return c.header_
+}
+
+func (c *RoutersGetNatIpInfoCall) doRequest(alt string) (*http.Response, error) {
+	reqHeaders := make(http.Header)
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version)
+	for k, v := range c.header_ {
+		reqHeaders[k] = v
+	}
+	reqHeaders.Set("User-Agent", c.s.userAgent())
+	if c.ifNoneMatch_ != "" {
+		reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
+	}
+	var body io.Reader = nil
+	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
+	urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/routers/{router}/getNatIpInfo")
+	urls += "?" + c.urlParams_.Encode()
+	req, err := http.NewRequest("GET", urls, body)
+	if err != nil {
+		return nil, err
+	}
+	req.Header = reqHeaders
+	googleapi.Expand(req.URL, map[string]string{
+		"project": c.project,
+		"region":  c.region,
+		"router":  c.router,
+	})
+	return gensupport.SendRequest(c.ctx_, c.s.client, req)
+}
+
+// Do executes the "compute.routers.getNatIpInfo" call.
+// Exactly one of *NatIpInfoResponse or error will be non-nil. Any
+// non-2xx status code is an error. Response headers are in either
+// *NatIpInfoResponse.ServerResponse.Header or (if a response was
+// returned at all) in error.(*googleapi.Error).Header. Use
+// googleapi.IsNotModified to check whether the returned error was
+// because http.StatusNotModified was returned.
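+//
+// Minimal usage sketch (editor's addition; computeService, ctx and the
+// project/region/router/NAT names are placeholders, not part of this
+// patch):
+//
+//	info, err := computeService.Routers.GetNatIpInfo("my-project", "us-central1", "my-router").
+//		NatName("my-nat").Context(ctx).Do()
+//	if err != nil {
+//		// handle the error
+//	}
+//	for _, natInfo := range info.Result {
+//		_ = natInfo.NatIpInfoMappings // e.g. look for IN_USE addresses
+//	}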
+func (c *RoutersGetNatIpInfoCall) Do(opts ...googleapi.CallOption) (*NatIpInfoResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &NatIpInfoResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves runtime NAT IP information.", + // "flatPath": "projects/{project}/regions/{region}/routers/{router}/getNatIpInfo", + // "httpMethod": "GET", + // "id": "compute.routers.getNatIpInfo", + // "parameterOrder": [ + // "project", + // "region", + // "router" + // ], + // "parameters": { + // "natName": { + // "description": "Name of the nat service to filter the NAT IP information. If it is omitted, all nats for this router will be returned. Name should conform to RFC1035.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "router": { + // "description": "Name of the Router resource to query for Nat IP information. The name should conform to RFC1035.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/regions/{region}/routers/{router}/getNatIpInfo", + // "response": { + // "$ref": "NatIpInfoResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + // method id "compute.routers.getNatMappingInfo": type RoutersGetNatMappingInfoCall struct { diff --git a/vendor/google.golang.org/api/internal/cba.go b/vendor/google.golang.org/api/internal/cba.go index cecbb9ba1..6923d3a71 100644 --- a/vendor/google.golang.org/api/internal/cba.go +++ b/vendor/google.golang.org/api/internal/cba.go @@ -91,16 +91,10 @@ func getTransportConfig(settings *DialSettings) (*transportConfig, error) { s2aMTLSEndpoint: "", } - // Check the env to determine whether to use S2A. - if !isGoogleS2AEnabled() { + if !shouldUseS2A(clientCertSource, settings) { return &defaultTransportConfig, nil } - // If client cert is found, use that over S2A. - // If MTLS is not enabled for the endpoint, skip S2A. - if clientCertSource != nil || !mtlsEndpointEnabledForS2A() { - return &defaultTransportConfig, nil - } s2aMTLSEndpoint := settings.DefaultMTLSEndpoint // If there is endpoint override, honor it. 
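 	// (Editor's note: reaching this line means shouldUseS2A returned
 	// true, i.e. no client certificate was found, the
 	// EXPERIMENTAL_GOOGLE_API_USE_S2A environment variable is "true", a
 	// DefaultMTLSEndpoint is configured and MTLS-enabled for S2A, and no
 	// custom HTTP client was supplied; the branch below only selects
 	// which S2A endpoint to use.)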
if settings.Endpoint != "" { @@ -118,10 +112,6 @@ func getTransportConfig(settings *DialSettings) (*transportConfig, error) { }, nil } -func isGoogleS2AEnabled() bool { - return strings.ToLower(os.Getenv(googleAPIUseS2AEnv)) == "true" -} - // getClientCertificateSource returns a default client certificate source, if // not provided by the user. // @@ -275,8 +265,36 @@ func GetHTTPTransportConfigAndEndpoint(settings *DialSettings) (cert.Source, fun return nil, dialTLSContextFunc, config.s2aMTLSEndpoint, nil } +func shouldUseS2A(clientCertSource cert.Source, settings *DialSettings) bool { + // If client cert is found, use that over S2A. + if clientCertSource != nil { + return false + } + // If EXPERIMENTAL_GOOGLE_API_USE_S2A is not set to true, skip S2A. + if !isGoogleS2AEnabled() { + return false + } + // If DefaultMTLSEndpoint is not set, skip S2A. + if settings.DefaultMTLSEndpoint == "" { + return false + } + // If MTLS is not enabled for this endpoint, skip S2A. + if !mtlsEndpointEnabledForS2A() { + return false + } + // If custom HTTP client is provided, skip S2A. + if settings.HTTPClient != nil { + return false + } + return true +} + // mtlsEndpointEnabledForS2A checks if the endpoint is indeed MTLS-enabled, so that we can use S2A for MTLS connection. var mtlsEndpointEnabledForS2A = func() bool { // TODO(xmenxk): determine this via discovery config. return true } + +func isGoogleS2AEnabled() bool { + return strings.ToLower(os.Getenv(googleAPIUseS2AEnv)) == "true" +} diff --git a/vendor/google.golang.org/api/internal/version.go b/vendor/google.golang.org/api/internal/version.go index 6fdda3b79..1a2b858c8 100644 --- a/vendor/google.golang.org/api/internal/version.go +++ b/vendor/google.golang.org/api/internal/version.go @@ -5,4 +5,4 @@ package internal // Version is the current tagged release of the library. 
-const Version = "0.134.0" +const Version = "0.136.0" diff --git a/vendor/modules.txt b/vendor/modules.txt index 22ff6cb4a..ca0b81cd4 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1,4 +1,4 @@ -# cloud.google.com/go/compute v1.20.1 +# cloud.google.com/go/compute v1.23.0 ## explicit; go 1.19 cloud.google.com/go/compute/internal # cloud.google.com/go/compute/metadata v0.2.4-0.20230617002413-005d2dfb6b68 @@ -19,7 +19,7 @@ github.com/Azure/go-autorest ## explicit; go 1.15 github.com/Azure/go-autorest/autorest github.com/Azure/go-autorest/autorest/azure -# github.com/Azure/go-autorest/autorest/adal v0.9.22 +# github.com/Azure/go-autorest/autorest/adal v0.9.23 ## explicit; go 1.15 github.com/Azure/go-autorest/autorest/adal # github.com/Azure/go-autorest/autorest/date v0.3.0 @@ -40,10 +40,13 @@ github.com/Azure/go-autorest/tracing # github.com/DataDog/agent-payload/v5 v5.0.89 ## explicit; go 1.18 github.com/DataDog/agent-payload/v5/gogen -# github.com/DataDog/datadog-agent/pkg/obfuscate v0.47.0-rc.3 +# github.com/DataDog/datadog-agent/pkg/obfuscate v0.48.0-beta.1 ## explicit; go 1.12 github.com/DataDog/datadog-agent/pkg/obfuscate -# github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.47.0-rc.3 +# github.com/DataDog/datadog-agent/pkg/proto v0.48.0-beta.1 +## explicit; go 1.19 +github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace +# github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.48.0-beta.1 ## explicit; go 1.18 github.com/DataDog/datadog-agent/pkg/remoteconfig/state github.com/DataDog/datadog-agent/pkg/remoteconfig/state/products/apmsampling @@ -68,16 +71,16 @@ github.com/DataDog/datadog-agent/pkg/trace/telemetry github.com/DataDog/datadog-agent/pkg/trace/traceutil github.com/DataDog/datadog-agent/pkg/trace/watchdog github.com/DataDog/datadog-agent/pkg/trace/writer -# github.com/DataDog/datadog-agent/pkg/util/cgroups v0.47.0-rc.3 +# github.com/DataDog/datadog-agent/pkg/util/cgroups v0.48.0-beta.1 ## explicit; go 1.18 github.com/DataDog/datadog-agent/pkg/util/cgroups -# github.com/DataDog/datadog-agent/pkg/util/log v0.47.0-rc.3 +# github.com/DataDog/datadog-agent/pkg/util/log v0.48.0-beta.1 ## explicit; go 1.18 github.com/DataDog/datadog-agent/pkg/util/log -# github.com/DataDog/datadog-agent/pkg/util/pointer v0.47.0-rc.3 +# github.com/DataDog/datadog-agent/pkg/util/pointer v0.48.0-beta.1 ## explicit; go 1.18 github.com/DataDog/datadog-agent/pkg/util/pointer -# github.com/DataDog/datadog-agent/pkg/util/scrubber v0.47.0-rc.3 +# github.com/DataDog/datadog-agent/pkg/util/scrubber v0.48.0-beta.1 ## explicit; go 1.18 github.com/DataDog/datadog-agent/pkg/util/scrubber # github.com/DataDog/datadog-api-client-go/v2 v2.14.0 @@ -89,8 +92,8 @@ github.com/DataDog/datadog-api-client-go/v2/api/datadogV2 # github.com/DataDog/datadog-go/v5 v5.1.1 ## explicit; go 1.13 github.com/DataDog/datadog-go/v5/statsd -# github.com/DataDog/go-tuf v0.3.0--fix-localmeta-fork -## explicit; go 1.16 +# github.com/DataDog/go-tuf v1.0.1-0.5.2 +## explicit; go 1.18 github.com/DataDog/go-tuf/client github.com/DataDog/go-tuf/data github.com/DataDog/go-tuf/internal/roles @@ -109,29 +112,29 @@ github.com/DataDog/gohai/platform github.com/DataDog/gohai/processes github.com/DataDog/gohai/processes/gops github.com/DataDog/gohai/utils -# github.com/DataDog/opentelemetry-mapping-go/pkg/inframetadata v0.5.2 +# github.com/DataDog/opentelemetry-mapping-go/pkg/inframetadata v0.7.0 ## explicit; go 1.19 github.com/DataDog/opentelemetry-mapping-go/pkg/inframetadata 
github.com/DataDog/opentelemetry-mapping-go/pkg/inframetadata/gohai github.com/DataDog/opentelemetry-mapping-go/pkg/inframetadata/internal/hostmap github.com/DataDog/opentelemetry-mapping-go/pkg/inframetadata/payload -# github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.5.2 +# github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.7.0 ## explicit; go 1.19 github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes/azure github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes/ec2 github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes/gcp github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes/source -# github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/logs v0.5.2 +# github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/logs v0.7.0 ## explicit; go 1.19 github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/logs -# github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics v0.5.2 +# github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics v0.7.0 ## explicit; go 1.19 github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics/internal/instrumentationlibrary github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics/internal/instrumentationscope github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics/internal/utils -# github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.5.2 +# github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.7.0 ## explicit; go 1.19 github.com/DataDog/opentelemetry-mapping-go/pkg/quantile github.com/DataDog/opentelemetry-mapping-go/pkg/quantile/summary @@ -149,6 +152,9 @@ github.com/DataDog/zstd # github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.18.0 ## explicit; go 1.20 github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp +# github.com/IBM/sarama v1.40.1 +## explicit; go 1.17 +github.com/IBM/sarama # github.com/Microsoft/go-winio v0.6.1 ## explicit; go 1.17 github.com/Microsoft/go-winio @@ -156,9 +162,6 @@ github.com/Microsoft/go-winio/internal/fs github.com/Microsoft/go-winio/internal/socket github.com/Microsoft/go-winio/internal/stringbuffer github.com/Microsoft/go-winio/pkg/guid -# github.com/Shopify/sarama v1.38.1 -## explicit; go 1.17 -github.com/Shopify/sarama # github.com/Showmax/go-fqdn v1.0.0 ## explicit; go 1.15 github.com/Showmax/go-fqdn @@ -169,7 +172,7 @@ github.com/alecthomas/participle/v2/lexer # github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 ## explicit; go 1.15 github.com/alecthomas/units -# github.com/antonmedv/expr v1.12.5 +# github.com/antonmedv/expr v1.13.0 ## explicit; go 1.13 github.com/antonmedv/expr github.com/antonmedv/expr/ast @@ -189,7 +192,7 @@ github.com/apache/thrift/lib/go/thrift # github.com/armon/go-metrics v0.4.1 ## explicit; go 1.12 github.com/armon/go-metrics -# github.com/aws/aws-sdk-go v1.44.320 +# github.com/aws/aws-sdk-go v1.44.323 ## explicit; go 1.11 github.com/aws/aws-sdk-go/aws github.com/aws/aws-sdk-go/aws/arn @@ -414,7 +417,7 @@ github.com/davecgh/go-spew/spew # github.com/dennwc/varint v1.0.0 ## explicit; go 1.12 github.com/dennwc/varint -# github.com/digitalocean/godo v1.97.0 +# github.com/digitalocean/godo v1.98.0 ## explicit; go 1.18 github.com/digitalocean/godo github.com/digitalocean/godo/metrics @@ -681,7 +684,7 @@ github.com/googleapis/gax-go/v2/apierror github.com/googleapis/gax-go/v2/apierror/internal/proto github.com/googleapis/gax-go/v2/callctx 
github.com/googleapis/gax-go/v2/internal -# github.com/gophercloud/gophercloud v1.2.0 +# github.com/gophercloud/gophercloud v1.3.0 ## explicit; go 1.14 github.com/gophercloud/gophercloud github.com/gophercloud/gophercloud/openstack @@ -705,12 +708,12 @@ github.com/gorilla/websocket ## explicit; go 1.17 github.com/grafana/regexp github.com/grafana/regexp/syntax -# github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 +# github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.2 ## explicit; go 1.17 github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule github.com/grpc-ecosystem/grpc-gateway/v2/runtime github.com/grpc-ecosystem/grpc-gateway/v2/utilities -# github.com/hashicorp/consul/api v1.23.0 +# github.com/hashicorp/consul/api v1.24.0 ## explicit; go 1.19 github.com/hashicorp/consul/api # github.com/hashicorp/cronexpr v1.1.1 @@ -740,17 +743,17 @@ github.com/hashicorp/go-rootcerts # github.com/hashicorp/go-uuid v1.0.3 ## explicit github.com/hashicorp/go-uuid -# github.com/hashicorp/golang-lru v0.6.0 +# github.com/hashicorp/golang-lru v1.0.2 ## explicit; go 1.12 github.com/hashicorp/golang-lru/simplelru -# github.com/hashicorp/nomad/api v0.0.0-20230308192510-48e7d70fcd4b +# github.com/hashicorp/nomad/api v0.0.0-20230418003350-3067191c5197 ## explicit; go 1.19 github.com/hashicorp/nomad/api github.com/hashicorp/nomad/api/contexts # github.com/hashicorp/serf v0.10.1 ## explicit; go 1.12 github.com/hashicorp/serf/coordinate -# github.com/hetznercloud/hcloud-go v1.41.0 +# github.com/hetznercloud/hcloud-go v1.42.0 ## explicit; go 1.19 github.com/hetznercloud/hcloud-go/hcloud github.com/hetznercloud/hcloud-go/hcloud/internal/instrumentation @@ -764,8 +767,8 @@ github.com/imdario/mergo # github.com/inconshreveable/mousetrap v1.1.0 ## explicit; go 1.18 github.com/inconshreveable/mousetrap -# github.com/ionos-cloud/sdk-go/v6 v6.1.4 -## explicit; go 1.17 +# github.com/ionos-cloud/sdk-go/v6 v6.1.6 +## explicit; go 1.19 github.com/ionos-cloud/sdk-go/v6 # github.com/jaegertracing/jaeger v1.41.0 ## explicit; go 1.19 @@ -885,7 +888,7 @@ github.com/lightstep/go-expohisto/mapping/exponent github.com/lightstep/go-expohisto/mapping/internal github.com/lightstep/go-expohisto/mapping/logarithm github.com/lightstep/go-expohisto/structure -# github.com/linode/linodego v1.14.1 +# github.com/linode/linodego v1.16.1 ## explicit; go 1.18 github.com/linode/linodego github.com/linode/linodego/internal/duration @@ -908,8 +911,8 @@ github.com/mattn/go-isatty # github.com/matttproud/golang_protobuf_extensions v1.0.4 ## explicit; go 1.9 github.com/matttproud/golang_protobuf_extensions/pbutil -# github.com/miekg/dns v1.1.51 -## explicit; go 1.14 +# github.com/miekg/dns v1.1.53 +## explicit; go 1.19 github.com/miekg/dns # github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible ## explicit @@ -953,20 +956,20 @@ github.com/munnerz/goautoneg # github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f ## explicit github.com/mwitkow/go-conntrack -# github.com/open-telemetry/opentelemetry-collector-contrib/confmap/provider/s3provider v0.82.0 -## explicit; go 1.19 +# github.com/open-telemetry/opentelemetry-collector-contrib/confmap/provider/s3provider v0.83.0 +## explicit; go 1.20 github.com/open-telemetry/opentelemetry-collector-contrib/confmap/provider/s3provider -# github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsemfexporter v0.82.0 -## explicit; go 1.19 +# github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsemfexporter v0.83.0 +## explicit; go 1.20 
github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsemfexporter github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsemfexporter/internal/metadata -# github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsxrayexporter v0.82.0 -## explicit; go 1.19 +# github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsxrayexporter v0.83.0 +## explicit; go 1.20 github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsxrayexporter github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsxrayexporter/internal/metadata github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsxrayexporter/internal/translator -# github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter v0.82.0 -## explicit; go 1.19 +# github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter v0.83.0 +## explicit; go 1.20 github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter/internal/clientutil github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter/internal/hostmetadata @@ -984,43 +987,43 @@ github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexport github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter/internal/metrics github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter/internal/metrics/sketches github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter/internal/scrub -# github.com/open-telemetry/opentelemetry-collector-contrib/exporter/dynatraceexporter v0.82.0 -## explicit; go 1.19 +# github.com/open-telemetry/opentelemetry-collector-contrib/exporter/dynatraceexporter v0.83.0 +## explicit; go 1.20 github.com/open-telemetry/opentelemetry-collector-contrib/exporter/dynatraceexporter github.com/open-telemetry/opentelemetry-collector-contrib/exporter/dynatraceexporter/config github.com/open-telemetry/opentelemetry-collector-contrib/exporter/dynatraceexporter/internal/metadata github.com/open-telemetry/opentelemetry-collector-contrib/exporter/dynatraceexporter/internal/serialization -# github.com/open-telemetry/opentelemetry-collector-contrib/exporter/fileexporter v0.82.0 -## explicit; go 1.19 +# github.com/open-telemetry/opentelemetry-collector-contrib/exporter/fileexporter v0.83.0 +## explicit; go 1.20 github.com/open-telemetry/opentelemetry-collector-contrib/exporter/fileexporter github.com/open-telemetry/opentelemetry-collector-contrib/exporter/fileexporter/internal/metadata -# github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter v0.82.0 -## explicit; go 1.19 +# github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter v0.83.0 +## explicit; go 1.20 github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/internal/awsmsk github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/internal/metadata -# github.com/open-telemetry/opentelemetry-collector-contrib/exporter/loadbalancingexporter v0.82.0 -## explicit; go 1.19 +# github.com/open-telemetry/opentelemetry-collector-contrib/exporter/loadbalancingexporter v0.83.0 +## explicit; go 1.20 github.com/open-telemetry/opentelemetry-collector-contrib/exporter/loadbalancingexporter 
github.com/open-telemetry/opentelemetry-collector-contrib/exporter/loadbalancingexporter/internal/metadata -# github.com/open-telemetry/opentelemetry-collector-contrib/exporter/logzioexporter v0.82.0 -## explicit; go 1.19 +# github.com/open-telemetry/opentelemetry-collector-contrib/exporter/logzioexporter v0.83.0 +## explicit; go 1.20 github.com/open-telemetry/opentelemetry-collector-contrib/exporter/logzioexporter github.com/open-telemetry/opentelemetry-collector-contrib/exporter/logzioexporter/internal/metadata -# github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusexporter v0.82.0 -## explicit; go 1.19 +# github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusexporter v0.83.0 +## explicit; go 1.20 github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusexporter github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusexporter/internal/metadata -# github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter v0.82.0 -## explicit; go 1.19 +# github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter v0.83.0 +## explicit; go 1.20 github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter/internal/metadata -# github.com/open-telemetry/opentelemetry-collector-contrib/exporter/sapmexporter v0.82.0 -## explicit; go 1.19 +# github.com/open-telemetry/opentelemetry-collector-contrib/exporter/sapmexporter v0.83.0 +## explicit; go 1.20 github.com/open-telemetry/opentelemetry-collector-contrib/exporter/sapmexporter github.com/open-telemetry/opentelemetry-collector-contrib/exporter/sapmexporter/internal/metadata -# github.com/open-telemetry/opentelemetry-collector-contrib/exporter/signalfxexporter v0.82.0 -## explicit; go 1.19 +# github.com/open-telemetry/opentelemetry-collector-contrib/exporter/signalfxexporter v0.83.0 +## explicit; go 1.20 github.com/open-telemetry/opentelemetry-collector-contrib/exporter/signalfxexporter github.com/open-telemetry/opentelemetry-collector-contrib/exporter/signalfxexporter/internal/correlation github.com/open-telemetry/opentelemetry-collector-contrib/exporter/signalfxexporter/internal/dimensions @@ -1028,62 +1031,62 @@ github.com/open-telemetry/opentelemetry-collector-contrib/exporter/signalfxexpor github.com/open-telemetry/opentelemetry-collector-contrib/exporter/signalfxexporter/internal/metadata github.com/open-telemetry/opentelemetry-collector-contrib/exporter/signalfxexporter/internal/translation github.com/open-telemetry/opentelemetry-collector-contrib/exporter/signalfxexporter/internal/translation/dpfilters -# github.com/open-telemetry/opentelemetry-collector-contrib/extension/awsproxy v0.82.0 -## explicit; go 1.19 +# github.com/open-telemetry/opentelemetry-collector-contrib/extension/awsproxy v0.83.0 +## explicit; go 1.20 github.com/open-telemetry/opentelemetry-collector-contrib/extension/awsproxy github.com/open-telemetry/opentelemetry-collector-contrib/extension/awsproxy/internal/metadata -# github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension v0.82.0 -## explicit; go 1.19 +# github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension v0.83.0 +## explicit; go 1.20 github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension 
github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension/internal/metadata -# github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/ecsobserver v0.82.0 -## explicit; go 1.19 +# github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/ecsobserver v0.83.0 +## explicit; go 1.20 github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/ecsobserver github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/ecsobserver/internal/errctx github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/ecsobserver/internal/metadata -# github.com/open-telemetry/opentelemetry-collector-contrib/extension/pprofextension v0.82.0 -## explicit; go 1.19 +# github.com/open-telemetry/opentelemetry-collector-contrib/extension/pprofextension v0.83.0 +## explicit; go 1.20 github.com/open-telemetry/opentelemetry-collector-contrib/extension/pprofextension github.com/open-telemetry/opentelemetry-collector-contrib/extension/pprofextension/internal/metadata -# github.com/open-telemetry/opentelemetry-collector-contrib/extension/sigv4authextension v0.82.0 -## explicit; go 1.19 +# github.com/open-telemetry/opentelemetry-collector-contrib/extension/sigv4authextension v0.83.0 +## explicit; go 1.20 github.com/open-telemetry/opentelemetry-collector-contrib/extension/sigv4authextension github.com/open-telemetry/opentelemetry-collector-contrib/extension/sigv4authextension/internal/metadata -# github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/awsutil v0.82.0 -## explicit; go 1.19 +# github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/awsutil v0.83.0 +## explicit; go 1.20 github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/awsutil -# github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/containerinsight v0.82.0 -## explicit; go 1.19 +# github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/containerinsight v0.83.0 +## explicit; go 1.20 github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/containerinsight -# github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/cwlogs v0.82.0 -## explicit; go 1.19 +# github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/cwlogs v0.83.0 +## explicit; go 1.20 github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/cwlogs github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/cwlogs/handler -# github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/ecsutil v0.82.0 -## explicit; go 1.19 +# github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/ecsutil v0.83.0 +## explicit; go 1.20 github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/ecsutil github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/ecsutil/endpoints -# github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/k8s v0.82.0 -## explicit; go 1.19 +# github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/k8s v0.83.0 +## explicit; go 1.20 github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/k8s/k8sclient github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/k8s/k8sutil -# github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/metrics v0.82.0 -## explicit; go 1.19 +# github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/metrics v0.83.0 +## explicit; go 1.20 
github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/metrics -# github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/proxy v0.82.0 -## explicit; go 1.19 +# github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/proxy v0.83.0 +## explicit; go 1.20 github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/proxy -# github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/xray v0.82.0 -## explicit; go 1.19 +# github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/xray v0.83.0 +## explicit; go 1.20 github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/xray github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/xray/telemetry -# github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.82.0 -## explicit; go 1.19 +# github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.83.0 +## explicit; go 1.20 github.com/open-telemetry/opentelemetry-collector-contrib/internal/common/docker github.com/open-telemetry/opentelemetry-collector-contrib/internal/common/sanitize github.com/open-telemetry/opentelemetry-collector-contrib/internal/common/ttlmap -# github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.82.0 -## explicit; go 1.19 +# github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.83.0 +## explicit; go 1.20 github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/attraction github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/idutils github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/occonventions @@ -1092,8 +1095,8 @@ github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/ github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/timeutils/internal/ctimefmt github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/tracetranslator github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/traceutil -# github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter v0.82.0 -## explicit; go 1.19 +# github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter v0.83.0 +## explicit; go 1.20 github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/expr github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterconfig github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterexpr @@ -1105,14 +1108,14 @@ github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filter github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterset/regexp github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterset/strict github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterspan -# github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8sconfig v0.82.0 -## explicit; go 1.19 +# github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8sconfig v0.83.0 +## explicit; go 1.20 github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8sconfig -# github.com/open-telemetry/opentelemetry-collector-contrib/internal/kubelet v0.82.0 -## explicit; go 1.19 +# github.com/open-telemetry/opentelemetry-collector-contrib/internal/kubelet v0.83.0 +## explicit; go 1.20 github.com/open-telemetry/opentelemetry-collector-contrib/internal/kubelet -# 
github.com/open-telemetry/opentelemetry-collector-contrib/internal/metadataproviders v0.82.0 -## explicit; go 1.19 +# github.com/open-telemetry/opentelemetry-collector-contrib/internal/metadataproviders v0.83.0 +## explicit; go 1.20 github.com/open-telemetry/opentelemetry-collector-contrib/internal/metadataproviders/aws/ec2 github.com/open-telemetry/opentelemetry-collector-contrib/internal/metadataproviders/azure github.com/open-telemetry/opentelemetry-collector-contrib/internal/metadataproviders/consul @@ -1120,23 +1123,23 @@ github.com/open-telemetry/opentelemetry-collector-contrib/internal/metadataprovi github.com/open-telemetry/opentelemetry-collector-contrib/internal/metadataproviders/internal github.com/open-telemetry/opentelemetry-collector-contrib/internal/metadataproviders/openshift github.com/open-telemetry/opentelemetry-collector-contrib/internal/metadataproviders/system -# github.com/open-telemetry/opentelemetry-collector-contrib/internal/sharedcomponent v0.82.0 -## explicit; go 1.19 +# github.com/open-telemetry/opentelemetry-collector-contrib/internal/sharedcomponent v0.83.0 +## explicit; go 1.20 github.com/open-telemetry/opentelemetry-collector-contrib/internal/sharedcomponent -# github.com/open-telemetry/opentelemetry-collector-contrib/internal/splunk v0.82.0 -## explicit; go 1.19 +# github.com/open-telemetry/opentelemetry-collector-contrib/internal/splunk v0.83.0 +## explicit; go 1.20 github.com/open-telemetry/opentelemetry-collector-contrib/internal/splunk -# github.com/open-telemetry/opentelemetry-collector-contrib/pkg/batchperresourceattr v0.82.0 -## explicit; go 1.19 +# github.com/open-telemetry/opentelemetry-collector-contrib/pkg/batchperresourceattr v0.83.0 +## explicit; go 1.20 github.com/open-telemetry/opentelemetry-collector-contrib/pkg/batchperresourceattr -# github.com/open-telemetry/opentelemetry-collector-contrib/pkg/batchpersignal v0.82.0 -## explicit; go 1.19 +# github.com/open-telemetry/opentelemetry-collector-contrib/pkg/batchpersignal v0.83.0 +## explicit; go 1.20 github.com/open-telemetry/opentelemetry-collector-contrib/pkg/batchpersignal -# github.com/open-telemetry/opentelemetry-collector-contrib/pkg/experimentalmetricmetadata v0.82.0 -## explicit; go 1.19 +# github.com/open-telemetry/opentelemetry-collector-contrib/pkg/experimentalmetricmetadata v0.83.0 +## explicit; go 1.20 github.com/open-telemetry/opentelemetry-collector-contrib/pkg/experimentalmetricmetadata -# github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl v0.82.0 -## explicit; go 1.19 +# github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl v0.83.0 +## explicit; go 1.20 github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottldatapoint @@ -1147,70 +1150,70 @@ github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottl github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlspanevent github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/internal/ottlcommon github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs -# github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.82.0 -## explicit; go 1.19 +# github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.83.0 +## explicit; go 1.20 github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil -# 
github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourcetotelemetry v0.82.0 -## explicit; go 1.19 +# github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourcetotelemetry v0.83.0 +## explicit; go 1.20 github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourcetotelemetry -# github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.82.0 -## explicit; go 1.19 +# github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.83.0 +## explicit; go 1.20 github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger -# github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus v0.82.0 -## explicit; go 1.19 +# github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus v0.83.0 +## explicit; go 1.20 github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus -# github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheusremotewrite v0.82.0 -## explicit; go 1.19 +# github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheusremotewrite v0.83.0 +## explicit; go 1.20 github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheusremotewrite -# github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/signalfx v0.82.0 -## explicit; go 1.19 +# github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/signalfx v0.83.0 +## explicit; go 1.20 github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/signalfx -# github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin v0.82.0 -## explicit; go 1.19 +# github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin v0.83.0 +## explicit; go 1.20 github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin/internal/zipkin github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin/zipkinv1 github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin/zipkinv2 -# github.com/open-telemetry/opentelemetry-collector-contrib/processor/attributesprocessor v0.82.0 -## explicit; go 1.19 +# github.com/open-telemetry/opentelemetry-collector-contrib/processor/attributesprocessor v0.83.0 +## explicit; go 1.20 github.com/open-telemetry/opentelemetry-collector-contrib/processor/attributesprocessor github.com/open-telemetry/opentelemetry-collector-contrib/processor/attributesprocessor/internal/metadata -# github.com/open-telemetry/opentelemetry-collector-contrib/processor/cumulativetodeltaprocessor v0.82.0 -## explicit; go 1.19 +# github.com/open-telemetry/opentelemetry-collector-contrib/processor/cumulativetodeltaprocessor v0.83.0 +## explicit; go 1.20 github.com/open-telemetry/opentelemetry-collector-contrib/processor/cumulativetodeltaprocessor github.com/open-telemetry/opentelemetry-collector-contrib/processor/cumulativetodeltaprocessor/internal/metadata github.com/open-telemetry/opentelemetry-collector-contrib/processor/cumulativetodeltaprocessor/internal/tracking -# github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatorateprocessor v0.82.0 -## explicit; go 1.19 +# github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatorateprocessor v0.83.0 +## explicit; go 1.20 github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatorateprocessor github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatorateprocessor/internal/metadata -# 
github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor v0.82.0 -## explicit; go 1.19 +# github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor v0.83.0 +## explicit; go 1.20 github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/internal/metadata -# github.com/open-telemetry/opentelemetry-collector-contrib/processor/groupbytraceprocessor v0.82.0 -## explicit; go 1.19 +# github.com/open-telemetry/opentelemetry-collector-contrib/processor/groupbytraceprocessor v0.83.0 +## explicit; go 1.20 github.com/open-telemetry/opentelemetry-collector-contrib/processor/groupbytraceprocessor github.com/open-telemetry/opentelemetry-collector-contrib/processor/groupbytraceprocessor/internal/metadata -# github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sattributesprocessor v0.82.0 -## explicit; go 1.19 +# github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sattributesprocessor v0.83.0 +## explicit; go 1.20 github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sattributesprocessor github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sattributesprocessor/internal/kube github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sattributesprocessor/internal/metadata github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sattributesprocessor/internal/observability -# github.com/open-telemetry/opentelemetry-collector-contrib/processor/metricsgenerationprocessor v0.82.0 -## explicit; go 1.19 +# github.com/open-telemetry/opentelemetry-collector-contrib/processor/metricsgenerationprocessor v0.83.0 +## explicit; go 1.20 github.com/open-telemetry/opentelemetry-collector-contrib/processor/metricsgenerationprocessor github.com/open-telemetry/opentelemetry-collector-contrib/processor/metricsgenerationprocessor/internal/metadata -# github.com/open-telemetry/opentelemetry-collector-contrib/processor/metricstransformprocessor v0.82.0 -## explicit; go 1.19 +# github.com/open-telemetry/opentelemetry-collector-contrib/processor/metricstransformprocessor v0.83.0 +## explicit; go 1.20 github.com/open-telemetry/opentelemetry-collector-contrib/processor/metricstransformprocessor github.com/open-telemetry/opentelemetry-collector-contrib/processor/metricstransformprocessor/internal/metadata -# github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor v0.82.0 -## explicit; go 1.19 +# github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor v0.83.0 +## explicit; go 1.20 github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor/internal/metadata -# github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor v0.82.0 -## explicit; go 1.19 +# github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor v0.83.0 +## explicit; go 1.20 github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor/internal github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor/internal/aws/ec2 @@ -1241,22 +1244,22 @@ 
github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedete github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor/internal/openshift/internal/metadata github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor/internal/system github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor/internal/system/internal/metadata -# github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourceprocessor v0.82.0 -## explicit; go 1.19 +# github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourceprocessor v0.83.0 +## explicit; go 1.20 github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourceprocessor github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourceprocessor/internal/metadata -# github.com/open-telemetry/opentelemetry-collector-contrib/processor/spanprocessor v0.82.0 -## explicit; go 1.19 +# github.com/open-telemetry/opentelemetry-collector-contrib/processor/spanprocessor v0.83.0 +## explicit; go 1.20 github.com/open-telemetry/opentelemetry-collector-contrib/processor/spanprocessor github.com/open-telemetry/opentelemetry-collector-contrib/processor/spanprocessor/internal/metadata -# github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor v0.82.0 -## explicit; go 1.19 +# github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor v0.83.0 +## explicit; go 1.20 github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor/internal/idbatcher github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor/internal/metadata github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor/internal/sampling -# github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver v0.82.0 -## explicit; go 1.19 +# github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver v0.83.0 +## explicit; go 1.20 github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver/internal/cadvisor github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver/internal/cadvisor/extractors @@ -1266,13 +1269,13 @@ github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awscontaineri github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver/internal/metadata github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver/internal/stores github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver/internal/stores/kubeletutil -# github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awsecscontainermetricsreceiver v0.82.0 -## explicit; go 1.19 +# github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awsecscontainermetricsreceiver v0.83.0 +## explicit; go 1.20 github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awsecscontainermetricsreceiver github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awsecscontainermetricsreceiver/internal/awsecscontainermetrics 
github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awsecscontainermetricsreceiver/internal/metadata -# github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awsxrayreceiver v0.82.0 -## explicit; go 1.19 +# github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awsxrayreceiver v0.83.0 +## explicit; go 1.20 github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awsxrayreceiver github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awsxrayreceiver/internal/errors github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awsxrayreceiver/internal/metadata @@ -1280,34 +1283,34 @@ github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awsxrayreceiv github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awsxrayreceiver/internal/tracesegment github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awsxrayreceiver/internal/translator github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awsxrayreceiver/internal/udppoller -# github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver v0.82.0 -## explicit; go 1.19 +# github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver v0.83.0 +## explicit; go 1.20 github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver/internal/metadata -# github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver v0.82.0 -## explicit; go 1.19 +# github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver v0.83.0 +## explicit; go 1.20 github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver/internal/metadata -# github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.82.0 -## explicit; go 1.19 +# github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.83.0 +## explicit; go 1.20 github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver/internal github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver/internal/metadata -# github.com/open-telemetry/opentelemetry-collector-contrib/receiver/statsdreceiver v0.82.0 -## explicit; go 1.19 +# github.com/open-telemetry/opentelemetry-collector-contrib/receiver/statsdreceiver v0.83.0 +## explicit; go 1.20 github.com/open-telemetry/opentelemetry-collector-contrib/receiver/statsdreceiver github.com/open-telemetry/opentelemetry-collector-contrib/receiver/statsdreceiver/internal/metadata -github.com/open-telemetry/opentelemetry-collector-contrib/receiver/statsdreceiver/protocol -github.com/open-telemetry/opentelemetry-collector-contrib/receiver/statsdreceiver/transport -# github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver v0.82.0 -## explicit; go 1.19 +github.com/open-telemetry/opentelemetry-collector-contrib/receiver/statsdreceiver/internal/protocol +github.com/open-telemetry/opentelemetry-collector-contrib/receiver/statsdreceiver/internal/transport +# github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver v0.83.0 +## explicit; go 1.20 github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver 
github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver/internal/metadata # github.com/opencontainers/go-digest v1.0.0 ## explicit; go 1.13 github.com/opencontainers/go-digest -# github.com/opencontainers/image-spec v1.1.0-rc2 -## explicit; go 1.17 +# github.com/opencontainers/image-spec v1.1.0-rc4 +## explicit; go 1.18 github.com/opencontainers/image-spec/specs-go github.com/opencontainers/image-spec/specs-go/v1 # github.com/opencontainers/runc v1.1.8 @@ -1337,7 +1340,7 @@ github.com/opencontainers/runc/libcontainer/user github.com/opencontainers/runc/libcontainer/userns github.com/opencontainers/runc/libcontainer/utils github.com/opencontainers/runc/types -# github.com/opencontainers/runtime-spec v1.0.3-0.20220909204839-494a5a6aca78 +# github.com/opencontainers/runtime-spec v1.1.0-rc.3 ## explicit github.com/opencontainers/runtime-spec/specs-go # github.com/opencontainers/selinux v1.10.0 @@ -1359,7 +1362,7 @@ github.com/openshift/client-go/quota/clientset/versioned/typed/quota/v1 github.com/opentracing/opentracing-go github.com/opentracing/opentracing-go/ext github.com/opentracing/opentracing-go/log -# github.com/openzipkin/zipkin-go v0.4.1 +# github.com/openzipkin/zipkin-go v0.4.2 ## explicit; go 1.18 github.com/openzipkin/zipkin-go/model github.com/openzipkin/zipkin-go/proto/zipkin_proto3 @@ -1369,8 +1372,8 @@ github.com/openzipkin/zipkin-go/reporter github.com/outcaste-io/ristretto github.com/outcaste-io/ristretto/z github.com/outcaste-io/ristretto/z/simd -# github.com/ovh/go-ovh v1.3.0 -## explicit; go 1.12 +# github.com/ovh/go-ovh v1.4.1 +## explicit; go 1.18 github.com/ovh/go-ovh/ovh # github.com/patrickmn/go-cache v2.1.0+incompatible ## explicit @@ -1419,8 +1422,8 @@ github.com/prometheus/common/sigv4 github.com/prometheus/procfs github.com/prometheus/procfs/internal/fs github.com/prometheus/procfs/internal/util -# github.com/prometheus/prometheus v0.43.1 -## explicit; go 1.18 +# github.com/prometheus/prometheus v0.44.0 +## explicit; go 1.19 github.com/prometheus/prometheus/config github.com/prometheus/prometheus/discovery github.com/prometheus/prometheus/discovery/aws @@ -1484,7 +1487,7 @@ github.com/rcrowley/go-metrics # github.com/rs/cors v1.9.0 ## explicit; go 1.13 github.com/rs/cors -# github.com/scaleway/scaleway-sdk-go v1.0.0-beta.14 +# github.com/scaleway/scaleway-sdk-go v1.0.0-beta.15 ## explicit; go 1.17 github.com/scaleway/scaleway-sdk-go/api/baremetal/v1 github.com/scaleway/scaleway-sdk-go/api/instance/v1 @@ -1502,10 +1505,10 @@ github.com/scaleway/scaleway-sdk-go/validation # github.com/seccomp/libseccomp-golang v0.9.2-0.20220502022130-f33da4d89646 ## explicit; go 1.14 github.com/seccomp/libseccomp-golang -# github.com/secure-systems-lab/go-securesystemslib v0.5.0 -## explicit; go 1.17 +# github.com/secure-systems-lab/go-securesystemslib v0.7.0 +## explicit; go 1.20 github.com/secure-systems-lab/go-securesystemslib/cjson -# github.com/shirou/gopsutil/v3 v3.23.6 +# github.com/shirou/gopsutil/v3 v3.23.7 ## explicit; go 1.15 github.com/shirou/gopsutil/v3/common github.com/shirou/gopsutil/v3/cpu @@ -1652,8 +1655,8 @@ go.opencensus.io/trace go.opencensus.io/trace/internal go.opencensus.io/trace/propagation go.opencensus.io/trace/tracestate -# go.opentelemetry.io/collector v0.82.0 -## explicit; go 1.19 +# go.opentelemetry.io/collector v0.83.0 +## explicit; go 1.20 go.opentelemetry.io/collector/client go.opentelemetry.io/collector/internal/cgroups go.opentelemetry.io/collector/internal/fanoutconsumer @@ -1674,38 +1677,38 @@ 
go.opentelemetry.io/collector/service/internal/proctelemetry go.opentelemetry.io/collector/service/internal/zpages go.opentelemetry.io/collector/service/pipelines go.opentelemetry.io/collector/service/telemetry -# go.opentelemetry.io/collector/component v0.82.0 -## explicit; go 1.19 +# go.opentelemetry.io/collector/component v0.83.0 +## explicit; go 1.20 go.opentelemetry.io/collector/component -# go.opentelemetry.io/collector/config/configauth v0.82.0 -## explicit; go 1.19 +# go.opentelemetry.io/collector/config/configauth v0.83.0 +## explicit; go 1.20 go.opentelemetry.io/collector/config/configauth -# go.opentelemetry.io/collector/config/configcompression v0.82.0 -## explicit; go 1.19 +# go.opentelemetry.io/collector/config/configcompression v0.83.0 +## explicit; go 1.20 go.opentelemetry.io/collector/config/configcompression -# go.opentelemetry.io/collector/config/configgrpc v0.82.0 -## explicit; go 1.19 +# go.opentelemetry.io/collector/config/configgrpc v0.83.0 +## explicit; go 1.20 go.opentelemetry.io/collector/config/configgrpc -# go.opentelemetry.io/collector/config/confighttp v0.82.0 -## explicit; go 1.19 +# go.opentelemetry.io/collector/config/confighttp v0.83.0 +## explicit; go 1.20 go.opentelemetry.io/collector/config/confighttp -# go.opentelemetry.io/collector/config/confignet v0.82.0 -## explicit; go 1.19 +# go.opentelemetry.io/collector/config/confignet v0.83.0 +## explicit; go 1.20 go.opentelemetry.io/collector/config/confignet -# go.opentelemetry.io/collector/config/configopaque v0.82.0 -## explicit; go 1.19 +# go.opentelemetry.io/collector/config/configopaque v0.83.0 +## explicit; go 1.20 go.opentelemetry.io/collector/config/configopaque -# go.opentelemetry.io/collector/config/configtelemetry v0.82.0 -## explicit; go 1.19 +# go.opentelemetry.io/collector/config/configtelemetry v0.83.0 +## explicit; go 1.20 go.opentelemetry.io/collector/config/configtelemetry -# go.opentelemetry.io/collector/config/configtls v0.82.0 -## explicit; go 1.19 +# go.opentelemetry.io/collector/config/configtls v0.83.0 +## explicit; go 1.20 go.opentelemetry.io/collector/config/configtls -# go.opentelemetry.io/collector/config/internal v0.82.0 -## explicit; go 1.19 +# go.opentelemetry.io/collector/config/internal v0.83.0 +## explicit; go 1.20 go.opentelemetry.io/collector/config/internal -# go.opentelemetry.io/collector/confmap v0.82.0 -## explicit; go 1.19 +# go.opentelemetry.io/collector/confmap v0.83.0 +## explicit; go 1.20 go.opentelemetry.io/collector/confmap go.opentelemetry.io/collector/confmap/converter/expandconverter go.opentelemetry.io/collector/confmap/internal/mapstructure @@ -1716,40 +1719,40 @@ go.opentelemetry.io/collector/confmap/provider/httpsprovider go.opentelemetry.io/collector/confmap/provider/internal go.opentelemetry.io/collector/confmap/provider/internal/configurablehttpprovider go.opentelemetry.io/collector/confmap/provider/yamlprovider -# go.opentelemetry.io/collector/connector v0.82.0 -## explicit; go 1.19 +# go.opentelemetry.io/collector/connector v0.83.0 +## explicit; go 1.20 go.opentelemetry.io/collector/connector -# go.opentelemetry.io/collector/consumer v0.82.0 -## explicit; go 1.19 +# go.opentelemetry.io/collector/consumer v0.83.0 +## explicit; go 1.20 go.opentelemetry.io/collector/consumer go.opentelemetry.io/collector/consumer/consumererror -# go.opentelemetry.io/collector/exporter v0.82.0 -## explicit; go 1.19 +# go.opentelemetry.io/collector/exporter v0.83.0 +## explicit; go 1.20 go.opentelemetry.io/collector/exporter 
go.opentelemetry.io/collector/exporter/exporterhelper go.opentelemetry.io/collector/exporter/exporterhelper/internal -# go.opentelemetry.io/collector/exporter/loggingexporter v0.82.0 -## explicit; go 1.19 +# go.opentelemetry.io/collector/exporter/loggingexporter v0.83.0 +## explicit; go 1.20 go.opentelemetry.io/collector/exporter/loggingexporter go.opentelemetry.io/collector/exporter/loggingexporter/internal/otlptext -# go.opentelemetry.io/collector/exporter/otlpexporter v0.82.0 -## explicit; go 1.19 +# go.opentelemetry.io/collector/exporter/otlpexporter v0.83.0 +## explicit; go 1.20 go.opentelemetry.io/collector/exporter/otlpexporter -# go.opentelemetry.io/collector/exporter/otlphttpexporter v0.82.0 -## explicit; go 1.19 +# go.opentelemetry.io/collector/exporter/otlphttpexporter v0.83.0 +## explicit; go 1.20 go.opentelemetry.io/collector/exporter/otlphttpexporter -# go.opentelemetry.io/collector/extension v0.82.0 -## explicit; go 1.19 +# go.opentelemetry.io/collector/extension v0.83.0 +## explicit; go 1.20 go.opentelemetry.io/collector/extension go.opentelemetry.io/collector/extension/experimental/storage -# go.opentelemetry.io/collector/extension/auth v0.82.0 -## explicit; go 1.19 +# go.opentelemetry.io/collector/extension/auth v0.83.0 +## explicit; go 1.20 go.opentelemetry.io/collector/extension/auth -# go.opentelemetry.io/collector/extension/ballastextension v0.82.0 -## explicit; go 1.19 +# go.opentelemetry.io/collector/extension/ballastextension v0.83.0 +## explicit; go 1.20 go.opentelemetry.io/collector/extension/ballastextension -# go.opentelemetry.io/collector/extension/zpagesextension v0.82.0 -## explicit; go 1.19 +# go.opentelemetry.io/collector/extension/zpagesextension v0.83.0 +## explicit; go 1.20 go.opentelemetry.io/collector/extension/zpagesextension # go.opentelemetry.io/collector/featuregate v1.0.0-rcv0014 ## explicit; go 1.19 @@ -1775,28 +1778,28 @@ go.opentelemetry.io/collector/pdata/pmetric go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp go.opentelemetry.io/collector/pdata/ptrace go.opentelemetry.io/collector/pdata/ptrace/ptraceotlp -# go.opentelemetry.io/collector/processor v0.82.0 -## explicit; go 1.19 +# go.opentelemetry.io/collector/processor v0.83.0 +## explicit; go 1.20 go.opentelemetry.io/collector/processor go.opentelemetry.io/collector/processor/processorhelper -# go.opentelemetry.io/collector/processor/batchprocessor v0.82.0 -## explicit; go 1.19 +# go.opentelemetry.io/collector/processor/batchprocessor v0.83.0 +## explicit; go 1.20 go.opentelemetry.io/collector/processor/batchprocessor -# go.opentelemetry.io/collector/processor/memorylimiterprocessor v0.82.0 -## explicit; go 1.19 +# go.opentelemetry.io/collector/processor/memorylimiterprocessor v0.83.0 +## explicit; go 1.20 go.opentelemetry.io/collector/processor/memorylimiterprocessor -# go.opentelemetry.io/collector/receiver v0.82.0 -## explicit; go 1.19 +# go.opentelemetry.io/collector/receiver v0.83.0 +## explicit; go 1.20 go.opentelemetry.io/collector/receiver go.opentelemetry.io/collector/receiver/scrapererror -# go.opentelemetry.io/collector/receiver/otlpreceiver v0.82.0 -## explicit; go 1.19 +# go.opentelemetry.io/collector/receiver/otlpreceiver v0.83.0 +## explicit; go 1.20 go.opentelemetry.io/collector/receiver/otlpreceiver go.opentelemetry.io/collector/receiver/otlpreceiver/internal/logs go.opentelemetry.io/collector/receiver/otlpreceiver/internal/metrics go.opentelemetry.io/collector/receiver/otlpreceiver/internal/trace -# go.opentelemetry.io/collector/semconv v0.82.0 -## explicit; go 1.19 
+# go.opentelemetry.io/collector/semconv v0.83.0 +## explicit; go 1.20 go.opentelemetry.io/collector/semconv/v1.16.0 go.opentelemetry.io/collector/semconv/v1.17.0 go.opentelemetry.io/collector/semconv/v1.18.0 @@ -1953,8 +1956,8 @@ golang.org/x/net/ipv6 golang.org/x/net/proxy golang.org/x/net/publicsuffix golang.org/x/net/trace -# golang.org/x/oauth2 v0.10.0 -## explicit; go 1.17 +# golang.org/x/oauth2 v0.11.0 +## explicit; go 1.18 golang.org/x/oauth2 golang.org/x/oauth2/authhandler golang.org/x/oauth2/clientcredentials @@ -2045,7 +2048,7 @@ gonum.org/v1/gonum/lapack/gonum gonum.org/v1/gonum/lapack/lapack64 gonum.org/v1/gonum/mat gonum.org/v1/gonum/stat -# google.golang.org/api v0.134.0 +# google.golang.org/api v0.136.0 ## explicit; go 1.19 google.golang.org/api/compute/v1 google.golang.org/api/googleapi @@ -2073,15 +2076,15 @@ google.golang.org/appengine/internal/socket google.golang.org/appengine/internal/urlfetch google.golang.org/appengine/socket google.golang.org/appengine/urlfetch -# google.golang.org/genproto v0.0.0-20230706204954-ccb25ca9f130 +# google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5 ## explicit; go 1.19 google.golang.org/genproto/internal -# google.golang.org/genproto/googleapis/api v0.0.0-20230706204954-ccb25ca9f130 +# google.golang.org/genproto/googleapis/api v0.0.0-20230803162519-f966b187b2e5 ## explicit; go 1.19 google.golang.org/genproto/googleapis/api google.golang.org/genproto/googleapis/api/annotations google.golang.org/genproto/googleapis/api/httpbody -# google.golang.org/genproto/googleapis/rpc v0.0.0-20230720185612-659f7aaaa771 +# google.golang.org/genproto/googleapis/rpc v0.0.0-20230807174057-1744710a1577 ## explicit; go 1.19 google.golang.org/genproto/googleapis/rpc/code google.golang.org/genproto/googleapis/rpc/errdetails @@ -2470,7 +2473,7 @@ k8s.io/utils/net k8s.io/utils/pointer k8s.io/utils/strings/slices k8s.io/utils/trace -# sigs.k8s.io/controller-runtime v0.15.0 +# sigs.k8s.io/controller-runtime v0.15.1 ## explicit; go 1.20 sigs.k8s.io/controller-runtime/pkg/client/config sigs.k8s.io/controller-runtime/pkg/internal/log From 8c71b8a708360908c6ae9de122af3dfae51e951e Mon Sep 17 00:00:00 2001 From: Bryan Aguilar Date: Tue, 15 Aug 2023 09:00:21 -0700 Subject: [PATCH 4/8] Add var ifndef checks --- Makefile | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/Makefile b/Makefile index b427fab01..dd69dbc2c 100644 --- a/Makefile +++ b/Makefile @@ -201,6 +201,16 @@ gomod-tidy: .PHONY: gomod-update-collector gomod-update-collector: +ifndef CORE_VER + @echo "CORE_VER not defined" + @echo "usage: CORE_VER=v0.2.0 CONTRIB_VER=v0.2.0 make gomod-update-collector" + exit 1 +endif +ifndef CONTRIB_VER + @echo "CONTRIB_VER not defined" + @echo "usage: CORE_VER=v0.2.0 CONTRIB_VER=v0.2.0 make gomod-update-collector" + exit 1 +endif @$(MAKE) for-all-target TARGET="update-collector-ver" From f8d7a38f3daa3d1134801313e8102c9e8c38b919 Mon Sep 17 00:00:00 2001 From: Bryan Aguilar Date: Tue, 15 Aug 2023 09:39:15 -0700 Subject: [PATCH 5/8] Fix patch files --- patches/kafkaSASLValidationPanicFix.patch | 15 --------------- patches/statsDfeatGateInstrumentationScope.patch | 6 +++--- 2 files changed, 3 insertions(+), 18 deletions(-) delete mode 100644 patches/kafkaSASLValidationPanicFix.patch diff --git a/patches/kafkaSASLValidationPanicFix.patch b/patches/kafkaSASLValidationPanicFix.patch deleted file mode 100644 index 2951f13ff..000000000 --- a/patches/kafkaSASLValidationPanicFix.patch +++ /dev/null @@ -1,15 +0,0 @@ -diff --git 
a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/config.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/config.go -index 67851a14..c430389f 100644 ---- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/config.go -+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/config.go -@@ -104,6 +104,10 @@ func (cfg *Config) Validate() error { - } - - func validateSASLConfig(c *SASLConfig) error { -+ if c == nil { -+ return nil -+ } -+ - if c.Username == "" { - return fmt.Errorf("auth.sasl.username is required") - } diff --git a/patches/statsDfeatGateInstrumentationScope.patch b/patches/statsDfeatGateInstrumentationScope.patch index d68d1d2fc..6b056c0e8 100644 --- a/patches/statsDfeatGateInstrumentationScope.patch +++ b/patches/statsDfeatGateInstrumentationScope.patch @@ -4,10 +4,10 @@ Date: Tue Jul 25 17:33:22 2023 -0700 Add feature gate for instrumentation scope in statsd receiver -diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/statsdreceiver/protocol/statsd_parser.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/statsdreceiver/protocol/statsd_parser.go +diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/statsdreceiver/internal/protocol/statsd_parser.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/statsdreceiver/internal/protocol/statsd_parser.go index 1cfec5cf..699d6ee7 100644 ---- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/statsdreceiver/protocol/statsd_parser.go -+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/statsdreceiver/protocol/statsd_parser.go +--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/statsdreceiver/internal/protocol/statsd_parser.go ++++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/statsdreceiver/internal/protocol/statsd_parser.go @@ -14,6 +14,7 @@ import ( "github.com/lightstep/go-expohisto/structure" "go.opentelemetry.io/collector/client" From 991786c8ab0c8285a4159a68c29303d2ecd50b7b Mon Sep 17 00:00:00 2001 From: Bryan Aguilar Date: Tue, 15 Aug 2023 13:03:07 -0700 Subject: [PATCH 6/8] Add agent replace statements --- go.mod | 7 +++++++ go.sum | 4 ++-- testbed/go.mod | 5 +++++ testbed/go.sum | 4 ++-- 4 files changed, 16 insertions(+), 4 deletions(-) diff --git a/go.mod b/go.mod index 168c7ee19..0b438a927 100644 --- a/go.mod +++ b/go.mod @@ -432,3 +432,10 @@ replace github.com/outcaste-io/ristretto v0.2.0 => github.com/outcaste-io/ristre // openshift removed all tags from their repo, use the pseudoversion from the release-3.9 branch HEAD replace github.com/openshift/api v3.9.0+incompatible => github.com/openshift/api v0.0.0-20180801171038-322a19404e37 + +// taken from https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/exporter/datadogexporter/go.mod +// not having versions on left hand side of replace was not our choice. Copying from how upstream does their replace. 
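A note on the replace directives in this hunk: when the left-hand side of a Go `replace` carries no version, the toolchain redirects every required version of that module path, which is the point here, since different parts of the dependency graph pull in both v0.47.x and v0.48.x of the datadog-agent packages. A minimal go.mod-style sketch of the pattern, with a placeholder module path rather than the real directives that follow:

    // Sketch only: an unversioned left-hand side matches all required
    // versions of the module and pins them to the version on the right.
    replace example.com/some/module => example.com/some/module v1.2.3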
+// v0.47.x and v0.48.x are incompatible, prefer to use v0.48.x +replace github.com/DataDog/datadog-agent/pkg/proto => github.com/DataDog/datadog-agent/pkg/proto v0.48.0-beta.1 + +replace github.com/DataDog/datadog-agent/pkg/trace => github.com/DataDog/datadog-agent/pkg/trace v0.48.0-beta.1 diff --git a/go.sum b/go.sum index dbd548160..a3da9a8d2 100644 --- a/go.sum +++ b/go.sum @@ -103,8 +103,8 @@ github.com/DataDog/datadog-agent/pkg/proto v0.48.0-beta.1 h1:Htxj/RE55AeDZ+OE6+x github.com/DataDog/datadog-agent/pkg/proto v0.48.0-beta.1/go.mod h1:O3WwGRPZxs4BpB2ccUvIIPprhscWBRpudJT6mC+7sr8= github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.48.0-beta.1 h1:We9Y6+kwCnSOQilk2koeADjbZgMHFDl6iHBaobU5nAw= github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.48.0-beta.1/go.mod h1:5Q39ZOIOwZMnFyRadp+5gH1bFdjmb+Pgxe+j5XOwaTg= -github.com/DataDog/datadog-agent/pkg/trace v0.48.0-devel h1:sC2wq2fuI1r3U6FmUsn4clsrFOql5XBfs1EG15LPDEc= -github.com/DataDog/datadog-agent/pkg/trace v0.48.0-devel/go.mod h1:CmdN7Zrj+S+2hOSGW5hFT2LC2FVIF/avJTyvhUjaueI= +github.com/DataDog/datadog-agent/pkg/trace v0.48.0-beta.1 h1:usLCrmPm2wuNedbcuArxN37E/e7UaCJ66i1tuEq7E/M= +github.com/DataDog/datadog-agent/pkg/trace v0.48.0-beta.1/go.mod h1:kxBOu4ZSem1E0JdhxjeI2jAQ7nxeRxuhjU4r9LKnRkU= github.com/DataDog/datadog-agent/pkg/util/cgroups v0.48.0-beta.1 h1:9iyw6jSwJwsFe8TooU8mqMhMfFiW6N/05OnNMg91kBY= github.com/DataDog/datadog-agent/pkg/util/cgroups v0.48.0-beta.1/go.mod h1:TmxM8Pe+1QBWfM1JisS3xjvX1/kk655XY/IjqA36g6s= github.com/DataDog/datadog-agent/pkg/util/log v0.48.0-beta.1 h1:k4tcg077NsPJRxtuGdYEm9kge+zq5QO5x6Yv3R5BwpE= diff --git a/testbed/go.mod b/testbed/go.mod index fe318bf04..d99fddd51 100644 --- a/testbed/go.mod +++ b/testbed/go.mod @@ -420,3 +420,8 @@ replace github.com/go-openapi/spec v0.20.5 => github.com/go-openapi/spec v0.20.9 replace github.com/openshift/api v3.9.0+incompatible => github.com/openshift/api v0.0.0-20180801171038-322a19404e37 replace github.com/aws-observability/aws-otel-collector => ../ + +// v0.47.x and v0.48.x are incompatible, prefer to use v0.48.x +replace github.com/DataDog/datadog-agent/pkg/proto => github.com/DataDog/datadog-agent/pkg/proto v0.48.0-beta.1 + +replace github.com/DataDog/datadog-agent/pkg/trace => github.com/DataDog/datadog-agent/pkg/trace v0.48.0-beta.1 diff --git a/testbed/go.sum b/testbed/go.sum index 14d0e9f82..ce16713a5 100644 --- a/testbed/go.sum +++ b/testbed/go.sum @@ -103,8 +103,8 @@ github.com/DataDog/datadog-agent/pkg/proto v0.48.0-beta.1 h1:Htxj/RE55AeDZ+OE6+x github.com/DataDog/datadog-agent/pkg/proto v0.48.0-beta.1/go.mod h1:O3WwGRPZxs4BpB2ccUvIIPprhscWBRpudJT6mC+7sr8= github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.48.0-beta.1 h1:We9Y6+kwCnSOQilk2koeADjbZgMHFDl6iHBaobU5nAw= github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.48.0-beta.1/go.mod h1:5Q39ZOIOwZMnFyRadp+5gH1bFdjmb+Pgxe+j5XOwaTg= -github.com/DataDog/datadog-agent/pkg/trace v0.48.0-devel h1:sC2wq2fuI1r3U6FmUsn4clsrFOql5XBfs1EG15LPDEc= -github.com/DataDog/datadog-agent/pkg/trace v0.48.0-devel/go.mod h1:CmdN7Zrj+S+2hOSGW5hFT2LC2FVIF/avJTyvhUjaueI= +github.com/DataDog/datadog-agent/pkg/trace v0.48.0-beta.1 h1:usLCrmPm2wuNedbcuArxN37E/e7UaCJ66i1tuEq7E/M= +github.com/DataDog/datadog-agent/pkg/trace v0.48.0-beta.1/go.mod h1:kxBOu4ZSem1E0JdhxjeI2jAQ7nxeRxuhjU4r9LKnRkU= github.com/DataDog/datadog-agent/pkg/trace/exportable v0.0.0-20201016145401-4646cf596b02 h1:N2BRKjJ/c+ipDwt5b+ijqEc2EsmK3zXq2lNeIPnSwMI= github.com/DataDog/datadog-agent/pkg/trace/exportable 
v0.0.0-20201016145401-4646cf596b02/go.mod h1:EalMiS87Guu6PkLdxz7gmWqi+dRs9sjYLTOyTrM/aVU= github.com/DataDog/datadog-agent/pkg/util/cgroups v0.48.0-beta.1 h1:9iyw6jSwJwsFe8TooU8mqMhMfFiW6N/05OnNMg91kBY= From 013d1c1f94f8e2b9b69841bd02a41c4bd7024a1c Mon Sep 17 00:00:00 2001 From: Bryan Aguilar Date: Tue, 15 Aug 2023 13:03:14 -0700 Subject: [PATCH 7/8] go mod vendor --- .../datadog-agent/pkg/trace/agent/agent.go | 29 +- .../pkg/trace/agent/normalizer.go | 2 +- .../pkg/trace/agent/obfuscate.go | 2 +- .../pkg/trace/agent/truncator.go | 2 +- .../datadog-agent/pkg/trace/api/api.go | 8 +- .../datadog-agent/pkg/trace/api/otlp.go | 2 +- .../datadog-agent/pkg/trace/api/payload.go | 2 +- .../datadog-agent/pkg/trace/config/config.go | 16 + .../pkg/trace/event/extractor.go | 2 +- .../pkg/trace/event/extractor_fixed_rate.go | 2 +- .../pkg/trace/event/extractor_legacy.go | 2 +- .../pkg/trace/event/extractor_metric.go | 2 +- .../pkg/trace/event/extractor_noop.go | 2 +- .../pkg/trace/event/processor.go | 2 +- .../pkg/trace/event/sampler_max_eps.go | 2 +- .../pkg/trace/filters/blacklister.go | 2 +- .../pkg/trace/filters/replacer.go | 2 +- .../datadog-agent/pkg/trace/info/info.go | 2 +- .../pkg/trace/pb/agent_payload.pb.go | 238 -- .../pkg/trace/pb/agent_payload.proto | 28 - .../pkg/trace/pb/agent_payload_vtproto.pb.go | 523 ---- .../pkg/trace/pb/decoder_bytes.go | 272 -- .../datadog-agent/pkg/trace/pb/decoder_v05.go | 220 -- .../DataDog/datadog-agent/pkg/trace/pb/doc.go | 12 - .../datadog-agent/pkg/trace/pb/generate.sh | 8 - .../datadog-agent/pkg/trace/pb/hook.go | 33 - .../datadog-agent/pkg/trace/pb/span.pb.go | 305 --- .../datadog-agent/pkg/trace/pb/span.proto | 46 - .../datadog-agent/pkg/trace/pb/span_gen.go | 299 --- .../datadog-agent/pkg/trace/pb/span_utils.go | 51 - .../pkg/trace/pb/span_vtproto.pb.go | 994 ------- .../datadog-agent/pkg/trace/pb/stats.pb.go | 2384 ----------------- .../datadog-agent/pkg/trace/pb/stats.proto | 72 - .../datadog-agent/pkg/trace/pb/stats_gen.go | 1393 ---------- .../datadog-agent/pkg/trace/pb/trace.go | 52 - .../datadog-agent/pkg/trace/pb/trace_gen.go | 163 -- .../pkg/trace/pb/tracer_payload.pb.go | 388 --- .../pkg/trace/pb/tracer_payload.proto | 58 - .../pkg/trace/pb/tracer_payload_gen.go | 390 --- .../pkg/trace/pb/tracer_payload_utils.go | 35 - .../pkg/trace/pb/tracer_payload_vtproto.pb.go | 1066 -------- .../pkg/trace/sampler/prioritysampler.go | 2 +- .../pkg/trace/sampler/rare_sampler.go | 2 +- .../pkg/trace/sampler/sampler.go | 2 +- .../pkg/trace/sampler/scoresampler.go | 2 +- .../pkg/trace/sampler/signature.go | 2 +- .../pkg/trace/stats/aggregation.go | 4 +- .../trace/stats/client_stats_aggregator.go | 72 +- .../pkg/trace/stats/concentrator.go | 18 +- .../datadog-agent/pkg/trace/stats/statsraw.go | 16 +- .../datadog-agent/pkg/trace/stats/weight.go | 4 +- .../pkg/trace/traceutil/azure.go | 113 +- .../pkg/trace/traceutil/processed_trace.go | 4 +- .../datadog-agent/pkg/trace/traceutil/span.go | 2 +- .../pkg/trace/traceutil/trace.go | 2 +- .../datadog-agent/pkg/trace/writer/stats.go | 45 +- .../datadog-agent/pkg/trace/writer/trace.go | 2 +- vendor/modules.txt | 7 +- 58 files changed, 243 insertions(+), 9169 deletions(-) delete mode 100644 vendor/github.com/DataDog/datadog-agent/pkg/trace/pb/agent_payload.pb.go delete mode 100644 vendor/github.com/DataDog/datadog-agent/pkg/trace/pb/agent_payload.proto delete mode 100644 vendor/github.com/DataDog/datadog-agent/pkg/trace/pb/agent_payload_vtproto.pb.go delete mode 100644 
vendor/github.com/DataDog/datadog-agent/pkg/trace/pb/decoder_bytes.go delete mode 100644 vendor/github.com/DataDog/datadog-agent/pkg/trace/pb/decoder_v05.go delete mode 100644 vendor/github.com/DataDog/datadog-agent/pkg/trace/pb/doc.go delete mode 100644 vendor/github.com/DataDog/datadog-agent/pkg/trace/pb/generate.sh delete mode 100644 vendor/github.com/DataDog/datadog-agent/pkg/trace/pb/hook.go delete mode 100644 vendor/github.com/DataDog/datadog-agent/pkg/trace/pb/span.pb.go delete mode 100644 vendor/github.com/DataDog/datadog-agent/pkg/trace/pb/span.proto delete mode 100644 vendor/github.com/DataDog/datadog-agent/pkg/trace/pb/span_gen.go delete mode 100644 vendor/github.com/DataDog/datadog-agent/pkg/trace/pb/span_utils.go delete mode 100644 vendor/github.com/DataDog/datadog-agent/pkg/trace/pb/span_vtproto.pb.go delete mode 100644 vendor/github.com/DataDog/datadog-agent/pkg/trace/pb/stats.pb.go delete mode 100644 vendor/github.com/DataDog/datadog-agent/pkg/trace/pb/stats.proto delete mode 100644 vendor/github.com/DataDog/datadog-agent/pkg/trace/pb/stats_gen.go delete mode 100644 vendor/github.com/DataDog/datadog-agent/pkg/trace/pb/trace.go delete mode 100644 vendor/github.com/DataDog/datadog-agent/pkg/trace/pb/trace_gen.go delete mode 100644 vendor/github.com/DataDog/datadog-agent/pkg/trace/pb/tracer_payload.pb.go delete mode 100644 vendor/github.com/DataDog/datadog-agent/pkg/trace/pb/tracer_payload.proto delete mode 100644 vendor/github.com/DataDog/datadog-agent/pkg/trace/pb/tracer_payload_gen.go delete mode 100644 vendor/github.com/DataDog/datadog-agent/pkg/trace/pb/tracer_payload_utils.go delete mode 100644 vendor/github.com/DataDog/datadog-agent/pkg/trace/pb/tracer_payload_vtproto.pb.go diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/trace/agent/agent.go b/vendor/github.com/DataDog/datadog-agent/pkg/trace/agent/agent.go index 054d38e05..e94c91014 100644 --- a/vendor/github.com/DataDog/datadog-agent/pkg/trace/agent/agent.go +++ b/vendor/github.com/DataDog/datadog-agent/pkg/trace/agent/agent.go @@ -10,6 +10,7 @@ import ( "runtime" "time" + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" "github.com/DataDog/datadog-agent/pkg/trace/remoteconfighandler" "github.com/DataDog/datadog-agent/pkg/trace/telemetry" @@ -22,7 +23,6 @@ import ( "github.com/DataDog/datadog-agent/pkg/trace/log" "github.com/DataDog/datadog-agent/pkg/trace/metrics" "github.com/DataDog/datadog-agent/pkg/trace/metrics/timing" - "github.com/DataDog/datadog-agent/pkg/trace/pb" "github.com/DataDog/datadog-agent/pkg/trace/sampler" "github.com/DataDog/datadog-agent/pkg/trace/stats" "github.com/DataDog/datadog-agent/pkg/trace/traceutil" @@ -87,7 +87,7 @@ type Agent struct { func NewAgent(ctx context.Context, conf *config.AgentConfig, telemetryCollector telemetry.TelemetryCollector) *Agent { dynConf := sampler.NewDynamicConfig() in := make(chan *api.Payload, 1000) - statsChan := make(chan pb.StatsPayload, 100) + statsChan := make(chan *pb.StatsPayload, 100) oconf := conf.Obfuscation.Export(conf) if oconf.Statsd == nil { oconf.Statsd = metrics.Client @@ -215,6 +215,12 @@ func (a *Agent) setRootSpanTags(root *pb.Span) { rate := ratelimiter.RealRate() sampler.SetPreSampleRate(root, rate) } + + if a.conf.InAzureAppServices { + for k, v := range traceutil.GetAppServicesTags() { + traceutil.SetMeta(root, k, v) + } + } } // Process is the default work unit that receives a trace, transforms it and @@ -272,6 +278,7 @@ func (a *Agent) Process(p *api.Payload) { } // Extra sanitization steps of the trace. 
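Above, setRootSpanTags now stamps the Azure App Services tags on the root span; the lines just below repeat aas.site.name and aas.site.type on every span in the chunk. For context, a rough self-contained Go sketch of the detection side, built from the two env-var constants this patch adds to config.go further down. WEBSITE_SITE_NAME is assumed here as the value source for aas.site.name; it is a standard App Services variable but is not shown in this diff:

    package main

    import (
    	"fmt"
    	"os"
    )

    // Env vars the patch keys off (see the config.go hunk below).
    const (
    	runZip       = "APPSVC_RUN_ZIP"
    	appLogsTrace = "WEBSITE_APPSERVICEAPPLOGS_TRACE_ENABLED"
    )

    // inAzureAppServices mirrors the detection added in config.go:
    // either variable being present marks the Linux or Windows flavor.
    func inAzureAppServices() bool {
    	_, onLinux := os.LookupEnv(runZip)
    	_, onWindows := os.LookupEnv(appLogsTrace)
    	return onLinux || onWindows
    }

    func main() {
    	if inAzureAppServices() {
    		// Assumed source for the aas.site.name tag value.
    		fmt.Println("aas.site.name =", os.Getenv("WEBSITE_SITE_NAME"))
    	}
    }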
+ appServicesTags := traceutil.GetAppServicesTags() for _, span := range chunk.Spans { for k, v := range a.conf.GlobalTags { if k == tagOrigin { @@ -280,6 +287,10 @@ func (a *Agent) Process(p *api.Payload) { traceutil.SetMeta(span, k, v) } } + if a.conf.InAzureAppServices { + traceutil.SetMeta(span, "aas.site.name", appServicesTags["aas.site.name"]) + traceutil.SetMeta(span, "aas.site.type", appServicesTags["aas.site.type"]) + } if a.ModifySpan != nil { a.ModifySpan(chunk, span) } @@ -407,7 +418,7 @@ func (a *Agent) discardSpans(p *api.Payload) { } } -func (a *Agent) processStats(in pb.ClientStatsPayload, lang, tracerVersion string) pb.ClientStatsPayload { +func (a *Agent) processStats(in *pb.ClientStatsPayload, lang, tracerVersion string) *pb.ClientStatsPayload { enableContainers := a.conf.HasFeature("enable_cid_stats") || (a.conf.FargateOrchestrator != config.OrchestratorUnknown) if !enableContainers || a.conf.HasFeature("disable_cid_stats") { // only allow the ContainerID stats dimension if we're in a Fargate instance or it's @@ -428,12 +439,12 @@ func (a *Agent) processStats(in pb.ClientStatsPayload, lang, tracerVersion strin for i, group := range in.Stats { n := 0 for _, b := range group.Stats { - a.normalizeStatsGroup(&b, lang) - if !a.Blacklister.AllowsStat(&b) { + a.normalizeStatsGroup(b, lang) + if !a.Blacklister.AllowsStat(b) { continue } - a.obfuscateStatsGroup(&b) - a.Replacer.ReplaceStatsGroup(&b) + a.obfuscateStatsGroup(b) + a.Replacer.ReplaceStatsGroup(b) group.Stats[n] = b n++ } @@ -443,7 +454,7 @@ func (a *Agent) processStats(in pb.ClientStatsPayload, lang, tracerVersion strin return in } -func mergeDuplicates(s pb.ClientStatsBucket) { +func mergeDuplicates(s *pb.ClientStatsBucket) { indexes := make(map[stats.Aggregation]int, len(s.Stats)) for i, g := range s.Stats { a := stats.NewAggregationFromGroup(g) @@ -461,7 +472,7 @@ func mergeDuplicates(s pb.ClientStatsBucket) { } // ProcessStats processes incoming client stats in from the given tracer. 
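The signature rewrites just below move stats payloads from values to pointers (pb.ClientStatsPayload becomes *pb.ClientStatsPayload, and the bucket loop above now passes b directly instead of &b). The payoff is that helpers like normalizeStatsGroup can mutate buckets in place while the caller ranges over the slice. A tiny stand-alone sketch of the value-vs-pointer difference, with an invented bucket type for illustration:

    package main

    import "fmt"

    type bucket struct{ hits int }

    func main() {
    	byValue := []bucket{{1}, {2}}
    	for _, b := range byValue {
    		b.hits *= 10 // mutates the loop copy; byValue is unchanged
    	}

    	byPtr := []*bucket{{1}, {2}}
    	for _, b := range byPtr {
    		b.hits *= 10 // mutates the shared bucket through the pointer
    	}

    	// Prints "1 10": only the pointer-slice elements were updated.
    	fmt.Println(byValue[0].hits, byPtr[0].hits)
    }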
-func (a *Agent) ProcessStats(in pb.ClientStatsPayload, lang, tracerVersion string) { +func (a *Agent) ProcessStats(in *pb.ClientStatsPayload, lang, tracerVersion string) { a.ClientStatsAggregator.In <- a.processStats(in, lang, tracerVersion) } diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/trace/agent/normalizer.go b/vendor/github.com/DataDog/datadog-agent/pkg/trace/agent/normalizer.go index 23a068bba..88b5a77de 100644 --- a/vendor/github.com/DataDog/datadog-agent/pkg/trace/agent/normalizer.go +++ b/vendor/github.com/DataDog/datadog-agent/pkg/trace/agent/normalizer.go @@ -12,9 +12,9 @@ import ( "strconv" "time" + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" "github.com/DataDog/datadog-agent/pkg/trace/info" "github.com/DataDog/datadog-agent/pkg/trace/log" - "github.com/DataDog/datadog-agent/pkg/trace/pb" "github.com/DataDog/datadog-agent/pkg/trace/sampler" "github.com/DataDog/datadog-agent/pkg/trace/traceutil" ) diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/trace/agent/obfuscate.go b/vendor/github.com/DataDog/datadog-agent/pkg/trace/agent/obfuscate.go index da4ae3238..622f5f9fa 100644 --- a/vendor/github.com/DataDog/datadog-agent/pkg/trace/agent/obfuscate.go +++ b/vendor/github.com/DataDog/datadog-agent/pkg/trace/agent/obfuscate.go @@ -9,9 +9,9 @@ import ( "strings" "github.com/DataDog/datadog-agent/pkg/obfuscate" + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" "github.com/DataDog/datadog-agent/pkg/trace/config" "github.com/DataDog/datadog-agent/pkg/trace/log" - "github.com/DataDog/datadog-agent/pkg/trace/pb" "github.com/DataDog/datadog-agent/pkg/trace/traceutil" ) diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/trace/agent/truncator.go b/vendor/github.com/DataDog/datadog-agent/pkg/trace/agent/truncator.go index def142efc..eb12764ee 100644 --- a/vendor/github.com/DataDog/datadog-agent/pkg/trace/agent/truncator.go +++ b/vendor/github.com/DataDog/datadog-agent/pkg/trace/agent/truncator.go @@ -6,8 +6,8 @@ package agent import ( + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" "github.com/DataDog/datadog-agent/pkg/trace/log" - "github.com/DataDog/datadog-agent/pkg/trace/pb" "github.com/DataDog/datadog-agent/pkg/trace/traceutil" ) diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/trace/api/api.go b/vendor/github.com/DataDog/datadog-agent/pkg/trace/api/api.go index 370f45db4..2589a8f0c 100644 --- a/vendor/github.com/DataDog/datadog-agent/pkg/trace/api/api.go +++ b/vendor/github.com/DataDog/datadog-agent/pkg/trace/api/api.go @@ -26,6 +26,7 @@ import ( "github.com/tinylib/msgp/msgp" "go.uber.org/atomic" + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" "github.com/DataDog/datadog-agent/pkg/trace/api/apiutil" "github.com/DataDog/datadog-agent/pkg/trace/api/internal/header" "github.com/DataDog/datadog-agent/pkg/trace/config" @@ -33,7 +34,6 @@ import ( "github.com/DataDog/datadog-agent/pkg/trace/log" "github.com/DataDog/datadog-agent/pkg/trace/metrics" "github.com/DataDog/datadog-agent/pkg/trace/metrics/timing" - "github.com/DataDog/datadog-agent/pkg/trace/pb" "github.com/DataDog/datadog-agent/pkg/trace/sampler" "github.com/DataDog/datadog-agent/pkg/trace/telemetry" "github.com/DataDog/datadog-agent/pkg/trace/watchdog" @@ -423,7 +423,7 @@ func (r *HTTPReceiver) rateLimited(n int64) bool { type StatsProcessor interface { // ProcessStats takes a stats payload and consumes it. It is considered to be originating // from the given lang. 
- ProcessStats(p pb.ClientStatsPayload, lang, tracerVersion string) + ProcessStats(p *pb.ClientStatsPayload, lang, tracerVersion string) } // handleStats handles incoming stats payloads. @@ -433,8 +433,8 @@ func (r *HTTPReceiver) handleStats(w http.ResponseWriter, req *http.Request) { ts := r.tagStats(V07, req.Header) rd := apiutil.NewLimitedReader(req.Body, r.conf.MaxRequestBytes) req.Header.Set("Accept", "application/msgpack") - var in pb.ClientStatsPayload - if err := msgp.Decode(rd, &in); err != nil { + in := &pb.ClientStatsPayload{} + if err := msgp.Decode(rd, in); err != nil { log.Errorf("Error decoding pb.ClientStatsPayload: %v", err) httpDecodingError(err, []string{"handler:stats", "codec:msgpack", "v:v0.6"}, w) return diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/trace/api/otlp.go b/vendor/github.com/DataDog/datadog-agent/pkg/trace/api/otlp.go index 7f5a96eab..e69f24d5c 100644 --- a/vendor/github.com/DataDog/datadog-agent/pkg/trace/api/otlp.go +++ b/vendor/github.com/DataDog/datadog-agent/pkg/trace/api/otlp.go @@ -18,13 +18,13 @@ import ( "sync" "time" + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" "github.com/DataDog/datadog-agent/pkg/trace/api/internal/header" "github.com/DataDog/datadog-agent/pkg/trace/config" "github.com/DataDog/datadog-agent/pkg/trace/info" "github.com/DataDog/datadog-agent/pkg/trace/log" "github.com/DataDog/datadog-agent/pkg/trace/metrics" "github.com/DataDog/datadog-agent/pkg/trace/metrics/timing" - "github.com/DataDog/datadog-agent/pkg/trace/pb" "github.com/DataDog/datadog-agent/pkg/trace/sampler" "github.com/DataDog/datadog-agent/pkg/trace/traceutil" diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/trace/api/payload.go b/vendor/github.com/DataDog/datadog-agent/pkg/trace/api/payload.go index 678f37e48..1efc41aad 100644 --- a/vendor/github.com/DataDog/datadog-agent/pkg/trace/api/payload.go +++ b/vendor/github.com/DataDog/datadog-agent/pkg/trace/api/payload.go @@ -6,8 +6,8 @@ package api import ( + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" "github.com/DataDog/datadog-agent/pkg/trace/info" - "github.com/DataDog/datadog-agent/pkg/trace/pb" ) // Payload specifies information about a set of traces received by the API. diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/trace/config/config.go b/vendor/github.com/DataDog/datadog-agent/pkg/trace/config/config.go index 6e6c86467..95c220606 100644 --- a/vendor/github.com/DataDog/datadog-agent/pkg/trace/config/config.go +++ b/vendor/github.com/DataDog/datadog-agent/pkg/trace/config/config.go @@ -11,6 +11,7 @@ import ( "net" "net/http" "net/url" + "os" "regexp" "time" @@ -35,6 +36,10 @@ type Endpoint struct { // TelemetryEndpointPrefix specifies the prefix of the telemetry endpoint URL. const TelemetryEndpointPrefix = "https://instrumentation-telemetry-intake." +// App Services env vars +const RunZip = "APPSVC_RUN_ZIP" +const AppLogsTrace = "WEBSITE_APPSERVICEAPPLOGS_TRACE_ENABLED" + // OTLP holds the configuration for the OpenTelemetry receiver. type OTLP struct { // BindHost specifies the host to bind the receiver to. 
@@ -448,6 +453,9 @@ type AgentConfig struct { // ContainerProcRoot is the root dir for `proc` info ContainerProcRoot string + // Azure App Services + InAzureAppServices bool + // DebugServerPort defines the port used by the debug server DebugServerPort int } @@ -529,6 +537,8 @@ func New() *AgentConfig { MaxPayloadSize: 5 * 1024 * 1024, }, + InAzureAppServices: InAzureAppServices(), + Features: make(map[string]struct{}), } } @@ -588,3 +598,9 @@ func (c *AgentConfig) AllFeatures() []string { } return feats } + +func InAzureAppServices() bool { + _, existsLinux := os.LookupEnv(RunZip) + _, existsWin := os.LookupEnv(AppLogsTrace) + return existsLinux || existsWin +} diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/trace/event/extractor.go b/vendor/github.com/DataDog/datadog-agent/pkg/trace/event/extractor.go index cbd9ff3f3..905472076 100644 --- a/vendor/github.com/DataDog/datadog-agent/pkg/trace/event/extractor.go +++ b/vendor/github.com/DataDog/datadog-agent/pkg/trace/event/extractor.go @@ -6,7 +6,7 @@ package event import ( - "github.com/DataDog/datadog-agent/pkg/trace/pb" + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" "github.com/DataDog/datadog-agent/pkg/trace/sampler" ) diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/trace/event/extractor_fixed_rate.go b/vendor/github.com/DataDog/datadog-agent/pkg/trace/event/extractor_fixed_rate.go index 60596eb26..3e1973d3a 100644 --- a/vendor/github.com/DataDog/datadog-agent/pkg/trace/event/extractor_fixed_rate.go +++ b/vendor/github.com/DataDog/datadog-agent/pkg/trace/event/extractor_fixed_rate.go @@ -8,7 +8,7 @@ package event import ( "strings" - "github.com/DataDog/datadog-agent/pkg/trace/pb" + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" "github.com/DataDog/datadog-agent/pkg/trace/sampler" ) diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/trace/event/extractor_legacy.go b/vendor/github.com/DataDog/datadog-agent/pkg/trace/event/extractor_legacy.go index d92c57257..465a299bf 100644 --- a/vendor/github.com/DataDog/datadog-agent/pkg/trace/event/extractor_legacy.go +++ b/vendor/github.com/DataDog/datadog-agent/pkg/trace/event/extractor_legacy.go @@ -8,7 +8,7 @@ package event import ( "strings" - "github.com/DataDog/datadog-agent/pkg/trace/pb" + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" "github.com/DataDog/datadog-agent/pkg/trace/sampler" "github.com/DataDog/datadog-agent/pkg/trace/traceutil" ) diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/trace/event/extractor_metric.go b/vendor/github.com/DataDog/datadog-agent/pkg/trace/event/extractor_metric.go index df2eae87f..50d877045 100644 --- a/vendor/github.com/DataDog/datadog-agent/pkg/trace/event/extractor_metric.go +++ b/vendor/github.com/DataDog/datadog-agent/pkg/trace/event/extractor_metric.go @@ -6,7 +6,7 @@ package event import ( - "github.com/DataDog/datadog-agent/pkg/trace/pb" + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" "github.com/DataDog/datadog-agent/pkg/trace/sampler" ) diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/trace/event/extractor_noop.go b/vendor/github.com/DataDog/datadog-agent/pkg/trace/event/extractor_noop.go index b1ce25687..408c05445 100644 --- a/vendor/github.com/DataDog/datadog-agent/pkg/trace/event/extractor_noop.go +++ b/vendor/github.com/DataDog/datadog-agent/pkg/trace/event/extractor_noop.go @@ -6,7 +6,7 @@ package event import ( - "github.com/DataDog/datadog-agent/pkg/trace/pb" + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" 
"github.com/DataDog/datadog-agent/pkg/trace/sampler" ) diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/trace/event/processor.go b/vendor/github.com/DataDog/datadog-agent/pkg/trace/event/processor.go index 4e7c33e5e..29f8ccdaf 100644 --- a/vendor/github.com/DataDog/datadog-agent/pkg/trace/event/processor.go +++ b/vendor/github.com/DataDog/datadog-agent/pkg/trace/event/processor.go @@ -6,7 +6,7 @@ package event import ( - "github.com/DataDog/datadog-agent/pkg/trace/pb" + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" "github.com/DataDog/datadog-agent/pkg/trace/sampler" "github.com/DataDog/datadog-agent/pkg/trace/traceutil" ) diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/trace/event/sampler_max_eps.go b/vendor/github.com/DataDog/datadog-agent/pkg/trace/event/sampler_max_eps.go index d742407ae..3ce892933 100644 --- a/vendor/github.com/DataDog/datadog-agent/pkg/trace/event/sampler_max_eps.go +++ b/vendor/github.com/DataDog/datadog-agent/pkg/trace/event/sampler_max_eps.go @@ -8,9 +8,9 @@ package event import ( "time" + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" "github.com/DataDog/datadog-agent/pkg/trace/log" "github.com/DataDog/datadog-agent/pkg/trace/metrics" - "github.com/DataDog/datadog-agent/pkg/trace/pb" "github.com/DataDog/datadog-agent/pkg/trace/sampler" "github.com/DataDog/datadog-agent/pkg/trace/watchdog" ) diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/trace/filters/blacklister.go b/vendor/github.com/DataDog/datadog-agent/pkg/trace/filters/blacklister.go index 28fd436c6..76e863da2 100644 --- a/vendor/github.com/DataDog/datadog-agent/pkg/trace/filters/blacklister.go +++ b/vendor/github.com/DataDog/datadog-agent/pkg/trace/filters/blacklister.go @@ -8,8 +8,8 @@ package filters import ( "regexp" + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" "github.com/DataDog/datadog-agent/pkg/trace/log" - "github.com/DataDog/datadog-agent/pkg/trace/pb" ) // Blacklister holds a list of regular expressions which will match resources diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/trace/filters/replacer.go b/vendor/github.com/DataDog/datadog-agent/pkg/trace/filters/replacer.go index 98b8c6d1f..482f87828 100644 --- a/vendor/github.com/DataDog/datadog-agent/pkg/trace/filters/replacer.go +++ b/vendor/github.com/DataDog/datadog-agent/pkg/trace/filters/replacer.go @@ -8,8 +8,8 @@ package filters import ( "strconv" + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" "github.com/DataDog/datadog-agent/pkg/trace/config" - "github.com/DataDog/datadog-agent/pkg/trace/pb" ) // Replacer is a filter which replaces tag values based on its diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/trace/info/info.go b/vendor/github.com/DataDog/datadog-agent/pkg/trace/info/info.go index 5cb131553..03fd9145e 100644 --- a/vendor/github.com/DataDog/datadog-agent/pkg/trace/info/info.go +++ b/vendor/github.com/DataDog/datadog-agent/pkg/trace/info/info.go @@ -26,7 +26,7 @@ import ( var ( infoMu sync.RWMutex - receiverStats []TagStats // only for the last minute + receiverStats = []TagStats{} // only for the last minute languages []string // TODO: move from package globals to a clean single struct diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/trace/pb/agent_payload.pb.go b/vendor/github.com/DataDog/datadog-agent/pkg/trace/pb/agent_payload.pb.go deleted file mode 100644 index 9aa335dd0..000000000 --- a/vendor/github.com/DataDog/datadog-agent/pkg/trace/pb/agent_payload.pb.go +++ /dev/null @@ -1,238 +0,0 @@ -// protoc -I. 
-I$GOPATH/src --gogofaster_out=. span.proto tracer_payload.proto agent_payload.proto - -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.28.1 -// protoc v3.21.5 -// source: agent_payload.proto - -package pb - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// AgentPayload represents payload the agent sends to the intake. -type AgentPayload struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // hostName specifies hostname of where the agent is running. - HostName string `protobuf:"bytes,1,opt,name=hostName,proto3" json:"hostName,omitempty"` - // env specifies `env` set in agent configuration. - Env string `protobuf:"bytes,2,opt,name=env,proto3" json:"env,omitempty"` - // tracerPayloads specifies list of the payloads received from tracers. - TracerPayloads []*TracerPayload `protobuf:"bytes,5,rep,name=tracerPayloads,proto3" json:"tracerPayloads,omitempty"` - // tags specifies tags common in all `tracerPayloads`. - Tags map[string]string `protobuf:"bytes,6,rep,name=tags,proto3" json:"tags,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - // agentVersion specifies version of the agent. - AgentVersion string `protobuf:"bytes,7,opt,name=agentVersion,proto3" json:"agentVersion,omitempty"` - // targetTPS holds `TargetTPS` value in AgentConfig. - TargetTPS float64 `protobuf:"fixed64,8,opt,name=targetTPS,proto3" json:"targetTPS,omitempty"` - // errorTPS holds `ErrorTPS` value in AgentConfig. - ErrorTPS float64 `protobuf:"fixed64,9,opt,name=errorTPS,proto3" json:"errorTPS,omitempty"` - // rareSamplerEnabled holds `RareSamplerEnabled` value in AgentConfig - RareSamplerEnabled bool `protobuf:"varint,10,opt,name=rareSamplerEnabled,proto3" json:"rareSamplerEnabled,omitempty"` -} - -func (x *AgentPayload) Reset() { - *x = AgentPayload{} - if protoimpl.UnsafeEnabled { - mi := &file_agent_payload_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *AgentPayload) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*AgentPayload) ProtoMessage() {} - -func (x *AgentPayload) ProtoReflect() protoreflect.Message { - mi := &file_agent_payload_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use AgentPayload.ProtoReflect.Descriptor instead. 
-func (*AgentPayload) Descriptor() ([]byte, []int) { - return file_agent_payload_proto_rawDescGZIP(), []int{0} -} - -func (x *AgentPayload) GetHostName() string { - if x != nil { - return x.HostName - } - return "" -} - -func (x *AgentPayload) GetEnv() string { - if x != nil { - return x.Env - } - return "" -} - -func (x *AgentPayload) GetTracerPayloads() []*TracerPayload { - if x != nil { - return x.TracerPayloads - } - return nil -} - -func (x *AgentPayload) GetTags() map[string]string { - if x != nil { - return x.Tags - } - return nil -} - -func (x *AgentPayload) GetAgentVersion() string { - if x != nil { - return x.AgentVersion - } - return "" -} - -func (x *AgentPayload) GetTargetTPS() float64 { - if x != nil { - return x.TargetTPS - } - return 0 -} - -func (x *AgentPayload) GetErrorTPS() float64 { - if x != nil { - return x.ErrorTPS - } - return 0 -} - -func (x *AgentPayload) GetRareSamplerEnabled() bool { - if x != nil { - return x.RareSamplerEnabled - } - return false -} - -var File_agent_payload_proto protoreflect.FileDescriptor - -var file_agent_payload_proto_rawDesc = []byte{ - 0x0a, 0x13, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x5f, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x02, 0x70, 0x62, 0x1a, 0x14, 0x74, 0x72, 0x61, 0x63, 0x65, - 0x72, 0x5f, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, - 0xee, 0x02, 0x0a, 0x0c, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, - 0x12, 0x1a, 0x0a, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x10, 0x0a, 0x03, - 0x65, 0x6e, 0x76, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x65, 0x6e, 0x76, 0x12, 0x39, - 0x0a, 0x0e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x72, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x73, - 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x70, 0x62, 0x2e, 0x54, 0x72, 0x61, 0x63, - 0x65, 0x72, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x0e, 0x74, 0x72, 0x61, 0x63, 0x65, - 0x72, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x73, 0x12, 0x2e, 0x0a, 0x04, 0x74, 0x61, 0x67, - 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x70, 0x62, 0x2e, 0x41, 0x67, 0x65, - 0x6e, 0x74, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x54, 0x61, 0x67, 0x73, 0x45, 0x6e, - 0x74, 0x72, 0x79, 0x52, 0x04, 0x74, 0x61, 0x67, 0x73, 0x12, 0x22, 0x0a, 0x0c, 0x61, 0x67, 0x65, - 0x6e, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0c, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1c, 0x0a, - 0x09, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x50, 0x53, 0x18, 0x08, 0x20, 0x01, 0x28, 0x01, - 0x52, 0x09, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x50, 0x53, 0x12, 0x1a, 0x0a, 0x08, 0x65, - 0x72, 0x72, 0x6f, 0x72, 0x54, 0x50, 0x53, 0x18, 0x09, 0x20, 0x01, 0x28, 0x01, 0x52, 0x08, 0x65, - 0x72, 0x72, 0x6f, 0x72, 0x54, 0x50, 0x53, 0x12, 0x2e, 0x0a, 0x12, 0x72, 0x61, 0x72, 0x65, 0x53, - 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x72, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x0a, 0x20, - 0x01, 0x28, 0x08, 0x52, 0x12, 0x72, 0x61, 0x72, 0x65, 0x53, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x72, - 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x1a, 0x37, 0x0a, 0x09, 0x54, 0x61, 0x67, 0x73, 0x45, - 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 
- 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, - 0x42, 0x2f, 0x5a, 0x2d, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x44, - 0x61, 0x74, 0x61, 0x44, 0x6f, 0x67, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2d, 0x61, - 0x67, 0x65, 0x6e, 0x74, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2f, 0x70, - 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_agent_payload_proto_rawDescOnce sync.Once - file_agent_payload_proto_rawDescData = file_agent_payload_proto_rawDesc -) - -func file_agent_payload_proto_rawDescGZIP() []byte { - file_agent_payload_proto_rawDescOnce.Do(func() { - file_agent_payload_proto_rawDescData = protoimpl.X.CompressGZIP(file_agent_payload_proto_rawDescData) - }) - return file_agent_payload_proto_rawDescData -} - -var file_agent_payload_proto_msgTypes = make([]protoimpl.MessageInfo, 2) -var file_agent_payload_proto_goTypes = []interface{}{ - (*AgentPayload)(nil), // 0: pb.AgentPayload - nil, // 1: pb.AgentPayload.TagsEntry - (*TracerPayload)(nil), // 2: pb.TracerPayload -} -var file_agent_payload_proto_depIdxs = []int32{ - 2, // 0: pb.AgentPayload.tracerPayloads:type_name -> pb.TracerPayload - 1, // 1: pb.AgentPayload.tags:type_name -> pb.AgentPayload.TagsEntry - 2, // [2:2] is the sub-list for method output_type - 2, // [2:2] is the sub-list for method input_type - 2, // [2:2] is the sub-list for extension type_name - 2, // [2:2] is the sub-list for extension extendee - 0, // [0:2] is the sub-list for field type_name -} - -func init() { file_agent_payload_proto_init() } -func file_agent_payload_proto_init() { - if File_agent_payload_proto != nil { - return - } - file_tracer_payload_proto_init() - if !protoimpl.UnsafeEnabled { - file_agent_payload_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AgentPayload); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_agent_payload_proto_rawDesc, - NumEnums: 0, - NumMessages: 2, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_agent_payload_proto_goTypes, - DependencyIndexes: file_agent_payload_proto_depIdxs, - MessageInfos: file_agent_payload_proto_msgTypes, - }.Build() - File_agent_payload_proto = out.File - file_agent_payload_proto_rawDesc = nil - file_agent_payload_proto_goTypes = nil - file_agent_payload_proto_depIdxs = nil -} diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/trace/pb/agent_payload.proto b/vendor/github.com/DataDog/datadog-agent/pkg/trace/pb/agent_payload.proto deleted file mode 100644 index 3d0f4f247..000000000 --- a/vendor/github.com/DataDog/datadog-agent/pkg/trace/pb/agent_payload.proto +++ /dev/null @@ -1,28 +0,0 @@ -// protoc -I. -I$GOPATH/src --gogofaster_out=. span.proto tracer_payload.proto agent_payload.proto - -syntax = "proto3"; - -package pb; -option go_package = "github.com/DataDog/datadog-agent/pkg/trace/pb"; - -import "tracer_payload.proto"; - -// AgentPayload represents payload the agent sends to the intake. -message AgentPayload { - // hostName specifies hostname of where the agent is running. - string hostName = 1; - // env specifies `env` set in agent configuration. - string env = 2; - // tracerPayloads specifies list of the payloads received from tracers. 
- repeated TracerPayload tracerPayloads = 5; - // tags specifies tags common in all `tracerPayloads`. - map tags = 6; - // agentVersion specifies version of the agent. - string agentVersion = 7; - // targetTPS holds `TargetTPS` value in AgentConfig. - double targetTPS = 8; - // errorTPS holds `ErrorTPS` value in AgentConfig. - double errorTPS = 9; - // rareSamplerEnabled holds `RareSamplerEnabled` value in AgentConfig - bool rareSamplerEnabled = 10; -} diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/trace/pb/agent_payload_vtproto.pb.go b/vendor/github.com/DataDog/datadog-agent/pkg/trace/pb/agent_payload_vtproto.pb.go deleted file mode 100644 index 36ca03fe8..000000000 --- a/vendor/github.com/DataDog/datadog-agent/pkg/trace/pb/agent_payload_vtproto.pb.go +++ /dev/null @@ -1,523 +0,0 @@ -// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. -// protoc-gen-go-vtproto version: v0.4.0 -// source: agent_payload.proto - -package pb - -import ( - binary "encoding/binary" - fmt "fmt" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - io "io" - math "math" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -func (m *AgentPayload) MarshalVT() (dAtA []byte, err error) { - if m == nil { - return nil, nil - } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AgentPayload) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) -} - -func (m *AgentPayload) MarshalToSizedBufferVT(dAtA []byte) (int, error) { - if m == nil { - return 0, nil - } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) - } - if m.RareSamplerEnabled { - i-- - if m.RareSamplerEnabled { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x50 - } - if m.ErrorTPS != 0 { - i -= 8 - binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.ErrorTPS)))) - i-- - dAtA[i] = 0x49 - } - if m.TargetTPS != 0 { - i -= 8 - binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.TargetTPS)))) - i-- - dAtA[i] = 0x41 - } - if len(m.AgentVersion) > 0 { - i -= len(m.AgentVersion) - copy(dAtA[i:], m.AgentVersion) - i = encodeVarint(dAtA, i, uint64(len(m.AgentVersion))) - i-- - dAtA[i] = 0x3a - } - if len(m.Tags) > 0 { - for k := range m.Tags { - v := m.Tags[k] - baseI := i - i -= len(v) - copy(dAtA[i:], v) - i = encodeVarint(dAtA, i, uint64(len(v))) - i-- - dAtA[i] = 0x12 - i -= len(k) - copy(dAtA[i:], k) - i = encodeVarint(dAtA, i, uint64(len(k))) - i-- - dAtA[i] = 0xa - i = encodeVarint(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x32 - } - } - if len(m.TracerPayloads) > 0 { - for iNdEx := len(m.TracerPayloads) - 1; iNdEx >= 0; iNdEx-- { - size, err := m.TracerPayloads[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x2a - } - } - if len(m.Env) > 0 { - i -= len(m.Env) - copy(dAtA[i:], m.Env) - i = encodeVarint(dAtA, i, uint64(len(m.Env))) - i-- - dAtA[i] = 0x12 - } - if len(m.HostName) > 0 { - i -= len(m.HostName) - copy(dAtA[i:], m.HostName) - i = encodeVarint(dAtA, i, uint64(len(m.HostName))) - i-- - dAtA[i] = 0xa - } 
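// The marshaler above fills dAtA back-to-front: the buffer is presized via
// SizeVT, fields are emitted in descending field-number order from the end
// of the slice toward index 0, and the function finally returns the number
// of bytes actually written, len(dAtA) - i.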
- return len(dAtA) - i, nil -} - -func (m *AgentPayload) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.HostName) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - l = len(m.Env) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - if len(m.TracerPayloads) > 0 { - for _, e := range m.TracerPayloads { - l = e.SizeVT() - n += 1 + l + sov(uint64(l)) - } - } - if len(m.Tags) > 0 { - for k, v := range m.Tags { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sov(uint64(len(k))) + 1 + len(v) + sov(uint64(len(v))) - n += mapEntrySize + 1 + sov(uint64(mapEntrySize)) - } - } - l = len(m.AgentVersion) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - if m.TargetTPS != 0 { - n += 9 - } - if m.ErrorTPS != 0 { - n += 9 - } - if m.RareSamplerEnabled { - n += 2 - } - n += len(m.unknownFields) - return n -} - -func (m *AgentPayload) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AgentPayload: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AgentPayload: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field HostName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.HostName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Env = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TracerPayloads", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.TracerPayloads = append(m.TracerPayloads, &TracerPayload{}) - if err := m.TracerPayloads[len(m.TracerPayloads)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - 
return err - } - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Tags", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Tags == nil { - m.Tags = make(map[string]string) - } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLength - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLength - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLength - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return ErrInvalidLength - } - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.Tags[mapkey] = mapvalue - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AgentVersion", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.AgentVersion = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 8: - if wireType != 1 { - return fmt.Errorf("proto: wrong wireType = %d for field TargetTPS", wireType) - } - var v uint64 - if (iNdEx + 8) > l { - return io.ErrUnexpectedEOF - } 
- v = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:])) - iNdEx += 8 - m.TargetTPS = float64(math.Float64frombits(v)) - case 9: - if wireType != 1 { - return fmt.Errorf("proto: wrong wireType = %d for field ErrorTPS", wireType) - } - var v uint64 - if (iNdEx + 8) > l { - return io.ErrUnexpectedEOF - } - v = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:])) - iNdEx += 8 - m.ErrorTPS = float64(math.Float64frombits(v)) - case 10: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field RareSamplerEnabled", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.RareSamplerEnabled = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/trace/pb/decoder_bytes.go b/vendor/github.com/DataDog/datadog-agent/pkg/trace/pb/decoder_bytes.go deleted file mode 100644 index 0e1feacde..000000000 --- a/vendor/github.com/DataDog/datadog-agent/pkg/trace/pb/decoder_bytes.go +++ /dev/null @@ -1,272 +0,0 @@ -// Unless explicitly stated otherwise all files in this repository are licensed -// under the Apache License Version 2.0. -// This product includes software developed at Datadog (https://www.datadoghq.com/). -// Copyright 2016-present Datadog, Inc. - -package pb - -import ( - "bytes" - "errors" - "math" - "strings" - "unicode/utf8" - - "github.com/tinylib/msgp/msgp" -) - -// repairUTF8 ensures all characters in s are UTF-8 by replacing non-UTF-8 characters -// with the replacement char � -func repairUTF8(s string) string { - in := strings.NewReader(s) - var out bytes.Buffer - out.Grow(len(s)) - - for { - r, _, err := in.ReadRune() - if err != nil { - // note: by contract, if `in` contains non-valid utf-8, no error is returned. Rather the utf-8 replacement - // character is returned. Therefore, the only error should usually be io.EOF indicating end of string. - // If any other error is returned by chance, we quit as well, outputting whatever part of the string we - // had already constructed. - return out.String() - } - out.WriteRune(r) - } -} - -// parseStringBytes reads the next type in the msgpack payload and -// converts the BinType or the StrType in a valid string. 
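A condensed, self-contained sketch of the pattern this helper implements, using the same msgp calls (readFlexibleString is an illustrative name, not part of this package; strings.ToValidUTF8 stands in for repairUTF8, though it may collapse a run of invalid bytes into a single replacement rune):

package main

import (
	"fmt"
	"strings"
	"unicode/utf8"

	"github.com/tinylib/msgp/msgp"
)

// readFlexibleString accepts either a msgpack str or bin value and
// guarantees the result is valid UTF-8, mirroring parseStringBytes.
func readFlexibleString(bts []byte) (string, []byte, error) {
	var raw []byte
	var err error
	switch t := msgp.NextType(bts); t {
	case msgp.BinType:
		raw, bts, err = msgp.ReadBytesZC(bts)
	case msgp.StrType:
		raw, bts, err = msgp.ReadStringZC(bts)
	default:
		return "", bts, msgp.TypeError{Encoded: t, Method: msgp.StrType}
	}
	if err != nil {
		return "", bts, err
	}
	if utf8.Valid(raw) {
		return string(raw), bts, nil
	}
	return strings.ToValidUTF8(string(raw), "\uFFFD"), bts, nil
}

func main() {
	b := msgp.AppendBytes(nil, []byte("GET /users")) // bin, not str
	s, _, err := readFlexibleString(b)
	fmt.Printf("%q %v\n", s, err) // "GET /users" <nil>
}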
-func parseStringBytes(bts []byte) (string, []byte, error) { - if msgp.IsNil(bts) { - bts, err := msgp.ReadNilBytes(bts) - return "", bts, err - } - // read the generic representation type without decoding - t := msgp.NextType(bts) - - var ( - err error - i []byte - ) - switch t { - case msgp.BinType: - i, bts, err = msgp.ReadBytesZC(bts) - case msgp.StrType: - i, bts, err = msgp.ReadStringZC(bts) - default: - return "", bts, msgp.TypeError{Encoded: t, Method: msgp.StrType} - } - if err != nil { - return "", bts, err - } - if utf8.Valid(i) { - return string(i), bts, nil - } - return repairUTF8(msgp.UnsafeString(i)), bts, nil -} - -// parseFloat64Bytes parses a float64 even if the sent value is an int64 or an uint64; -// this is required because the encoding library could remove bytes from the encoded -// payload to reduce the size, if they're not needed. -func parseFloat64Bytes(bts []byte) (float64, []byte, error) { - if msgp.IsNil(bts) { - bts, err := msgp.ReadNilBytes(bts) - return 0, bts, err - } - // read the generic representation type without decoding - t := msgp.NextType(bts) - - var err error - switch t { - case msgp.IntType: - var i int64 - i, bts, err = msgp.ReadInt64Bytes(bts) - if err != nil { - return 0, bts, err - } - - return float64(i), bts, nil - case msgp.UintType: - var i uint64 - i, bts, err = msgp.ReadUint64Bytes(bts) - if err != nil { - return 0, bts, err - } - - return float64(i), bts, nil - case msgp.Float64Type: - var f float64 - f, bts, err = msgp.ReadFloat64Bytes(bts) - if err != nil { - return 0, bts, err - } - - return f, bts, nil - default: - return 0, bts, msgp.TypeError{Encoded: t, Method: msgp.Float64Type} - } -} - -// cast to int64 values that are int64 but that are sent in uint64 -// over the wire. Set to 0 if they overflow the MaxInt64 size. This -// cast should be used ONLY while decoding int64 values that are -// sent as uint64 to reduce the payload size, otherwise the approach -// is not correct in the general sense. -func castInt64(v uint64) (int64, bool) { - if v > math.MaxInt64 { - return 0, false - } - return int64(v), true -} - -// parseInt64Bytes parses an int64 even if the sent value is an uint64; -// this is required because the encoding library could remove bytes from the encoded -// payload to reduce the size, if they're not needed. -func parseInt64Bytes(bts []byte) (int64, []byte, error) { - if msgp.IsNil(bts) { - bts, err := msgp.ReadNilBytes(bts) - return 0, bts, err - } - // read the generic representation type without decoding - t := msgp.NextType(bts) - - var ( - i int64 - u uint64 - err error - ) - switch t { - case msgp.IntType: - i, bts, err = msgp.ReadInt64Bytes(bts) - if err != nil { - return 0, bts, err - } - return i, bts, nil - case msgp.UintType: - u, bts, err = msgp.ReadUint64Bytes(bts) - if err != nil { - return 0, bts, err - } - - // force-cast - i, ok := castInt64(u) - if !ok { - return 0, bts, errors.New("found uint64, overflows int64") - } - return i, bts, nil - default: - return 0, bts, msgp.TypeError{Encoded: t, Method: msgp.IntType} - } -} - -// parseUint64Bytes parses an uint64 even if the sent value is an int64; -// this is required because the language used for the encoding library -// may not have unsigned types. 
An example is early version of Java -// (and so JRuby interpreter) that encodes uint64 as int64: -// http://docs.oracle.com/javase/tutorial/java/nutsandbolts/datatypes.html -func parseUint64Bytes(bts []byte) (uint64, []byte, error) { - if msgp.IsNil(bts) { - bts, err := msgp.ReadNilBytes(bts) - return 0, bts, err - } - // read the generic representation type without decoding - t := msgp.NextType(bts) - - var ( - i int64 - u uint64 - err error - ) - switch t { - case msgp.UintType: - u, bts, err = msgp.ReadUint64Bytes(bts) - if err != nil { - return 0, bts, err - } - return u, bts, err - case msgp.IntType: - i, bts, err = msgp.ReadInt64Bytes(bts) - if err != nil { - return 0, bts, err - } - return uint64(i), bts, nil - default: - return 0, bts, msgp.TypeError{Encoded: t, Method: msgp.IntType} - } -} - -// cast to int32 values that are int32 but that are sent in uint32 -// over the wire. Set to 0 if they overflow the MaxInt32 size. This -// cast should be used ONLY while decoding int32 values that are -// sent as uint32 to reduce the payload size, otherwise the approach -// is not correct in the general sense. -func castInt32(v uint32) (int32, bool) { - if v > math.MaxInt32 { - return 0, false - } - return int32(v), true -} - -// parseInt32Bytes parses an int32 even if the sent value is an uint32; -// this is required because the encoding library could remove bytes from the encoded -// payload to reduce the size, if they're not needed. -func parseInt32Bytes(bts []byte) (int32, []byte, error) { - if msgp.IsNil(bts) { - bts, err := msgp.ReadNilBytes(bts) - return 0, bts, err - } - // read the generic representation type without decoding - t := msgp.NextType(bts) - - var ( - i int32 - u uint32 - err error - ) - switch t { - case msgp.IntType: - i, bts, err = msgp.ReadInt32Bytes(bts) - if err != nil { - return 0, bts, err - } - return i, bts, nil - case msgp.UintType: - u, bts, err = msgp.ReadUint32Bytes(bts) - if err != nil { - return 0, bts, err - } - - // force-cast - i, ok := castInt32(u) - if !ok { - return 0, bts, errors.New("found uint32, overflows int32") - } - return i, bts, nil - default: - return 0, bts, msgp.TypeError{Encoded: t, Method: msgp.IntType} - } -} - -// parseBytes reads the next BinType in the msgpack payload. -func parseBytes(bts []byte) ([]byte, []byte, error) { - if msgp.IsNil(bts) { - bts, err := msgp.ReadNilBytes(bts) - return nil, bts, err - } - // read the generic representation type without decoding - t := msgp.NextType(bts) - - switch t { - case msgp.BinType: - unsafeBytes, bts, err := msgp.ReadBytesZC(bts) - if err != nil { - return nil, bts, err - } - safeBytes := make([]byte, len(unsafeBytes)) - copy(safeBytes, unsafeBytes) - return safeBytes, bts, nil - default: - return nil, bts, msgp.TypeError{Encoded: t, Method: msgp.BinType} - } -} diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/trace/pb/decoder_v05.go b/vendor/github.com/DataDog/datadog-agent/pkg/trace/pb/decoder_v05.go deleted file mode 100644 index 01ffae1bd..000000000 --- a/vendor/github.com/DataDog/datadog-agent/pkg/trace/pb/decoder_v05.go +++ /dev/null @@ -1,220 +0,0 @@ -// Unless explicitly stated otherwise all files in this repository are licensed -// under the Apache License Version 2.0. -// This product includes software developed at Datadog (https://www.datadoghq.com/). -// Copyright 2016-present Datadog, Inc. 
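The numeric helpers in decoder_bytes.go above all share one shape: peek at the next msgpack type with msgp.NextType, accept whichever integer or float encoding the sender chose to shrink the payload, and normalize to the target Go type, rejecting anything else with a TypeError. A minimal sketch of that shape (readFlexibleFloat64 is an illustrative name, not part of this package):

package main

import (
	"fmt"

	"github.com/tinylib/msgp/msgp"
)

// readFlexibleFloat64 normalizes int64-, uint64-, or float64-encoded
// values to float64, as parseFloat64Bytes does.
func readFlexibleFloat64(bts []byte) (float64, []byte, error) {
	switch t := msgp.NextType(bts); t {
	case msgp.IntType:
		i, o, err := msgp.ReadInt64Bytes(bts)
		return float64(i), o, err
	case msgp.UintType:
		u, o, err := msgp.ReadUint64Bytes(bts)
		return float64(u), o, err
	case msgp.Float64Type:
		return msgp.ReadFloat64Bytes(bts)
	default:
		return 0, bts, msgp.TypeError{Encoded: t, Method: msgp.Float64Type}
	}
}

func main() {
	// 3 fits in one positive-fixint byte, so AppendInt64 shrinks it;
	// the reader still recovers it as a float64.
	b := msgp.AppendInt64(nil, 3)
	f, _, err := readFlexibleFloat64(b)
	fmt.Println(f, err) // 3 <nil>
}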
- -package pb - -import ( - "errors" - "fmt" - - "github.com/tinylib/msgp/msgp" -) - -// dictionaryString reads an int from decoder dc and returns the string -// at that index from dict. -func dictionaryString(bts []byte, dict []string) (string, []byte, error) { - var ( - ui uint32 - err error - ) - ui, bts, err = msgp.ReadUint32Bytes(bts) - if err != nil { - return "", bts, err - } - idx := int(ui) - if idx >= len(dict) { - return "", bts, fmt.Errorf("dictionary index %d out of range", idx) - } - return dict[idx], bts, nil -} - -// UnmarshalMsgDictionary decodes a trace using the specification from the v0.5 endpoint. -// For details, see the documentation for endpoint v0.5 in pkg/trace/api/version.go -func (t *Traces) UnmarshalMsgDictionary(bts []byte) error { - var err error - if _, bts, err = msgp.ReadArrayHeaderBytes(bts); err != nil { - return err - } - // read dictionary - var sz uint32 - if sz, bts, err = msgp.ReadArrayHeaderBytes(bts); err != nil { - return err - } - if sz > 25*1e6 { // Dictionary can't be larger than 25 MB - return errors.New("too long payload") - } - dict := make([]string, sz) - for i := range dict { - var str string - str, bts, err = parseStringBytes(bts) - if err != nil { - return err - } - dict[i] = str - } - // read traces - sz, bts, err = msgp.ReadArrayHeaderBytes(bts) - if err != nil { - return err - } - if cap(*t) >= int(sz) { - *t = (*t)[:sz] - } else { - *t = make(Traces, sz) - } - for i := range *t { - sz, bts, err = msgp.ReadArrayHeaderBytes(bts) - if err != nil { - return err - } - if cap((*t)[i]) >= int(sz) { - (*t)[i] = (*t)[i][:sz] - } else { - (*t)[i] = make(Trace, sz) - } - for j := range (*t)[i] { - if (*t)[i][j] == nil { - (*t)[i][j] = new(Span) - } - if bts, err = (*t)[i][j].UnmarshalMsgDictionary(bts, dict); err != nil { - return err - } - } - } - return nil -} - -// spanPropertyCount specifies the number of top-level properties that a span -// has. -const spanPropertyCount = 12 - -// UnmarshalMsgDictionary decodes a span from the given decoder dc, looking up strings -// in the given dictionary dict. 
For details, see the documentation for endpoint v0.5 -// in pkg/trace/api/version.go -func (z *Span) UnmarshalMsgDictionary(bts []byte, dict []string) ([]byte, error) { - var ( - sz uint32 - err error - ) - sz, bts, err = msgp.ReadArrayHeaderBytes(bts) - if err != nil { - return bts, err - } - if sz != spanPropertyCount { - return bts, errors.New("encoded span needs exactly 12 elements in array") - } - // Service (0) - z.Service, bts, err = dictionaryString(bts, dict) - if err != nil { - return bts, err - } - // Name (1) - z.Name, bts, err = dictionaryString(bts, dict) - if err != nil { - return bts, err - } - // Resource (2) - z.Resource, bts, err = dictionaryString(bts, dict) - if err != nil { - return bts, err - } - // TraceID (3) - z.TraceID, bts, err = parseUint64Bytes(bts) - if err != nil { - return bts, err - } - // SpanID (4) - z.SpanID, bts, err = parseUint64Bytes(bts) - if err != nil { - return bts, err - } - // ParentID (5) - z.ParentID, bts, err = parseUint64Bytes(bts) - if err != nil { - return bts, err - } - // Start (6) - z.Start, bts, err = parseInt64Bytes(bts) - if err != nil { - return bts, err - } - // Duration (7) - z.Duration, bts, err = parseInt64Bytes(bts) - if err != nil { - return bts, err - } - // Error (8) - z.Error, bts, err = parseInt32Bytes(bts) - if err != nil { - return bts, err - } - // Meta (9) - sz, bts, err = msgp.ReadMapHeaderBytes(bts) - if err != nil { - return bts, err - } - if sz > 25*1e6 { // Dictionary can't be larger than 25 MB - return bts, errors.New("too long payload") - } - if z.Meta == nil && sz > 0 { - z.Meta = make(map[string]string, sz) - } else if len(z.Meta) > 0 { - for key := range z.Meta { - delete(z.Meta, key) - } - } - hook, hookok := MetaHook() - for sz > 0 { - sz-- - var key, val string - key, bts, err = dictionaryString(bts, dict) - if err != nil { - return bts, err - } - val, bts, err = dictionaryString(bts, dict) - if err != nil { - return bts, err - } - if hookok { - z.Meta[key] = hook(key, val) - } else { - z.Meta[key] = val - } - } - // Metrics (10) - sz, bts, err = msgp.ReadMapHeaderBytes(bts) - if err != nil { - return bts, err - } - if z.Metrics == nil && sz > 0 { - z.Metrics = make(map[string]float64, sz) - } else if len(z.Metrics) > 0 { - for key := range z.Metrics { - delete(z.Metrics, key) - } - } - for sz > 0 { - sz-- - var ( - key string - val float64 - ) - key, bts, err = dictionaryString(bts, dict) - if err != nil { - return bts, err - } - val, bts, err = parseFloat64Bytes(bts) - if err != nil { - return bts, err - } - z.Metrics[key] = val - } - // Type (11) - z.Type, bts, err = dictionaryString(bts, dict) - if err != nil { - return bts, err - } - return bts, nil -} diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/trace/pb/doc.go b/vendor/github.com/DataDog/datadog-agent/pkg/trace/pb/doc.go deleted file mode 100644 index c999287de..000000000 --- a/vendor/github.com/DataDog/datadog-agent/pkg/trace/pb/doc.go +++ /dev/null @@ -1,12 +0,0 @@ -// Unless explicitly stated otherwise all files in this repository are licensed -// under the Apache License Version 2.0. -// This product includes software developed at Datadog (https://www.datadoghq.com/). -// Copyright 2016-present Datadog, Inc. - -// Package pb contains the data structures used by the trace agent to communicate -// with tracers and the Datadog API. Note that the "//go:generate" directives from this -// package were removed because the generated files were manually edited to create -// adaptions (see decoder.go). 
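Concretely, a v0.5 payload is a 2-element msgpack array: a shared string table followed by the traces, where every string a span needs (service, name, resource, type, and each meta key/value) is replaced by a uint32 index into the table, and every span is flattened to exactly spanPropertyCount (12) positional elements. A hand-built payload in that shape, as a sketch with illustrative values:

package main

import (
	"fmt"

	"github.com/tinylib/msgp/msgp"
)

func main() {
	// Shared string table; spans refer to entries by index.
	dict := []string{"my-service", "http.request", "GET /users", "web"}

	o := msgp.AppendArrayHeader(nil, 2) // [dictionary, traces]
	o = msgp.AppendArrayHeader(o, uint32(len(dict)))
	for _, s := range dict {
		o = msgp.AppendString(o, s)
	}

	o = msgp.AppendArrayHeader(o, 1)  // one trace ...
	o = msgp.AppendArrayHeader(o, 1)  // ... holding one span
	o = msgp.AppendArrayHeader(o, 12) // spanPropertyCount elements:
	o = msgp.AppendUint32(o, 0)       //  0 service  -> dict[0]
	o = msgp.AppendUint32(o, 1)       //  1 name     -> dict[1]
	o = msgp.AppendUint32(o, 2)       //  2 resource -> dict[2]
	o = msgp.AppendUint64(o, 123)     //  3 traceID
	o = msgp.AppendUint64(o, 456)     //  4 spanID
	o = msgp.AppendUint64(o, 0)       //  5 parentID
	o = msgp.AppendInt64(o, 1692086400000000000) // 6 start (ns)
	o = msgp.AppendInt64(o, 1500000)  //  7 duration (ns)
	o = msgp.AppendInt32(o, 0)        //  8 error
	o = msgp.AppendMapHeader(o, 0)    //  9 meta (index->index pairs)
	o = msgp.AppendMapHeader(o, 0)    // 10 metrics (index->float64)
	o = msgp.AppendUint32(o, 3)       // 11 type -> dict[3] ("web")

	fmt.Println(len(o), "bytes")
}

Feeding this buffer to (*Traces).UnmarshalMsgDictionary should yield the span back, since the tolerant parse* helpers accept the compact integer encodings msgp picks here.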
-// -// TODO: eventually move this to https://github.com/DataDog/agent-payload/v5 -package pb diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/trace/pb/generate.sh b/vendor/github.com/DataDog/datadog-agent/pkg/trace/pb/generate.sh deleted file mode 100644 index ed0d1388c..000000000 --- a/vendor/github.com/DataDog/datadog-agent/pkg/trace/pb/generate.sh +++ /dev/null @@ -1,8 +0,0 @@ -#!/usr/bin/env bash - - -protoc -I. --go_out=paths=source_relative:. --go-vtproto_out=paths=source_relative:. --go-vtproto_opt=features=marshal+unmarshal+size span.proto tracer_payload.proto agent_payload.proto stats.proto -protoc-go-inject-tag -input=span.pb.go -protoc-go-inject-tag -input=tracer_payload.pb.go -protoc-go-inject-tag -input=agent_payload.pb.go - diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/trace/pb/hook.go b/vendor/github.com/DataDog/datadog-agent/pkg/trace/pb/hook.go deleted file mode 100644 index ef10f015a..000000000 --- a/vendor/github.com/DataDog/datadog-agent/pkg/trace/pb/hook.go +++ /dev/null @@ -1,33 +0,0 @@ -// Unless explicitly stated otherwise all files in this repository are licensed -// under the Apache License Version 2.0. -// This product includes software developed at Datadog (https://www.datadoghq.com/). -// Copyright 2016-present Datadog, Inc. - -package pb - -import ( - "sync" -) - -var ( - mu sync.RWMutex // guards metahook - metahook func(_, v string) string -) - -// SetMetaHook registers a callback which will run upon decoding each map -// entry in the span's Meta field. The hook has the opportunity to alter the -// value that is assigned to span.Meta[k] at decode time. By default, if no -// hook is defined, the behaviour is span.Meta[k] = v. -func SetMetaHook(hook func(k, v string) string) { - mu.Lock() - defer mu.Unlock() - metahook = hook -} - -// MetaHook returns the active meta hook. A MetaHook is a function which is ran -// for each span.Meta[k] = v value and has the opportunity to alter the final v. -func MetaHook() (hook func(k, v string) string, ok bool) { - mu.RLock() - defer mu.RUnlock() - return metahook, metahook != nil -} diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/trace/pb/span.pb.go b/vendor/github.com/DataDog/datadog-agent/pkg/trace/pb/span.pb.go deleted file mode 100644 index 0d9607e45..000000000 --- a/vendor/github.com/DataDog/datadog-agent/pkg/trace/pb/span.pb.go +++ /dev/null @@ -1,305 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.28.1 -// protoc v3.21.5 -// source: span.proto - -package pb - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -type Span struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // service is the name of the service with which this span is associated. - // @gotags: json:"service" msg:"service" - Service string `protobuf:"bytes,1,opt,name=service,proto3" json:"service" msg:"service"` - // name is the operation name of this span. 
- // @gotags: json:"name" msg:"name" - Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name" msg:"name"` - // resource is the resource name of this span, also sometimes called the endpoint (for web spans). - // @gotags: json:"resource" msg:"resource" - Resource string `protobuf:"bytes,3,opt,name=resource,proto3" json:"resource" msg:"resource"` - // traceID is the ID of the trace to which this span belongs. - // @gotags: json:"trace_id" msg:"trace_id" - TraceID uint64 `protobuf:"varint,4,opt,name=traceID,proto3" json:"trace_id" msg:"trace_id"` - // spanID is the ID of this span. - // @gotags: json:"span_id" msg:"span_id" - SpanID uint64 `protobuf:"varint,5,opt,name=spanID,proto3" json:"span_id" msg:"span_id"` - // parentID is the ID of this span's parent, or zero if this span has no parent. - // @gotags: json:"parent_id" msg:"parent_id" - ParentID uint64 `protobuf:"varint,6,opt,name=parentID,proto3" json:"parent_id" msg:"parent_id"` - // start is the number of nanoseconds between the Unix epoch and the beginning of this span. - // @gotags: json:"start" msg:"start" - Start int64 `protobuf:"varint,7,opt,name=start,proto3" json:"start" msg:"start"` - // duration is the time length of this span in nanoseconds. - // @gotags: json:"duration" msg:"duration" - Duration int64 `protobuf:"varint,8,opt,name=duration,proto3" json:"duration" msg:"duration"` - // error is 1 if there is an error associated with this span, or 0 if there is not. - // @gotags: json:"error" msg:"error" - Error int32 `protobuf:"varint,9,opt,name=error,proto3" json:"error" msg:"error"` - // meta is a mapping from tag name to tag value for string-valued tags. - // @gotags: json:"meta" msg:"meta" - Meta map[string]string `protobuf:"bytes,10,rep,name=meta,proto3" json:"meta" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3" msg:"meta"` - // metrics is a mapping from tag name to tag value for numeric-valued tags. - // @gotags: json:"metrics" msg:"metrics" - Metrics map[string]float64 `protobuf:"bytes,11,rep,name=metrics,proto3" json:"metrics" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"fixed64,2,opt,name=value,proto3" msg:"metrics"` - // type is the type of the service with which this span is associated. Example values: web, db, lambda. - // @gotags: json:"type" msg:"type" - Type string `protobuf:"bytes,12,opt,name=type,proto3" json:"type" msg:"type"` - // meta_struct is a registry of structured "other" data used by, e.g., AppSec. - // @gotags: json:"meta_struct,omitempty" msg:"meta_struct" - MetaStruct map[string][]byte `protobuf:"bytes,13,rep,name=meta_struct,json=metaStruct,proto3" json:"meta_struct,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3" msg:"meta_struct"` -} - -func (x *Span) Reset() { - *x = Span{} - if protoimpl.UnsafeEnabled { - mi := &file_span_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Span) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Span) ProtoMessage() {} - -func (x *Span) ProtoReflect() protoreflect.Message { - mi := &file_span_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Span.ProtoReflect.Descriptor instead. 
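The struct tags above (e.g. protobuf:"varint,4,opt,name=traceID") record each field's wire type and number. On the wire, every field starts with a tag byte, (fieldNumber << 3) | wireType, followed by a base-128 varint or a fixed-width value; the hand-rolled vtproto marshaler later in this patch hard-codes exactly these tag bytes. A standard-library sketch:

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// traceID is field 4 with wire type 0 (varint), so its tag byte is
	// (4<<3)|0 = 0x20 -- the same constant span_vtproto.pb.go writes.
	tag := byte(4<<3 | 0)

	// binary.PutUvarint emits the same base-128 varint encoding protobuf
	// uses: 300 becomes 0xac 0x02 (low 7 bits first, high bit = "more").
	buf := make([]byte, binary.MaxVarintLen64)
	n := binary.PutUvarint(buf, 300)

	fmt.Printf("tag=%#x varint=% x\n", tag, buf[:n]) // tag=0x20 varint=ac 02
}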
-func (*Span) Descriptor() ([]byte, []int) { - return file_span_proto_rawDescGZIP(), []int{0} -} - -func (x *Span) GetService() string { - if x != nil { - return x.Service - } - return "" -} - -func (x *Span) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *Span) GetResource() string { - if x != nil { - return x.Resource - } - return "" -} - -func (x *Span) GetTraceID() uint64 { - if x != nil { - return x.TraceID - } - return 0 -} - -func (x *Span) GetSpanID() uint64 { - if x != nil { - return x.SpanID - } - return 0 -} - -func (x *Span) GetParentID() uint64 { - if x != nil { - return x.ParentID - } - return 0 -} - -func (x *Span) GetStart() int64 { - if x != nil { - return x.Start - } - return 0 -} - -func (x *Span) GetDuration() int64 { - if x != nil { - return x.Duration - } - return 0 -} - -func (x *Span) GetError() int32 { - if x != nil { - return x.Error - } - return 0 -} - -func (x *Span) GetMeta() map[string]string { - if x != nil { - return x.Meta - } - return nil -} - -func (x *Span) GetMetrics() map[string]float64 { - if x != nil { - return x.Metrics - } - return nil -} - -func (x *Span) GetType() string { - if x != nil { - return x.Type - } - return "" -} - -func (x *Span) GetMetaStruct() map[string][]byte { - if x != nil { - return x.MetaStruct - } - return nil -} - -var File_span_proto protoreflect.FileDescriptor - -var file_span_proto_rawDesc = []byte{ - 0x0a, 0x0a, 0x73, 0x70, 0x61, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x02, 0x70, 0x62, - 0x22, 0xc2, 0x04, 0x0a, 0x04, 0x53, 0x70, 0x61, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x74, 0x72, 0x61, 0x63, 0x65, 0x49, 0x44, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x74, 0x72, 0x61, 0x63, 0x65, 0x49, 0x44, 0x12, 0x16, 0x0a, - 0x06, 0x73, 0x70, 0x61, 0x6e, 0x49, 0x44, 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x73, - 0x70, 0x61, 0x6e, 0x49, 0x44, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x49, - 0x44, 0x18, 0x06, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x49, - 0x44, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, - 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x75, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x64, 0x75, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x09, 0x20, 0x01, - 0x28, 0x05, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x26, 0x0a, 0x04, 0x6d, 0x65, 0x74, - 0x61, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x70, 0x61, - 0x6e, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x04, 0x6d, 0x65, 0x74, - 0x61, 0x12, 0x2f, 0x0a, 0x07, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x18, 0x0b, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x70, 0x61, 0x6e, 0x2e, 0x4d, 0x65, 0x74, - 0x72, 0x69, 0x63, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x6d, 0x65, 0x74, 0x72, 0x69, - 0x63, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x0c, 
0x20, 0x01, 0x28, 0x09, - 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x39, 0x0a, 0x0b, 0x6d, 0x65, 0x74, 0x61, 0x5f, 0x73, - 0x74, 0x72, 0x75, 0x63, 0x74, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x70, 0x62, - 0x2e, 0x53, 0x70, 0x61, 0x6e, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, - 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x6d, 0x65, 0x74, 0x61, 0x53, 0x74, 0x72, 0x75, 0x63, - 0x74, 0x1a, 0x37, 0x0a, 0x09, 0x4d, 0x65, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, - 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, - 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x3a, 0x0a, 0x0c, 0x4d, 0x65, - 0x74, 0x72, 0x69, 0x63, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, - 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x3d, 0x0a, 0x0f, 0x4d, 0x65, 0x74, 0x61, 0x53, 0x74, - 0x72, 0x75, 0x63, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x2f, 0x5a, 0x2d, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, - 0x63, 0x6f, 0x6d, 0x2f, 0x44, 0x61, 0x74, 0x61, 0x44, 0x6f, 0x67, 0x2f, 0x64, 0x61, 0x74, 0x61, - 0x64, 0x6f, 0x67, 0x2d, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x74, 0x72, - 0x61, 0x63, 0x65, 0x2f, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_span_proto_rawDescOnce sync.Once - file_span_proto_rawDescData = file_span_proto_rawDesc -) - -func file_span_proto_rawDescGZIP() []byte { - file_span_proto_rawDescOnce.Do(func() { - file_span_proto_rawDescData = protoimpl.X.CompressGZIP(file_span_proto_rawDescData) - }) - return file_span_proto_rawDescData -} - -var file_span_proto_msgTypes = make([]protoimpl.MessageInfo, 4) -var file_span_proto_goTypes = []interface{}{ - (*Span)(nil), // 0: pb.Span - nil, // 1: pb.Span.MetaEntry - nil, // 2: pb.Span.MetricsEntry - nil, // 3: pb.Span.MetaStructEntry -} -var file_span_proto_depIdxs = []int32{ - 1, // 0: pb.Span.meta:type_name -> pb.Span.MetaEntry - 2, // 1: pb.Span.metrics:type_name -> pb.Span.MetricsEntry - 3, // 2: pb.Span.meta_struct:type_name -> pb.Span.MetaStructEntry - 3, // [3:3] is the sub-list for method output_type - 3, // [3:3] is the sub-list for method input_type - 3, // [3:3] is the sub-list for extension type_name - 3, // [3:3] is the sub-list for extension extendee - 0, // [0:3] is the sub-list for field type_name -} - -func init() { file_span_proto_init() } -func file_span_proto_init() { - if File_span_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_span_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Span); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_span_proto_rawDesc, - NumEnums: 0, - NumMessages: 4, - NumExtensions: 
0, - NumServices: 0, - }, - GoTypes: file_span_proto_goTypes, - DependencyIndexes: file_span_proto_depIdxs, - MessageInfos: file_span_proto_msgTypes, - }.Build() - File_span_proto = out.File - file_span_proto_rawDesc = nil - file_span_proto_goTypes = nil - file_span_proto_depIdxs = nil -} diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/trace/pb/span.proto b/vendor/github.com/DataDog/datadog-agent/pkg/trace/pb/span.proto deleted file mode 100644 index de10f5fa8..000000000 --- a/vendor/github.com/DataDog/datadog-agent/pkg/trace/pb/span.proto +++ /dev/null @@ -1,46 +0,0 @@ -syntax = "proto3"; - -package pb; -option go_package="github.com/DataDog/datadog-agent/pkg/trace/pb"; - -message Span { - // service is the name of the service with which this span is associated. - // @gotags: json:"service" msg:"service" - string service = 1; - // name is the operation name of this span. - // @gotags: json:"name" msg:"name" - string name = 2; - // resource is the resource name of this span, also sometimes called the endpoint (for web spans). - // @gotags: json:"resource" msg:"resource" - string resource = 3; - // traceID is the ID of the trace to which this span belongs. - // @gotags: json:"trace_id" msg:"trace_id" - uint64 traceID = 4; - // spanID is the ID of this span. - // @gotags: json:"span_id" msg:"span_id" - uint64 spanID = 5; - // parentID is the ID of this span's parent, or zero if this span has no parent. - // @gotags: json:"parent_id" msg:"parent_id" - uint64 parentID = 6; - // start is the number of nanoseconds between the Unix epoch and the beginning of this span. - // @gotags: json:"start" msg:"start" - int64 start = 7; - // duration is the time length of this span in nanoseconds. - // @gotags: json:"duration" msg:"duration" - int64 duration = 8; - // error is 1 if there is an error associated with this span, or 0 if there is not. - // @gotags: json:"error" msg:"error" - int32 error = 9; - // meta is a mapping from tag name to tag value for string-valued tags. - // @gotags: json:"meta" msg:"meta" - map meta = 10; - // metrics is a mapping from tag name to tag value for numeric-valued tags. - // @gotags: json:"metrics" msg:"metrics" - map metrics = 11; - // type is the type of the service with which this span is associated. Example values: web, db, lambda. - // @gotags: json:"type" msg:"type" - string type = 12; - // meta_struct is a registry of structured "other" data used by, e.g., AppSec. - // @gotags: json:"meta_struct,omitempty" msg:"meta_struct" - map meta_struct = 13; -} diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/trace/pb/span_gen.go b/vendor/github.com/DataDog/datadog-agent/pkg/trace/pb/span_gen.go deleted file mode 100644 index 950601153..000000000 --- a/vendor/github.com/DataDog/datadog-agent/pkg/trace/pb/span_gen.go +++ /dev/null @@ -1,299 +0,0 @@ -// Unless explicitly stated otherwise all files in this repository are licensed -// under the Apache License Version 2.0. -// This product includes software developed at Datadog (https://www.datadoghq.com/). -// Copyright 2016-present Datadog, Inc. - -package pb - -import ( - "github.com/tinylib/msgp/msgp" -) - -// This file is based on the code generated by tinylib/msgp but has been edited -// to add some features. If this needs to ge regenerated, be sure to port all -// the changes. 
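One of the hand additions this header warns about is the MetaHook support: the UnmarshalMsg implementation below consults MetaHook() for every decoded Meta entry, letting the agent rewrite tag values at decode time (hook.go above defines the registration API). A usage sketch, with an illustrative redaction hook:

package main

import (
	"fmt"
	"strings"

	"github.com/DataDog/datadog-agent/pkg/trace/pb"
)

func main() {
	// The registered hook runs on every span.Meta[k] = v assignment made
	// by the hand-edited decoders; redacting SQL text is illustrative.
	pb.SetMetaHook(func(k, v string) string {
		if strings.HasPrefix(k, "sql.") {
			return "?"
		}
		return v
	})

	if hook, ok := pb.MetaHook(); ok {
		fmt.Println(hook("sql.query", "SELECT * FROM users")) // ?
	}
}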
- -// MarshalMsg implements msgp.Marshaler -func (z *Span) MarshalMsg(b []byte) (o []byte, err error) { - o = msgp.Require(b, z.Msgsize()) - // map header, size 13 - // string "service" - o = append(o, 0x8d, 0xa7, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65) - o = msgp.AppendString(o, z.Service) - // string "name" - o = append(o, 0xa4, 0x6e, 0x61, 0x6d, 0x65) - o = msgp.AppendString(o, z.Name) - // string "resource" - o = append(o, 0xa8, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65) - o = msgp.AppendString(o, z.Resource) - // string "trace_id" - o = append(o, 0xa8, 0x74, 0x72, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64) - o = msgp.AppendUint64(o, z.TraceID) - // string "span_id" - o = append(o, 0xa7, 0x73, 0x70, 0x61, 0x6e, 0x5f, 0x69, 0x64) - o = msgp.AppendUint64(o, z.SpanID) - // string "parent_id" - o = append(o, 0xa9, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64) - o = msgp.AppendUint64(o, z.ParentID) - // string "start" - o = append(o, 0xa5, 0x73, 0x74, 0x61, 0x72, 0x74) - o = msgp.AppendInt64(o, z.Start) - // string "duration" - o = append(o, 0xa8, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e) - o = msgp.AppendInt64(o, z.Duration) - // string "error" - o = append(o, 0xa5, 0x65, 0x72, 0x72, 0x6f, 0x72) - o = msgp.AppendInt32(o, z.Error) - // string "meta" - o = append(o, 0xa4, 0x6d, 0x65, 0x74, 0x61) - o = msgp.AppendMapHeader(o, uint32(len(z.Meta))) - for za0001, za0002 := range z.Meta { - o = msgp.AppendString(o, za0001) - o = msgp.AppendString(o, za0002) - } - // string "metrics" - o = append(o, 0xa7, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73) - o = msgp.AppendMapHeader(o, uint32(len(z.Metrics))) - for za0003, za0004 := range z.Metrics { - o = msgp.AppendString(o, za0003) - o = msgp.AppendFloat64(o, za0004) - } - // string "type" - o = append(o, 0xa4, 0x74, 0x79, 0x70, 0x65) - o = msgp.AppendString(o, z.Type) - - // string "meta_struct" - o = append(o, 0xab, 0x6d, 0x65, 0x74, 0x61, 0x5f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74) - o = msgp.AppendMapHeader(o, uint32(len(z.MetaStruct))) - for za0005, za0006 := range z.MetaStruct { - o = msgp.AppendString(o, za0005) - o = msgp.AppendBytes(o, za0006) - } - return -} - -// UnmarshalMsg implements msgp.Unmarshaler -func (z *Span) UnmarshalMsg(bts []byte) (o []byte, err error) { - var field []byte - _ = field - var zb0001 uint32 - zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - hook, hookok := MetaHook() - for zb0001 > 0 { - zb0001-- - field, bts, err = msgp.ReadMapKeyZC(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - switch msgp.UnsafeString(field) { - case "service": - z.Service, bts, err = parseStringBytes(bts) - if err != nil { - err = msgp.WrapError(err, "Service") - return - } - case "name": - z.Name, bts, err = parseStringBytes(bts) - if err != nil { - err = msgp.WrapError(err, "Service") - return - } - case "resource": - z.Resource, bts, err = parseStringBytes(bts) - if err != nil { - err = msgp.WrapError(err, "Service") - return - } - case "trace_id": - z.TraceID, bts, err = parseUint64Bytes(bts) - if err != nil { - err = msgp.WrapError(err, "TraceID") - return - } - case "span_id": - z.SpanID, bts, err = parseUint64Bytes(bts) - if err != nil { - err = msgp.WrapError(err, "SpanID") - return - } - case "parent_id": - z.ParentID, bts, err = parseUint64Bytes(bts) - if err != nil { - err = msgp.WrapError(err, "ParentID") - return - } - case "start": - z.Start, bts, err = parseInt64Bytes(bts) - if err != nil { - err = msgp.WrapError(err, "Start") - return - } - 
case "duration": - z.Duration, bts, err = parseInt64Bytes(bts) - if err != nil { - err = msgp.WrapError(err, "Duration") - return - } - case "error": - z.Error, bts, err = parseInt32Bytes(bts) - if err != nil { - err = msgp.WrapError(err, "Error") - return - } - case "meta": - if msgp.IsNil(bts) { - bts, err = msgp.ReadNilBytes(bts) - z.Meta = nil - break - } - var zb0002 uint32 - zb0002, bts, err = msgp.ReadMapHeaderBytes(bts) - if err != nil { - err = msgp.WrapError(err, "Meta") - return - } - if z.Meta == nil && zb0002 > 0 { - z.Meta = make(map[string]string, zb0002) - } else if len(z.Meta) > 0 { - for key := range z.Meta { - delete(z.Meta, key) - } - } - for zb0002 > 0 { - var za0001 string - var za0002 string - zb0002-- - za0001, bts, err = parseStringBytes(bts) - if err != nil { - err = msgp.WrapError(err, "Meta") - return - } - za0002, bts, err = parseStringBytes(bts) - if err != nil { - err = msgp.WrapError(err, "Meta", za0001) - return - } - if hookok { - z.Meta[za0001] = hook(za0001, za0002) - } else { - z.Meta[za0001] = za0002 - } - } - case "metrics": - if msgp.IsNil(bts) { - bts, err = msgp.ReadNilBytes(bts) - z.Metrics = nil - break - } - var zb0003 uint32 - zb0003, bts, err = msgp.ReadMapHeaderBytes(bts) - if err != nil { - err = msgp.WrapError(err, "Metrics") - return - } - if z.Metrics == nil && zb0003 > 0 { - z.Metrics = make(map[string]float64, zb0003) - } else if len(z.Metrics) > 0 { - for key := range z.Metrics { - delete(z.Metrics, key) - } - } - for zb0003 > 0 { - var za0003 string - var za0004 float64 - zb0003-- - za0003, bts, err = parseStringBytes(bts) - if err != nil { - err = msgp.WrapError(err, "Metrics") - return - } - za0004, bts, err = parseFloat64Bytes(bts) - if err != nil { - err = msgp.WrapError(err, "Metrics", za0003) - return - } - z.Metrics[za0003] = za0004 - } - case "type": - z.Type, bts, err = parseStringBytes(bts) - if err != nil { - err = msgp.WrapError(err, "Type") - return - } - case "meta_struct": - if msgp.IsNil(bts) { - bts, err = msgp.ReadNilBytes(bts) - z.MetaStruct = nil - break - } - var zb0004 uint32 - zb0004, bts, err = msgp.ReadMapHeaderBytes(bts) - if err != nil { - err = msgp.WrapError(err, "MetaStruct") - return - } - if z.MetaStruct == nil && zb0004 > 0 { - z.MetaStruct = make(map[string][]byte, zb0004) - } else if len(z.MetaStruct) > 0 { - for key := range z.MetaStruct { - delete(z.MetaStruct, key) - } - } - for zb0004 > 0 { - var za0005 string - var za0006 []byte - zb0004-- - za0005, bts, err = parseStringBytes(bts) - if err != nil { - err = msgp.WrapError(err, "MetaStruct") - return - } - za0006, bts, err = parseBytes(bts) - if za0006 != nil && err != nil { - err = msgp.WrapError(err, "MetaStruct", za0006) - return - } - z.MetaStruct[za0005] = za0006 - } - default: - bts, err = msgp.Skip(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - } - } - o = bts - return -} - -// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message -func (z *Span) Msgsize() (s int) { - s = 1 + 8 + msgp.StringPrefixSize + len(z.Service) + 5 + msgp.StringPrefixSize + len(z.Name) + 9 + msgp.StringPrefixSize + len(z.Resource) + 9 + msgp.Uint64Size + 8 + msgp.Uint64Size + 10 + msgp.Uint64Size + 6 + msgp.Int64Size + 9 + msgp.Int64Size + 6 + msgp.Int32Size + 5 + msgp.MapHeaderSize - if z.Meta != nil { - for za0001, za0002 := range z.Meta { - _ = za0002 - s += msgp.StringPrefixSize + len(za0001) + msgp.StringPrefixSize + len(za0002) - } - } - s += 8 + msgp.MapHeaderSize - if z.Metrics != nil { - for 
za0003, za0004 := range z.Metrics { - _ = za0004 - s += msgp.StringPrefixSize + len(za0003) + msgp.Float64Size - } - } - s += 5 + msgp.StringPrefixSize + len(z.Type) + 12 + msgp.MapHeaderSize - if z.MetaStruct != nil { - for za0005, za0006 := range z.MetaStruct { - _ = za0006 - s += msgp.StringPrefixSize + len(za0005) + msgp.BytesPrefixSize + len(za0006) - } - } - return -} diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/trace/pb/span_utils.go b/vendor/github.com/DataDog/datadog-agent/pkg/trace/pb/span_utils.go deleted file mode 100644 index e8da5dea2..000000000 --- a/vendor/github.com/DataDog/datadog-agent/pkg/trace/pb/span_utils.go +++ /dev/null @@ -1,51 +0,0 @@ -// Unless explicitly stated otherwise all files in this repository are licensed -// under the Apache License Version 2.0. -// This product includes software developed at Datadog (https://www.datadoghq.com/). -// Copyright 2016-present Datadog, Inc. - -package pb - -// spanCopiedFields records the fields that are copied in ShallowCopy. -// This should match exactly the fields set in (*Span).ShallowCopy. -// This is used by tests to enforce the correctness of ShallowCopy. -var spanCopiedFields = map[string]struct{}{ - "Service": {}, - "Name": {}, - "Resource": {}, - "TraceID": {}, - "SpanID": {}, - "ParentID": {}, - "Start": {}, - "Duration": {}, - "Error": {}, - "Meta": {}, - "Metrics": {}, - "Type": {}, - "MetaStruct": {}, -} - -// ShallowCopy returns a shallow copy of the copy-able portion of a Span. These are the -// public fields which will have a Get* method for them. The completeness of this -// method is enforced by the init function above. Instead of using pkg/proto/utils.ProtoCopier, -// which incurs heavy reflection cost for every copy at runtime, we use reflection once at -// startup to ensure our method is complete. -func (s *Span) ShallowCopy() *Span { - if s == nil { - return &Span{} - } - return &Span{ - Service: s.Service, - Name: s.Name, - Resource: s.Resource, - TraceID: s.TraceID, - SpanID: s.SpanID, - ParentID: s.ParentID, - Start: s.Start, - Duration: s.Duration, - Error: s.Error, - Meta: s.Meta, - Metrics: s.Metrics, - Type: s.Type, - MetaStruct: s.MetaStruct, - } -} diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/trace/pb/span_vtproto.pb.go b/vendor/github.com/DataDog/datadog-agent/pkg/trace/pb/span_vtproto.pb.go deleted file mode 100644 index 9614470ba..000000000 --- a/vendor/github.com/DataDog/datadog-agent/pkg/trace/pb/span_vtproto.pb.go +++ /dev/null @@ -1,994 +0,0 @@ -// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. -// protoc-gen-go-vtproto version: v0.4.0 -// source: span.proto - -package pb - -import ( - binary "encoding/binary" - fmt "fmt" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - io "io" - math "math" - bits "math/bits" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. 
- _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -func (m *Span) MarshalVT() (dAtA []byte, err error) { - if m == nil { - return nil, nil - } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Span) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) -} - -func (m *Span) MarshalToSizedBufferVT(dAtA []byte) (int, error) { - if m == nil { - return 0, nil - } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) - } - if len(m.MetaStruct) > 0 { - for k := range m.MetaStruct { - v := m.MetaStruct[k] - baseI := i - i -= len(v) - copy(dAtA[i:], v) - i = encodeVarint(dAtA, i, uint64(len(v))) - i-- - dAtA[i] = 0x12 - i -= len(k) - copy(dAtA[i:], k) - i = encodeVarint(dAtA, i, uint64(len(k))) - i-- - dAtA[i] = 0xa - i = encodeVarint(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x6a - } - } - if len(m.Type) > 0 { - i -= len(m.Type) - copy(dAtA[i:], m.Type) - i = encodeVarint(dAtA, i, uint64(len(m.Type))) - i-- - dAtA[i] = 0x62 - } - if len(m.Metrics) > 0 { - for k := range m.Metrics { - v := m.Metrics[k] - baseI := i - i -= 8 - binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(v)))) - i-- - dAtA[i] = 0x11 - i -= len(k) - copy(dAtA[i:], k) - i = encodeVarint(dAtA, i, uint64(len(k))) - i-- - dAtA[i] = 0xa - i = encodeVarint(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x5a - } - } - if len(m.Meta) > 0 { - for k := range m.Meta { - v := m.Meta[k] - baseI := i - i -= len(v) - copy(dAtA[i:], v) - i = encodeVarint(dAtA, i, uint64(len(v))) - i-- - dAtA[i] = 0x12 - i -= len(k) - copy(dAtA[i:], k) - i = encodeVarint(dAtA, i, uint64(len(k))) - i-- - dAtA[i] = 0xa - i = encodeVarint(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x52 - } - } - if m.Error != 0 { - i = encodeVarint(dAtA, i, uint64(m.Error)) - i-- - dAtA[i] = 0x48 - } - if m.Duration != 0 { - i = encodeVarint(dAtA, i, uint64(m.Duration)) - i-- - dAtA[i] = 0x40 - } - if m.Start != 0 { - i = encodeVarint(dAtA, i, uint64(m.Start)) - i-- - dAtA[i] = 0x38 - } - if m.ParentID != 0 { - i = encodeVarint(dAtA, i, uint64(m.ParentID)) - i-- - dAtA[i] = 0x30 - } - if m.SpanID != 0 { - i = encodeVarint(dAtA, i, uint64(m.SpanID)) - i-- - dAtA[i] = 0x28 - } - if m.TraceID != 0 { - i = encodeVarint(dAtA, i, uint64(m.TraceID)) - i-- - dAtA[i] = 0x20 - } - if len(m.Resource) > 0 { - i -= len(m.Resource) - copy(dAtA[i:], m.Resource) - i = encodeVarint(dAtA, i, uint64(len(m.Resource))) - i-- - dAtA[i] = 0x1a - } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarint(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0x12 - } - if len(m.Service) > 0 { - i -= len(m.Service) - copy(dAtA[i:], m.Service) - i = encodeVarint(dAtA, i, uint64(len(m.Service))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func encodeVarint(dAtA []byte, offset int, v uint64) int { - offset -= sov(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *Span) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Service) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - l = len(m.Name) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - l = len(m.Resource) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - if m.TraceID != 0 { - n += 1 + 
sov(uint64(m.TraceID)) - } - if m.SpanID != 0 { - n += 1 + sov(uint64(m.SpanID)) - } - if m.ParentID != 0 { - n += 1 + sov(uint64(m.ParentID)) - } - if m.Start != 0 { - n += 1 + sov(uint64(m.Start)) - } - if m.Duration != 0 { - n += 1 + sov(uint64(m.Duration)) - } - if m.Error != 0 { - n += 1 + sov(uint64(m.Error)) - } - if len(m.Meta) > 0 { - for k, v := range m.Meta { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sov(uint64(len(k))) + 1 + len(v) + sov(uint64(len(v))) - n += mapEntrySize + 1 + sov(uint64(mapEntrySize)) - } - } - if len(m.Metrics) > 0 { - for k, v := range m.Metrics { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sov(uint64(len(k))) + 1 + 8 - n += mapEntrySize + 1 + sov(uint64(mapEntrySize)) - } - } - l = len(m.Type) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - if len(m.MetaStruct) > 0 { - for k, v := range m.MetaStruct { - _ = k - _ = v - l = 1 + len(v) + sov(uint64(len(v))) - mapEntrySize := 1 + len(k) + sov(uint64(len(k))) + l - n += mapEntrySize + 1 + sov(uint64(mapEntrySize)) - } - } - n += len(m.unknownFields) - return n -} - -func sov(x uint64) (n int) { - return (bits.Len64(x|1) + 6) / 7 -} -func soz(x uint64) (n int) { - return sov(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *Span) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Span: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Span: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Service", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Service = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 
0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Resource = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TraceID", wireType) - } - m.TraceID = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TraceID |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field SpanID", wireType) - } - m.SpanID = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.SpanID |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ParentID", wireType) - } - m.ParentID = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ParentID |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Start", wireType) - } - m.Start = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Start |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 8: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Duration", wireType) - } - m.Duration = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Duration |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 9: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) - } - m.Error = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Error |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 10: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Meta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Meta == nil { - m.Meta = make(map[string]string) - } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; 
shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLength - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLength - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLength - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return ErrInvalidLength - } - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.Meta[mapkey] = mapvalue - iNdEx = postIndex - case 11: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Metrics", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Metrics == nil { - m.Metrics = make(map[string]float64) - } - var mapkey string - var mapvalue float64 - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLength - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLength - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapvaluetemp uint64 - if (iNdEx + 8) > l { - return io.ErrUnexpectedEOF - } - mapvaluetemp = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:])) - iNdEx += 8 - mapvalue = math.Float64frombits(mapvaluetemp) - } else { - iNdEx = entryPreIndex - skippy, err := 
skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.Metrics[mapkey] = mapvalue - iNdEx = postIndex - case 12: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Type = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 13: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MetaStruct", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.MetaStruct == nil { - m.MetaStruct = make(map[string][]byte) - } - var mapkey string - var mapvalue []byte - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLength - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLength - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapbyteLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapbyteLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intMapbyteLen := int(mapbyteLen) - if intMapbyteLen < 0 { - return ErrInvalidLength - } - postbytesIndex := iNdEx + intMapbyteLen - if postbytesIndex < 0 { - return ErrInvalidLength - } - if postbytesIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue = make([]byte, mapbyteLen) - copy(mapvalue, dAtA[iNdEx:postbytesIndex]) - iNdEx = postbytesIndex - } else { - iNdEx = entryPreIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.MetaStruct[mapkey] = mapvalue - iNdEx = 
postIndex - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} - -func skip(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflow - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflow - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflow - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLength - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroup - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLength - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLength = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflow = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroup = fmt.Errorf("proto: unexpected end of group") -) diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/trace/pb/stats.pb.go b/vendor/github.com/DataDog/datadog-agent/pkg/trace/pb/stats.pb.go deleted file mode 100644 index b8580586d..000000000 --- a/vendor/github.com/DataDog/datadog-agent/pkg/trace/pb/stats.pb.go +++ /dev/null @@ -1,2384 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: stats.proto - -package pb - -import ( - fmt "fmt" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - io "io" - math "math" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package - -// StatsPayload is the payload used to send stats from the agent to the backend. 
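Note how the default branch of UnmarshalVT pairs with the skip() helper above: bytes belonging to unrecognized field numbers are measured, preserved in m.unknownFields, and written back verbatim by MarshalToSizedBufferVT, so a Span can pass through this code without losing fields added by a newer schema. A small round-trip sketch (the function is illustrative, not part of the patch):

    // roundTrip decodes and re-encodes a Span; unknown fields survive.
    func roundTrip(in []byte) ([]byte, error) {
        var s Span
        if err := s.UnmarshalVT(in); err != nil {
            return nil, err
        }
        return s.MarshalVT()
    }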
-type StatsPayload struct { - AgentHostname string `protobuf:"bytes,1,opt,name=agentHostname,proto3" json:"agentHostname,omitempty"` - AgentEnv string `protobuf:"bytes,2,opt,name=agentEnv,proto3" json:"agentEnv,omitempty"` - Stats []ClientStatsPayload `protobuf:"bytes,3,rep,name=stats,proto3" json:"stats"` - AgentVersion string `protobuf:"bytes,4,opt,name=agentVersion,proto3" json:"agentVersion,omitempty"` - ClientComputed bool `protobuf:"varint,5,opt,name=clientComputed,proto3" json:"clientComputed,omitempty"` -} - -func (m *StatsPayload) Reset() { *m = StatsPayload{} } -func (m *StatsPayload) String() string { return proto.CompactTextString(m) } -func (*StatsPayload) ProtoMessage() {} -func (*StatsPayload) Descriptor() ([]byte, []int) { - return fileDescriptor_b4756a0aec8b9d44, []int{0} -} -func (m *StatsPayload) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *StatsPayload) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_StatsPayload.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalTo(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *StatsPayload) XXX_Merge(src proto.Message) { - xxx_messageInfo_StatsPayload.Merge(m, src) -} -func (m *StatsPayload) XXX_Size() int { - return m.Size() -} -func (m *StatsPayload) XXX_DiscardUnknown() { - xxx_messageInfo_StatsPayload.DiscardUnknown(m) -} - -var xxx_messageInfo_StatsPayload proto.InternalMessageInfo - -func (m *StatsPayload) GetAgentHostname() string { - if m != nil { - return m.AgentHostname - } - return "" -} - -func (m *StatsPayload) GetAgentEnv() string { - if m != nil { - return m.AgentEnv - } - return "" -} - -func (m *StatsPayload) GetStats() []ClientStatsPayload { - if m != nil { - return m.Stats - } - return nil -} - -func (m *StatsPayload) GetAgentVersion() string { - if m != nil { - return m.AgentVersion - } - return "" -} - -func (m *StatsPayload) GetClientComputed() bool { - if m != nil { - return m.ClientComputed - } - return false -} - -// ClientStatsPayload is the first layer of span stats aggregation. It is also -// the payload sent by tracers to the agent when stats in tracer are enabled. -type ClientStatsPayload struct { - // Hostname is the tracer hostname. It's extracted from spans with "_dd.hostname" meta - // or set by tracer stats payload when hostname reporting is enabled. - Hostname string `protobuf:"bytes,1,opt,name=hostname,proto3" json:"hostname,omitempty"` - Env string `protobuf:"bytes,2,opt,name=env,proto3" json:"env,omitempty"` - Version string `protobuf:"bytes,3,opt,name=version,proto3" json:"version,omitempty"` - Stats []ClientStatsBucket `protobuf:"bytes,4,rep,name=stats,proto3" json:"stats"` - Lang string `protobuf:"bytes,5,opt,name=lang,proto3" json:"lang,omitempty"` - TracerVersion string `protobuf:"bytes,6,opt,name=tracerVersion,proto3" json:"tracerVersion,omitempty"` - RuntimeID string `protobuf:"bytes,7,opt,name=runtimeID,proto3" json:"runtimeID,omitempty"` - Sequence uint64 `protobuf:"varint,8,opt,name=sequence,proto3" json:"sequence,omitempty"` - // AgentAggregation is set by the agent on tracer payloads modified by the agent aggregation layer - // characterizes counts only and distributions only payloads - AgentAggregation string `protobuf:"bytes,9,opt,name=agentAggregation,proto3" json:"agentAggregation,omitempty"` - // Service is the main service of the tracer. 
- // It is part of unified tagging: https://docs.datadoghq.com/getting_started/tagging/unified_service_tagging - Service string `protobuf:"bytes,10,opt,name=service,proto3" json:"service,omitempty"` - // ContainerID specifies the origin container ID. It is meant to be populated by the client and may - // be enhanced by the agent to ensure it is unique. - ContainerID string `protobuf:"bytes,11,opt,name=containerID,proto3" json:"containerID,omitempty"` - // Tags specifies a set of tags obtained from the orchestrator (where applicable) using the specified containerID. - // This field should be left empty by the client. It only applies to some specific environment. - Tags []string `protobuf:"bytes,12,rep,name=tags,proto3" json:"tags,omitempty"` -} - -func (m *ClientStatsPayload) Reset() { *m = ClientStatsPayload{} } -func (m *ClientStatsPayload) String() string { return proto.CompactTextString(m) } -func (*ClientStatsPayload) ProtoMessage() {} -func (*ClientStatsPayload) Descriptor() ([]byte, []int) { - return fileDescriptor_b4756a0aec8b9d44, []int{1} -} -func (m *ClientStatsPayload) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ClientStatsPayload) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ClientStatsPayload.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalTo(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ClientStatsPayload) XXX_Merge(src proto.Message) { - xxx_messageInfo_ClientStatsPayload.Merge(m, src) -} -func (m *ClientStatsPayload) XXX_Size() int { - return m.Size() -} -func (m *ClientStatsPayload) XXX_DiscardUnknown() { - xxx_messageInfo_ClientStatsPayload.DiscardUnknown(m) -} - -var xxx_messageInfo_ClientStatsPayload proto.InternalMessageInfo - -func (m *ClientStatsPayload) GetHostname() string { - if m != nil { - return m.Hostname - } - return "" -} - -func (m *ClientStatsPayload) GetEnv() string { - if m != nil { - return m.Env - } - return "" -} - -func (m *ClientStatsPayload) GetVersion() string { - if m != nil { - return m.Version - } - return "" -} - -func (m *ClientStatsPayload) GetStats() []ClientStatsBucket { - if m != nil { - return m.Stats - } - return nil -} - -func (m *ClientStatsPayload) GetLang() string { - if m != nil { - return m.Lang - } - return "" -} - -func (m *ClientStatsPayload) GetTracerVersion() string { - if m != nil { - return m.TracerVersion - } - return "" -} - -func (m *ClientStatsPayload) GetRuntimeID() string { - if m != nil { - return m.RuntimeID - } - return "" -} - -func (m *ClientStatsPayload) GetSequence() uint64 { - if m != nil { - return m.Sequence - } - return 0 -} - -func (m *ClientStatsPayload) GetAgentAggregation() string { - if m != nil { - return m.AgentAggregation - } - return "" -} - -func (m *ClientStatsPayload) GetService() string { - if m != nil { - return m.Service - } - return "" -} - -func (m *ClientStatsPayload) GetContainerID() string { - if m != nil { - return m.ContainerID - } - return "" -} - -func (m *ClientStatsPayload) GetTags() []string { - if m != nil { - return m.Tags - } - return nil -} - -// ClientStatsBucket is a time bucket containing aggregated stats. 
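Taken together, the deleted stats types nest four levels deep: StatsPayload holds ClientStatsPayload entries, each of which holds the time buckets and grouped stats defined below. A throwaway construction sketch, with all values invented, just to show the shape:

    p := StatsPayload{
        AgentHostname: "agent-host", // invented
        Stats: []ClientStatsPayload{{
            Hostname: "tracer-host",
            Service:  "web",
            Stats: []ClientStatsBucket{{
                Start:    uint64(time.Now().UnixNano()),
                Duration: uint64(10 * time.Second), // bucket width in ns
                Stats: []ClientGroupedStats{{
                    Service: "web",
                    Name:    "http.request",
                    Hits:    42,
                }},
            }},
        }},
    }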
-type ClientStatsBucket struct { - Start uint64 `protobuf:"varint,1,opt,name=start,proto3" json:"start,omitempty"` - Duration uint64 `protobuf:"varint,2,opt,name=duration,proto3" json:"duration,omitempty"` - Stats []ClientGroupedStats `protobuf:"bytes,3,rep,name=stats,proto3" json:"stats"` - // AgentTimeShift is the shift applied by the agent stats aggregator on bucket start - // when the received bucket start is outside of the agent aggregation window - AgentTimeShift int64 `protobuf:"varint,4,opt,name=agentTimeShift,proto3" json:"agentTimeShift,omitempty"` -} - -func (m *ClientStatsBucket) Reset() { *m = ClientStatsBucket{} } -func (m *ClientStatsBucket) String() string { return proto.CompactTextString(m) } -func (*ClientStatsBucket) ProtoMessage() {} -func (*ClientStatsBucket) Descriptor() ([]byte, []int) { - return fileDescriptor_b4756a0aec8b9d44, []int{2} -} -func (m *ClientStatsBucket) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ClientStatsBucket) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ClientStatsBucket.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalTo(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ClientStatsBucket) XXX_Merge(src proto.Message) { - xxx_messageInfo_ClientStatsBucket.Merge(m, src) -} -func (m *ClientStatsBucket) XXX_Size() int { - return m.Size() -} -func (m *ClientStatsBucket) XXX_DiscardUnknown() { - xxx_messageInfo_ClientStatsBucket.DiscardUnknown(m) -} - -var xxx_messageInfo_ClientStatsBucket proto.InternalMessageInfo - -func (m *ClientStatsBucket) GetStart() uint64 { - if m != nil { - return m.Start - } - return 0 -} - -func (m *ClientStatsBucket) GetDuration() uint64 { - if m != nil { - return m.Duration - } - return 0 -} - -func (m *ClientStatsBucket) GetStats() []ClientGroupedStats { - if m != nil { - return m.Stats - } - return nil -} - -func (m *ClientStatsBucket) GetAgentTimeShift() int64 { - if m != nil { - return m.AgentTimeShift - } - return 0 -} - -// ClientGroupedStats aggregate stats on spans grouped by service, name, resource, status_code, type -type ClientGroupedStats struct { - Service string `protobuf:"bytes,1,opt,name=service,proto3" json:"service,omitempty"` - Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` - Resource string `protobuf:"bytes,3,opt,name=resource,proto3" json:"resource,omitempty"` - HTTPStatusCode uint32 `protobuf:"varint,4,opt,name=HTTP_status_code,json=HTTPStatusCode,proto3" json:"HTTP_status_code,omitempty"` - Type string `protobuf:"bytes,5,opt,name=type,proto3" json:"type,omitempty"` - DBType string `protobuf:"bytes,6,opt,name=DB_type,json=DBType,proto3" json:"DB_type,omitempty"` - Hits uint64 `protobuf:"varint,7,opt,name=hits,proto3" json:"hits,omitempty"` - Errors uint64 `protobuf:"varint,8,opt,name=errors,proto3" json:"errors,omitempty"` - Duration uint64 `protobuf:"varint,9,opt,name=duration,proto3" json:"duration,omitempty"` - OkSummary []byte `protobuf:"bytes,10,opt,name=okSummary,proto3" json:"okSummary,omitempty"` - ErrorSummary []byte `protobuf:"bytes,11,opt,name=errorSummary,proto3" json:"errorSummary,omitempty"` - Synthetics bool `protobuf:"varint,12,opt,name=synthetics,proto3" json:"synthetics,omitempty"` - TopLevelHits uint64 `protobuf:"varint,13,opt,name=topLevelHits,proto3" json:"topLevelHits,omitempty"` - PeerService string `protobuf:"bytes,14,opt,name=peer_service,json=peerService,proto3" json:"peer_service,omitempty"` - SpanKind 
string `protobuf:"bytes,15,opt,name=span_kind,json=spanKind,proto3" json:"span_kind,omitempty"` -} - -func (m *ClientGroupedStats) Reset() { *m = ClientGroupedStats{} } -func (m *ClientGroupedStats) String() string { return proto.CompactTextString(m) } -func (*ClientGroupedStats) ProtoMessage() {} -func (*ClientGroupedStats) Descriptor() ([]byte, []int) { - return fileDescriptor_b4756a0aec8b9d44, []int{3} -} -func (m *ClientGroupedStats) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ClientGroupedStats) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ClientGroupedStats.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalTo(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ClientGroupedStats) XXX_Merge(src proto.Message) { - xxx_messageInfo_ClientGroupedStats.Merge(m, src) -} -func (m *ClientGroupedStats) XXX_Size() int { - return m.Size() -} -func (m *ClientGroupedStats) XXX_DiscardUnknown() { - xxx_messageInfo_ClientGroupedStats.DiscardUnknown(m) -} - -var xxx_messageInfo_ClientGroupedStats proto.InternalMessageInfo - -func (m *ClientGroupedStats) GetService() string { - if m != nil { - return m.Service - } - return "" -} - -func (m *ClientGroupedStats) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *ClientGroupedStats) GetResource() string { - if m != nil { - return m.Resource - } - return "" -} - -func (m *ClientGroupedStats) GetHTTPStatusCode() uint32 { - if m != nil { - return m.HTTPStatusCode - } - return 0 -} - -func (m *ClientGroupedStats) GetType() string { - if m != nil { - return m.Type - } - return "" -} - -func (m *ClientGroupedStats) GetDBType() string { - if m != nil { - return m.DBType - } - return "" -} - -func (m *ClientGroupedStats) GetHits() uint64 { - if m != nil { - return m.Hits - } - return 0 -} - -func (m *ClientGroupedStats) GetErrors() uint64 { - if m != nil { - return m.Errors - } - return 0 -} - -func (m *ClientGroupedStats) GetDuration() uint64 { - if m != nil { - return m.Duration - } - return 0 -} - -func (m *ClientGroupedStats) GetOkSummary() []byte { - if m != nil { - return m.OkSummary - } - return nil -} - -func (m *ClientGroupedStats) GetErrorSummary() []byte { - if m != nil { - return m.ErrorSummary - } - return nil -} - -func (m *ClientGroupedStats) GetSynthetics() bool { - if m != nil { - return m.Synthetics - } - return false -} - -func (m *ClientGroupedStats) GetTopLevelHits() uint64 { - if m != nil { - return m.TopLevelHits - } - return 0 -} - -func (m *ClientGroupedStats) GetPeerService() string { - if m != nil { - return m.PeerService - } - return "" -} - -func (m *ClientGroupedStats) GetSpanKind() string { - if m != nil { - return m.SpanKind - } - return "" -} - -func init() { - proto.RegisterType((*StatsPayload)(nil), "pb.StatsPayload") - proto.RegisterType((*ClientStatsPayload)(nil), "pb.ClientStatsPayload") - proto.RegisterType((*ClientStatsBucket)(nil), "pb.ClientStatsBucket") - proto.RegisterType((*ClientGroupedStats)(nil), "pb.ClientGroupedStats") -} - -func init() { proto.RegisterFile("stats.proto", fileDescriptor_b4756a0aec8b9d44) } - -var fileDescriptor_b4756a0aec8b9d44 = []byte{ - // 666 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x54, 0xc1, 0x6e, 0xda, 0x4a, - 0x14, 0xc5, 0xd8, 0x21, 0xf1, 0x85, 0xf0, 0xf2, 0x46, 0xef, 0xe5, 0x59, 0x79, 0x91, 0x4b, 0x51, - 0x15, 0xa1, 0x4a, 0x25, 0x6a, 0xfa, 0x05, 0x25, 
0x54, 0x4d, 0xd4, 0x2e, 0x22, 0x83, 0xba, 0x45, - 0xc6, 0xbe, 0x31, 0x56, 0xf0, 0x8c, 0x3b, 0x1e, 0x23, 0xf1, 0x17, 0xfd, 0x85, 0x7e, 0x4b, 0x37, - 0x59, 0x66, 0xd9, 0x55, 0x55, 0x25, 0x1f, 0xd2, 0x6a, 0xae, 0x31, 0xc1, 0x44, 0xea, 0xee, 0x9e, - 0x33, 0x97, 0x3b, 0xf7, 0x1c, 0x9f, 0x01, 0x9a, 0x99, 0xf2, 0x55, 0xd6, 0x4f, 0xa5, 0x50, 0x82, - 0xd5, 0xd3, 0xe9, 0xd1, 0xab, 0x28, 0x56, 0xb3, 0x7c, 0xda, 0x0f, 0x44, 0x72, 0x1a, 0x89, 0x48, - 0x9c, 0xd2, 0xd1, 0x34, 0xbf, 0x26, 0x44, 0x80, 0xaa, 0xe2, 0x27, 0xdd, 0x3b, 0x03, 0x5a, 0x23, - 0x3d, 0xe2, 0xca, 0x5f, 0xce, 0x85, 0x1f, 0xb2, 0x17, 0xb0, 0xef, 0x47, 0xc8, 0xd5, 0x85, 0xc8, - 0x14, 0xf7, 0x13, 0x74, 0x8c, 0x8e, 0xd1, 0xb3, 0xbd, 0x2a, 0xc9, 0x8e, 0x60, 0x8f, 0x88, 0x77, - 0x7c, 0xe1, 0xd4, 0xa9, 0x61, 0x8d, 0xd9, 0x19, 0xec, 0xd0, 0x52, 0x8e, 0xd9, 0x31, 0x7b, 0xcd, - 0xb3, 0xc3, 0x7e, 0x3a, 0xed, 0x9f, 0xcf, 0x63, 0xe4, 0x6a, 0xf3, 0xa2, 0x81, 0x75, 0xfb, 0xe3, - 0x59, 0xcd, 0x2b, 0x5a, 0x59, 0x17, 0x5a, 0xf4, 0xfb, 0x4f, 0x28, 0xb3, 0x58, 0x70, 0xc7, 0xa2, - 0x99, 0x15, 0x8e, 0x9d, 0x40, 0x3b, 0xa0, 0x31, 0xe7, 0x22, 0x49, 0x73, 0x85, 0xa1, 0xb3, 0xd3, - 0x31, 0x7a, 0x7b, 0xde, 0x16, 0xdb, 0xfd, 0x55, 0x07, 0xf6, 0xf4, 0x3e, 0xbd, 0xf2, 0xac, 0xaa, - 0x69, 0x8d, 0xd9, 0x01, 0x98, 0xb8, 0x56, 0xa2, 0x4b, 0xe6, 0xc0, 0xee, 0x62, 0xb5, 0x8b, 0x49, - 0x6c, 0x09, 0xd9, 0xeb, 0x52, 0x9e, 0x45, 0xf2, 0xfe, 0xdd, 0x92, 0x37, 0xc8, 0x83, 0x1b, 0x54, - 0x55, 0x75, 0x0c, 0xac, 0xb9, 0xcf, 0x23, 0xda, 0xd7, 0xf6, 0xa8, 0xd6, 0x3e, 0x2b, 0xe9, 0x07, - 0x28, 0x4b, 0xc9, 0x8d, 0xc2, 0xe7, 0x0a, 0xc9, 0x8e, 0xc1, 0x96, 0x39, 0x57, 0x71, 0x82, 0x97, - 0x43, 0x67, 0x97, 0x3a, 0x1e, 0x09, 0x2d, 0x29, 0xc3, 0xcf, 0x39, 0xf2, 0x00, 0x9d, 0xbd, 0x8e, - 0xd1, 0xb3, 0xbc, 0x35, 0x66, 0x2f, 0xe1, 0x80, 0xdc, 0x7b, 0x1b, 0x45, 0x12, 0x23, 0x5f, 0xe9, - 0x2b, 0x6c, 0x1a, 0xf0, 0x84, 0xd7, 0x62, 0x33, 0x94, 0x8b, 0x38, 0x40, 0x07, 0x0a, 0xb1, 0x2b, - 0xc8, 0x3a, 0xd0, 0x0c, 0x04, 0x57, 0x7e, 0xcc, 0x51, 0x5e, 0x0e, 0x9d, 0x26, 0x9d, 0x6e, 0x52, - 0x5a, 0x9b, 0xf2, 0xa3, 0xcc, 0x69, 0x75, 0x4c, 0xad, 0x4d, 0xd7, 0xdd, 0xaf, 0x06, 0xfc, 0xfd, - 0xc4, 0x12, 0xf6, 0x0f, 0x19, 0x27, 0x15, 0xb9, 0x6f, 0x79, 0x05, 0xd0, 0x1a, 0xc2, 0x5c, 0x16, - 0xfb, 0xd5, 0x0b, 0x0d, 0x25, 0xfe, 0x43, 0x92, 0xde, 0x4b, 0x91, 0xa7, 0x18, 0x16, 0xe3, 0x2b, - 0x5e, 0x9f, 0x40, 0x9b, 0xf4, 0x8d, 0xe3, 0x04, 0x47, 0xb3, 0xf8, 0x5a, 0x51, 0x96, 0x4c, 0x6f, - 0x8b, 0xed, 0x7e, 0x33, 0xcb, 0x94, 0x6c, 0xce, 0xda, 0xb4, 0xc2, 0xa8, 0x5a, 0xc1, 0xc0, 0xa2, - 0xec, 0x14, 0x21, 0xb1, 0xca, 0x67, 0x20, 0x31, 0x13, 0xb9, 0x0c, 0x70, 0x15, 0x93, 0x35, 0x66, - 0x3d, 0x38, 0xb8, 0x18, 0x8f, 0xaf, 0x26, 0x7a, 0xad, 0x3c, 0x9b, 0x04, 0x22, 0x44, 0x5a, 0x65, - 0xdf, 0x6b, 0x6b, 0x7e, 0x44, 0xf4, 0xb9, 0x08, 0x69, 0xb2, 0x5a, 0xa6, 0x58, 0xc6, 0x43, 0xd7, - 0xec, 0x3f, 0xd8, 0x1d, 0x0e, 0x26, 0x44, 0x17, 0xc1, 0x68, 0x0c, 0x07, 0x63, 0x7d, 0xc0, 0xc0, - 0x9a, 0xc5, 0x2a, 0xa3, 0x30, 0x58, 0x1e, 0xd5, 0xec, 0x10, 0x1a, 0x28, 0xa5, 0x90, 0xd9, 0x2a, - 0x05, 0x2b, 0x54, 0xf1, 0xd6, 0xde, 0xf2, 0xf6, 0x18, 0x6c, 0x71, 0x33, 0xca, 0x93, 0xc4, 0x97, - 0x4b, 0xfa, 0xea, 0x2d, 0xef, 0x91, 0xd0, 0xef, 0x91, 0x66, 0x94, 0x0d, 0x4d, 0x6a, 0xa8, 0x70, - 0xcc, 0x05, 0xc8, 0x96, 0x5c, 0xcd, 0x50, 0xc5, 0x81, 0xfe, 0xfe, 0xfa, 0x2d, 0x6e, 0x30, 0x7a, - 0x86, 0x12, 0xe9, 0x47, 0x5c, 0xe0, 0xfc, 0x42, 0x6f, 0xbc, 0x4f, 0x1b, 0x54, 0x38, 0xf6, 0x1c, - 0x5a, 0x29, 0xa2, 0x9c, 0x94, 0x9e, 0xb7, 0x8b, 0x80, 0x69, 0x6e, 0xb4, 0xf2, 0xfd, 0x7f, 0xb0, - 0xb3, 0xd4, 0xe7, 0x93, 0x9b, 0x98, 0x87, 0xce, 0x5f, 0x85, 0xc9, 0x9a, 
0xf8, 0x10, 0xf3, 0x70, - 0xe0, 0xdc, 0xde, 0xbb, 0xc6, 0xdd, 0xbd, 0x6b, 0xfc, 0xbc, 0x77, 0x8d, 0x2f, 0x0f, 0x6e, 0xed, - 0xee, 0xc1, 0xad, 0x7d, 0x7f, 0x70, 0x6b, 0xd3, 0x06, 0xfd, 0xbf, 0xbd, 0xf9, 0x1d, 0x00, 0x00, - 0xff, 0xff, 0xc7, 0x5d, 0xe8, 0x01, 0x21, 0x05, 0x00, 0x00, -} - -func (m *StatsPayload) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *StatsPayload) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.AgentHostname) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintStats(dAtA, i, uint64(len(m.AgentHostname))) - i += copy(dAtA[i:], m.AgentHostname) - } - if len(m.AgentEnv) > 0 { - dAtA[i] = 0x12 - i++ - i = encodeVarintStats(dAtA, i, uint64(len(m.AgentEnv))) - i += copy(dAtA[i:], m.AgentEnv) - } - if len(m.Stats) > 0 { - for _, msg := range m.Stats { - dAtA[i] = 0x1a - i++ - i = encodeVarintStats(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if len(m.AgentVersion) > 0 { - dAtA[i] = 0x22 - i++ - i = encodeVarintStats(dAtA, i, uint64(len(m.AgentVersion))) - i += copy(dAtA[i:], m.AgentVersion) - } - if m.ClientComputed { - dAtA[i] = 0x28 - i++ - if m.ClientComputed { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - } - return i, nil -} - -func (m *ClientStatsPayload) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ClientStatsPayload) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Hostname) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintStats(dAtA, i, uint64(len(m.Hostname))) - i += copy(dAtA[i:], m.Hostname) - } - if len(m.Env) > 0 { - dAtA[i] = 0x12 - i++ - i = encodeVarintStats(dAtA, i, uint64(len(m.Env))) - i += copy(dAtA[i:], m.Env) - } - if len(m.Version) > 0 { - dAtA[i] = 0x1a - i++ - i = encodeVarintStats(dAtA, i, uint64(len(m.Version))) - i += copy(dAtA[i:], m.Version) - } - if len(m.Stats) > 0 { - for _, msg := range m.Stats { - dAtA[i] = 0x22 - i++ - i = encodeVarintStats(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if len(m.Lang) > 0 { - dAtA[i] = 0x2a - i++ - i = encodeVarintStats(dAtA, i, uint64(len(m.Lang))) - i += copy(dAtA[i:], m.Lang) - } - if len(m.TracerVersion) > 0 { - dAtA[i] = 0x32 - i++ - i = encodeVarintStats(dAtA, i, uint64(len(m.TracerVersion))) - i += copy(dAtA[i:], m.TracerVersion) - } - if len(m.RuntimeID) > 0 { - dAtA[i] = 0x3a - i++ - i = encodeVarintStats(dAtA, i, uint64(len(m.RuntimeID))) - i += copy(dAtA[i:], m.RuntimeID) - } - if m.Sequence != 0 { - dAtA[i] = 0x40 - i++ - i = encodeVarintStats(dAtA, i, uint64(m.Sequence)) - } - if len(m.AgentAggregation) > 0 { - dAtA[i] = 0x4a - i++ - i = encodeVarintStats(dAtA, i, uint64(len(m.AgentAggregation))) - i += copy(dAtA[i:], m.AgentAggregation) - } - if len(m.Service) > 0 { - dAtA[i] = 0x52 - i++ - i = encodeVarintStats(dAtA, i, uint64(len(m.Service))) - i += copy(dAtA[i:], m.Service) - } - if len(m.ContainerID) > 0 { - dAtA[i] = 0x5a - i++ - i = encodeVarintStats(dAtA, i, uint64(len(m.ContainerID))) - i += copy(dAtA[i:], m.ContainerID) - } - if len(m.Tags) > 0 { - for _, s := range m.Tags { - dAtA[i] = 0x62 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l 
>>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - return i, nil -} - -func (m *ClientStatsBucket) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ClientStatsBucket) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Start != 0 { - dAtA[i] = 0x8 - i++ - i = encodeVarintStats(dAtA, i, uint64(m.Start)) - } - if m.Duration != 0 { - dAtA[i] = 0x10 - i++ - i = encodeVarintStats(dAtA, i, uint64(m.Duration)) - } - if len(m.Stats) > 0 { - for _, msg := range m.Stats { - dAtA[i] = 0x1a - i++ - i = encodeVarintStats(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if m.AgentTimeShift != 0 { - dAtA[i] = 0x20 - i++ - i = encodeVarintStats(dAtA, i, uint64(m.AgentTimeShift)) - } - return i, nil -} - -func (m *ClientGroupedStats) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ClientGroupedStats) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Service) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintStats(dAtA, i, uint64(len(m.Service))) - i += copy(dAtA[i:], m.Service) - } - if len(m.Name) > 0 { - dAtA[i] = 0x12 - i++ - i = encodeVarintStats(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - } - if len(m.Resource) > 0 { - dAtA[i] = 0x1a - i++ - i = encodeVarintStats(dAtA, i, uint64(len(m.Resource))) - i += copy(dAtA[i:], m.Resource) - } - if m.HTTPStatusCode != 0 { - dAtA[i] = 0x20 - i++ - i = encodeVarintStats(dAtA, i, uint64(m.HTTPStatusCode)) - } - if len(m.Type) > 0 { - dAtA[i] = 0x2a - i++ - i = encodeVarintStats(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - } - if len(m.DBType) > 0 { - dAtA[i] = 0x32 - i++ - i = encodeVarintStats(dAtA, i, uint64(len(m.DBType))) - i += copy(dAtA[i:], m.DBType) - } - if m.Hits != 0 { - dAtA[i] = 0x38 - i++ - i = encodeVarintStats(dAtA, i, uint64(m.Hits)) - } - if m.Errors != 0 { - dAtA[i] = 0x40 - i++ - i = encodeVarintStats(dAtA, i, uint64(m.Errors)) - } - if m.Duration != 0 { - dAtA[i] = 0x48 - i++ - i = encodeVarintStats(dAtA, i, uint64(m.Duration)) - } - if len(m.OkSummary) > 0 { - dAtA[i] = 0x52 - i++ - i = encodeVarintStats(dAtA, i, uint64(len(m.OkSummary))) - i += copy(dAtA[i:], m.OkSummary) - } - if len(m.ErrorSummary) > 0 { - dAtA[i] = 0x5a - i++ - i = encodeVarintStats(dAtA, i, uint64(len(m.ErrorSummary))) - i += copy(dAtA[i:], m.ErrorSummary) - } - if m.Synthetics { - dAtA[i] = 0x60 - i++ - if m.Synthetics { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - } - if m.TopLevelHits != 0 { - dAtA[i] = 0x68 - i++ - i = encodeVarintStats(dAtA, i, uint64(m.TopLevelHits)) - } - if len(m.PeerService) > 0 { - dAtA[i] = 0x72 - i++ - i = encodeVarintStats(dAtA, i, uint64(len(m.PeerService))) - i += copy(dAtA[i:], m.PeerService) - } - if len(m.SpanKind) > 0 { - dAtA[i] = 0x7a - i++ - i = encodeVarintStats(dAtA, i, uint64(len(m.SpanKind))) - i += copy(dAtA[i:], m.SpanKind) - } - return i, nil -} - -func encodeVarintStats(dAtA []byte, offset int, v uint64) int { - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return offset + 1 -} -func (m *StatsPayload) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = 
len(m.AgentHostname) - if l > 0 { - n += 1 + l + sovStats(uint64(l)) - } - l = len(m.AgentEnv) - if l > 0 { - n += 1 + l + sovStats(uint64(l)) - } - if len(m.Stats) > 0 { - for _, e := range m.Stats { - l = e.Size() - n += 1 + l + sovStats(uint64(l)) - } - } - l = len(m.AgentVersion) - if l > 0 { - n += 1 + l + sovStats(uint64(l)) - } - if m.ClientComputed { - n += 2 - } - return n -} - -func (m *ClientStatsPayload) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Hostname) - if l > 0 { - n += 1 + l + sovStats(uint64(l)) - } - l = len(m.Env) - if l > 0 { - n += 1 + l + sovStats(uint64(l)) - } - l = len(m.Version) - if l > 0 { - n += 1 + l + sovStats(uint64(l)) - } - if len(m.Stats) > 0 { - for _, e := range m.Stats { - l = e.Size() - n += 1 + l + sovStats(uint64(l)) - } - } - l = len(m.Lang) - if l > 0 { - n += 1 + l + sovStats(uint64(l)) - } - l = len(m.TracerVersion) - if l > 0 { - n += 1 + l + sovStats(uint64(l)) - } - l = len(m.RuntimeID) - if l > 0 { - n += 1 + l + sovStats(uint64(l)) - } - if m.Sequence != 0 { - n += 1 + sovStats(uint64(m.Sequence)) - } - l = len(m.AgentAggregation) - if l > 0 { - n += 1 + l + sovStats(uint64(l)) - } - l = len(m.Service) - if l > 0 { - n += 1 + l + sovStats(uint64(l)) - } - l = len(m.ContainerID) - if l > 0 { - n += 1 + l + sovStats(uint64(l)) - } - if len(m.Tags) > 0 { - for _, s := range m.Tags { - l = len(s) - n += 1 + l + sovStats(uint64(l)) - } - } - return n -} - -func (m *ClientStatsBucket) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Start != 0 { - n += 1 + sovStats(uint64(m.Start)) - } - if m.Duration != 0 { - n += 1 + sovStats(uint64(m.Duration)) - } - if len(m.Stats) > 0 { - for _, e := range m.Stats { - l = e.Size() - n += 1 + l + sovStats(uint64(l)) - } - } - if m.AgentTimeShift != 0 { - n += 1 + sovStats(uint64(m.AgentTimeShift)) - } - return n -} - -func (m *ClientGroupedStats) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Service) - if l > 0 { - n += 1 + l + sovStats(uint64(l)) - } - l = len(m.Name) - if l > 0 { - n += 1 + l + sovStats(uint64(l)) - } - l = len(m.Resource) - if l > 0 { - n += 1 + l + sovStats(uint64(l)) - } - if m.HTTPStatusCode != 0 { - n += 1 + sovStats(uint64(m.HTTPStatusCode)) - } - l = len(m.Type) - if l > 0 { - n += 1 + l + sovStats(uint64(l)) - } - l = len(m.DBType) - if l > 0 { - n += 1 + l + sovStats(uint64(l)) - } - if m.Hits != 0 { - n += 1 + sovStats(uint64(m.Hits)) - } - if m.Errors != 0 { - n += 1 + sovStats(uint64(m.Errors)) - } - if m.Duration != 0 { - n += 1 + sovStats(uint64(m.Duration)) - } - l = len(m.OkSummary) - if l > 0 { - n += 1 + l + sovStats(uint64(l)) - } - l = len(m.ErrorSummary) - if l > 0 { - n += 1 + l + sovStats(uint64(l)) - } - if m.Synthetics { - n += 2 - } - if m.TopLevelHits != 0 { - n += 1 + sovStats(uint64(m.TopLevelHits)) - } - l = len(m.PeerService) - if l > 0 { - n += 1 + l + sovStats(uint64(l)) - } - l = len(m.SpanKind) - if l > 0 { - n += 1 + l + sovStats(uint64(l)) - } - return n -} - -func sovStats(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} -func sozStats(x uint64) (n int) { - return sovStats(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *StatsPayload) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] 
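Every case in these Unmarshal functions opens with the same dance: read one varint tag, then split it as fieldNum = wire >> 3 and wireType = wire & 7. The generator inlines the 7-bits-per-byte accumulation loop at each field boundary; factored out, it reads as follows (a sketch; the generated code returns ErrIntOverflowStats rather than a fresh error):

    // readUvarint is the loop the generator inlines before every field.
    func readUvarint(data []byte, i int) (v uint64, next int, err error) {
        for shift := uint(0); ; shift += 7 {
            if shift >= 64 {
                return 0, 0, fmt.Errorf("varint overflows uint64")
            }
            if i >= len(data) {
                return 0, 0, io.ErrUnexpectedEOF
            }
            b := data[i]
            i++
            v |= uint64(b&0x7F) << shift
            if b < 0x80 { // high bit clear: final byte
                return v, i, nil
            }
        }
    }

A tag byte of 0x52, for instance, splits into field 10 with wire type 2, which is Service in ClientStatsPayload.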
- iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: StatsPayload: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: StatsPayload: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AgentHostname", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthStats - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthStats - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.AgentHostname = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AgentEnv", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthStats - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthStats - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.AgentEnv = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Stats", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthStats - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthStats - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Stats = append(m.Stats, ClientStatsPayload{}) - if err := m.Stats[len(m.Stats)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AgentVersion", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthStats - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthStats - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.AgentVersion = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ClientComputed", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.ClientComputed = 
bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipStats(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthStats - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthStats - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ClientStatsPayload) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ClientStatsPayload: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ClientStatsPayload: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Hostname", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthStats - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthStats - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Hostname = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthStats - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthStats - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Env = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthStats - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthStats - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Version = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Stats", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthStats - } - postIndex := 
iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthStats - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Stats = append(m.Stats, ClientStatsBucket{}) - if err := m.Stats[len(m.Stats)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Lang", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthStats - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthStats - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Lang = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TracerVersion", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthStats - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthStats - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.TracerVersion = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RuntimeID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthStats - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthStats - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.RuntimeID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 8: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Sequence", wireType) - } - m.Sequence = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Sequence |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 9: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AgentAggregation", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthStats - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthStats - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.AgentAggregation = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 10: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Service", wireType) - } - var stringLen uint64 - 
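Repeated message fields such as Stats decode without a temporary value: the slice is grown by one zero element and the sub-message unmarshals into it in place, as in case 4 above. The extracted shape (entry stands for the already bounds-checked sub-slice dAtA[iNdEx:postIndex]):

    m.Stats = append(m.Stats, ClientStatsBucket{})
    if err := m.Stats[len(m.Stats)-1].Unmarshal(entry); err != nil {
        return err
    }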
for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthStats - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthStats - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Service = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 11: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthStats - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthStats - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ContainerID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 12: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Tags", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthStats - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthStats - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Tags = append(m.Tags, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipStats(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthStats - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthStats - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ClientStatsBucket) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ClientStatsBucket: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ClientStatsBucket: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Start", wireType) - } - m.Start = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Start |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Duration", wireType) - } - m.Duration = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 
ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Duration |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Stats", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthStats - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthStats - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Stats = append(m.Stats, ClientGroupedStats{}) - if err := m.Stats[len(m.Stats)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field AgentTimeShift", wireType) - } - m.AgentTimeShift = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.AgentTimeShift |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipStats(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthStats - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthStats - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ClientGroupedStats) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ClientGroupedStats: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ClientGroupedStats: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Service", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthStats - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthStats - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Service = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthStats - } - postIndex := iNdEx + intStringLen - if 
postIndex < 0 { - return ErrInvalidLengthStats - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthStats - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthStats - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Resource = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field HTTPStatusCode", wireType) - } - m.HTTPStatusCode = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.HTTPStatusCode |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthStats - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthStats - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Type = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DBType", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthStats - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthStats - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.DBType = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Hits", wireType) - } - m.Hits = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Hits |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 8: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Errors", wireType) - } - m.Errors = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Errors |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 9: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Duration", wireType) - } - m.Duration = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx 
>= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Duration |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 10: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field OkSummary", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthStats - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthStats - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.OkSummary = append(m.OkSummary[:0], dAtA[iNdEx:postIndex]...) - if m.OkSummary == nil { - m.OkSummary = []byte{} - } - iNdEx = postIndex - case 11: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ErrorSummary", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthStats - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthStats - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ErrorSummary = append(m.ErrorSummary[:0], dAtA[iNdEx:postIndex]...) - if m.ErrorSummary == nil { - m.ErrorSummary = []byte{} - } - iNdEx = postIndex - case 12: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Synthetics", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Synthetics = bool(v != 0) - case 13: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TopLevelHits", wireType) - } - m.TopLevelHits = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TopLevelHits |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 14: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PeerService", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthStats - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthStats - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.PeerService = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 15: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SpanKind", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthStats - } - postIndex := iNdEx + intStringLen - if 
postIndex < 0 { - return ErrInvalidLengthStats - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.SpanKind = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipStats(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthStats - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthStats - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipStats(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowStats - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowStats - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowStats - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthStats - } - iNdEx += length - if iNdEx < 0 { - return 0, ErrInvalidLengthStats - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowStats - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipStats(dAtA[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - if iNdEx < 0 { - return 0, ErrInvalidLengthStats - } - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthStats = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowStats = fmt.Errorf("proto: integer overflow") -) diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/trace/pb/stats.proto b/vendor/github.com/DataDog/datadog-agent/pkg/trace/pb/stats.proto deleted file mode 100644 index 3fccb5058..000000000 --- a/vendor/github.com/DataDog/datadog-agent/pkg/trace/pb/stats.proto +++ /dev/null @@ -1,72 +0,0 @@ -syntax = "proto3"; - -import "github.com/gogo/protobuf/gogoproto/gogo.proto"; - -package pb; - -// protoc --gogofaster_out=. -I $GOPATH/src -I . stats.proto - -// StatsPayload is the payload used to send stats from the agent to the backend. -message StatsPayload { - string agentHostname = 1; - string agentEnv = 2; - repeated ClientStatsPayload stats = 3 [(gogoproto.nullable) = false]; - string agentVersion = 4; - bool clientComputed = 5; -} - -// ClientStatsPayload is the first layer of span stats aggregation. It is also -// the payload sent by tracers to the agent when stats in tracer are enabled. -message ClientStatsPayload { - // Hostname is the tracer hostname. 
It's extracted from spans with "_dd.hostname" meta - // or set by tracer stats payload when hostname reporting is enabled. - string hostname = 1; - string env = 2; // env tag set on spans or in the tracers, used for aggregation - string version = 3; // version tag set on spans or in the tracers, used for aggregation - repeated ClientStatsBucket stats = 4 [(gogoproto.nullable) = false]; - string lang = 5; // informative field not used for aggregation - string tracerVersion = 6; // informative field not used for aggregation - string runtimeID = 7; // used on stats payloads sent by the tracer to identify uniquely a message - uint64 sequence = 8; // used on stats payloads sent by the tracer to identify uniquely a message - // AgentAggregation is set by the agent on tracer payloads modified by the agent aggregation layer - // characterizes counts only and distributions only payloads - string agentAggregation = 9; - // Service is the main service of the tracer. - // It is part of unified tagging: https://docs.datadoghq.com/getting_started/tagging/unified_service_tagging - string service = 10; - // ContainerID specifies the origin container ID. It is meant to be populated by the client and may - // be enhanced by the agent to ensure it is unique. - string containerID = 11; - // Tags specifies a set of tags obtained from the orchestrator (where applicable) using the specified containerID. - // This field should be left empty by the client. It only applies to some specific environment. - repeated string tags = 12; -} - -// ClientStatsBucket is a time bucket containing aggregated stats. -message ClientStatsBucket { - uint64 start = 1; // bucket start in nanoseconds - uint64 duration = 2; // bucket duration in nanoseconds - repeated ClientGroupedStats stats = 3 [(gogoproto.nullable) = false]; - // AgentTimeShift is the shift applied by the agent stats aggregator on bucket start - // when the received bucket start is outside of the agent aggregation window - int64 agentTimeShift = 4; -} - -// ClientGroupedStats aggregate stats on spans grouped by service, name, resource, status_code, type -message ClientGroupedStats { - string service = 1; - string name = 2; - string resource = 3; - uint32 HTTP_status_code = 4; - string type = 5; - string DB_type = 6; // db_type might be used in the future to help in the obfuscation step - uint64 hits = 7; // count of all spans aggregated in the groupedstats - uint64 errors = 8; // count of error spans aggregated in the groupedstats - uint64 duration = 9; // total duration in nanoseconds of spans aggregated in the bucket - bytes okSummary = 10; // ddsketch summary of ok spans latencies encoded in protobuf - bytes errorSummary = 11; // ddsketch summary of error spans latencies encoded in protobuf - bool synthetics = 12; // set to true on spans generated by synthetics traffic - uint64 topLevelHits = 13; // count of top level spans aggregated in the groupedstats - string peer_service = 14; // name of the remote service that the `service` communicated with - string span_kind = 15; // value of the span.kind tag on the span -} diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/trace/pb/stats_gen.go b/vendor/github.com/DataDog/datadog-agent/pkg/trace/pb/stats_gen.go deleted file mode 100644 index 451b3a9bf..000000000 --- a/vendor/github.com/DataDog/datadog-agent/pkg/trace/pb/stats_gen.go +++ /dev/null @@ -1,1393 +0,0 @@ -package pb - -// Code generated by github.com/tinylib/msgp DO NOT EDIT. 
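For orientation: the stats.proto removed above defines a three-level aggregation hierarchy. StatsPayload carries per-tracer ClientStatsPayload entries; each holds time-bucketed ClientStatsBucket entries, which in turn hold ClientGroupedStats rows grouped by service, name, and resource. Below is a minimal sketch of how the corresponding gogo-generated structs nest. Type and field names come from the deleted stats.pb.go; the import alias and all values are invented for illustration.

    package main

    import (
        "fmt"
        "time"

        pb "github.com/DataDog/datadog-agent/pkg/trace/pb" // the vendored package being removed
    )

    func main() {
        now := time.Now()
        payload := pb.StatsPayload{
            AgentHostname: "agent-host", // hypothetical values throughout
            AgentEnv:      "prod",
            // nullable=false in the proto, so these are value (not pointer) slices.
            Stats: []pb.ClientStatsPayload{{
                Hostname: "web-1",
                Env:      "prod",
                Version:  "1.2.3",
                Stats: []pb.ClientStatsBucket{{
                    Start:    uint64(now.Add(-10 * time.Second).UnixNano()), // bucket start, ns
                    Duration: uint64(10 * time.Second),                      // bucket width, ns
                    Stats: []pb.ClientGroupedStats{{
                        Service:  "checkout",
                        Name:     "http.request",
                        Resource: "GET /cart",
                        Hits:     42,
                        Errors:   1,
                        Duration: uint64(250 * time.Millisecond), // total span time, ns
                    }},
                }},
            }},
        }
        fmt.Println(len(payload.Stats[0].Stats[0].Stats)) // -> 1
    }

The same structs also implement the tinylib/msgp interfaces (the stats_gen.go whose removal begins just above), so a single set of type definitions served both the protobuf wire format and msgpack traffic.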
-// Command to generate: msgp -file pkg/trace/pb/stats.pb.go -o pkg/trace/pb/stats_gen.go -// Please remember to add this comment back after re-generation! - -import ( - _ "github.com/gogo/protobuf/gogoproto" - "github.com/tinylib/msgp/msgp" -) - -// DecodeMsg implements msgp.Decodable -func (z *ClientGroupedStats) DecodeMsg(dc *msgp.Reader) (err error) { - var field []byte - _ = field - var zb0001 uint32 - zb0001, err = dc.ReadMapHeader() - if err != nil { - err = msgp.WrapError(err) - return - } - for zb0001 > 0 { - zb0001-- - field, err = dc.ReadMapKeyPtr() - if err != nil { - err = msgp.WrapError(err) - return - } - switch msgp.UnsafeString(field) { - case "Service": - z.Service, err = dc.ReadString() - if err != nil { - err = msgp.WrapError(err, "Service") - return - } - case "Name": - z.Name, err = dc.ReadString() - if err != nil { - err = msgp.WrapError(err, "Name") - return - } - case "Resource": - z.Resource, err = dc.ReadString() - if err != nil { - err = msgp.WrapError(err, "Resource") - return - } - case "HTTPStatusCode": - z.HTTPStatusCode, err = dc.ReadUint32() - if err != nil { - err = msgp.WrapError(err, "HTTPStatusCode") - return - } - case "Type": - z.Type, err = dc.ReadString() - if err != nil { - err = msgp.WrapError(err, "Type") - return - } - case "DBType": - z.DBType, err = dc.ReadString() - if err != nil { - err = msgp.WrapError(err, "DBType") - return - } - case "Hits": - z.Hits, err = dc.ReadUint64() - if err != nil { - err = msgp.WrapError(err, "Hits") - return - } - case "Errors": - z.Errors, err = dc.ReadUint64() - if err != nil { - err = msgp.WrapError(err, "Errors") - return - } - case "Duration": - z.Duration, err = dc.ReadUint64() - if err != nil { - err = msgp.WrapError(err, "Duration") - return - } - case "OkSummary": - z.OkSummary, err = dc.ReadBytes(z.OkSummary) - if err != nil { - err = msgp.WrapError(err, "OkSummary") - return - } - case "ErrorSummary": - z.ErrorSummary, err = dc.ReadBytes(z.ErrorSummary) - if err != nil { - err = msgp.WrapError(err, "ErrorSummary") - return - } - case "Synthetics": - z.Synthetics, err = dc.ReadBool() - if err != nil { - err = msgp.WrapError(err, "Synthetics") - return - } - case "TopLevelHits": - z.TopLevelHits, err = dc.ReadUint64() - if err != nil { - err = msgp.WrapError(err, "TopLevelHits") - return - } - case "PeerService": - z.PeerService, err = dc.ReadString() - if err != nil { - err = msgp.WrapError(err, "PeerService") - return - } - case "SpanKind": - z.SpanKind, err = dc.ReadString() - if err != nil { - err = msgp.WrapError(err, "SpanKind") - return - } - default: - err = dc.Skip() - if err != nil { - err = msgp.WrapError(err) - return - } - } - } - return -} - -// EncodeMsg implements msgp.Encodable -func (z *ClientGroupedStats) EncodeMsg(en *msgp.Writer) (err error) { - // map header, size 15 - // write "Service" - err = en.Append(0x8f, 0xa7, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65) - if err != nil { - return - } - err = en.WriteString(z.Service) - if err != nil { - err = msgp.WrapError(err, "Service") - return - } - // write "Name" - err = en.Append(0xa4, 0x4e, 0x61, 0x6d, 0x65) - if err != nil { - return - } - err = en.WriteString(z.Name) - if err != nil { - err = msgp.WrapError(err, "Name") - return - } - // write "Resource" - err = en.Append(0xa8, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65) - if err != nil { - return - } - err = en.WriteString(z.Resource) - if err != nil { - err = msgp.WrapError(err, "Resource") - return - } - // write "HTTPStatusCode" - err = en.Append(0xae, 0x48, 0x54, 0x54, 
0x50, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65) - if err != nil { - return - } - err = en.WriteUint32(z.HTTPStatusCode) - if err != nil { - err = msgp.WrapError(err, "HTTPStatusCode") - return - } - // write "Type" - err = en.Append(0xa4, 0x54, 0x79, 0x70, 0x65) - if err != nil { - return - } - err = en.WriteString(z.Type) - if err != nil { - err = msgp.WrapError(err, "Type") - return - } - // write "DBType" - err = en.Append(0xa6, 0x44, 0x42, 0x54, 0x79, 0x70, 0x65) - if err != nil { - return - } - err = en.WriteString(z.DBType) - if err != nil { - err = msgp.WrapError(err, "DBType") - return - } - // write "Hits" - err = en.Append(0xa4, 0x48, 0x69, 0x74, 0x73) - if err != nil { - return - } - err = en.WriteUint64(z.Hits) - if err != nil { - err = msgp.WrapError(err, "Hits") - return - } - // write "Errors" - err = en.Append(0xa6, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x73) - if err != nil { - return - } - err = en.WriteUint64(z.Errors) - if err != nil { - err = msgp.WrapError(err, "Errors") - return - } - // write "Duration" - err = en.Append(0xa8, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e) - if err != nil { - return - } - err = en.WriteUint64(z.Duration) - if err != nil { - err = msgp.WrapError(err, "Duration") - return - } - // write "OkSummary" - err = en.Append(0xa9, 0x4f, 0x6b, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79) - if err != nil { - return - } - err = en.WriteBytes(z.OkSummary) - if err != nil { - err = msgp.WrapError(err, "OkSummary") - return - } - // write "ErrorSummary" - err = en.Append(0xac, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79) - if err != nil { - return - } - err = en.WriteBytes(z.ErrorSummary) - if err != nil { - err = msgp.WrapError(err, "ErrorSummary") - return - } - // write "Synthetics" - err = en.Append(0xaa, 0x53, 0x79, 0x6e, 0x74, 0x68, 0x65, 0x74, 0x69, 0x63, 0x73) - if err != nil { - return - } - err = en.WriteBool(z.Synthetics) - if err != nil { - err = msgp.WrapError(err, "Synthetics") - return - } - // write "TopLevelHits" - err = en.Append(0xac, 0x54, 0x6f, 0x70, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x48, 0x69, 0x74, 0x73) - if err != nil { - return - } - err = en.WriteUint64(z.TopLevelHits) - if err != nil { - err = msgp.WrapError(err, "TopLevelHits") - return - } - // write "PeerService" - err = en.Append(0xab, 0x50, 0x65, 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65) - if err != nil { - return - } - err = en.WriteString(z.PeerService) - if err != nil { - err = msgp.WrapError(err, "PeerService") - return - } - // write "SpanKind" - err = en.Append(0xa8, 0x53, 0x70, 0x61, 0x6e, 0x4b, 0x69, 0x6e, 0x64) - if err != nil { - return - } - err = en.WriteString(z.SpanKind) - if err != nil { - err = msgp.WrapError(err, "SpanKind") - return - } - return -} - -// MarshalMsg implements msgp.Marshaler -func (z *ClientGroupedStats) MarshalMsg(b []byte) (o []byte, err error) { - o = msgp.Require(b, z.Msgsize()) - // map header, size 15 - // string "Service" - o = append(o, 0x8f, 0xa7, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65) - o = msgp.AppendString(o, z.Service) - // string "Name" - o = append(o, 0xa4, 0x4e, 0x61, 0x6d, 0x65) - o = msgp.AppendString(o, z.Name) - // string "Resource" - o = append(o, 0xa8, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65) - o = msgp.AppendString(o, z.Resource) - // string "HTTPStatusCode" - o = append(o, 0xae, 0x48, 0x54, 0x54, 0x50, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65) - o = msgp.AppendUint32(o, z.HTTPStatusCode) - // string "Type" - o = append(o, 0xa4, 0x54, 0x79, 
0x70, 0x65) - o = msgp.AppendString(o, z.Type) - // string "DBType" - o = append(o, 0xa6, 0x44, 0x42, 0x54, 0x79, 0x70, 0x65) - o = msgp.AppendString(o, z.DBType) - // string "Hits" - o = append(o, 0xa4, 0x48, 0x69, 0x74, 0x73) - o = msgp.AppendUint64(o, z.Hits) - // string "Errors" - o = append(o, 0xa6, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x73) - o = msgp.AppendUint64(o, z.Errors) - // string "Duration" - o = append(o, 0xa8, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e) - o = msgp.AppendUint64(o, z.Duration) - // string "OkSummary" - o = append(o, 0xa9, 0x4f, 0x6b, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79) - o = msgp.AppendBytes(o, z.OkSummary) - // string "ErrorSummary" - o = append(o, 0xac, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79) - o = msgp.AppendBytes(o, z.ErrorSummary) - // string "Synthetics" - o = append(o, 0xaa, 0x53, 0x79, 0x6e, 0x74, 0x68, 0x65, 0x74, 0x69, 0x63, 0x73) - o = msgp.AppendBool(o, z.Synthetics) - // string "TopLevelHits" - o = append(o, 0xac, 0x54, 0x6f, 0x70, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x48, 0x69, 0x74, 0x73) - o = msgp.AppendUint64(o, z.TopLevelHits) - // string "PeerService" - o = append(o, 0xab, 0x50, 0x65, 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65) - o = msgp.AppendString(o, z.PeerService) - // string "SpanKind" - o = append(o, 0xa8, 0x53, 0x70, 0x61, 0x6e, 0x4b, 0x69, 0x6e, 0x64) - o = msgp.AppendString(o, z.SpanKind) - return -} - -// UnmarshalMsg implements msgp.Unmarshaler -func (z *ClientGroupedStats) UnmarshalMsg(bts []byte) (o []byte, err error) { - var field []byte - _ = field - var zb0001 uint32 - zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - for zb0001 > 0 { - zb0001-- - field, bts, err = msgp.ReadMapKeyZC(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - switch msgp.UnsafeString(field) { - case "Service": - z.Service, bts, err = msgp.ReadStringBytes(bts) - if err != nil { - err = msgp.WrapError(err, "Service") - return - } - case "Name": - z.Name, bts, err = msgp.ReadStringBytes(bts) - if err != nil { - err = msgp.WrapError(err, "Name") - return - } - case "Resource": - z.Resource, bts, err = msgp.ReadStringBytes(bts) - if err != nil { - err = msgp.WrapError(err, "Resource") - return - } - case "HTTPStatusCode": - z.HTTPStatusCode, bts, err = msgp.ReadUint32Bytes(bts) - if err != nil { - err = msgp.WrapError(err, "HTTPStatusCode") - return - } - case "Type": - z.Type, bts, err = msgp.ReadStringBytes(bts) - if err != nil { - err = msgp.WrapError(err, "Type") - return - } - case "DBType": - z.DBType, bts, err = msgp.ReadStringBytes(bts) - if err != nil { - err = msgp.WrapError(err, "DBType") - return - } - case "Hits": - z.Hits, bts, err = msgp.ReadUint64Bytes(bts) - if err != nil { - err = msgp.WrapError(err, "Hits") - return - } - case "Errors": - z.Errors, bts, err = msgp.ReadUint64Bytes(bts) - if err != nil { - err = msgp.WrapError(err, "Errors") - return - } - case "Duration": - z.Duration, bts, err = msgp.ReadUint64Bytes(bts) - if err != nil { - err = msgp.WrapError(err, "Duration") - return - } - case "OkSummary": - z.OkSummary, bts, err = msgp.ReadBytesBytes(bts, z.OkSummary) - if err != nil { - err = msgp.WrapError(err, "OkSummary") - return - } - case "ErrorSummary": - z.ErrorSummary, bts, err = msgp.ReadBytesBytes(bts, z.ErrorSummary) - if err != nil { - err = msgp.WrapError(err, "ErrorSummary") - return - } - case "Synthetics": - z.Synthetics, bts, err = msgp.ReadBoolBytes(bts) - if err != nil { - err = msgp.WrapError(err, 
"Synthetics") - return - } - case "TopLevelHits": - z.TopLevelHits, bts, err = msgp.ReadUint64Bytes(bts) - if err != nil { - err = msgp.WrapError(err, "TopLevelHits") - return - } - case "PeerService": - z.PeerService, bts, err = msgp.ReadStringBytes(bts) - if err != nil { - err = msgp.WrapError(err, "PeerService") - return - } - case "SpanKind": - z.SpanKind, bts, err = msgp.ReadStringBytes(bts) - if err != nil { - err = msgp.WrapError(err, "SpanKind") - return - } - default: - bts, err = msgp.Skip(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - } - } - o = bts - return -} - -// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message -func (z *ClientGroupedStats) Msgsize() (s int) { - s = 1 + 8 + msgp.StringPrefixSize + len(z.Service) + 5 + msgp.StringPrefixSize + len(z.Name) + 9 + msgp.StringPrefixSize + len(z.Resource) + 15 + msgp.Uint32Size + 5 + msgp.StringPrefixSize + len(z.Type) + 7 + msgp.StringPrefixSize + len(z.DBType) + 5 + msgp.Uint64Size + 7 + msgp.Uint64Size + 9 + msgp.Uint64Size + 10 + msgp.BytesPrefixSize + len(z.OkSummary) + 13 + msgp.BytesPrefixSize + len(z.ErrorSummary) + 11 + msgp.BoolSize + 13 + msgp.Uint64Size + 12 + msgp.StringPrefixSize + len(z.PeerService) + 9 + msgp.StringPrefixSize + len(z.SpanKind) - return -} - -// DecodeMsg implements msgp.Decodable -func (z *ClientStatsBucket) DecodeMsg(dc *msgp.Reader) (err error) { - var field []byte - _ = field - var zb0001 uint32 - zb0001, err = dc.ReadMapHeader() - if err != nil { - err = msgp.WrapError(err) - return - } - for zb0001 > 0 { - zb0001-- - field, err = dc.ReadMapKeyPtr() - if err != nil { - err = msgp.WrapError(err) - return - } - switch msgp.UnsafeString(field) { - case "Start": - z.Start, err = dc.ReadUint64() - if err != nil { - err = msgp.WrapError(err, "Start") - return - } - case "Duration": - z.Duration, err = dc.ReadUint64() - if err != nil { - err = msgp.WrapError(err, "Duration") - return - } - case "Stats": - var zb0002 uint32 - zb0002, err = dc.ReadArrayHeader() - if err != nil { - err = msgp.WrapError(err, "Stats") - return - } - if cap(z.Stats) >= int(zb0002) { - z.Stats = (z.Stats)[:zb0002] - } else { - z.Stats = make([]ClientGroupedStats, zb0002) - } - for za0001 := range z.Stats { - err = z.Stats[za0001].DecodeMsg(dc) - if err != nil { - err = msgp.WrapError(err, "Stats", za0001) - return - } - } - case "AgentTimeShift": - z.AgentTimeShift, err = dc.ReadInt64() - if err != nil { - err = msgp.WrapError(err, "AgentTimeShift") - return - } - default: - err = dc.Skip() - if err != nil { - err = msgp.WrapError(err) - return - } - } - } - return -} - -// EncodeMsg implements msgp.Encodable -func (z *ClientStatsBucket) EncodeMsg(en *msgp.Writer) (err error) { - // map header, size 4 - // write "Start" - err = en.Append(0x84, 0xa5, 0x53, 0x74, 0x61, 0x72, 0x74) - if err != nil { - return - } - err = en.WriteUint64(z.Start) - if err != nil { - err = msgp.WrapError(err, "Start") - return - } - // write "Duration" - err = en.Append(0xa8, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e) - if err != nil { - return - } - err = en.WriteUint64(z.Duration) - if err != nil { - err = msgp.WrapError(err, "Duration") - return - } - // write "Stats" - err = en.Append(0xa5, 0x53, 0x74, 0x61, 0x74, 0x73) - if err != nil { - return - } - err = en.WriteArrayHeader(uint32(len(z.Stats))) - if err != nil { - err = msgp.WrapError(err, "Stats") - return - } - for za0001 := range z.Stats { - err = z.Stats[za0001].EncodeMsg(en) - if err != nil { - err = 
msgp.WrapError(err, "Stats", za0001) - return - } - } - // write "AgentTimeShift" - err = en.Append(0xae, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x68, 0x69, 0x66, 0x74) - if err != nil { - return - } - err = en.WriteInt64(z.AgentTimeShift) - if err != nil { - err = msgp.WrapError(err, "AgentTimeShift") - return - } - return -} - -// MarshalMsg implements msgp.Marshaler -func (z *ClientStatsBucket) MarshalMsg(b []byte) (o []byte, err error) { - o = msgp.Require(b, z.Msgsize()) - // map header, size 4 - // string "Start" - o = append(o, 0x84, 0xa5, 0x53, 0x74, 0x61, 0x72, 0x74) - o = msgp.AppendUint64(o, z.Start) - // string "Duration" - o = append(o, 0xa8, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e) - o = msgp.AppendUint64(o, z.Duration) - // string "Stats" - o = append(o, 0xa5, 0x53, 0x74, 0x61, 0x74, 0x73) - o = msgp.AppendArrayHeader(o, uint32(len(z.Stats))) - for za0001 := range z.Stats { - o, err = z.Stats[za0001].MarshalMsg(o) - if err != nil { - err = msgp.WrapError(err, "Stats", za0001) - return - } - } - // string "AgentTimeShift" - o = append(o, 0xae, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x68, 0x69, 0x66, 0x74) - o = msgp.AppendInt64(o, z.AgentTimeShift) - return -} - -// UnmarshalMsg implements msgp.Unmarshaler -func (z *ClientStatsBucket) UnmarshalMsg(bts []byte) (o []byte, err error) { - var field []byte - _ = field - var zb0001 uint32 - zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - for zb0001 > 0 { - zb0001-- - field, bts, err = msgp.ReadMapKeyZC(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - switch msgp.UnsafeString(field) { - case "Start": - z.Start, bts, err = msgp.ReadUint64Bytes(bts) - if err != nil { - err = msgp.WrapError(err, "Start") - return - } - case "Duration": - z.Duration, bts, err = msgp.ReadUint64Bytes(bts) - if err != nil { - err = msgp.WrapError(err, "Duration") - return - } - case "Stats": - var zb0002 uint32 - zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts) - if err != nil { - err = msgp.WrapError(err, "Stats") - return - } - if cap(z.Stats) >= int(zb0002) { - z.Stats = (z.Stats)[:zb0002] - } else { - z.Stats = make([]ClientGroupedStats, zb0002) - } - for za0001 := range z.Stats { - bts, err = z.Stats[za0001].UnmarshalMsg(bts) - if err != nil { - err = msgp.WrapError(err, "Stats", za0001) - return - } - } - case "AgentTimeShift": - z.AgentTimeShift, bts, err = msgp.ReadInt64Bytes(bts) - if err != nil { - err = msgp.WrapError(err, "AgentTimeShift") - return - } - default: - bts, err = msgp.Skip(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - } - } - o = bts - return -} - -// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message -func (z *ClientStatsBucket) Msgsize() (s int) { - s = 1 + 6 + msgp.Uint64Size + 9 + msgp.Uint64Size + 6 + msgp.ArrayHeaderSize - for za0001 := range z.Stats { - s += z.Stats[za0001].Msgsize() - } - s += 15 + msgp.Int64Size - return -} - -// DecodeMsg implements msgp.Decodable -func (z *ClientStatsPayload) DecodeMsg(dc *msgp.Reader) (err error) { - var field []byte - _ = field - var zb0001 uint32 - zb0001, err = dc.ReadMapHeader() - if err != nil { - err = msgp.WrapError(err) - return - } - for zb0001 > 0 { - zb0001-- - field, err = dc.ReadMapKeyPtr() - if err != nil { - err = msgp.WrapError(err) - return - } - switch msgp.UnsafeString(field) { - case "Hostname": - z.Hostname, err = dc.ReadString() - if err != nil { - err = msgp.WrapError(err, 
"Hostname") - return - } - case "Env": - z.Env, err = dc.ReadString() - if err != nil { - err = msgp.WrapError(err, "Env") - return - } - case "Version": - z.Version, err = dc.ReadString() - if err != nil { - err = msgp.WrapError(err, "Version") - return - } - case "Stats": - var zb0002 uint32 - zb0002, err = dc.ReadArrayHeader() - if err != nil { - err = msgp.WrapError(err, "Stats") - return - } - if cap(z.Stats) >= int(zb0002) { - z.Stats = (z.Stats)[:zb0002] - } else { - z.Stats = make([]ClientStatsBucket, zb0002) - } - for za0001 := range z.Stats { - err = z.Stats[za0001].DecodeMsg(dc) - if err != nil { - err = msgp.WrapError(err, "Stats", za0001) - return - } - } - case "Lang": - z.Lang, err = dc.ReadString() - if err != nil { - err = msgp.WrapError(err, "Lang") - return - } - case "TracerVersion": - z.TracerVersion, err = dc.ReadString() - if err != nil { - err = msgp.WrapError(err, "TracerVersion") - return - } - case "RuntimeID": - z.RuntimeID, err = dc.ReadString() - if err != nil { - err = msgp.WrapError(err, "RuntimeID") - return - } - case "Sequence": - z.Sequence, err = dc.ReadUint64() - if err != nil { - err = msgp.WrapError(err, "Sequence") - return - } - case "AgentAggregation": - z.AgentAggregation, err = dc.ReadString() - if err != nil { - err = msgp.WrapError(err, "AgentAggregation") - return - } - case "Service": - z.Service, err = dc.ReadString() - if err != nil { - err = msgp.WrapError(err, "Service") - return - } - case "ContainerID": - z.ContainerID, err = dc.ReadString() - if err != nil { - err = msgp.WrapError(err, "ContainerID") - return - } - case "Tags": - var zb0003 uint32 - zb0003, err = dc.ReadArrayHeader() - if err != nil { - err = msgp.WrapError(err, "Tags") - return - } - if cap(z.Tags) >= int(zb0003) { - z.Tags = (z.Tags)[:zb0003] - } else { - z.Tags = make([]string, zb0003) - } - for za0002 := range z.Tags { - z.Tags[za0002], err = dc.ReadString() - if err != nil { - err = msgp.WrapError(err, "Tags", za0002) - return - } - } - default: - err = dc.Skip() - if err != nil { - err = msgp.WrapError(err) - return - } - } - } - return -} - -// EncodeMsg implements msgp.Encodable -func (z *ClientStatsPayload) EncodeMsg(en *msgp.Writer) (err error) { - // map header, size 12 - // write "Hostname" - err = en.Append(0x8c, 0xa8, 0x48, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65) - if err != nil { - return - } - err = en.WriteString(z.Hostname) - if err != nil { - err = msgp.WrapError(err, "Hostname") - return - } - // write "Env" - err = en.Append(0xa3, 0x45, 0x6e, 0x76) - if err != nil { - return - } - err = en.WriteString(z.Env) - if err != nil { - err = msgp.WrapError(err, "Env") - return - } - // write "Version" - err = en.Append(0xa7, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e) - if err != nil { - return - } - err = en.WriteString(z.Version) - if err != nil { - err = msgp.WrapError(err, "Version") - return - } - // write "Stats" - err = en.Append(0xa5, 0x53, 0x74, 0x61, 0x74, 0x73) - if err != nil { - return - } - err = en.WriteArrayHeader(uint32(len(z.Stats))) - if err != nil { - err = msgp.WrapError(err, "Stats") - return - } - for za0001 := range z.Stats { - err = z.Stats[za0001].EncodeMsg(en) - if err != nil { - err = msgp.WrapError(err, "Stats", za0001) - return - } - } - // write "Lang" - err = en.Append(0xa4, 0x4c, 0x61, 0x6e, 0x67) - if err != nil { - return - } - err = en.WriteString(z.Lang) - if err != nil { - err = msgp.WrapError(err, "Lang") - return - } - // write "TracerVersion" - err = en.Append(0xad, 0x54, 0x72, 0x61, 0x63, 0x65, 0x72, 0x56, 0x65, 
0x72, 0x73, 0x69, 0x6f, 0x6e) - if err != nil { - return - } - err = en.WriteString(z.TracerVersion) - if err != nil { - err = msgp.WrapError(err, "TracerVersion") - return - } - // write "RuntimeID" - err = en.Append(0xa9, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x49, 0x44) - if err != nil { - return - } - err = en.WriteString(z.RuntimeID) - if err != nil { - err = msgp.WrapError(err, "RuntimeID") - return - } - // write "Sequence" - err = en.Append(0xa8, 0x53, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63, 0x65) - if err != nil { - return - } - err = en.WriteUint64(z.Sequence) - if err != nil { - err = msgp.WrapError(err, "Sequence") - return - } - // write "AgentAggregation" - err = en.Append(0xb0, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x41, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e) - if err != nil { - return - } - err = en.WriteString(z.AgentAggregation) - if err != nil { - err = msgp.WrapError(err, "AgentAggregation") - return - } - // write "Service" - err = en.Append(0xa7, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65) - if err != nil { - return - } - err = en.WriteString(z.Service) - if err != nil { - err = msgp.WrapError(err, "Service") - return - } - // write "ContainerID" - err = en.Append(0xab, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x44) - if err != nil { - return - } - err = en.WriteString(z.ContainerID) - if err != nil { - err = msgp.WrapError(err, "ContainerID") - return - } - // write "Tags" - err = en.Append(0xa4, 0x54, 0x61, 0x67, 0x73) - if err != nil { - return - } - err = en.WriteArrayHeader(uint32(len(z.Tags))) - if err != nil { - err = msgp.WrapError(err, "Tags") - return - } - for za0002 := range z.Tags { - err = en.WriteString(z.Tags[za0002]) - if err != nil { - err = msgp.WrapError(err, "Tags", za0002) - return - } - } - return -} - -// MarshalMsg implements msgp.Marshaler -func (z *ClientStatsPayload) MarshalMsg(b []byte) (o []byte, err error) { - o = msgp.Require(b, z.Msgsize()) - // map header, size 12 - // string "Hostname" - o = append(o, 0x8c, 0xa8, 0x48, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65) - o = msgp.AppendString(o, z.Hostname) - // string "Env" - o = append(o, 0xa3, 0x45, 0x6e, 0x76) - o = msgp.AppendString(o, z.Env) - // string "Version" - o = append(o, 0xa7, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e) - o = msgp.AppendString(o, z.Version) - // string "Stats" - o = append(o, 0xa5, 0x53, 0x74, 0x61, 0x74, 0x73) - o = msgp.AppendArrayHeader(o, uint32(len(z.Stats))) - for za0001 := range z.Stats { - o, err = z.Stats[za0001].MarshalMsg(o) - if err != nil { - err = msgp.WrapError(err, "Stats", za0001) - return - } - } - // string "Lang" - o = append(o, 0xa4, 0x4c, 0x61, 0x6e, 0x67) - o = msgp.AppendString(o, z.Lang) - // string "TracerVersion" - o = append(o, 0xad, 0x54, 0x72, 0x61, 0x63, 0x65, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e) - o = msgp.AppendString(o, z.TracerVersion) - // string "RuntimeID" - o = append(o, 0xa9, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x49, 0x44) - o = msgp.AppendString(o, z.RuntimeID) - // string "Sequence" - o = append(o, 0xa8, 0x53, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63, 0x65) - o = msgp.AppendUint64(o, z.Sequence) - // string "AgentAggregation" - o = append(o, 0xb0, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x41, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e) - o = msgp.AppendString(o, z.AgentAggregation) - // string "Service" - o = append(o, 0xa7, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65) - o = msgp.AppendString(o, z.Service) - // string "ContainerID" - o = append(o, 0xab, 0x43, 0x6f, 0x6e, 0x74, 
0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x44) - o = msgp.AppendString(o, z.ContainerID) - // string "Tags" - o = append(o, 0xa4, 0x54, 0x61, 0x67, 0x73) - o = msgp.AppendArrayHeader(o, uint32(len(z.Tags))) - for za0002 := range z.Tags { - o = msgp.AppendString(o, z.Tags[za0002]) - } - return -} - -// UnmarshalMsg implements msgp.Unmarshaler -func (z *ClientStatsPayload) UnmarshalMsg(bts []byte) (o []byte, err error) { - var field []byte - _ = field - var zb0001 uint32 - zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - for zb0001 > 0 { - zb0001-- - field, bts, err = msgp.ReadMapKeyZC(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - switch msgp.UnsafeString(field) { - case "Hostname": - z.Hostname, bts, err = msgp.ReadStringBytes(bts) - if err != nil { - err = msgp.WrapError(err, "Hostname") - return - } - case "Env": - z.Env, bts, err = msgp.ReadStringBytes(bts) - if err != nil { - err = msgp.WrapError(err, "Env") - return - } - case "Version": - z.Version, bts, err = msgp.ReadStringBytes(bts) - if err != nil { - err = msgp.WrapError(err, "Version") - return - } - case "Stats": - var zb0002 uint32 - zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts) - if err != nil { - err = msgp.WrapError(err, "Stats") - return - } - if cap(z.Stats) >= int(zb0002) { - z.Stats = (z.Stats)[:zb0002] - } else { - z.Stats = make([]ClientStatsBucket, zb0002) - } - for za0001 := range z.Stats { - bts, err = z.Stats[za0001].UnmarshalMsg(bts) - if err != nil { - err = msgp.WrapError(err, "Stats", za0001) - return - } - } - case "Lang": - z.Lang, bts, err = msgp.ReadStringBytes(bts) - if err != nil { - err = msgp.WrapError(err, "Lang") - return - } - case "TracerVersion": - z.TracerVersion, bts, err = msgp.ReadStringBytes(bts) - if err != nil { - err = msgp.WrapError(err, "TracerVersion") - return - } - case "RuntimeID": - z.RuntimeID, bts, err = msgp.ReadStringBytes(bts) - if err != nil { - err = msgp.WrapError(err, "RuntimeID") - return - } - case "Sequence": - z.Sequence, bts, err = msgp.ReadUint64Bytes(bts) - if err != nil { - err = msgp.WrapError(err, "Sequence") - return - } - case "AgentAggregation": - z.AgentAggregation, bts, err = msgp.ReadStringBytes(bts) - if err != nil { - err = msgp.WrapError(err, "AgentAggregation") - return - } - case "Service": - z.Service, bts, err = msgp.ReadStringBytes(bts) - if err != nil { - err = msgp.WrapError(err, "Service") - return - } - case "ContainerID": - z.ContainerID, bts, err = msgp.ReadStringBytes(bts) - if err != nil { - err = msgp.WrapError(err, "ContainerID") - return - } - case "Tags": - var zb0003 uint32 - zb0003, bts, err = msgp.ReadArrayHeaderBytes(bts) - if err != nil { - err = msgp.WrapError(err, "Tags") - return - } - if cap(z.Tags) >= int(zb0003) { - z.Tags = (z.Tags)[:zb0003] - } else { - z.Tags = make([]string, zb0003) - } - for za0002 := range z.Tags { - z.Tags[za0002], bts, err = msgp.ReadStringBytes(bts) - if err != nil { - err = msgp.WrapError(err, "Tags", za0002) - return - } - } - default: - bts, err = msgp.Skip(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - } - } - o = bts - return -} - -// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message -func (z *ClientStatsPayload) Msgsize() (s int) { - s = 1 + 9 + msgp.StringPrefixSize + len(z.Hostname) + 4 + msgp.StringPrefixSize + len(z.Env) + 8 + msgp.StringPrefixSize + len(z.Version) + 6 + msgp.ArrayHeaderSize - for za0001 := range z.Stats { - s += 
z.Stats[za0001].Msgsize() - } - s += 5 + msgp.StringPrefixSize + len(z.Lang) + 14 + msgp.StringPrefixSize + len(z.TracerVersion) + 10 + msgp.StringPrefixSize + len(z.RuntimeID) + 9 + msgp.Uint64Size + 17 + msgp.StringPrefixSize + len(z.AgentAggregation) + 8 + msgp.StringPrefixSize + len(z.Service) + 12 + msgp.StringPrefixSize + len(z.ContainerID) + 5 + msgp.ArrayHeaderSize - for za0002 := range z.Tags { - s += msgp.StringPrefixSize + len(z.Tags[za0002]) - } - return -} - -// DecodeMsg implements msgp.Decodable -func (z *StatsPayload) DecodeMsg(dc *msgp.Reader) (err error) { - var field []byte - _ = field - var zb0001 uint32 - zb0001, err = dc.ReadMapHeader() - if err != nil { - err = msgp.WrapError(err) - return - } - for zb0001 > 0 { - zb0001-- - field, err = dc.ReadMapKeyPtr() - if err != nil { - err = msgp.WrapError(err) - return - } - switch msgp.UnsafeString(field) { - case "AgentHostname": - z.AgentHostname, err = dc.ReadString() - if err != nil { - err = msgp.WrapError(err, "AgentHostname") - return - } - case "AgentEnv": - z.AgentEnv, err = dc.ReadString() - if err != nil { - err = msgp.WrapError(err, "AgentEnv") - return - } - case "Stats": - var zb0002 uint32 - zb0002, err = dc.ReadArrayHeader() - if err != nil { - err = msgp.WrapError(err, "Stats") - return - } - if cap(z.Stats) >= int(zb0002) { - z.Stats = (z.Stats)[:zb0002] - } else { - z.Stats = make([]ClientStatsPayload, zb0002) - } - for za0001 := range z.Stats { - err = z.Stats[za0001].DecodeMsg(dc) - if err != nil { - err = msgp.WrapError(err, "Stats", za0001) - return - } - } - case "AgentVersion": - z.AgentVersion, err = dc.ReadString() - if err != nil { - err = msgp.WrapError(err, "AgentVersion") - return - } - case "ClientComputed": - z.ClientComputed, err = dc.ReadBool() - if err != nil { - err = msgp.WrapError(err, "ClientComputed") - return - } - default: - err = dc.Skip() - if err != nil { - err = msgp.WrapError(err) - return - } - } - } - return -} - -// EncodeMsg implements msgp.Encodable -func (z *StatsPayload) EncodeMsg(en *msgp.Writer) (err error) { - // map header, size 5 - // write "AgentHostname" - err = en.Append(0x85, 0xad, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x48, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65) - if err != nil { - return - } - err = en.WriteString(z.AgentHostname) - if err != nil { - err = msgp.WrapError(err, "AgentHostname") - return - } - // write "AgentEnv" - err = en.Append(0xa8, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x45, 0x6e, 0x76) - if err != nil { - return - } - err = en.WriteString(z.AgentEnv) - if err != nil { - err = msgp.WrapError(err, "AgentEnv") - return - } - // write "Stats" - err = en.Append(0xa5, 0x53, 0x74, 0x61, 0x74, 0x73) - if err != nil { - return - } - err = en.WriteArrayHeader(uint32(len(z.Stats))) - if err != nil { - err = msgp.WrapError(err, "Stats") - return - } - for za0001 := range z.Stats { - err = z.Stats[za0001].EncodeMsg(en) - if err != nil { - err = msgp.WrapError(err, "Stats", za0001) - return - } - } - // write "AgentVersion" - err = en.Append(0xac, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e) - if err != nil { - return - } - err = en.WriteString(z.AgentVersion) - if err != nil { - err = msgp.WrapError(err, "AgentVersion") - return - } - // write "ClientComputed" - err = en.Append(0xae, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6d, 0x70, 0x75, 0x74, 0x65, 0x64) - if err != nil { - return - } - err = en.WriteBool(z.ClientComputed) - if err != nil { - err = msgp.WrapError(err, "ClientComputed") - return - } - return -} - -// MarshalMsg 
implements msgp.Marshaler -func (z *StatsPayload) MarshalMsg(b []byte) (o []byte, err error) { - o = msgp.Require(b, z.Msgsize()) - // map header, size 5 - // string "AgentHostname" - o = append(o, 0x85, 0xad, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x48, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65) - o = msgp.AppendString(o, z.AgentHostname) - // string "AgentEnv" - o = append(o, 0xa8, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x45, 0x6e, 0x76) - o = msgp.AppendString(o, z.AgentEnv) - // string "Stats" - o = append(o, 0xa5, 0x53, 0x74, 0x61, 0x74, 0x73) - o = msgp.AppendArrayHeader(o, uint32(len(z.Stats))) - for za0001 := range z.Stats { - o, err = z.Stats[za0001].MarshalMsg(o) - if err != nil { - err = msgp.WrapError(err, "Stats", za0001) - return - } - } - // string "AgentVersion" - o = append(o, 0xac, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e) - o = msgp.AppendString(o, z.AgentVersion) - // string "ClientComputed" - o = append(o, 0xae, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6d, 0x70, 0x75, 0x74, 0x65, 0x64) - o = msgp.AppendBool(o, z.ClientComputed) - return -} - -// UnmarshalMsg implements msgp.Unmarshaler -func (z *StatsPayload) UnmarshalMsg(bts []byte) (o []byte, err error) { - var field []byte - _ = field - var zb0001 uint32 - zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - for zb0001 > 0 { - zb0001-- - field, bts, err = msgp.ReadMapKeyZC(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - switch msgp.UnsafeString(field) { - case "AgentHostname": - z.AgentHostname, bts, err = msgp.ReadStringBytes(bts) - if err != nil { - err = msgp.WrapError(err, "AgentHostname") - return - } - case "AgentEnv": - z.AgentEnv, bts, err = msgp.ReadStringBytes(bts) - if err != nil { - err = msgp.WrapError(err, "AgentEnv") - return - } - case "Stats": - var zb0002 uint32 - zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts) - if err != nil { - err = msgp.WrapError(err, "Stats") - return - } - if cap(z.Stats) >= int(zb0002) { - z.Stats = (z.Stats)[:zb0002] - } else { - z.Stats = make([]ClientStatsPayload, zb0002) - } - for za0001 := range z.Stats { - bts, err = z.Stats[za0001].UnmarshalMsg(bts) - if err != nil { - err = msgp.WrapError(err, "Stats", za0001) - return - } - } - case "AgentVersion": - z.AgentVersion, bts, err = msgp.ReadStringBytes(bts) - if err != nil { - err = msgp.WrapError(err, "AgentVersion") - return - } - case "ClientComputed": - z.ClientComputed, bts, err = msgp.ReadBoolBytes(bts) - if err != nil { - err = msgp.WrapError(err, "ClientComputed") - return - } - default: - bts, err = msgp.Skip(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - } - } - o = bts - return -} - -// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message -func (z *StatsPayload) Msgsize() (s int) { - s = 1 + 14 + msgp.StringPrefixSize + len(z.AgentHostname) + 9 + msgp.StringPrefixSize + len(z.AgentEnv) + 6 + msgp.ArrayHeaderSize - for za0001 := range z.Stats { - s += z.Stats[za0001].Msgsize() - } - s += 13 + msgp.StringPrefixSize + len(z.AgentVersion) + 15 + msgp.BoolSize - return -} diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/trace/pb/trace.go b/vendor/github.com/DataDog/datadog-agent/pkg/trace/pb/trace.go deleted file mode 100644 index aa1069796..000000000 --- a/vendor/github.com/DataDog/datadog-agent/pkg/trace/pb/trace.go +++ /dev/null @@ -1,52 +0,0 @@ -// Unless explicitly stated otherwise all files in this repository are licensed -// under the Apache 
License Version 2.0. -// This product includes software developed at Datadog (https://www.datadoghq.com/). -// Copyright 2016-present Datadog, Inc. - -package pb - -//go:generate go run github.com/tinylib/msgp -file=span.pb.go -o span_gen.go -io=false -//go:generate go run github.com/tinylib/msgp -file=tracer_payload.pb.go -o tracer_payload_gen.go -io=false -//go:generate go run github.com/tinylib/msgp -io=false - -// Trace is a collection of spans with the same trace ID -type Trace []*Span - -// Traces is a list of traces. This model matters as this is what we unpack from msgp. -type Traces []Trace - -// RemoveChunk removes a chunk by its index. -func (p *TracerPayload) RemoveChunk(i int) { - if i < 0 || i >= len(p.Chunks) { - return - } - p.Chunks[i] = p.Chunks[len(p.Chunks)-1] - p.Chunks = p.Chunks[:len(p.Chunks)-1] -} - -// Cut cuts off a new tracer payload from the `p` with [0, i-1] chunks -// and keeps [i, n-1] chunks in the original payload `p`. -func (p *TracerPayload) Cut(i int) *TracerPayload { - if i < 0 { - i = 0 - } - if i > len(p.Chunks) { - i = len(p.Chunks) - } - new := TracerPayload{ - ContainerID: p.GetContainerID(), - LanguageName: p.GetLanguageName(), - LanguageVersion: p.GetLanguageVersion(), - TracerVersion: p.GetTracerVersion(), - RuntimeID: p.GetRuntimeID(), - Env: p.GetEnv(), - Hostname: p.GetHostname(), - AppVersion: p.GetAppVersion(), - Tags: p.GetTags(), - } - - new.Chunks = p.Chunks[:i] - p.Chunks = p.Chunks[i:] - - return &new -} diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/trace/pb/trace_gen.go b/vendor/github.com/DataDog/datadog-agent/pkg/trace/pb/trace_gen.go deleted file mode 100644 index 8f8833cfa..000000000 --- a/vendor/github.com/DataDog/datadog-agent/pkg/trace/pb/trace_gen.go +++ /dev/null @@ -1,163 +0,0 @@ -// Unless explicitly stated otherwise all files in this repository are licensed -// under the Apache License Version 2.0. -// This product includes software developed at Datadog (https://www.datadoghq.com/). -// Copyright 2016-present Datadog, Inc. - -package pb - -// Code generated by github.com/tinylib/msgp DO NOT EDIT. 
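For orientation: the trace.go removed above defined two small helpers on TracerPayload. RemoveChunk(i) deletes a chunk by swapping in the last element and truncating, and Cut(i) splits chunks [0, i-1] off into a new payload that copies the original's metadata while p keeps chunks [i, n-1]. A hedged usage sketch follows; the wrapper function name and package alias are invented, while the two methods are the ones deleted above.

    package example

    import pb "github.com/DataDog/datadog-agent/pkg/trace/pb"

    // flushHead is a hypothetical helper: it splits off the first two chunks
    // for an early flush, then discards one of the remaining chunks.
    func flushHead(p *pb.TracerPayload) *pb.TracerPayload {
        head := p.Cut(2) // head gets chunks [0,1] plus p's metadata; p keeps [2, n-1]
        p.RemoveChunk(0) // drops what is now chunk 0; swap-with-last, so order is not preserved
        return head
    }

Both helpers clamp or ignore out-of-range indices (Cut clamps i into [0, len(p.Chunks)]; RemoveChunk returns early), so the sketch is safe even on a payload with fewer than two chunks.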
- -import ( - "github.com/tinylib/msgp/msgp" -) - -// MarshalMsg implements msgp.Marshaler -func (z Trace) MarshalMsg(b []byte) (o []byte, err error) { - o = msgp.Require(b, z.Msgsize()) - o = msgp.AppendArrayHeader(o, uint32(len(z))) - for za0001 := range z { - if z[za0001] == nil { - o = msgp.AppendNil(o) - } else { - o, err = z[za0001].MarshalMsg(o) - if err != nil { - err = msgp.WrapError(err, za0001) - return - } - } - } - return -} - -// UnmarshalMsg implements msgp.Unmarshaler -func (z *Trace) UnmarshalMsg(bts []byte) (o []byte, err error) { - var zb0002 uint32 - zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - if cap((*z)) >= int(zb0002) { - (*z) = (*z)[:zb0002] - } else { - (*z) = make(Trace, zb0002) - } - for zb0001 := range *z { - if msgp.IsNil(bts) { - bts, err = msgp.ReadNilBytes(bts) - if err != nil { - return - } - (*z)[zb0001] = nil - } else { - if (*z)[zb0001] == nil { - (*z)[zb0001] = new(Span) - } - bts, err = (*z)[zb0001].UnmarshalMsg(bts) - if err != nil { - err = msgp.WrapError(err, zb0001) - return - } - } - } - o = bts - return -} - -// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message -func (z Trace) Msgsize() (s int) { - s = msgp.ArrayHeaderSize - for zb0003 := range z { - if z[zb0003] == nil { - s += msgp.NilSize - } else { - s += z[zb0003].Msgsize() - } - } - return -} - -// MarshalMsg implements msgp.Marshaler -func (z Traces) MarshalMsg(b []byte) (o []byte, err error) { - o = msgp.Require(b, z.Msgsize()) - o = msgp.AppendArrayHeader(o, uint32(len(z))) - for za0001 := range z { - o = msgp.AppendArrayHeader(o, uint32(len(z[za0001]))) - for za0002 := range z[za0001] { - if z[za0001][za0002] == nil { - o = msgp.AppendNil(o) - } else { - o, err = z[za0001][za0002].MarshalMsg(o) - if err != nil { - err = msgp.WrapError(err, za0001, za0002) - return - } - } - } - } - return -} - -// UnmarshalMsg implements msgp.Unmarshaler -func (z *Traces) UnmarshalMsg(bts []byte) (o []byte, err error) { - var zb0003 uint32 - zb0003, bts, err = msgp.ReadArrayHeaderBytes(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - if cap((*z)) >= int(zb0003) { - (*z) = (*z)[:zb0003] - } else { - (*z) = make(Traces, zb0003) - } - for zb0001 := range *z { - var zb0004 uint32 - zb0004, bts, err = msgp.ReadArrayHeaderBytes(bts) - if err != nil { - err = msgp.WrapError(err, zb0001) - return - } - if cap((*z)[zb0001]) >= int(zb0004) { - (*z)[zb0001] = ((*z)[zb0001])[:zb0004] - } else { - (*z)[zb0001] = make(Trace, zb0004) - } - for zb0002 := range (*z)[zb0001] { - if msgp.IsNil(bts) { - bts, err = msgp.ReadNilBytes(bts) - if err != nil { - return - } - (*z)[zb0001][zb0002] = nil - } else { - if (*z)[zb0001][zb0002] == nil { - (*z)[zb0001][zb0002] = new(Span) - } - bts, err = (*z)[zb0001][zb0002].UnmarshalMsg(bts) - if err != nil { - err = msgp.WrapError(err, zb0001, zb0002) - return - } - } - } - } - o = bts - return -} - -// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message -func (z Traces) Msgsize() (s int) { - s = msgp.ArrayHeaderSize - for zb0005 := range z { - s += msgp.ArrayHeaderSize - for zb0006 := range z[zb0005] { - if z[zb0005][zb0006] == nil { - s += msgp.NilSize - } else { - s += z[zb0005][zb0006].Msgsize() - } - } - } - return -} diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/trace/pb/tracer_payload.pb.go b/vendor/github.com/DataDog/datadog-agent/pkg/trace/pb/tracer_payload.pb.go deleted file mode 100644 
index 2f3bc339e..000000000 --- a/vendor/github.com/DataDog/datadog-agent/pkg/trace/pb/tracer_payload.pb.go +++ /dev/null @@ -1,388 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.28.1 -// protoc v3.21.5 -// source: tracer_payload.proto - -package pb - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// TraceChunk represents a list of spans with the same trace ID. In other words, a chunk of a trace. -type TraceChunk struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // priority specifies sampling priority of the trace. - // @gotags: json:"priority" msg:"priority" - Priority int32 `protobuf:"varint,1,opt,name=priority,proto3" json:"priority" msg:"priority"` - // origin specifies origin product ("lambda", "rum", etc.) of the trace. - // @gotags: json:"origin" msg:"origin" - Origin string `protobuf:"bytes,2,opt,name=origin,proto3" json:"origin" msg:"origin"` - // spans specifies list of containing spans. - // @gotags: json:"spans" msg:"spans" - Spans []*Span `protobuf:"bytes,3,rep,name=spans,proto3" json:"spans" msg:"spans"` - // tags specifies tags common in all `spans`. - // @gotags: json:"tags" msg:"tags" - Tags map[string]string `protobuf:"bytes,4,rep,name=tags,proto3" json:"tags" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3" msg:"tags"` - // droppedTrace specifies whether the trace was dropped by samplers or not. - // @gotags: json:"dropped_trace" msg:"dropped_trace" - DroppedTrace bool `protobuf:"varint,5,opt,name=droppedTrace,proto3" json:"dropped_trace" msg:"dropped_trace"` -} - -func (x *TraceChunk) Reset() { - *x = TraceChunk{} - if protoimpl.UnsafeEnabled { - mi := &file_tracer_payload_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *TraceChunk) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*TraceChunk) ProtoMessage() {} - -func (x *TraceChunk) ProtoReflect() protoreflect.Message { - mi := &file_tracer_payload_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use TraceChunk.ProtoReflect.Descriptor instead. -func (*TraceChunk) Descriptor() ([]byte, []int) { - return file_tracer_payload_proto_rawDescGZIP(), []int{0} -} - -func (x *TraceChunk) GetPriority() int32 { - if x != nil { - return x.Priority - } - return 0 -} - -func (x *TraceChunk) GetOrigin() string { - if x != nil { - return x.Origin - } - return "" -} - -func (x *TraceChunk) GetSpans() []*Span { - if x != nil { - return x.Spans - } - return nil -} - -func (x *TraceChunk) GetTags() map[string]string { - if x != nil { - return x.Tags - } - return nil -} - -func (x *TraceChunk) GetDroppedTrace() bool { - if x != nil { - return x.DroppedTrace - } - return false -} - -// TracerPayload represents a payload the trace agent receives from tracers. 
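One property of the generated accessors above is worth noting: like all protoc-gen-go getters, they guard against nil receivers, so callers can read fields without nil checks. A short sketch (illustrative values, not part of the patch):

	// The generated getters return zero values on a nil receiver:
	var chunk *TraceChunk
	_ = chunk.GetPriority()     // 0, no panic despite the nil pointer
	_ = chunk.GetOrigin()       // ""
	_ = chunk.GetDroppedTrace() // false

	chunk = &TraceChunk{Priority: 2, Origin: "lambda", DroppedTrace: true}
	_ = chunk.GetOrigin() // "lambda"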
-type TracerPayload struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // containerID specifies the ID of the container where the tracer is running on. - // @gotags: json:"container_id" msg:"container_id" - ContainerID string `protobuf:"bytes,1,opt,name=containerID,proto3" json:"container_id" msg:"container_id"` - // languageName specifies language of the tracer. - // @gotags: json:"language_name" msg:"language_name" - LanguageName string `protobuf:"bytes,2,opt,name=languageName,proto3" json:"language_name" msg:"language_name"` - // languageVersion specifies language version of the tracer. - // @gotags: json:"language_version" msg:"language_version" - LanguageVersion string `protobuf:"bytes,3,opt,name=languageVersion,proto3" json:"language_version" msg:"language_version"` - // tracerVersion specifies version of the tracer. - // @gotags: json:"tracer_version" msg:"tracer_version" - TracerVersion string `protobuf:"bytes,4,opt,name=tracerVersion,proto3" json:"tracer_version" msg:"tracer_version"` - // runtimeID specifies V4 UUID representation of a tracer session. - // @gotags: json:"runtime_id" msg:"runtime_id" - RuntimeID string `protobuf:"bytes,5,opt,name=runtimeID,proto3" json:"runtime_id" msg:"runtime_id"` - // chunks specifies list of containing trace chunks. - // @gotags: json:"chunks" msg:"chunks" - Chunks []*TraceChunk `protobuf:"bytes,6,rep,name=chunks,proto3" json:"chunks" msg:"chunks"` - // tags specifies tags common in all `chunks`. - // @gotags: json:"tags" msg:"tags" - Tags map[string]string `protobuf:"bytes,7,rep,name=tags,proto3" json:"tags" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3" msg:"tags"` - // env specifies `env` tag that set with the tracer. - // @gotags: json:"env" msg:"env" - Env string `protobuf:"bytes,8,opt,name=env,proto3" json:"env" msg:"env"` - // hostname specifies hostname of where the tracer is running. - // @gotags: json:"hostname" msg:"hostname" - Hostname string `protobuf:"bytes,9,opt,name=hostname,proto3" json:"hostname" msg:"hostname"` - // version specifies `version` tag that set with the tracer. - // @gotags: json:"app_version" msg:"app_version" - AppVersion string `protobuf:"bytes,10,opt,name=appVersion,proto3" json:"app_version" msg:"app_version"` -} - -func (x *TracerPayload) Reset() { - *x = TracerPayload{} - if protoimpl.UnsafeEnabled { - mi := &file_tracer_payload_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *TracerPayload) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*TracerPayload) ProtoMessage() {} - -func (x *TracerPayload) ProtoReflect() protoreflect.Message { - mi := &file_tracer_payload_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use TracerPayload.ProtoReflect.Descriptor instead. 
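For orientation, a hypothetical sketch (not part of the patch) of walking the nested structure these deleted types describe, using the nil-safe getters:

	// A TracerPayload nests TraceChunks, which in turn nest Spans.
	payload := &TracerPayload{
		ContainerID: "abc123",
		Chunks: []*TraceChunk{
			{Origin: "lambda", Spans: []*Span{{}, {}}},
			{Origin: "rum", Spans: []*Span{{}}},
		},
	}
	spanCount := 0
	for _, chunk := range payload.GetChunks() {
		spanCount += len(chunk.GetSpans())
	}
	// spanCount == 3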
-func (*TracerPayload) Descriptor() ([]byte, []int) { - return file_tracer_payload_proto_rawDescGZIP(), []int{1} -} - -func (x *TracerPayload) GetContainerID() string { - if x != nil { - return x.ContainerID - } - return "" -} - -func (x *TracerPayload) GetLanguageName() string { - if x != nil { - return x.LanguageName - } - return "" -} - -func (x *TracerPayload) GetLanguageVersion() string { - if x != nil { - return x.LanguageVersion - } - return "" -} - -func (x *TracerPayload) GetTracerVersion() string { - if x != nil { - return x.TracerVersion - } - return "" -} - -func (x *TracerPayload) GetRuntimeID() string { - if x != nil { - return x.RuntimeID - } - return "" -} - -func (x *TracerPayload) GetChunks() []*TraceChunk { - if x != nil { - return x.Chunks - } - return nil -} - -func (x *TracerPayload) GetTags() map[string]string { - if x != nil { - return x.Tags - } - return nil -} - -func (x *TracerPayload) GetEnv() string { - if x != nil { - return x.Env - } - return "" -} - -func (x *TracerPayload) GetHostname() string { - if x != nil { - return x.Hostname - } - return "" -} - -func (x *TracerPayload) GetAppVersion() string { - if x != nil { - return x.AppVersion - } - return "" -} - -var File_tracer_payload_proto protoreflect.FileDescriptor - -var file_tracer_payload_proto_rawDesc = []byte{ - 0x0a, 0x14, 0x74, 0x72, 0x61, 0x63, 0x65, 0x72, 0x5f, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x02, 0x70, 0x62, 0x1a, 0x0a, 0x73, 0x70, 0x61, 0x6e, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xeb, 0x01, 0x0a, 0x0a, 0x54, 0x72, 0x61, 0x63, 0x65, - 0x43, 0x68, 0x75, 0x6e, 0x6b, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, - 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, - 0x79, 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x06, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x12, 0x1e, 0x0a, 0x05, 0x73, 0x70, 0x61, - 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x08, 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x70, - 0x61, 0x6e, 0x52, 0x05, 0x73, 0x70, 0x61, 0x6e, 0x73, 0x12, 0x2c, 0x0a, 0x04, 0x74, 0x61, 0x67, - 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x70, 0x62, 0x2e, 0x54, 0x72, 0x61, - 0x63, 0x65, 0x43, 0x68, 0x75, 0x6e, 0x6b, 0x2e, 0x54, 0x61, 0x67, 0x73, 0x45, 0x6e, 0x74, 0x72, - 0x79, 0x52, 0x04, 0x74, 0x61, 0x67, 0x73, 0x12, 0x22, 0x0a, 0x0c, 0x64, 0x72, 0x6f, 0x70, 0x70, - 0x65, 0x64, 0x54, 0x72, 0x61, 0x63, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x64, - 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x54, 0x72, 0x61, 0x63, 0x65, 0x1a, 0x37, 0x0a, 0x09, 0x54, - 0x61, 0x67, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x3a, 0x02, 0x38, 0x01, 0x22, 0xa3, 0x03, 0x0a, 0x0d, 0x54, 0x72, 0x61, 0x63, 0x65, 0x72, 0x50, - 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x20, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, - 0x6e, 0x65, 0x72, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6f, 0x6e, - 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x44, 0x12, 0x22, 0x0a, 0x0c, 0x6c, 0x61, 0x6e, 0x67, - 0x75, 0x61, 0x67, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, - 0x6c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x4e, 0x61, 0x6d, 0x65, 
0x12, 0x28, 0x0a, 0x0f, - 0x6c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x56, - 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x24, 0x0a, 0x0d, 0x74, 0x72, 0x61, 0x63, 0x65, 0x72, - 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x74, - 0x72, 0x61, 0x63, 0x65, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1c, 0x0a, 0x09, - 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x49, 0x44, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x09, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x49, 0x44, 0x12, 0x26, 0x0a, 0x06, 0x63, 0x68, - 0x75, 0x6e, 0x6b, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x70, 0x62, 0x2e, - 0x54, 0x72, 0x61, 0x63, 0x65, 0x43, 0x68, 0x75, 0x6e, 0x6b, 0x52, 0x06, 0x63, 0x68, 0x75, 0x6e, - 0x6b, 0x73, 0x12, 0x2f, 0x0a, 0x04, 0x74, 0x61, 0x67, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x1b, 0x2e, 0x70, 0x62, 0x2e, 0x54, 0x72, 0x61, 0x63, 0x65, 0x72, 0x50, 0x61, 0x79, 0x6c, - 0x6f, 0x61, 0x64, 0x2e, 0x54, 0x61, 0x67, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x04, 0x74, - 0x61, 0x67, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x76, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x03, 0x65, 0x6e, 0x76, 0x12, 0x1a, 0x0a, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, - 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, - 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x61, 0x70, 0x70, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, - 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x61, 0x70, 0x70, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, - 0x6e, 0x1a, 0x37, 0x0a, 0x09, 0x54, 0x61, 0x67, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, - 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, - 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x2f, 0x5a, 0x2d, 0x67, 0x69, - 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x44, 0x61, 0x74, 0x61, 0x44, 0x6f, 0x67, - 0x2f, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2d, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2f, 0x70, - 0x6b, 0x67, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2f, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x33, -} - -var ( - file_tracer_payload_proto_rawDescOnce sync.Once - file_tracer_payload_proto_rawDescData = file_tracer_payload_proto_rawDesc -) - -func file_tracer_payload_proto_rawDescGZIP() []byte { - file_tracer_payload_proto_rawDescOnce.Do(func() { - file_tracer_payload_proto_rawDescData = protoimpl.X.CompressGZIP(file_tracer_payload_proto_rawDescData) - }) - return file_tracer_payload_proto_rawDescData -} - -var file_tracer_payload_proto_msgTypes = make([]protoimpl.MessageInfo, 4) -var file_tracer_payload_proto_goTypes = []interface{}{ - (*TraceChunk)(nil), // 0: pb.TraceChunk - (*TracerPayload)(nil), // 1: pb.TracerPayload - nil, // 2: pb.TraceChunk.TagsEntry - nil, // 3: pb.TracerPayload.TagsEntry - (*Span)(nil), // 4: pb.Span -} -var file_tracer_payload_proto_depIdxs = []int32{ - 4, // 0: pb.TraceChunk.spans:type_name -> pb.Span - 2, // 1: pb.TraceChunk.tags:type_name -> pb.TraceChunk.TagsEntry - 0, // 2: pb.TracerPayload.chunks:type_name -> pb.TraceChunk - 3, // 3: pb.TracerPayload.tags:type_name -> pb.TracerPayload.TagsEntry - 4, // [4:4] is the sub-list for method output_type - 4, // [4:4] is the sub-list for method input_type - 4, 
// [4:4] is the sub-list for extension type_name - 4, // [4:4] is the sub-list for extension extendee - 0, // [0:4] is the sub-list for field type_name -} - -func init() { file_tracer_payload_proto_init() } -func file_tracer_payload_proto_init() { - if File_tracer_payload_proto != nil { - return - } - file_span_proto_init() - if !protoimpl.UnsafeEnabled { - file_tracer_payload_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*TraceChunk); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_tracer_payload_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*TracerPayload); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_tracer_payload_proto_rawDesc, - NumEnums: 0, - NumMessages: 4, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_tracer_payload_proto_goTypes, - DependencyIndexes: file_tracer_payload_proto_depIdxs, - MessageInfos: file_tracer_payload_proto_msgTypes, - }.Build() - File_tracer_payload_proto = out.File - file_tracer_payload_proto_rawDesc = nil - file_tracer_payload_proto_goTypes = nil - file_tracer_payload_proto_depIdxs = nil -} diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/trace/pb/tracer_payload.proto b/vendor/github.com/DataDog/datadog-agent/pkg/trace/pb/tracer_payload.proto deleted file mode 100644 index 6b391b6c1..000000000 --- a/vendor/github.com/DataDog/datadog-agent/pkg/trace/pb/tracer_payload.proto +++ /dev/null @@ -1,58 +0,0 @@ -syntax = "proto3"; - -package pb; -option go_package="github.com/DataDog/datadog-agent/pkg/trace/pb"; -import "span.proto"; - -// TraceChunk represents a list of spans with the same trace ID. In other words, a chunk of a trace. -message TraceChunk { - // priority specifies sampling priority of the trace. - // @gotags: json:"priority" msg:"priority" - int32 priority = 1; - // origin specifies origin product ("lambda", "rum", etc.) of the trace. - // @gotags: json:"origin" msg:"origin" - string origin = 2; - // spans specifies list of containing spans. - // @gotags: json:"spans" msg:"spans" - repeated Span spans = 3; - // tags specifies tags common in all `spans`. - // @gotags: json:"tags" msg:"tags" - map tags = 4; - // droppedTrace specifies whether the trace was dropped by samplers or not. - // @gotags: json:"dropped_trace" msg:"dropped_trace" - bool droppedTrace = 5; -} - -// TracerPayload represents a payload the trace agent receives from tracers. -message TracerPayload { - // containerID specifies the ID of the container where the tracer is running on. - // @gotags: json:"container_id" msg:"container_id" - string containerID = 1; - // languageName specifies language of the tracer. - // @gotags: json:"language_name" msg:"language_name" - string languageName = 2; - // languageVersion specifies language version of the tracer. - // @gotags: json:"language_version" msg:"language_version" - string languageVersion = 3; - // tracerVersion specifies version of the tracer. - // @gotags: json:"tracer_version" msg:"tracer_version" - string tracerVersion = 4; - // runtimeID specifies V4 UUID representation of a tracer session. 
- // @gotags: json:"runtime_id" msg:"runtime_id" - string runtimeID = 5; - // chunks specifies list of containing trace chunks. - // @gotags: json:"chunks" msg:"chunks" - repeated TraceChunk chunks = 6; - // tags specifies tags common in all `chunks`. - // @gotags: json:"tags" msg:"tags" - map tags = 7; - // env specifies `env` tag that set with the tracer. - // @gotags: json:"env" msg:"env" - string env = 8; - // hostname specifies hostname of where the tracer is running. - // @gotags: json:"hostname" msg:"hostname" - string hostname = 9; - // version specifies `version` tag that set with the tracer. - // @gotags: json:"app_version" msg:"app_version" - string appVersion = 10; -} diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/trace/pb/tracer_payload_gen.go b/vendor/github.com/DataDog/datadog-agent/pkg/trace/pb/tracer_payload_gen.go deleted file mode 100644 index d45264b77..000000000 --- a/vendor/github.com/DataDog/datadog-agent/pkg/trace/pb/tracer_payload_gen.go +++ /dev/null @@ -1,390 +0,0 @@ -// Unless explicitly stated otherwise all files in this repository are licensed -// under the Apache License Version 2.0. -// This product includes software developed at Datadog (https://www.datadoghq.com/). -// Copyright 2016-present Datadog, Inc. - -package pb - -// Code generated by github.com/tinylib/msgp DO NOT EDIT. - -import ( - // _ "github.com/gogo/protobuf/gogoproto" - "github.com/tinylib/msgp/msgp" -) - -// MarshalMsg implements msgp.Marshaler -func (z *TraceChunk) MarshalMsg(b []byte) (o []byte, err error) { - o = msgp.Require(b, z.Msgsize()) - // map header, size 5 - // string "priority" - o = append(o, 0x85, 0xa8, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79) - o = msgp.AppendInt32(o, z.Priority) - // string "origin" - o = append(o, 0xa6, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e) - o = msgp.AppendString(o, z.Origin) - // string "spans" - o = append(o, 0xa5, 0x73, 0x70, 0x61, 0x6e, 0x73) - o = msgp.AppendArrayHeader(o, uint32(len(z.Spans))) - for za0001 := range z.Spans { - if z.Spans[za0001] == nil { - o = msgp.AppendNil(o) - } else { - o, err = z.Spans[za0001].MarshalMsg(o) - if err != nil { - err = msgp.WrapError(err, "Spans", za0001) - return - } - } - } - // string "tags" - o = append(o, 0xa4, 0x74, 0x61, 0x67, 0x73) - o = msgp.AppendMapHeader(o, uint32(len(z.Tags))) - for za0002, za0003 := range z.Tags { - o = msgp.AppendString(o, za0002) - o = msgp.AppendString(o, za0003) - } - // string "dropped_trace" - o = append(o, 0xad, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x74, 0x72, 0x61, 0x63, 0x65) - o = msgp.AppendBool(o, z.DroppedTrace) - return -} - -// UnmarshalMsg implements msgp.Unmarshaler -func (z *TraceChunk) UnmarshalMsg(bts []byte) (o []byte, err error) { - var field []byte - _ = field - var zb0001 uint32 - zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - for zb0001 > 0 { - zb0001-- - field, bts, err = msgp.ReadMapKeyZC(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - switch msgp.UnsafeString(field) { - case "priority": - z.Priority, bts, err = msgp.ReadInt32Bytes(bts) - if err != nil { - err = msgp.WrapError(err, "Priority") - return - } - case "origin": - z.Origin, bts, err = msgp.ReadStringBytes(bts) - if err != nil { - err = msgp.WrapError(err, "Origin") - return - } - case "spans": - var zb0002 uint32 - zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts) - if err != nil { - err = msgp.WrapError(err, "Spans") - return - } - if cap(z.Spans) >= int(zb0002) { - z.Spans = 
(z.Spans)[:zb0002] - } else { - z.Spans = make([]*Span, zb0002) - } - for za0001 := range z.Spans { - if msgp.IsNil(bts) { - bts, err = msgp.ReadNilBytes(bts) - if err != nil { - return - } - z.Spans[za0001] = nil - } else { - if z.Spans[za0001] == nil { - z.Spans[za0001] = new(Span) - } - bts, err = z.Spans[za0001].UnmarshalMsg(bts) - if err != nil { - err = msgp.WrapError(err, "Spans", za0001) - return - } - } - } - case "tags": - var zb0003 uint32 - zb0003, bts, err = msgp.ReadMapHeaderBytes(bts) - if err != nil { - err = msgp.WrapError(err, "Tags") - return - } - if z.Tags == nil { - z.Tags = make(map[string]string, zb0003) - } else if len(z.Tags) > 0 { - for key := range z.Tags { - delete(z.Tags, key) - } - } - for zb0003 > 0 { - var za0002 string - var za0003 string - zb0003-- - za0002, bts, err = msgp.ReadStringBytes(bts) - if err != nil { - err = msgp.WrapError(err, "Tags") - return - } - za0003, bts, err = msgp.ReadStringBytes(bts) - if err != nil { - err = msgp.WrapError(err, "Tags", za0002) - return - } - z.Tags[za0002] = za0003 - } - case "dropped_trace": - z.DroppedTrace, bts, err = msgp.ReadBoolBytes(bts) - if err != nil { - err = msgp.WrapError(err, "DroppedTrace") - return - } - default: - bts, err = msgp.Skip(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - } - } - o = bts - return -} - -// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message -func (z *TraceChunk) Msgsize() (s int) { - s = 1 + 9 + msgp.Int32Size + 7 + msgp.StringPrefixSize + len(z.Origin) + 6 + msgp.ArrayHeaderSize - for za0001 := range z.Spans { - if z.Spans[za0001] == nil { - s += msgp.NilSize - } else { - s += z.Spans[za0001].Msgsize() - } - } - s += 5 + msgp.MapHeaderSize - if z.Tags != nil { - for za0002, za0003 := range z.Tags { - _ = za0003 - s += msgp.StringPrefixSize + len(za0002) + msgp.StringPrefixSize + len(za0003) - } - } - s += 14 + msgp.BoolSize - return -} - -// MarshalMsg implements msgp.Marshaler -func (z *TracerPayload) MarshalMsg(b []byte) (o []byte, err error) { - o = msgp.Require(b, z.Msgsize()) - // map header, size 10 - // string "container_id" - o = append(o, 0x8a, 0xac, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64) - o = msgp.AppendString(o, z.ContainerID) - // string "language_name" - o = append(o, 0xad, 0x6c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65) - o = msgp.AppendString(o, z.LanguageName) - // string "language_version" - o = append(o, 0xb0, 0x6c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e) - o = msgp.AppendString(o, z.LanguageVersion) - // string "tracer_version" - o = append(o, 0xae, 0x74, 0x72, 0x61, 0x63, 0x65, 0x72, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e) - o = msgp.AppendString(o, z.TracerVersion) - // string "runtime_id" - o = append(o, 0xaa, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x69, 0x64) - o = msgp.AppendString(o, z.RuntimeID) - // string "chunks" - o = append(o, 0xa6, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x73) - o = msgp.AppendArrayHeader(o, uint32(len(z.Chunks))) - for za0001 := range z.Chunks { - if z.Chunks[za0001] == nil { - o = msgp.AppendNil(o) - } else { - o, err = z.Chunks[za0001].MarshalMsg(o) - if err != nil { - err = msgp.WrapError(err, "Chunks", za0001) - return - } - } - } - // string "tags" - o = append(o, 0xa4, 0x74, 0x61, 0x67, 0x73) - o = msgp.AppendMapHeader(o, uint32(len(z.Tags))) - for za0002, za0003 := range z.Tags { - o = msgp.AppendString(o, za0002) - o = 
msgp.AppendString(o, za0003) - } - // string "env" - o = append(o, 0xa3, 0x65, 0x6e, 0x76) - o = msgp.AppendString(o, z.Env) - // string "hostname" - o = append(o, 0xa8, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65) - o = msgp.AppendString(o, z.Hostname) - // string "app_version" - o = append(o, 0xab, 0x61, 0x70, 0x70, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e) - o = msgp.AppendString(o, z.AppVersion) - return -} - -// UnmarshalMsg implements msgp.Unmarshaler -func (z *TracerPayload) UnmarshalMsg(bts []byte) (o []byte, err error) { - var field []byte - _ = field - var zb0001 uint32 - zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - for zb0001 > 0 { - zb0001-- - field, bts, err = msgp.ReadMapKeyZC(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - switch msgp.UnsafeString(field) { - case "container_id": - z.ContainerID, bts, err = msgp.ReadStringBytes(bts) - if err != nil { - err = msgp.WrapError(err, "ContainerID") - return - } - case "language_name": - z.LanguageName, bts, err = msgp.ReadStringBytes(bts) - if err != nil { - err = msgp.WrapError(err, "LanguageName") - return - } - case "language_version": - z.LanguageVersion, bts, err = msgp.ReadStringBytes(bts) - if err != nil { - err = msgp.WrapError(err, "LanguageVersion") - return - } - case "tracer_version": - z.TracerVersion, bts, err = msgp.ReadStringBytes(bts) - if err != nil { - err = msgp.WrapError(err, "TracerVersion") - return - } - case "runtime_id": - z.RuntimeID, bts, err = msgp.ReadStringBytes(bts) - if err != nil { - err = msgp.WrapError(err, "RuntimeID") - return - } - case "chunks": - var zb0002 uint32 - zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts) - if err != nil { - err = msgp.WrapError(err, "Chunks") - return - } - if cap(z.Chunks) >= int(zb0002) { - z.Chunks = (z.Chunks)[:zb0002] - } else { - z.Chunks = make([]*TraceChunk, zb0002) - } - for za0001 := range z.Chunks { - if msgp.IsNil(bts) { - bts, err = msgp.ReadNilBytes(bts) - if err != nil { - return - } - z.Chunks[za0001] = nil - } else { - if z.Chunks[za0001] == nil { - z.Chunks[za0001] = new(TraceChunk) - } - bts, err = z.Chunks[za0001].UnmarshalMsg(bts) - if err != nil { - err = msgp.WrapError(err, "Chunks", za0001) - return - } - } - } - case "tags": - var zb0003 uint32 - zb0003, bts, err = msgp.ReadMapHeaderBytes(bts) - if err != nil { - err = msgp.WrapError(err, "Tags") - return - } - if z.Tags == nil { - z.Tags = make(map[string]string, zb0003) - } else if len(z.Tags) > 0 { - for key := range z.Tags { - delete(z.Tags, key) - } - } - for zb0003 > 0 { - var za0002 string - var za0003 string - zb0003-- - za0002, bts, err = msgp.ReadStringBytes(bts) - if err != nil { - err = msgp.WrapError(err, "Tags") - return - } - za0003, bts, err = msgp.ReadStringBytes(bts) - if err != nil { - err = msgp.WrapError(err, "Tags", za0002) - return - } - z.Tags[za0002] = za0003 - } - case "env": - z.Env, bts, err = msgp.ReadStringBytes(bts) - if err != nil { - err = msgp.WrapError(err, "Env") - return - } - case "hostname": - z.Hostname, bts, err = msgp.ReadStringBytes(bts) - if err != nil { - err = msgp.WrapError(err, "Hostname") - return - } - case "app_version": - z.AppVersion, bts, err = msgp.ReadStringBytes(bts) - if err != nil { - err = msgp.WrapError(err, "AppVersion") - return - } - default: - bts, err = msgp.Skip(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - } - } - o = bts - return -} - -// Msgsize returns an upper bound estimate of the number of bytes occupied by 
the serialized message -func (z *TracerPayload) Msgsize() (s int) { - s = 1 + 13 + msgp.StringPrefixSize + len(z.ContainerID) + 14 + msgp.StringPrefixSize + len(z.LanguageName) + 17 + msgp.StringPrefixSize + len(z.LanguageVersion) + 15 + msgp.StringPrefixSize + len(z.TracerVersion) + 11 + msgp.StringPrefixSize + len(z.RuntimeID) + 7 + msgp.ArrayHeaderSize - for za0001 := range z.Chunks { - if z.Chunks[za0001] == nil { - s += msgp.NilSize - } else { - s += z.Chunks[za0001].Msgsize() - } - } - s += 5 + msgp.MapHeaderSize - if z.Tags != nil { - for za0002, za0003 := range z.Tags { - _ = za0003 - s += msgp.StringPrefixSize + len(za0002) + msgp.StringPrefixSize + len(za0003) - } - } - s += 4 + msgp.StringPrefixSize + len(z.Env) + 9 + msgp.StringPrefixSize + len(z.Hostname) + 12 + msgp.StringPrefixSize + len(z.AppVersion) - return -} diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/trace/pb/tracer_payload_utils.go b/vendor/github.com/DataDog/datadog-agent/pkg/trace/pb/tracer_payload_utils.go deleted file mode 100644 index 04b9d76f1..000000000 --- a/vendor/github.com/DataDog/datadog-agent/pkg/trace/pb/tracer_payload_utils.go +++ /dev/null @@ -1,35 +0,0 @@ -// Unless explicitly stated otherwise all files in this repository are licensed -// under the Apache License Version 2.0. -// This product includes software developed at Datadog (https://www.datadoghq.com/). -// Copyright 2016-present Datadog, Inc. - -package pb - -// traceChunkCopiedFields records the fields that are copied in ShallowCopy. -// This should match exactly the fields set in (*TraceChunk).ShallowCopy. -// This is used by tests to enforce the correctness of ShallowCopy. -var traceChunkCopiedFields = map[string]struct{}{ - "Priority": {}, - "Origin": {}, - "Spans": {}, - "Tags": {}, - "DroppedTrace": {}, -} - -// ShallowCopy returns a shallow copy of the copy-able portion of a TraceChunk. These are the -// public fields which will have a Get* method for them. The completeness of this -// method is enforced by the init function above. Instead of using pkg/proto/utils.ProtoCopier, -// which incurs heavy reflection cost for every copy at runtime, we use reflection once at -// startup to ensure our method is complete. -func (t *TraceChunk) ShallowCopy() *TraceChunk { - if t == nil { - return nil - } - return &TraceChunk{ - Priority: t.Priority, - Origin: t.Origin, - Spans: t.Spans, - Tags: t.Tags, - DroppedTrace: t.DroppedTrace, - } -} diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/trace/pb/tracer_payload_vtproto.pb.go b/vendor/github.com/DataDog/datadog-agent/pkg/trace/pb/tracer_payload_vtproto.pb.go deleted file mode 100644 index 6528e324b..000000000 --- a/vendor/github.com/DataDog/datadog-agent/pkg/trace/pb/tracer_payload_vtproto.pb.go +++ /dev/null @@ -1,1066 +0,0 @@ -// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. -// protoc-gen-go-vtproto version: v0.4.0 -// source: tracer_payload.proto - -package pb - -import ( - fmt "fmt" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - io "io" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. 
- _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -func (m *TraceChunk) MarshalVT() (dAtA []byte, err error) { - if m == nil { - return nil, nil - } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *TraceChunk) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) -} - -func (m *TraceChunk) MarshalToSizedBufferVT(dAtA []byte) (int, error) { - if m == nil { - return 0, nil - } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) - } - if m.DroppedTrace { - i-- - if m.DroppedTrace { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x28 - } - if len(m.Tags) > 0 { - for k := range m.Tags { - v := m.Tags[k] - baseI := i - i -= len(v) - copy(dAtA[i:], v) - i = encodeVarint(dAtA, i, uint64(len(v))) - i-- - dAtA[i] = 0x12 - i -= len(k) - copy(dAtA[i:], k) - i = encodeVarint(dAtA, i, uint64(len(k))) - i-- - dAtA[i] = 0xa - i = encodeVarint(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x22 - } - } - if len(m.Spans) > 0 { - for iNdEx := len(m.Spans) - 1; iNdEx >= 0; iNdEx-- { - size, err := m.Spans[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x1a - } - } - if len(m.Origin) > 0 { - i -= len(m.Origin) - copy(dAtA[i:], m.Origin) - i = encodeVarint(dAtA, i, uint64(len(m.Origin))) - i-- - dAtA[i] = 0x12 - } - if m.Priority != 0 { - i = encodeVarint(dAtA, i, uint64(m.Priority)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *TracerPayload) MarshalVT() (dAtA []byte, err error) { - if m == nil { - return nil, nil - } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *TracerPayload) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) -} - -func (m *TracerPayload) MarshalToSizedBufferVT(dAtA []byte) (int, error) { - if m == nil { - return 0, nil - } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) - } - if len(m.AppVersion) > 0 { - i -= len(m.AppVersion) - copy(dAtA[i:], m.AppVersion) - i = encodeVarint(dAtA, i, uint64(len(m.AppVersion))) - i-- - dAtA[i] = 0x52 - } - if len(m.Hostname) > 0 { - i -= len(m.Hostname) - copy(dAtA[i:], m.Hostname) - i = encodeVarint(dAtA, i, uint64(len(m.Hostname))) - i-- - dAtA[i] = 0x4a - } - if len(m.Env) > 0 { - i -= len(m.Env) - copy(dAtA[i:], m.Env) - i = encodeVarint(dAtA, i, uint64(len(m.Env))) - i-- - dAtA[i] = 0x42 - } - if len(m.Tags) > 0 { - for k := range m.Tags { - v := m.Tags[k] - baseI := i - i -= len(v) - copy(dAtA[i:], v) - i = encodeVarint(dAtA, i, uint64(len(v))) - i-- - dAtA[i] = 0x12 - i -= len(k) - copy(dAtA[i:], k) - i = encodeVarint(dAtA, i, uint64(len(k))) - i-- - dAtA[i] = 0xa - i = encodeVarint(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x3a - } - } - if len(m.Chunks) > 0 { - for iNdEx := len(m.Chunks) - 1; iNdEx >= 0; iNdEx-- { - size, err := m.Chunks[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x32 - } - } - if len(m.RuntimeID) > 0 { - i -= len(m.RuntimeID) - copy(dAtA[i:], 
m.RuntimeID) - i = encodeVarint(dAtA, i, uint64(len(m.RuntimeID))) - i-- - dAtA[i] = 0x2a - } - if len(m.TracerVersion) > 0 { - i -= len(m.TracerVersion) - copy(dAtA[i:], m.TracerVersion) - i = encodeVarint(dAtA, i, uint64(len(m.TracerVersion))) - i-- - dAtA[i] = 0x22 - } - if len(m.LanguageVersion) > 0 { - i -= len(m.LanguageVersion) - copy(dAtA[i:], m.LanguageVersion) - i = encodeVarint(dAtA, i, uint64(len(m.LanguageVersion))) - i-- - dAtA[i] = 0x1a - } - if len(m.LanguageName) > 0 { - i -= len(m.LanguageName) - copy(dAtA[i:], m.LanguageName) - i = encodeVarint(dAtA, i, uint64(len(m.LanguageName))) - i-- - dAtA[i] = 0x12 - } - if len(m.ContainerID) > 0 { - i -= len(m.ContainerID) - copy(dAtA[i:], m.ContainerID) - i = encodeVarint(dAtA, i, uint64(len(m.ContainerID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *TraceChunk) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Priority != 0 { - n += 1 + sov(uint64(m.Priority)) - } - l = len(m.Origin) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - if len(m.Spans) > 0 { - for _, e := range m.Spans { - l = e.SizeVT() - n += 1 + l + sov(uint64(l)) - } - } - if len(m.Tags) > 0 { - for k, v := range m.Tags { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sov(uint64(len(k))) + 1 + len(v) + sov(uint64(len(v))) - n += mapEntrySize + 1 + sov(uint64(mapEntrySize)) - } - } - if m.DroppedTrace { - n += 2 - } - n += len(m.unknownFields) - return n -} - -func (m *TracerPayload) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ContainerID) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - l = len(m.LanguageName) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - l = len(m.LanguageVersion) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - l = len(m.TracerVersion) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - l = len(m.RuntimeID) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - if len(m.Chunks) > 0 { - for _, e := range m.Chunks { - l = e.SizeVT() - n += 1 + l + sov(uint64(l)) - } - } - if len(m.Tags) > 0 { - for k, v := range m.Tags { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sov(uint64(len(k))) + 1 + len(v) + sov(uint64(len(v))) - n += mapEntrySize + 1 + sov(uint64(mapEntrySize)) - } - } - l = len(m.Env) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - l = len(m.Hostname) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - l = len(m.AppVersion) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - n += len(m.unknownFields) - return n -} - -func (m *TraceChunk) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: TraceChunk: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: TraceChunk: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Priority", wireType) - } - m.Priority = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Priority |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType 
!= 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Origin", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Origin = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spans", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Spans = append(m.Spans, &Span{}) - if err := m.Spans[len(m.Spans)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Tags", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Tags == nil { - m.Tags = make(map[string]string) - } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLength - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLength - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLength - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return ErrInvalidLength - } - if postStringIndexmapvalue > l { 
- return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.Tags[mapkey] = mapvalue - iNdEx = postIndex - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DroppedTrace", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.DroppedTrace = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *TracerPayload) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: TracerPayload: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: TracerPayload: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ContainerID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LanguageName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.LanguageName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LanguageVersion", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := 
dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.LanguageVersion = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TracerVersion", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.TracerVersion = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RuntimeID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.RuntimeID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Chunks", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Chunks = append(m.Chunks, &TraceChunk{}) - if err := m.Chunks[len(m.Chunks)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Tags", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Tags == nil { - m.Tags = make(map[string]string) - } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 
ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLength - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLength - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLength - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return ErrInvalidLength - } - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.Tags[mapkey] = mapvalue - iNdEx = postIndex - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Env = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 9: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Hostname", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Hostname = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 10: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AppVersion", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.AppVersion = string(dAtA[iNdEx:postIndex]) - iNdEx = 
postIndex - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/trace/sampler/prioritysampler.go b/vendor/github.com/DataDog/datadog-agent/pkg/trace/sampler/prioritysampler.go index 905a45c45..0f2bc9988 100644 --- a/vendor/github.com/DataDog/datadog-agent/pkg/trace/sampler/prioritysampler.go +++ b/vendor/github.com/DataDog/datadog-agent/pkg/trace/sampler/prioritysampler.go @@ -21,8 +21,8 @@ package sampler import ( "time" + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" "github.com/DataDog/datadog-agent/pkg/trace/config" - "github.com/DataDog/datadog-agent/pkg/trace/pb" ) const ( diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/trace/sampler/rare_sampler.go b/vendor/github.com/DataDog/datadog-agent/pkg/trace/sampler/rare_sampler.go index 3b898df36..4e0712edd 100644 --- a/vendor/github.com/DataDog/datadog-agent/pkg/trace/sampler/rare_sampler.go +++ b/vendor/github.com/DataDog/datadog-agent/pkg/trace/sampler/rare_sampler.go @@ -12,9 +12,9 @@ import ( "go.uber.org/atomic" "golang.org/x/time/rate" + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" "github.com/DataDog/datadog-agent/pkg/trace/config" "github.com/DataDog/datadog-agent/pkg/trace/metrics" - "github.com/DataDog/datadog-agent/pkg/trace/pb" "github.com/DataDog/datadog-agent/pkg/trace/traceutil" ) diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/trace/sampler/sampler.go b/vendor/github.com/DataDog/datadog-agent/pkg/trace/sampler/sampler.go index 66eb643b4..f8b1251d9 100644 --- a/vendor/github.com/DataDog/datadog-agent/pkg/trace/sampler/sampler.go +++ b/vendor/github.com/DataDog/datadog-agent/pkg/trace/sampler/sampler.go @@ -9,7 +9,7 @@ package sampler import ( "math" - "github.com/DataDog/datadog-agent/pkg/trace/pb" + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" "github.com/DataDog/datadog-agent/pkg/trace/traceutil" ) diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/trace/sampler/scoresampler.go b/vendor/github.com/DataDog/datadog-agent/pkg/trace/sampler/scoresampler.go index 29809acac..3597c7c0b 100644 --- a/vendor/github.com/DataDog/datadog-agent/pkg/trace/sampler/scoresampler.go +++ b/vendor/github.com/DataDog/datadog-agent/pkg/trace/sampler/scoresampler.go @@ -9,8 +9,8 @@ import ( "sync" "time" + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" "github.com/DataDog/datadog-agent/pkg/trace/config" - "github.com/DataDog/datadog-agent/pkg/trace/pb" ) const ( diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/trace/sampler/signature.go b/vendor/github.com/DataDog/datadog-agent/pkg/trace/sampler/signature.go index 1916cc3b6..bca9a5f56 100644 --- a/vendor/github.com/DataDog/datadog-agent/pkg/trace/sampler/signature.go +++ b/vendor/github.com/DataDog/datadog-agent/pkg/trace/sampler/signature.go @@ -8,7 +8,7 @@ package sampler import ( "sort" - "github.com/DataDog/datadog-agent/pkg/trace/pb" + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" "github.com/DataDog/datadog-agent/pkg/trace/traceutil" ) diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/trace/stats/aggregation.go b/vendor/github.com/DataDog/datadog-agent/pkg/trace/stats/aggregation.go index 
1e2bc0fd8..47bb9716e 100644 --- a/vendor/github.com/DataDog/datadog-agent/pkg/trace/stats/aggregation.go +++ b/vendor/github.com/DataDog/datadog-agent/pkg/trace/stats/aggregation.go @@ -9,8 +9,8 @@ import ( "strconv" "strings" + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" "github.com/DataDog/datadog-agent/pkg/trace/log" - "github.com/DataDog/datadog-agent/pkg/trace/pb" "github.com/DataDog/datadog-agent/pkg/trace/traceutil" ) @@ -87,7 +87,7 @@ func NewAggregationFromSpan(s *pb.Span, origin string, aggKey PayloadAggregation } // NewAggregationFromGroup gets the Aggregation key of grouped stats. -func NewAggregationFromGroup(g pb.ClientGroupedStats) Aggregation { +func NewAggregationFromGroup(g *pb.ClientGroupedStats) Aggregation { return Aggregation{ BucketsAggregationKey: BucketsAggregationKey{ Resource: g.Resource, diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/trace/stats/client_stats_aggregator.go b/vendor/github.com/DataDog/datadog-agent/pkg/trace/stats/client_stats_aggregator.go index e46411ed2..f98c91150 100644 --- a/vendor/github.com/DataDog/datadog-agent/pkg/trace/stats/client_stats_aggregator.go +++ b/vendor/github.com/DataDog/datadog-agent/pkg/trace/stats/client_stats_aggregator.go @@ -8,8 +8,8 @@ package stats import ( "time" + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" "github.com/DataDog/datadog-agent/pkg/trace/config" - "github.com/DataDog/datadog-agent/pkg/trace/pb" "github.com/DataDog/datadog-agent/pkg/trace/watchdog" ) @@ -35,8 +35,8 @@ const ( // This and the aggregator timestamp alignment ensure that all counts will have at most one point per second per agent for a specific granularity. // While distributions are not tied to the agent. type ClientStatsAggregator struct { - In chan pb.ClientStatsPayload - out chan pb.StatsPayload + In chan *pb.ClientStatsPayload + out chan *pb.StatsPayload buckets map[int64]*bucket // buckets used to aggregate client stats flushTicker *time.Ticker @@ -51,10 +51,10 @@ type ClientStatsAggregator struct { } // NewClientStatsAggregator initializes a new aggregator ready to be started -func NewClientStatsAggregator(conf *config.AgentConfig, out chan pb.StatsPayload) *ClientStatsAggregator { +func NewClientStatsAggregator(conf *config.AgentConfig, out chan *pb.StatsPayload) *ClientStatsAggregator { c := &ClientStatsAggregator{ flushTicker: time.NewTicker(time.Second), - In: make(chan pb.ClientStatsPayload, 10), + In: make(chan *pb.ClientStatsPayload, 10), buckets: make(map[int64]*bucket, 20), out: out, agentEnv: conf.DefaultEnv, @@ -124,7 +124,7 @@ func (a *ClientStatsAggregator) getAggregationBucketTime(now, bs time.Time) (tim return alignAggTs(bs), false } -func (a *ClientStatsAggregator) add(now time.Time, p pb.ClientStatsPayload) { +func (a *ClientStatsAggregator) add(now time.Time, p *pb.ClientStatsPayload) { for _, clientBucket := range p.Stats { clientBucketStart := time.Unix(0, int64(clientBucket.Start)) ts, shifted := a.getAggregationBucketTime(now, clientBucketStart) @@ -137,16 +137,17 @@ func (a *ClientStatsAggregator) add(now time.Time, p pb.ClientStatsPayload) { b = &bucket{ts: ts} a.buckets[ts.Unix()] = b } - p.Stats = []pb.ClientStatsBucket{clientBucket} + p.Stats = []*pb.ClientStatsBucket{clientBucket} a.flush(b.add(p, a.peerSvcAggregation)) } } -func (a *ClientStatsAggregator) flush(p []pb.ClientStatsPayload) { +func (a *ClientStatsAggregator) flush(p []*pb.ClientStatsPayload) { if len(p) == 0 { return } - a.out <- pb.StatsPayload{ + + a.out <- &pb.StatsPayload{ Stats: p, AgentEnv: 
a.agentEnv, AgentHostname: a.agentHostname, @@ -167,7 +168,7 @@ func alignAggTs(t time.Time) time.Time { type bucket struct { // first is the first payload matching the bucket. If a second payload matches the bucket // this field will be empty - first pb.ClientStatsPayload + first *pb.ClientStatsPayload // ts is the timestamp attached to the payload ts time.Time // n counts the number of payloads matching the bucket @@ -176,26 +177,39 @@ type bucket struct { agg map[PayloadAggregationKey]map[BucketsAggregationKey]*aggregatedCounts } -func (b *bucket) add(p pb.ClientStatsPayload, enablePeerSvcAgg bool) []pb.ClientStatsPayload { +func (b *bucket) add(p *pb.ClientStatsPayload, enablePeerSvcAgg bool) []*pb.ClientStatsPayload { b.n++ if b.n == 1 { - b.first = p + b.first = &pb.ClientStatsPayload{ + Hostname: p.GetHostname(), + Env: p.GetEnv(), + Version: p.GetVersion(), + Stats: p.GetStats(), + Lang: p.GetLang(), + TracerVersion: p.GetTracerVersion(), + RuntimeID: p.GetRuntimeID(), + Sequence: p.GetSequence(), + AgentAggregation: p.GetAgentAggregation(), + Service: p.GetService(), + ContainerID: p.GetContainerID(), + Tags: p.GetTags(), + } return nil } // if it's the second payload we flush the first payload with counts trimmed if b.n == 2 { first := b.first - b.first = pb.ClientStatsPayload{} + b.first = &pb.ClientStatsPayload{} b.agg = make(map[PayloadAggregationKey]map[BucketsAggregationKey]*aggregatedCounts, 2) b.aggregateCounts(first, enablePeerSvcAgg) b.aggregateCounts(p, enablePeerSvcAgg) - return []pb.ClientStatsPayload{trimCounts(first), trimCounts(p)} + return []*pb.ClientStatsPayload{trimCounts(first), trimCounts(p)} } b.aggregateCounts(p, enablePeerSvcAgg) - return []pb.ClientStatsPayload{trimCounts(p)} + return []*pb.ClientStatsPayload{trimCounts(p)} } -func (b *bucket) aggregateCounts(p pb.ClientStatsPayload, enablePeerSvcAgg bool) { +func (b *bucket) aggregateCounts(p *pb.ClientStatsPayload, enablePeerSvcAgg bool) { payloadAggKey := newPayloadAggregationKey(p.Env, p.Hostname, p.Version, p.ContainerID) payloadAgg, ok := b.agg[payloadAggKey] if !ok { @@ -208,6 +222,9 @@ func (b *bucket) aggregateCounts(p pb.ClientStatsPayload, enablePeerSvcAgg bool) } for _, s := range p.Stats { for _, sb := range s.Stats { + if sb == nil { + continue + } aggKey := newBucketAggregationKey(sb, enablePeerSvcAgg) agg, ok := payloadAgg[aggKey] if !ok { @@ -221,19 +238,19 @@ func (b *bucket) aggregateCounts(p pb.ClientStatsPayload, enablePeerSvcAgg bool) } } -func (b *bucket) flush() []pb.ClientStatsPayload { +func (b *bucket) flush() []*pb.ClientStatsPayload { if b.n == 1 { - return []pb.ClientStatsPayload{b.first} + return []*pb.ClientStatsPayload{b.first} } return b.aggregationToPayloads() } -func (b *bucket) aggregationToPayloads() []pb.ClientStatsPayload { - res := make([]pb.ClientStatsPayload, 0, len(b.agg)) +func (b *bucket) aggregationToPayloads() []*pb.ClientStatsPayload { + res := make([]*pb.ClientStatsPayload, 0, len(b.agg)) for payloadKey, aggrCounts := range b.agg { - stats := make([]pb.ClientGroupedStats, 0, len(aggrCounts)) + stats := make([]*pb.ClientGroupedStats, 0, len(aggrCounts)) for aggrKey, counts := range aggrCounts { - stats = append(stats, pb.ClientGroupedStats{ + stats = append(stats, &pb.ClientGroupedStats{ Service: aggrKey.Service, PeerService: aggrKey.PeerService, Name: aggrKey.Name, @@ -247,13 +264,13 @@ func (b *bucket) aggregationToPayloads() []pb.ClientStatsPayload { Duration: counts.duration, }) } - clientBuckets := []pb.ClientStatsBucket{ + clientBuckets := 
[]*pb.ClientStatsBucket{ { Start: uint64(b.ts.UnixNano()), Duration: uint64(clientBucketDuration.Nanoseconds()), Stats: stats, }} - res = append(res, pb.ClientStatsPayload{ + res = append(res, &pb.ClientStatsPayload{ Hostname: payloadKey.Hostname, Env: payloadKey.Env, Version: payloadKey.Version, @@ -268,7 +285,7 @@ func newPayloadAggregationKey(env, hostname, version, cid string) PayloadAggrega return PayloadAggregationKey{Env: env, Hostname: hostname, Version: version, ContainerID: cid} } -func newBucketAggregationKey(b pb.ClientGroupedStats, enablePeerSvcAgg bool) BucketsAggregationKey { +func newBucketAggregationKey(b *pb.ClientGroupedStats, enablePeerSvcAgg bool) BucketsAggregationKey { k := BucketsAggregationKey{ Service: b.Service, Name: b.Name, @@ -284,10 +301,13 @@ func newBucketAggregationKey(b pb.ClientGroupedStats, enablePeerSvcAgg bool) Buc return k } -func trimCounts(p pb.ClientStatsPayload) pb.ClientStatsPayload { +func trimCounts(p *pb.ClientStatsPayload) *pb.ClientStatsPayload { p.AgentAggregation = keyDistributions for _, s := range p.Stats { for i, b := range s.Stats { + if b == nil { + continue + } b.Hits = 0 b.Errors = 0 b.Duration = 0 diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/trace/stats/concentrator.go b/vendor/github.com/DataDog/datadog-agent/pkg/trace/stats/concentrator.go index d1db4089c..e59bfc6ec 100644 --- a/vendor/github.com/DataDog/datadog-agent/pkg/trace/stats/concentrator.go +++ b/vendor/github.com/DataDog/datadog-agent/pkg/trace/stats/concentrator.go @@ -10,9 +10,9 @@ import ( "sync" "time" + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" "github.com/DataDog/datadog-agent/pkg/trace/config" "github.com/DataDog/datadog-agent/pkg/trace/log" - "github.com/DataDog/datadog-agent/pkg/trace/pb" "github.com/DataDog/datadog-agent/pkg/trace/traceutil" "github.com/DataDog/datadog-agent/pkg/trace/watchdog" ) @@ -27,7 +27,7 @@ const defaultBufferLen = 2 // allowing to find the gold (stats) amongst the traces. type Concentrator struct { In chan Input - Out chan pb.StatsPayload + Out chan *pb.StatsPayload // bucket duration in nanoseconds bsize int64 @@ -51,7 +51,7 @@ type Concentrator struct { } // NewConcentrator initializes a new concentrator ready to be started -func NewConcentrator(conf *config.AgentConfig, out chan pb.StatsPayload, now time.Time) *Concentrator { +func NewConcentrator(conf *config.AgentConfig, out chan *pb.StatsPayload, now time.Time) *Concentrator { bsize := conf.BucketInterval.Nanoseconds() c := Concentrator{ bsize: bsize, @@ -207,12 +207,12 @@ func (c *Concentrator) addNow(pt *traceutil.ProcessedTrace, containerID string) // Flush deletes and returns complete statistic buckets. // The force boolean guarantees flushing all buckets if set to true. 
-func (c *Concentrator) Flush(force bool) pb.StatsPayload { +func (c *Concentrator) Flush(force bool) *pb.StatsPayload { return c.flushNow(time.Now().UnixNano(), force) } -func (c *Concentrator) flushNow(now int64, force bool) pb.StatsPayload { - m := make(map[PayloadAggregationKey][]pb.ClientStatsBucket) +func (c *Concentrator) flushNow(now int64, force bool) *pb.StatsPayload { + m := make(map[PayloadAggregationKey][]*pb.ClientStatsBucket) c.mu.Lock() for ts, srb := range c.buckets { @@ -241,9 +241,9 @@ func (c *Concentrator) flushNow(now int64, force bool) pb.StatsPayload { c.oldestTs = newOldestTs } c.mu.Unlock() - sb := make([]pb.ClientStatsPayload, 0, len(m)) + sb := make([]*pb.ClientStatsPayload, 0, len(m)) for k, s := range m { - p := pb.ClientStatsPayload{ + p := &pb.ClientStatsPayload{ Env: k.Env, Hostname: k.Hostname, ContainerID: k.ContainerID, @@ -252,7 +252,7 @@ func (c *Concentrator) flushNow(now int64, force bool) pb.StatsPayload { } sb = append(sb, p) } - return pb.StatsPayload{Stats: sb, AgentHostname: c.agentHostname, AgentEnv: c.agentEnv, AgentVersion: c.agentVersion} + return &pb.StatsPayload{Stats: sb, AgentHostname: c.agentHostname, AgentEnv: c.agentEnv, AgentVersion: c.agentVersion} } // alignTs returns the provided timestamp truncated to the bucket size. diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/trace/stats/statsraw.go b/vendor/github.com/DataDog/datadog-agent/pkg/trace/stats/statsraw.go index d7a22691e..a56531eef 100644 --- a/vendor/github.com/DataDog/datadog-agent/pkg/trace/stats/statsraw.go +++ b/vendor/github.com/DataDog/datadog-agent/pkg/trace/stats/statsraw.go @@ -8,8 +8,8 @@ package stats import ( "math/rand" + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" "github.com/DataDog/datadog-agent/pkg/trace/log" - "github.com/DataDog/datadog-agent/pkg/trace/pb" "github.com/DataDog/sketches-go/ddsketch" "github.com/golang/protobuf/proto" @@ -48,18 +48,18 @@ func round(f float64) uint64 { return i } -func (s *groupedStats) export(a Aggregation) (pb.ClientGroupedStats, error) { +func (s *groupedStats) export(a Aggregation) (*pb.ClientGroupedStats, error) { msg := s.okDistribution.ToProto() okSummary, err := proto.Marshal(msg) if err != nil { - return pb.ClientGroupedStats{}, err + return &pb.ClientGroupedStats{}, err } msg = s.errDistribution.ToProto() errSummary, err := proto.Marshal(msg) if err != nil { - return pb.ClientGroupedStats{}, err + return &pb.ClientGroupedStats{}, err } - return pb.ClientGroupedStats{ + return &pb.ClientGroupedStats{ Service: a.Service, Name: a.Name, Resource: a.Resource, @@ -118,8 +118,8 @@ func NewRawBucket(ts, d uint64) *RawBucket { // Export transforms a RawBucket into a ClientStatsBucket, typically used // before communicating data to the API, as RawBucket is the internal // type while ClientStatsBucket is the public, shared one. 
-func (sb *RawBucket) Export() map[PayloadAggregationKey]pb.ClientStatsBucket { - m := make(map[PayloadAggregationKey]pb.ClientStatsBucket) +func (sb *RawBucket) Export() map[PayloadAggregationKey]*pb.ClientStatsBucket { + m := make(map[PayloadAggregationKey]*pb.ClientStatsBucket) for k, v := range sb.data { b, err := v.export(k) if err != nil { @@ -134,7 +134,7 @@ func (sb *RawBucket) Export() map[PayloadAggregationKey]pb.ClientStatsBucket { } s, ok := m[key] if !ok { - s = pb.ClientStatsBucket{ + s = &pb.ClientStatsBucket{ Start: sb.start, Duration: sb.duration, } diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/trace/stats/weight.go b/vendor/github.com/DataDog/datadog-agent/pkg/trace/stats/weight.go index ee19fe925..d28ca5e46 100644 --- a/vendor/github.com/DataDog/datadog-agent/pkg/trace/stats/weight.go +++ b/vendor/github.com/DataDog/datadog-agent/pkg/trace/stats/weight.go @@ -5,9 +5,7 @@ package stats -import ( - "github.com/DataDog/datadog-agent/pkg/trace/pb" -) +import pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" // keySamplingRateGlobal is a metric key holding the global sampling rate. const keySamplingRateGlobal = "_sample_rate" diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/trace/traceutil/azure.go b/vendor/github.com/DataDog/datadog-agent/pkg/trace/traceutil/azure.go index a1bcdfed4..a463a5482 100644 --- a/vendor/github.com/DataDog/datadog-agent/pkg/trace/traceutil/azure.go +++ b/vendor/github.com/DataDog/datadog-agent/pkg/trace/traceutil/azure.go @@ -13,26 +13,28 @@ import ( ) const ( - aasInstanceID = "aas.environment.instance_id" - aasInstanceName = "aas.environment.instance_name" - aasOperatingSystem = "aas.environment.os" - aasRuntime = "aas.environment.runtime" - aasResourceGroup = "aas.resource.group" - aasResourceID = "aas.resource.id" - aasSiteKind = "aas.site.kind" - aasSiteName = "aas.site.name" - aasSiteType = "aas.site.type" - aasSubscriptionID = "aas.subscription.id" - - // this value matches the runtime value set in the Azure Windows Extension - dotnetFramework = ".NET" - dotnetRuntime = "dotnet" - nodeFramework = "Node.js" - nodeRuntime = "node" - unknown = "unknown" + aasInstanceID = "aas.environment.instance_id" + aasInstanceName = "aas.environment.instance_name" + aasOperatingSystem = "aas.environment.os" + aasRuntime = "aas.environment.runtime" + aasExtensionVersion = "aas.environment.extension_version" + aasResourceGroup = "aas.resource.group" + aasResourceID = "aas.resource.id" + aasSiteKind = "aas.site.kind" + aasSiteName = "aas.site.name" + aasSiteType = "aas.site.type" + aasSubscriptionID = "aas.subscription.id" + + dotnetFramework = ".NET" + nodeFramework = "Node.js" + javaFramework = "Java" + pythonFramework = "Python" + phpFramework = "PHP" + goFramework = "Go" + containerFramework = "Container" + unknown = "unknown" appService = "app" - ddRuntime = "DD_RUNTIME" ) var appServicesTags map[string]string @@ -50,16 +52,17 @@ func getAppServicesTags(getenv func(string) string) map[string]string { resourceGroup := getenv("WEBSITE_RESOURCE_GROUP") instanceID := getEnvOrUnknown("WEBSITE_INSTANCE_ID", getenv) computerName := getEnvOrUnknown("COMPUTERNAME", getenv) - currentRuntime := getRuntime(getenv) + extensionVersion := getenv("DD_AAS_EXTENSION_VERSION") // Windows and linux environments provide the OS differently // We should grab it from GO's builtin runtime pkg websiteOS := runtime.GOOS + currentRuntime := getRuntime(websiteOS, getenv) subscriptionID := parseAzureSubscriptionID(ownerName) resourceID := 
compileAzureResourceID(subscriptionID, resourceGroup, siteName) - return map[string]string{ + tags := map[string]string{ aasInstanceID: instanceID, aasInstanceName: computerName, aasOperatingSystem: websiteOS, @@ -71,27 +74,79 @@ func getAppServicesTags(getenv func(string) string) map[string]string { aasSiteType: appService, aasSubscriptionID: subscriptionID, } + + // Remove the Java and .NET logic once non-universal extensions are deprecated + if websiteOS == "windows" { + if extensionVersion != "" { + tags[aasExtensionVersion] = extensionVersion + } else if val := getenv("DD_AAS_JAVA_EXTENSION_VERSION"); val != "" { + tags[aasExtensionVersion] = val + } else if val := getenv("DD_AAS_DOTNET_EXTENSION_VERSION"); val != "" { + tags[aasExtensionVersion] = val + } + } + + return tags } func getEnvOrUnknown(env string, getenv func(string) string) string { val := getenv(env) - if len(env) == 0 { + if len(val) == 0 { val = unknown } return val } -func getRuntime(getenv func(string) string) (rt string) { - env := getenv(ddRuntime) - switch env { - case dotnetRuntime: - rt = dotnetFramework - case nodeRuntime: - rt = nodeFramework +func getRuntime(websiteOS string, getenv func(string) string) (rt string) { + switch websiteOS { + case "windows": + rt = getWindowsRuntime(getenv) + case "linux", "darwin": + rt = getLinuxRuntime(getenv) default: rt = unknown } - return + + return rt +} + +func getWindowsRuntime(getenv func(string) string) (rt string) { + if getenv("WEBSITE_STACK") == "JAVA" { + rt = javaFramework + } else if val := getenv("WEBSITE_NODE_DEFAULT_VERSION"); val != "" { + rt = nodeFramework + } else { + // FIXME: Windows AAS only supports Java, Node, and .NET so we can infer this + // Needs to be inferred because no other env vars give us context on the runtime + rt = dotnetFramework + } + + return rt +} + +func getLinuxRuntime(getenv func(string) string) (rt string) { + rt = unknown + + switch getenv("WEBSITE_STACK") { + case "DOCKER": + rt = containerFramework + case "": + if val := getenv("DOCKER_SERVER_VERSION"); val != "" { + rt = containerFramework + } + case "NODE": + rt = nodeFramework + case "PYTHON": + rt = pythonFramework + case "JAVA", "TOMCAT": + rt = javaFramework + case "DOTNETCORE": + rt = dotnetFramework + case "PHP": + rt = phpFramework + } + + return rt } func parseAzureSubscriptionID(subID string) (id string) { diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/trace/traceutil/processed_trace.go b/vendor/github.com/DataDog/datadog-agent/pkg/trace/traceutil/processed_trace.go index e3558cd02..6295bc1d6 100644 --- a/vendor/github.com/DataDog/datadog-agent/pkg/trace/traceutil/processed_trace.go +++ b/vendor/github.com/DataDog/datadog-agent/pkg/trace/traceutil/processed_trace.go @@ -5,9 +5,7 @@ package traceutil -import ( - "github.com/DataDog/datadog-agent/pkg/trace/pb" -) +import pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" // ProcessedTrace represents a trace being processed in the agent. 
type ProcessedTrace struct { diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/trace/traceutil/span.go b/vendor/github.com/DataDog/datadog-agent/pkg/trace/traceutil/span.go index 8c7636baa..b3a00834f 100644 --- a/vendor/github.com/DataDog/datadog-agent/pkg/trace/traceutil/span.go +++ b/vendor/github.com/DataDog/datadog-agent/pkg/trace/traceutil/span.go @@ -10,7 +10,7 @@ import ( "github.com/tinylib/msgp/msgp" - "github.com/DataDog/datadog-agent/pkg/trace/pb" + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" ) const ( diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/trace/traceutil/trace.go b/vendor/github.com/DataDog/datadog-agent/pkg/trace/traceutil/trace.go index 3f2058377..b5871ce9f 100644 --- a/vendor/github.com/DataDog/datadog-agent/pkg/trace/traceutil/trace.go +++ b/vendor/github.com/DataDog/datadog-agent/pkg/trace/traceutil/trace.go @@ -6,8 +6,8 @@ package traceutil import ( + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" "github.com/DataDog/datadog-agent/pkg/trace/log" - "github.com/DataDog/datadog-agent/pkg/trace/pb" ) const ( diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/trace/writer/stats.go b/vendor/github.com/DataDog/datadog-agent/pkg/trace/writer/stats.go index f7e8571c9..32bdbfc9a 100644 --- a/vendor/github.com/DataDog/datadog-agent/pkg/trace/writer/stats.go +++ b/vendor/github.com/DataDog/datadog-agent/pkg/trace/writer/stats.go @@ -13,12 +13,12 @@ import ( "strings" "time" + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" "github.com/DataDog/datadog-agent/pkg/trace/config" "github.com/DataDog/datadog-agent/pkg/trace/info" "github.com/DataDog/datadog-agent/pkg/trace/log" "github.com/DataDog/datadog-agent/pkg/trace/metrics" "github.com/DataDog/datadog-agent/pkg/trace/metrics/timing" - "github.com/DataDog/datadog-agent/pkg/trace/pb" "github.com/DataDog/datadog-agent/pkg/trace/telemetry" "github.com/tinylib/msgp/msgp" @@ -40,7 +40,7 @@ const ( // StatsWriter ingests stats buckets and flushes them to the API. type StatsWriter struct { - in <-chan pb.StatsPayload + in <-chan *pb.StatsPayload senders []*sender stop chan struct{} stats *info.StatsWriterInfo @@ -48,14 +48,14 @@ type StatsWriter struct { // syncMode reports whether the writer should flush on its own or only when FlushSync is called syncMode bool - payloads []pb.StatsPayload // payloads buffered for sync mode + payloads []*pb.StatsPayload // payloads buffered for sync mode flushChan chan chan struct{} easylog *log.ThrottledLogger } // NewStatsWriter returns a new StatsWriter. It must be started using Run. -func NewStatsWriter(cfg *config.AgentConfig, in <-chan pb.StatsPayload, telemetryCollector telemetry.TelemetryCollector) *StatsWriter { +func NewStatsWriter(cfg *config.AgentConfig, in <-chan *pb.StatsPayload, telemetryCollector telemetry.TelemetryCollector) *StatsWriter { sw := &StatsWriter{ in: in, stats: &info.StatsWriterInfo{}, @@ -130,14 +130,14 @@ func (w *StatsWriter) Stop() { stopSenders(w.senders) } -func (w *StatsWriter) addStats(sp pb.StatsPayload) { +func (w *StatsWriter) addStats(sp *pb.StatsPayload) { defer timing.Since("datadog.trace_agent.stats_writer.encode_ms", time.Now()) payloads := w.buildPayloads(sp, maxEntriesPerPayload) w.payloads = append(w.payloads, payloads...) } // SendPayload sends a stats payload to the Datadog backend. 
-func (w *StatsWriter) SendPayload(p pb.StatsPayload) { +func (w *StatsWriter) SendPayload(p *pb.StatsPayload) { req := newPayload(map[string]string{ headerLanguages: strings.Join(info.Languages(), "|"), "Content-Type": "application/msgpack", @@ -158,11 +158,11 @@ func (w *StatsWriter) sendPayloads() { } func (w *StatsWriter) resetBuffer() { - w.payloads = make([]pb.StatsPayload, 0, len(w.payloads)) + w.payloads = make([]*pb.StatsPayload, 0, len(w.payloads)) } // encodePayload encodes the payload as Gzipped msgPack into w. -func encodePayload(w io.Writer, payload pb.StatsPayload) error { +func encodePayload(w io.Writer, payload *pb.StatsPayload) error { gz, err := gzip.NewWriterLevel(w, gzip.BestSpeed) if err != nil { return err @@ -172,15 +172,15 @@ func encodePayload(w io.Writer, payload pb.StatsPayload) error { log.Errorf("Error closing gzip stream when writing stats payload: %v", err) } }() - return msgp.Encode(gz, &payload) + return msgp.Encode(gz, payload) } // buildPayloads splits pb.ClientStatsPayload that have more than maxEntriesPerPayload // and then groups them into pb.StatsPayload with less than maxEntriesPerPayload -func (w *StatsWriter) buildPayloads(sp pb.StatsPayload, maxEntriesPerPayload int) []pb.StatsPayload { +func (w *StatsWriter) buildPayloads(sp *pb.StatsPayload, maxEntriesPerPayload int) []*pb.StatsPayload { split := splitPayloads(sp.Stats, maxEntriesPerPayload) - grouped := make([]pb.StatsPayload, 0, len(sp.Stats)) - current := pb.StatsPayload{ + grouped := make([]*pb.StatsPayload, 0, len(sp.Stats)) + current := &pb.StatsPayload{ AgentHostname: sp.AgentHostname, AgentEnv: sp.AgentEnv, AgentVersion: sp.AgentVersion, @@ -193,9 +193,14 @@ func (w *StatsWriter) buildPayloads(sp pb.StatsPayload, maxEntriesPerPayload int w.stats.ClientPayloads.Add(int64(len(current.Stats))) w.stats.StatsEntries.Add(int64(nbEntries)) grouped = append(grouped, current) - current.Stats = nil nbEntries = 0 nbBuckets = 0 + current = &pb.StatsPayload{ + AgentHostname: sp.AgentHostname, + AgentEnv: sp.AgentEnv, + AgentVersion: sp.AgentVersion, + ClientComputed: sp.ClientComputed, + } } for _, p := range split { if nbEntries+p.nbEntries > maxEntriesPerPayload { @@ -203,7 +208,7 @@ func (w *StatsWriter) buildPayloads(sp pb.StatsPayload, maxEntriesPerPayload int } nbEntries += p.nbEntries nbBuckets += len(p.Stats) - w.resolveContainerTags(&p.ClientStatsPayload) + w.resolveContainerTags(p.ClientStatsPayload) current.Stats = append(current.Stats, p.ClientStatsPayload) } if nbEntries > 0 { @@ -234,7 +239,7 @@ func (w *StatsWriter) resolveContainerTags(p *pb.ClientStatsPayload) { } } -func splitPayloads(payloads []pb.ClientStatsPayload, maxEntriesPerPayload int) []clientStatsPayload { +func splitPayloads(payloads []*pb.ClientStatsPayload, maxEntriesPerPayload int) []clientStatsPayload { split := make([]clientStatsPayload, 0, len(payloads)) for _, p := range payloads { split = append(split, splitPayload(p, maxEntriesPerPayload)...) @@ -245,7 +250,7 @@ func splitPayloads(payloads []pb.ClientStatsPayload, maxEntriesPerPayload int) [ type timeWindow struct{ start, duration uint64 } type clientStatsPayload struct { - pb.ClientStatsPayload + *pb.ClientStatsPayload nbEntries int // bucketIndexes maps from a timeWindow to a bucket in the ClientStatsPayload. // it allows quick checking of what bucket to add a payload to. @@ -253,7 +258,7 @@ type clientStatsPayload struct { } // splitPayload splits a stats payload to ensure that each stats payload has less than maxEntriesPerPayload entries. 
-func splitPayload(p pb.ClientStatsPayload, maxEntriesPerPayload int) []clientStatsPayload { +func splitPayload(p *pb.ClientStatsPayload, maxEntriesPerPayload int) []clientStatsPayload { if len(p.Stats) == 0 { return nil } @@ -277,7 +282,7 @@ func splitPayload(p pb.ClientStatsPayload, maxEntriesPerPayload int) []clientSta for i := 0; i < nbPayloads; i++ { payloads[i] = clientStatsPayload{ bucketIndexes: make(map[timeWindow]int, 1), - ClientStatsPayload: pb.ClientStatsPayload{ + ClientStatsPayload: &pb.ClientStatsPayload{ Hostname: p.Hostname, Env: p.Env, Version: p.Version, @@ -288,7 +293,7 @@ func splitPayload(p pb.ClientStatsPayload, maxEntriesPerPayload int) []clientSta Sequence: p.Sequence, AgentAggregation: p.AgentAggregation, ContainerID: p.ContainerID, - Stats: make([]pb.ClientStatsBucket, 0, maxEntriesPerPayload), + Stats: make([]*pb.ClientStatsBucket, 0, maxEntriesPerPayload), }, } } @@ -304,7 +309,7 @@ func splitPayload(p pb.ClientStatsPayload, maxEntriesPerPayload int) []clientSta if !ok { bi = len(payloads[j].Stats) payloads[j].bucketIndexes[tw] = bi - payloads[j].Stats = append(payloads[j].Stats, pb.ClientStatsBucket{Start: tw.start, Duration: tw.duration}) + payloads[j].Stats = append(payloads[j].Stats, &pb.ClientStatsBucket{Start: tw.start, Duration: tw.duration}) } // here, we can just append the group, because there are no duplicate groups in the original stats payloads sent to the writer. payloads[j].Stats[bi].Stats = append(payloads[j].Stats[bi].Stats, g) diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/trace/writer/trace.go b/vendor/github.com/DataDog/datadog-agent/pkg/trace/writer/trace.go index af14f5bdb..fc4a23aad 100644 --- a/vendor/github.com/DataDog/datadog-agent/pkg/trace/writer/trace.go +++ b/vendor/github.com/DataDog/datadog-agent/pkg/trace/writer/trace.go @@ -13,12 +13,12 @@ import ( "sync" "time" + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" "github.com/DataDog/datadog-agent/pkg/trace/config" "github.com/DataDog/datadog-agent/pkg/trace/info" "github.com/DataDog/datadog-agent/pkg/trace/log" "github.com/DataDog/datadog-agent/pkg/trace/metrics" "github.com/DataDog/datadog-agent/pkg/trace/metrics/timing" - "github.com/DataDog/datadog-agent/pkg/trace/pb" "github.com/DataDog/datadog-agent/pkg/trace/telemetry" ) diff --git a/vendor/modules.txt b/vendor/modules.txt index ca0b81cd4..2c1f2f509 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -43,14 +43,14 @@ github.com/DataDog/agent-payload/v5/gogen # github.com/DataDog/datadog-agent/pkg/obfuscate v0.48.0-beta.1 ## explicit; go 1.12 github.com/DataDog/datadog-agent/pkg/obfuscate -# github.com/DataDog/datadog-agent/pkg/proto v0.48.0-beta.1 +# github.com/DataDog/datadog-agent/pkg/proto v0.48.0-beta.1 => github.com/DataDog/datadog-agent/pkg/proto v0.48.0-beta.1 ## explicit; go 1.19 github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace # github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.48.0-beta.1 ## explicit; go 1.18 github.com/DataDog/datadog-agent/pkg/remoteconfig/state github.com/DataDog/datadog-agent/pkg/remoteconfig/state/products/apmsampling -# github.com/DataDog/datadog-agent/pkg/trace v0.48.0-devel +# github.com/DataDog/datadog-agent/pkg/trace v0.48.0-devel => github.com/DataDog/datadog-agent/pkg/trace v0.48.0-beta.1 ## explicit; go 1.18 github.com/DataDog/datadog-agent/pkg/trace/agent github.com/DataDog/datadog-agent/pkg/trace/api @@ -63,7 +63,6 @@ github.com/DataDog/datadog-agent/pkg/trace/info github.com/DataDog/datadog-agent/pkg/trace/log 
github.com/DataDog/datadog-agent/pkg/trace/metrics github.com/DataDog/datadog-agent/pkg/trace/metrics/timing -github.com/DataDog/datadog-agent/pkg/trace/pb github.com/DataDog/datadog-agent/pkg/trace/remoteconfighandler github.com/DataDog/datadog-agent/pkg/trace/sampler github.com/DataDog/datadog-agent/pkg/trace/stats @@ -2496,3 +2495,5 @@ sigs.k8s.io/yaml # github.com/go-openapi/spec v0.20.5 => github.com/go-openapi/spec v0.20.6 # github.com/theupdateframework/go-tuf v0.3.1 => github.com/DataDog/go-tuf v0.3.0--fix-localmeta # github.com/outcaste-io/ristretto v0.2.0 => github.com/outcaste-io/ristretto v0.2.1 +# github.com/DataDog/datadog-agent/pkg/proto => github.com/DataDog/datadog-agent/pkg/proto v0.48.0-beta.1 +# github.com/DataDog/datadog-agent/pkg/trace => github.com/DataDog/datadog-agent/pkg/trace v0.48.0-beta.1 From 54c557decb0cfdf923919ea9efbabf616d7a1771 Mon Sep 17 00:00:00 2001 From: Bryan Aguilar Date: Tue, 15 Aug 2023 15:05:04 -0700 Subject: [PATCH 8/8] remove new target --- Makefile | 15 --------------- Makefile.Common | 4 ---- 2 files changed, 19 deletions(-) diff --git a/Makefile b/Makefile index dd69dbc2c..9b341cfa2 100644 --- a/Makefile +++ b/Makefile @@ -199,21 +199,6 @@ golint: lint-static-check gomod-tidy: @$(MAKE) for-all-target TARGET="mod-tidy" -.PHONY: gomod-update-collector -gomod-update-collector: -ifndef CORE_VER - @echo "CORE_VER not defined" - @echo "usage: CORE_VER=v0.2.0 CONTRIB_VER=v0.2.0 make gomod-update-collector" - exit 1 -endif -ifndef CONTRIB_VER - @echo "CONTRIB_VER not defined" - @echo "usage: CORE_VER=v0.2.0 CONTRIB_VER=v0.2.0 make gomod-update-collector" - exit 1 -endif - @$(MAKE) for-all-target TARGET="update-collector-ver" - - .PHONY: gomod-vendor gomod-vendor: go mod vendor diff --git a/Makefile.Common b/Makefile.Common index fd08f563c..500f9ed32 100644 --- a/Makefile.Common +++ b/Makefile.Common @@ -33,7 +33,3 @@ lint: mod-tidy: go mod tidy -.PHONY: update-collector-ver -update-collector-ver: - awk -v CORE="$(CORE_VER)" -v CONTRIB="$(CONTRIB_VER)" '/go.opentelemetry.io\/collector/ {sub("v[0-9]+\\.[0-9]+(\\.[0-9]+)?", CORE)} /github.com\/open-telemetry\/opentelemetry-collector-contrib/ {sub("v[0-9]+\\.[0-9]+(\\.[0-9]+)?", CONTRIB)} 1' \ - go.mod > go.mod.tmp && mv go.mod.tmp go.mod
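Note: the pkg/trace/stats migration above switches pb.ClientStatsPayload, pb.ClientStatsBucket and pb.ClientGroupedStats from value to pointer semantics, which is why bucket.add in client_stats_aggregator.go now copies the first payload field by field instead of storing it directly: the aggregator's In channel hands over a pointer, and a.add keeps reassigning p.Stats per client bucket, so a retained pointer would see those later mutations. A minimal sketch of the aliasing hazard, using a local stand-in type (Payload here is hypothetical; the real type is *pb.ClientStatsPayload from pkg/proto/pbgo/trace):

package main

import "fmt"

// Payload stands in for *pb.ClientStatsPayload.
type Payload struct {
	Hostname string
	Stats    []int
}

type bucket struct {
	first *Payload // retained across calls, so it must not alias caller memory
	n     int
}

// add mirrors bucket.add in the hunk above: the first payload is copied
// because the producer keeps mutating p after handing it over.
func (b *bucket) add(p *Payload) {
	b.n++
	if b.n == 1 {
		b.first = &Payload{
			Hostname: p.Hostname,
			Stats:    p.Stats, // snapshots the slice header at add time
		}
		return
	}
	// later payloads are aggregated immediately and never retained
}

func main() {
	b := &bucket{}
	p := &Payload{Hostname: "h1", Stats: []int{1}}
	b.add(p)
	p.Hostname = "mutated" // producer reuse no longer corrupts the bucket
	p.Stats = []int{2}
	fmt.Println(b.first.Hostname, b.first.Stats) // h1 [1]
}

The copy is shallow, which matches the patch: reassigning p.Stats to a fresh slice is invisible through the copied struct but would be visible through a retained pointer.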
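Note: traceutil/azure.go drops the DD_RUNTIME lookup in favour of inferring the runtime from runtime.GOOS plus the App Service WEBSITE_* variables. A compressed, self-contained restatement of the new decision table, handy for eyeballing the mapping (string literals are inlined here; the real code uses the named constants from the hunk, and the package wiring is illustrative only):

package main

import "fmt"

// getRuntime condenses getWindowsRuntime/getLinuxRuntime from the hunk above.
func getRuntime(websiteOS string, getenv func(string) string) string {
	switch websiteOS {
	case "windows":
		if getenv("WEBSITE_STACK") == "JAVA" {
			return "Java"
		}
		if getenv("WEBSITE_NODE_DEFAULT_VERSION") != "" {
			return "Node.js"
		}
		return ".NET" // Windows App Service only ships Java, Node and .NET
	case "linux", "darwin":
		switch getenv("WEBSITE_STACK") {
		case "DOCKER":
			return "Container"
		case "NODE":
			return "Node.js"
		case "PYTHON":
			return "Python"
		case "JAVA", "TOMCAT":
			return "Java"
		case "DOTNETCORE":
			return ".NET"
		case "PHP":
			return "PHP"
		case "":
			if getenv("DOCKER_SERVER_VERSION") != "" {
				return "Container"
			}
		}
	}
	return "unknown"
}

func main() {
	env := map[string]string{"WEBSITE_STACK": "PYTHON"}
	getenv := func(k string) string { return env[k] }
	fmt.Println(getRuntime("linux", getenv)) // Python
}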
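Note: writer/stats.go keeps the invariant that each outgoing StatsPayload carries at most maxEntriesPerPayload entries; buildPayloads now also rebuilds `current` after each flush instead of reusing it with Stats set to nil, so the agent metadata fields are carried on every group. The distribution step of splitPayload is not fully visible in this hunk; the sketch below assumes entries are dealt round-robin across ceil(n/max) payloads, which keeps sizes balanced. Treat it as an illustration of the shape, not the exact algorithm:

package main

import "fmt"

// split reduces the splitting idea to ints: each entry lands in one of
// ceil(len/max) output payloads, so no output exceeds max entries.
func split(entries []int, max int) [][]int {
	if len(entries) == 0 {
		return nil
	}
	n := (len(entries) + max - 1) / max // ceil, mirrors the nbPayloads count
	out := make([][]int, n)
	for i, e := range entries {
		j := i % n // assumed round-robin placement
		out[j] = append(out[j], e)
	}
	return out
}

func main() {
	fmt.Println(split([]int{1, 2, 3, 4, 5, 6, 7}, 3)) // [[1 4 7] [2 5] [3 6]]
}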
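Note: PATCH 8/8 deletes the gomod-update-collector / update-collector-ver plumbing, so collector version bumps go back to being direct go.mod edits (as in PATCH 1/8). For anyone who still wants the old awk behaviour, a rough Go equivalent is sketched below; the file name bump.go and the env-var interface mirror the removed target but are otherwise an assumption, not project tooling:

package main

import (
	"log"
	"os"
	"regexp"
)

// semver matches versions like v0.83.0, the same pattern the awk rule used.
var semver = regexp.MustCompile(`v[0-9]+\.[0-9]+(\.[0-9]+)?`)

func main() {
	core, contrib := os.Getenv("CORE_VER"), os.Getenv("CONTRIB_VER")
	if core == "" || contrib == "" {
		log.Fatal("usage: CORE_VER=v0.2.0 CONTRIB_VER=v0.2.0 go run bump.go")
	}
	data, err := os.ReadFile("go.mod")
	if err != nil {
		log.Fatal(err)
	}
	// Rewrite core lines to CORE_VER and contrib lines to CONTRIB_VER; the
	// two path patterns are disjoint, so order does not matter.
	coreLine := regexp.MustCompile(`(?m)^.*go\.opentelemetry\.io/collector.*$`)
	contribLine := regexp.MustCompile(`(?m)^.*opentelemetry-collector-contrib.*$`)
	out := coreLine.ReplaceAllFunc(data, func(l []byte) []byte {
		return semver.ReplaceAll(l, []byte(core))
	})
	out = contribLine.ReplaceAllFunc(out, func(l []byte) []byte {
		return semver.ReplaceAll(l, []byte(contrib))
	})
	if err := os.WriteFile("go.mod", out, 0o644); err != nil {
		log.Fatal(err)
	}
}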