diff --git a/.github/workflows/prometheus-compliance-tests.yml b/.github/workflows/prometheus-compliance-tests.yml
index a667b19864e0..6cf45606fe13 100644
--- a/.github/workflows/prometheus-compliance-tests.yml
+++ b/.github/workflows/prometheus-compliance-tests.yml
@@ -56,7 +56,7 @@ jobs:
       - name: Copy binary to compliance directory
         # The required name of the downloaded artifact is `otelcol_0.42.0_linux_amd64`, so we place the collector contrib artifact under the same name in the bin folder to run.
         # Source: https://github.com/prometheus/compliance/blob/12cbdf92abf7737531871ab7620a2de965fc5382/remote_write_sender/targets/otel.go#L8
-        run: mkdir compliance/remotewrite/sender/bin && cp opentelemetry-collector-contrib/bin/otelcontribcol_linux_amd64 compliance/remotewrite/sender/bin/otelcol_0.42.0_linux_amd64 
+        run: mkdir compliance/remotewrite/sender/bin && cp opentelemetry-collector-contrib/bin/otelcontribcol_linux_amd64 compliance/remotewrite/sender/bin/otelcol_0.42.0_linux_amd64
       - name: clean up mod file
         run: go mod tidy
         working-directory: compliance/remotewrite/sender
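Reviewer note: the rename in this step matters because the compliance harness resolves the collector binary by a hardcoded name rather than discovering it (see the linked otel.go). A minimal Go sketch of that kind of lookup — the function and config path here are hypothetical stand-ins, not the harness's actual code:

```go
package main

import (
	"fmt"
	"os/exec"
	"path/filepath"
)

// runCollector resolves the fixed binary name the harness expects under bin/;
// if CI copies otelcontribcol to any other name, this lookup fails.
func runCollector(senderDir, configPath string) error {
	bin := filepath.Join(senderDir, "bin", "otelcol_0.42.0_linux_amd64") // hardcoded name
	out, err := exec.Command(bin, "--config", configPath).CombinedOutput()
	if err != nil {
		return fmt.Errorf("collector run failed: %w\n%s", err, out)
	}
	return nil
}

func main() {
	if err := runCollector("compliance/remotewrite/sender", "config.yaml"); err != nil {
		fmt.Println(err)
	}
}
```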
diff --git a/Makefile b/Makefile
index 42b0c0e84e2a..65ed10959e65 100644
--- a/Makefile
+++ b/Makefile
@@ -558,4 +558,4 @@ checks:
 	$(MAKE) gendistributions
 	$(MAKE) -j4 generate
 	$(MAKE) multimod-verify
-	git diff --exit-code || (echo 'Some files need committing' && git status && exit 1) 
+	git diff --exit-code || (echo 'Some files need committing' && git status && exit 1)
diff --git a/Makefile.Common b/Makefile.Common
index d3a16de3b865..9d94e590e1f1 100644
--- a/Makefile.Common
+++ b/Makefile.Common
@@ -21,7 +21,7 @@ endif
 # SRC_ROOT is the top of the source tree.
 SRC_ROOT := $(shell git rev-parse --show-toplevel)
 # SRC_PARENT_DIR is the absolute path of the source tree's parent directory
-SRC_PARENT_DIR := $(shell dirname $(SRC_ROOT)) 
+SRC_PARENT_DIR := $(shell dirname $(SRC_ROOT))
 
 # build tags required by any component should be defined as independent variables and later added to GO_BUILD_TAGS below
 GO_BUILD_TAGS=""
@@ -89,7 +89,7 @@ ALL_SRC := $(shell find $(ALL_PKG_DIRS) -name '*.go' \
 							-not -path '*/local/*' \
 							-type f | sort)
 
-ALL_SRC_AND_SHELL := find . -type f \( -iname '*.go' -o -iname "*.sh" \) ! -path '**/third_party/*' | sort 
+ALL_SRC_AND_SHELL := find . -type f \( -iname '*.go' -o -iname "*.sh" \) ! -path '**/third_party/*' | sort
 
 # All source code and documents. Used in spell check.
 ALL_SRC_AND_DOC_CMD := find $(ALL_PKG_DIRS) -name "*.md" -o -name "*.go" -o -name "*.yaml" -not -path '*/third_party/*' -type f | sort
@@ -137,7 +137,7 @@ test-with-cover: $(GOTESTSUM)
 .PHONY: do-unit-tests-with-cover
 do-unit-tests-with-cover: $(GOTESTSUM)
 	@echo "running $(GOCMD) unit test ./... + coverage in `pwd`"
-	$(GOTESTSUM) $(GOTESTSUM_OPT) --packages="./..." -- $(GOTEST_OPT_WITH_COVERAGE) 
+	$(GOTESTSUM) $(GOTESTSUM_OPT) --packages="./..." -- $(GOTEST_OPT_WITH_COVERAGE)
 	$(GOCMD) tool cover -html=coverage.txt -o coverage.html
 
 .PHONY: mod-integration-test
@@ -145,16 +145,16 @@ mod-integration-test: $(GOTESTSUM)
 	@echo "running $(GOCMD) integration test ./... in `pwd`"
 	$(GOTESTSUM) $(GOTESTSUM_OPT) --packages="./..." -- $(GOTEST_OPT_WITH_INTEGRATION)
 	@if [ -e integration-coverage.txt ]; then \
-		$(GOCMD) tool cover -html=integration-coverage.txt -o integration-coverage.html; \ 
-	fi 
+		$(GOCMD) tool cover -html=integration-coverage.txt -o integration-coverage.html; \
+	fi
 
 .PHONY: do-integration-tests-with-cover
 do-integration-tests-with-cover: $(GOTESTSUM)
 	@echo "running $(GOCMD) integration test ./... + coverage in `pwd`"
-	$(GOTESTSUM) $(GOTESTSUM_OPT) --packages="./..." -- $(GOTEST_OPT_WITH_INTEGRATION_COVERAGE) 
+	$(GOTESTSUM) $(GOTESTSUM_OPT) --packages="./..." -- $(GOTEST_OPT_WITH_INTEGRATION_COVERAGE)
 	@if [ -e integration-coverage.txt ]; then \
-		$(GOCMD) tool cover -html=integration-coverage.txt -o integration-coverage.html; \
-	fi 
+		$(GOCMD) tool cover -html=integration-coverage.txt -o integration-coverage.html; \
+	fi
 
 .PHONY: benchmark
 benchmark: $(GOTESTSUM)
@@ -195,8 +195,8 @@ checklinks:
 
 .PHONY: fmt
 fmt: $(GOIMPORTS)
-	gofmt -w -s ./ 
-	$(GOIMPORTS) -w -local github.com/open-telemetry/opentelemetry-collector-contrib ./ 
+	gofmt -w -s ./
+	$(GOIMPORTS) -w -local github.com/open-telemetry/opentelemetry-collector-contrib ./
 
 .PHONY: lint
 lint: $(LINT) checklicense misspell
diff --git a/confmap/provider/s3provider/README.md b/confmap/provider/s3provider/README.md
index b863f876890e..d61a968b557e 100644
--- a/confmap/provider/s3provider/README.md
+++ b/confmap/provider/s3provider/README.md
@@ -1,5 +1,5 @@
 ## Summary
-This package provides a `ConfigMapProvider` implementation for Amazon S3 (`s3provider`) that gives the Collector the ability to load configuration by fetching and reading config objects stored in Amazon S3. 
+This package provides a `ConfigMapProvider` implementation for Amazon S3 (`s3provider`) that gives the Collector the ability to load configuration by fetching and reading config objects stored in Amazon S3.
 ## How it works
 - It will be called by `ConfigMapResolver` to load configuration for the Collector.
 - By giving a config URI starting with prefix `s3://`, this `s3provider` will be used to download config objects from the given S3 URIs, and then use the downloaded configuration during Collector initialization.
diff --git a/exporter/alertmanagerexporter/alertmanager_exporter_test.go b/exporter/alertmanagerexporter/alertmanager_exporter_test.go
index 0c6abbcca2c5..dfe1e18baecf 100644
--- a/exporter/alertmanagerexporter/alertmanager_exporter_test.go
+++ b/exporter/alertmanagerexporter/alertmanager_exporter_test.go
@@ -41,7 +41,7 @@ func createTracesAndSpan() (ptrace.Traces, ptrace.Span) {
 	attrs.PutInt("attr2", 40)
 	attrs.PutDouble("attr3", 3.14)
 
-	// add a span 
+	// add a span
 	spans := rs.ScopeSpans().AppendEmpty().Spans()
 	spans.EnsureCapacity(1)
 	span := spans.AppendEmpty()
diff --git a/exporter/awsemfexporter/README.md b/exporter/awsemfexporter/README.md
index 9dc35146e314..c6ef12e20190 100644
--- a/exporter/awsemfexporter/README.md
+++ b/exporter/awsemfexporter/README.md
@@ -43,7 +43,7 @@ The following exporter configuration parameters are supported.
 | `resource_to_telemetry_conversion` | "resource_to_telemetry_conversion" is the option for converting resource attributes to telemetry attributes. It has only one config option, `enabled`. For metrics, if `enabled=true`, all the resource attributes will be converted to metric labels by default. See the `Resource Attributes to Metric Labels` section below for examples. | `enabled=false` |
 | `output_destination` | "output_destination" is an option to specify the EMFExporter output. Currently, two options are available: "cloudwatch" or "stdout". | `cloudwatch` |
 | `detailed_metrics` | Retain detailed datapoint values in exported metrics (e.g. instead of exporting a quantile as a statistical value, preserve the quantile's population) | `false` |
-| `parse_json_encoded_attr_values` | List of attribute keys whose corresponding values are JSON-encoded strings and will be converted to JSON structures in emf logs. For example, the attribute string value "{\\"x\\":5,\\"y\\":6}" will be converted to a json object: ```{"x": 5, "y": 6}``` | [ ] | 
+| `parse_json_encoded_attr_values` | List of attribute keys whose corresponding values are JSON-encoded strings and will be converted to JSON structures in emf logs. For example, the attribute string value "{\\"x\\":5,\\"y\\":6}" will be converted to a json object: ```{"x": 5, "y": 6}``` | [ ] |
 | [`metric_declarations`](#metric_declaration) | List of rules for filtering exported metrics and their dimensions. | [ ] |
 | [`metric_descriptors`](#metric_descriptor) | List of rules for inserting or updating metric descriptors. | [ ] |
 | `retain_initial_value_of_delta_metric` | This option specifies how the first value of a metric is handled. AWS EMF expects metric values to only contain deltas to the previous value. In the default case the first received value is therefore not sent to AWS but only used as a baseline for follow-up changes to this metric. This is fine for high-throughput metrics with stable labels (e.g. `requests{code=200}`). In this case it does not matter if the first value of this metric is discarded. However, when your metric describes infrequent events or events with high label cardinality, the exporter in its default configuration would still drop the first occurrence of this metric. With this configuration value set to `true`, the first value of all metrics will instead be sent to AWS. | false |
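Reviewer note: to make the `parse_json_encoded_attr_values` row above concrete, here is a small self-contained Go sketch of the conversion it describes, using plain `encoding/json` (the exporter's own implementation is not shown here):

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// An attribute value arriving as a JSON-encoded string...
	attr := "{\"x\":5,\"y\":6}"

	// ...is decoded into a structured object before being written to the EMF log.
	var decoded map[string]any
	if err := json.Unmarshal([]byte(attr), &decoded); err != nil {
		panic(err)
	}
	fmt.Println(decoded) // map[x:5 y:6]
}
```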
diff --git a/exporter/azuremonitorexporter/trace_to_envelope_test.go b/exporter/azuremonitorexporter/trace_to_envelope_test.go
index f3e27583e5a8..f03ecba4a146 100644
--- a/exporter/azuremonitorexporter/trace_to_envelope_test.go
+++ b/exporter/azuremonitorexporter/trace_to_envelope_test.go
@@ -105,7 +105,7 @@ var (
 // - a specific SpanStatus as opposed to none
 // - an error http.status_code
 // - http.route is specified which should replace Span name as part of the RequestData name
-// - no http.client_ip or net.peer.ip specified which causes data.Source to be empty 
+// - no http.client_ip or net.peer.ip specified which causes data.Source to be empty
 // - adds a few different types of attributes
 func TestHTTPServerSpanToRequestDataAttributeSet1(t *testing.T) {
 	span := getDefaultHTTPServerSpan()
diff --git a/exporter/clickhouseexporter/internal/metrics_model.go b/exporter/clickhouseexporter/internal/metrics_model.go
index 92c51b550643..978a36d75be8 100644
--- a/exporter/clickhouseexporter/internal/metrics_model.go
+++ b/exporter/clickhouseexporter/internal/metrics_model.go
@@ -43,7 +43,7 @@ type MetricsModel interface {
 	insert(ctx context.Context, db *sql.DB) error
 }
 
-// MetricsMetaData contains specific metric data 
+// MetricsMetaData contains specific metric data
 type MetricsMetaData struct {
 	ResAttr map[string]string
 	ResURL  string
diff --git a/exporter/elasticsearchexporter/config.go b/exporter/elasticsearchexporter/config.go
index c2a0f755d9fa..547ff693894a 100644
--- a/exporter/elasticsearchexporter/config.go
+++ b/exporter/elasticsearchexporter/config.go
@@ -152,7 +152,7 @@ type DiscoverySettings struct {
 	Interval time.Duration `mapstructure:"interval"`
 }
 
-// FlushSettings defines settings for configuring the write buffer flushing 
+// FlushSettings defines settings for configuring the write buffer flushing
 // policy in the Elasticsearch exporter. The exporter sends a bulk request with
 // all events already serialized into the send-buffer.
 type FlushSettings struct {
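Reviewer note: for readers skimming the `FlushSettings` hunk above, flush settings of this shape usually pair a size trigger with a time trigger. A minimal sketch — the field names and mapstructure tags below are assumptions for illustration, not necessarily the exporter's exact definition:

```go
package main

import (
	"fmt"
	"time"
)

// FlushSettings sketches a typical write-buffer flush policy: flush once the
// serialized send-buffer reaches Bytes, or after Interval at the latest.
type FlushSettings struct {
	Bytes    int           `mapstructure:"bytes"`
	Interval time.Duration `mapstructure:"interval"`
}

func main() {
	fs := FlushSettings{Bytes: 5 << 20, Interval: 30 * time.Second}
	fmt.Printf("flush at %d bytes or every %s\n", fs.Bytes, fs.Interval)
}
```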
diff --git a/exporter/googlecloudexporter/README.md b/exporter/googlecloudexporter/README.md
index 78c7b96d9900..7b14ea18da79 100644
--- a/exporter/googlecloudexporter/README.md
+++ b/exporter/googlecloudexporter/README.md
@@ -136,7 +136,7 @@ These instructions are to get you up and running quickly with the GCP exporter i
    section](#prerequisite-authenticating) above.
 
-4. **Run the collector.** The following runs the collector in the foreground, so please execute it in a separate terminal. 
+4. **Run the collector.** The following runs the collector in the foreground, so please execute it in a separate terminal.
 
    ```sh
    ./otelcol-contrib --config=config.yaml
@@ -435,7 +435,7 @@ By default, the exporter sends telemetry to the project specified by `project` i
 The `gcp.project.id` label can be combined with the `destination_project_quota` option to attribute quota usage to the project parsed by the label. This feature is currently only available for traces and metrics. The Collector's default service account will need `roles/serviceusage.serviceUsageConsumer` IAM permissions in the destination quota project.
 
-Note that this option will not work if a quota project is already defined in your Collector's GCP credentials. In this case, the telemetry will fail to export with a "project not found" error. 
+Note that this option will not work if a quota project is already defined in your Collector's GCP credentials. In this case, the telemetry will fail to export with a "project not found" error.
 To remove the quota project, manually edit your [ADC file](https://cloud.google.com/docs/authentication/application-default-credentials#personal) (if it exists) to delete the `quota_project_id` entry line.
 
 ## Features and Feature-Gates
diff --git a/exporter/googlecloudpubsubexporter/config.go b/exporter/googlecloudpubsubexporter/config.go
index 74568c00021b..829e659a6238 100644
--- a/exporter/googlecloudpubsubexporter/config.go
+++ b/exporter/googlecloudpubsubexporter/config.go
@@ -38,7 +38,7 @@ type Config struct {
 
 // WatermarkConfig customizes the behavior of the watermark
 type WatermarkConfig struct {
-	// Behavior of the watermark. Determines which timestamp (none, earliest and current, current being the default) 
+	// Behavior of the watermark. Determines which timestamp (none, earliest and current, current being the default)
 	// will set the timestamp on pubsub based on timestamps of the events inside the message
 	Behavior string `mapstructure:"behavior"`
 	// Indication on how much the timestamp can drift from the current time, the timestamp will be capped to the allowed
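Reviewer note: a rough sketch of how those three watermark behaviors could resolve a timestamp, with drift capping as described by the next field in the struct. This is an illustration under assumed semantics ("earliest" = oldest event timestamp, "current" = wall-clock now), not the exporter's actual code:

```go
package main

import (
	"fmt"
	"time"
)

// watermark picks the timestamp to attach to an outgoing pubsub message.
// Assumed semantics: "none" sets no timestamp, "earliest" uses the oldest
// event timestamp in the message, "current" uses the current time.
func watermark(behavior string, eventTimes []time.Time, allowedDrift time.Duration) (time.Time, bool) {
	now := time.Now()
	var ts time.Time
	switch behavior {
	case "earliest":
		if len(eventTimes) == 0 {
			return now, true
		}
		ts = eventTimes[0]
		for _, t := range eventTimes[1:] {
			if t.Before(ts) {
				ts = t
			}
		}
	case "current":
		ts = now
	default: // "none"
		return time.Time{}, false
	}
	// Cap how far the timestamp may drift from the current time.
	if now.Sub(ts) > allowedDrift {
		ts = now.Add(-allowedDrift)
	}
	return ts, true
}

func main() {
	ts, ok := watermark("earliest", []time.Time{time.Now().Add(-time.Minute)}, time.Hour)
	fmt.Println(ts, ok)
}
```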
diff --git a/exporter/logzioexporter/config.go b/exporter/logzioexporter/config.go
index f1ac1e31719a..74c53e495832 100644
--- a/exporter/logzioexporter/config.go
+++ b/exporter/logzioexporter/config.go
@@ -36,7 +36,7 @@ func (c *Config) Validate() error {
 // checkAndWarnDeprecatedOptions checks for soon-to-be-deprecated configuration options (queue_max_length, queue_capacity, drain_interval, custom_endpoint), logs a warning message, and maps them to the relevant updated options
 func (c *Config) checkAndWarnDeprecatedOptions(logger hclog.Logger) {
 	if c.QueueCapacity != 0 {
-		logger.Warn("You are using the deprecated`queue_capacity` option that will be removed in the next release; use exporter helper configuration instead: https://github.com/open-telemetry/opentelemetry-collector/blob/main/exporter/exporterhelper/README.md")
+		logger.Warn("You are using the deprecated `queue_capacity` option that will be removed in the next release; use exporter helper configuration instead: https://github.com/open-telemetry/opentelemetry-collector/blob/main/exporter/exporterhelper/README.md")
 	}
 	// Warn and map queue_max_length -> QueueSettings.QueueSize
 	if c.QueueMaxLength != 0 {
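Reviewer note: the `// Warn and map queue_max_length -> QueueSettings.QueueSize` comment describes the pattern the rest of this function follows. A reduced, self-contained sketch of that warn-and-map pattern (types and field names simplified for illustration; not the exporter's exact code):

```go
package main

import "log"

// config carries one deprecated knob and its replacement (simplified).
type config struct {
	QueueMaxLength int // deprecated option
	QueueSize      int // replacement in the exporter helper queue settings
}

// mapDeprecated warns about the old option, then forwards its value so the
// old configuration keeps working until the option is removed.
func mapDeprecated(c *config) {
	if c.QueueMaxLength != 0 {
		log.Println("`queue_max_length` is deprecated; use exporter helper configuration instead")
		c.QueueSize = c.QueueMaxLength
	}
}

func main() {
	c := &config{QueueMaxLength: 500}
	mapDeprecated(c)
	log.Println("queue size:", c.QueueSize) // 500
}
```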
diff --git a/exporter/prometheusremotewriteexporter/DESIGN.md b/exporter/prometheusremotewriteexporter/DESIGN.md
index 3fbf369050f0..36946ddccd5f 100644
--- a/exporter/prometheusremotewriteexporter/DESIGN.md
+++ b/exporter/prometheusremotewriteexporter/DESIGN.md
@@ -45,10 +45,10 @@ Because of the gaps mentioned above, this project will convert from the current
 
 ## **2. Prometheus Remote Write/Cortex Exporter**
 
-The Prometheus remote write/Cortex exporter should receive OTLP metrics, group data points by metric name and label set, convert each group to a TimeSeries, and send all TimeSeries to a storage backend via HTTP. 
+The Prometheus remote write/Cortex exporter should receive OTLP metrics, group data points by metric name and label set, convert each group to a TimeSeries, and send all TimeSeries to a storage backend via HTTP.
 
 ### **2.1 Receiving Metrics**
-The Prometheus remote write/Cortex exporter receives a MetricsData instance in its PushMetrics() function. MetricsData contains a collection of Metric instances. Each Metric instance contains a series of data points, and each data point has a set of labels associated with it. Since Prometheus remote write TimeSeries are identified by unique sets of labels, the exporter needs to group data points within each Metric instance by their label set, and convert each group to a TimeSeries. 
+The Prometheus remote write/Cortex exporter receives a MetricsData instance in its PushMetrics() function. MetricsData contains a collection of Metric instances. Each Metric instance contains a series of data points, and each data point has a set of labels associated with it. Since Prometheus remote write TimeSeries are identified by unique sets of labels, the exporter needs to group data points within each Metric instance by their label set, and convert each group to a TimeSeries.
 
 To group data points by label set, the exporter should create a map with each PushMetrics() call. The key of the map should represent a combination of the following information:
@@ -67,7 +67,7 @@ The value of the map should be Prometheus TimeSeries, and each data point’s va
 
 Pseudocode:
 
-    func PushMetrics(metricsData) { 
+    func PushMetrics(metricsData) {
 
       // Create a map that stores distinct TimeSeries
       map := make(map[String][]TimeSeries)
@@ -81,7 +81,7 @@ Pseudocode:
 
           // Add to TimeSeries
 
       // Sends TimeSeries to backend
-      export(map) 
+      export(map)
 
     }
 
 ### **2.2 Mapping of OTLP Metrics to TimeSeries**
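Reviewer note: to ground §2.1's grouping step, here is a compilable Go sketch of the label-set keying that the pseudocode above relies on. The names (`Label`, `signature`) are invented for illustration; the real exporter's types differ:

```go
package main

import (
	"fmt"
	"sort"
)

// Label is a simplified stand-in for a remote write label pair.
type Label struct{ Name, Value string }

// signature canonicalizes a metric name plus label set into a map key:
// labels are sorted by name so that equal sets always produce equal keys.
func signature(metric string, labels []Label) string {
	sort.Slice(labels, func(i, j int) bool { return labels[i].Name < labels[j].Name })
	key := metric
	for _, l := range labels {
		key += fmt.Sprintf(";%s=%q", l.Name, l.Value)
	}
	return key
}

func main() {
	series := map[string][]float64{} // signature -> samples of one TimeSeries

	// Two data points with the same label set, listed in different order,
	// must land in the same TimeSeries.
	a := []Label{{"job", "api"}, {"code", "200"}}
	b := []Label{{"code", "200"}, {"job", "api"}}
	series[signature("http_requests_total", a)] = append(series[signature("http_requests_total", a)], 1)
	series[signature("http_requests_total", b)] = append(series[signature("http_requests_total", b)], 2)

	fmt.Println(len(series)) // 1
}
```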
diff --git a/exporter/signalfxexporter/config.go b/exporter/signalfxexporter/config.go
index b9ec08570cd3..9703b9313db8 100644
--- a/exporter/signalfxexporter/config.go
+++ b/exporter/signalfxexporter/config.go
@@ -131,7 +131,7 @@ type Config struct {
 	// to be used in a dimension key.
 	NonAlphanumericDimensionChars string `mapstructure:"nonalphanumeric_dimension_chars"`
 
-	// Whether to drop histogram bucket metrics dispatched to Splunk Observability. 
+	// Whether to drop histogram bucket metrics dispatched to Splunk Observability.
 	// Default value is set to false.
 	DropHistogramBuckets bool `mapstructure:"drop_histogram_buckets"`
diff --git a/extension/bearertokenauthextension/README.md b/extension/bearertokenauthextension/README.md
index 44683de5786a..cb19e0c2bb26 100644
--- a/extension/bearertokenauthextension/README.md
+++ b/extension/bearertokenauthextension/README.md
@@ -14,7 +14,7 @@
 
 
-This extension implements both `configauth.ServerAuthenticator` and `configauth.ClientAuthenticator`. It can be used in both http and gRPC exporters inside the `auth` settings, as a means to embed a static token for every RPC call that will be made. 
+This extension implements both `configauth.ServerAuthenticator` and `configauth.ClientAuthenticator`. It can be used in both http and gRPC exporters inside the `auth` settings, as a means to embed a static token for every RPC call that will be made.
 
 The authenticator type has to be set to `bearertokenauth`.
diff --git a/extension/observer/ecsobserver/fetcher_test.go b/extension/observer/ecsobserver/fetcher_test.go
index e57ab4c4d24a..155b2bbd5d1b 100644
--- a/extension/observer/ecsobserver/fetcher_test.go
+++ b/extension/observer/ecsobserver/fetcher_test.go
@@ -225,7 +225,7 @@ func TestFetcher_AttachContainerInstance(t *testing.T) {
 		err = f.attachContainerInstance(ctx, tasks)
 		require.NoError(t, err)
 		assert.Nil(t, tasks[0].EC2)
-		// task instance pattern is 0 1 0 1 ..., nFargateInstances = 3 so the 4th task is running on instance 1 
+		// task instance pattern is 0 1 0 1 ..., nFargateInstances = 3 so the 4th task is running on instance 1
 		assert.Equal(t, "i-1", aws.StringValue(tasks[nFargateInstances].EC2.InstanceId))
 	})
 }
diff --git a/internal/coreinternal/attraction/attraction.go b/internal/coreinternal/attraction/attraction.go
index 19f3c46f061f..b1a5083eba71 100644
--- a/internal/coreinternal/attraction/attraction.go
+++ b/internal/coreinternal/attraction/attraction.go
@@ -34,7 +34,7 @@ type ActionKeyValue struct {
 	// The type of the value is inferred from the configuration.
 	Value any `mapstructure:"value"`
 
-	// A regex pattern must be specified for the action EXTRACT. 
+	// A regex pattern must be specified for the action EXTRACT.
 	// It uses the attribute specified by `key' to extract values from
 	// The target keys are inferred based on the names of the matcher groups
 	// provided and the names will be inferred based on the values of the
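Reviewer note: a sketch of the EXTRACT semantics the comment above describes — named capture groups in the pattern become new attribute keys, with the captured text as values. The pattern below is illustrative, not taken from the processor's docs:

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// A pattern with two named matcher groups, applied to the value of the
	// attribute selected by `key`.
	re := regexp.MustCompile(`^(?P<httpProtocol>.+)/(?P<httpVersion>.+)$`)
	value := "HTTP/1.1"

	m := re.FindStringSubmatch(value)
	if m == nil {
		return // no match: no attributes are extracted
	}
	for i, name := range re.SubexpNames() {
		if i == 0 || name == "" {
			continue
		}
		fmt.Printf("new attribute %s=%q\n", name, m[i])
	}
	// Output:
	// new attribute httpProtocol="HTTP"
	// new attribute httpVersion="1.1"
}
```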
diff --git a/pkg/stanza/fileconsumer/file_test.go b/pkg/stanza/fileconsumer/file_test.go
index 64c3193ac157..7739cf71153e 100644
--- a/pkg/stanza/fileconsumer/file_test.go
+++ b/pkg/stanza/fileconsumer/file_test.go
@@ -1340,7 +1340,7 @@ func TestStalePartialFingerprintDiscarded(t *testing.T) {
 	file1 := filetest.OpenTempWithPattern(t, tempDir, "*.log1")
 	file2 := filetest.OpenTempWithPattern(t, tempDir, "*.log2")
 
-	// Two files with the same fingerprint, both smaller than the configured fingerprint size 
+	// Two files with the same fingerprint, both smaller than the configured fingerprint size
 	content := "aaaaaaaaaaa"
 	filetest.WriteString(t, file1, content+"\n")
 	filetest.WriteString(t, file2, content+"\n")
@@ -1435,7 +1435,7 @@ func TestNoLostPartial(t *testing.T) {
 	operator, sink := testManager(t, cfg)
 	operator.persister = testutil.NewMockPersister("test")
 
-	// Two files with the same fingerprint, both smaller than the configured fingerprint size 
+	// Two files with the same fingerprint, both smaller than the configured fingerprint size
 	file1 := filetest.OpenTempWithPattern(t, tempDir, "*.log1")
 	file2 := filetest.OpenTempWithPattern(t, tempDir, "*.log2")
diff --git a/pkg/winperfcounters/internal/third_party/telegraf/win_perf_counters/performance_query.go b/pkg/winperfcounters/internal/third_party/telegraf/win_perf_counters/performance_query.go
index 2b094737e862..5c075e14e001 100644
--- a/pkg/winperfcounters/internal/third_party/telegraf/win_perf_counters/performance_query.go
+++ b/pkg/winperfcounters/internal/third_party/telegraf/win_perf_counters/performance_query.go
@@ -225,7 +225,7 @@ func UTF16PtrToString(s *uint16) string {
 	return string(utf16.Decode(slice))
 }
 
-// UTF16ToStringArray converts a list of Windows API NULL-terminated strings to a Go string array 
+// UTF16ToStringArray converts a list of Windows API NULL-terminated strings to a Go string array
 func UTF16ToStringArray(buf []uint16) []string {
 	var strings []string
 	nextLineStart := 0
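Reviewer note: for anyone unfamiliar with the "multi-string" layout this function consumes (items separated by NUL, list ended by a double NUL), here is a plausible standalone equivalent; the real implementation in the hunk above may differ in detail:

```go
package main

import (
	"fmt"
	"unicode/utf16"
)

// utf16ToStringArray splits a NUL-delimited UTF-16 buffer, as returned by
// several Windows APIs, into Go strings. A double NUL terminates the list.
func utf16ToStringArray(buf []uint16) []string {
	var out []string
	start := 0
	for i, r := range buf {
		if r == 0 {
			if i > start {
				out = append(out, string(utf16.Decode(buf[start:i])))
			}
			start = i + 1
		}
	}
	return out
}

func main() {
	// "ab", "c", then the final terminating NUL.
	buf := []uint16{'a', 'b', 0, 'c', 0, 0}
	fmt.Println(utf16ToStringArray(buf)) // [ab c]
}
```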
diff --git a/processor/routingprocessor/logs.go b/processor/routingprocessor/logs.go
index 6574c9f898d3..65bdb4f168f8 100644
--- a/processor/routingprocessor/logs.go
+++ b/processor/routingprocessor/logs.go
@@ -182,7 +182,7 @@ func (p *logProcessor) recordNonRoutedResourceLogs(ctx context.Context, routingK
 func (p *logProcessor) routeForContext(ctx context.Context, l plog.Logs) error {
 	value := p.extractor.extractFromContext(ctx)
 	exporters := p.router.getExporters(value)
-	if value == "" { // "" is a key for default exporters 
+	if value == "" { // "" is a key for default exporters
 		p.telemetry.RoutingProcessorNonRoutedLogRecords.Add(
 			ctx,
 			int64(l.LogRecordCount()),
diff --git a/processor/routingprocessor/metrics.go b/processor/routingprocessor/metrics.go
index 2a605e3a9188..c48d96ab958e 100644
--- a/processor/routingprocessor/metrics.go
+++ b/processor/routingprocessor/metrics.go
@@ -181,7 +181,7 @@ func (p *metricsProcessor) recordNonRoutedForResourceMetrics(ctx context.Context
 func (p *metricsProcessor) routeForContext(ctx context.Context, m pmetric.Metrics) error {
 	value := p.extractor.extractFromContext(ctx)
 	exporters := p.router.getExporters(value)
-	if value == "" { // "" is a key for default exporters 
+	if value == "" { // "" is a key for default exporters
 		p.telemetry.RoutingProcessorNonRoutedMetricPoints.Add(
 			ctx,
 			int64(m.MetricCount()),
diff --git a/processor/routingprocessor/traces.go b/processor/routingprocessor/traces.go
index 637a8e2a52ef..84fcface4327 100644
--- a/processor/routingprocessor/traces.go
+++ b/processor/routingprocessor/traces.go
@@ -175,7 +175,7 @@ func (p *tracesProcessor) recordNonRoutedResourceSpans(ctx context.Context, rout
 func (p *tracesProcessor) routeForContext(ctx context.Context, t ptrace.Traces) error {
 	value := p.extractor.extractFromContext(ctx)
 	exporters := p.router.getExporters(value)
-	if value == "" { // "" is a key for default exporters 
+	if value == "" { // "" is a key for default exporters
 		p.telemetry.RoutingProcessorNonRoutedSpans.Add(
 			ctx,
 			int64(t.SpanCount()),
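Reviewer note: the same `// "" is a key for default exporters` comment appears in all three signals above. A tiny sketch of the lookup convention it describes — a router keyed by the extracted context value, with the empty string reserved for the default route. Names are illustrative, not the processor's actual types:

```go
package main

import "fmt"

// router maps an extracted routing value to exporter names; the entry under
// "" holds the default exporters used when no value matched.
type router struct {
	routes map[string][]string
}

func (r *router) getExporters(value string) []string {
	if e, ok := r.routes[value]; ok {
		return e
	}
	return r.routes[""] // fall back to the default route
}

func main() {
	r := &router{routes: map[string][]string{
		"":     {"otlp/default"},
		"acme": {"otlp/acme"},
	}}
	fmt.Println(r.getExporters("acme"))    // [otlp/acme]
	fmt.Println(r.getExporters("unknown")) // [otlp/default]
}
```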
diff --git a/processor/tailsamplingprocessor/config.go b/processor/tailsamplingprocessor/config.go
index af2342f723f5..4185e7b9b0b2 100644
--- a/processor/tailsamplingprocessor/config.go
+++ b/processor/tailsamplingprocessor/config.go
@@ -109,7 +109,7 @@ type CompositeCfg struct {
 	RateAllocation []RateAllocationCfg `mapstructure:"rate_allocation"`
 }
 
-// RateAllocationCfg used within composite policy 
+// RateAllocationCfg used within composite policy
 type RateAllocationCfg struct {
 	Policy  string `mapstructure:"policy"`
 	Percent int64  `mapstructure:"percent"`
diff --git a/processor/transformprocessor/internal/metrics/func_convert_exponential_hist_to_explicit_hist_test.go b/processor/transformprocessor/internal/metrics/func_convert_exponential_hist_to_explicit_hist_test.go
index 1dd76ad4f097..8d5d6ec8aa3c 100644
--- a/processor/transformprocessor/internal/metrics/func_convert_exponential_hist_to_explicit_hist_test.go
+++ b/processor/transformprocessor/internal/metrics/func_convert_exponential_hist_to_explicit_hist_test.go
@@ -750,7 +750,7 @@ func TestRandom_convert_exponential_hist_to_explicit_hist(t *testing.T) {
 
 			// even though the distribution is random, we know that for this
 			// particular test case, the min value is 40, therefore the 1st 3 bucket
-			// counts should be 0, as they represent values 10 - 30 
+			// counts should be 0, as they represent values 10 - 30
 			for i := 0; i < 3; i++ {
 				assert.Equal(t, uint64(0), dp.BucketCounts().At(i), "bucket %d", i)
 			}
diff --git a/receiver/githubreceiver/internal/scraper/githubscraper/README.md b/receiver/githubreceiver/internal/scraper/githubscraper/README.md
index a8a4d9e8d98f..d960a1f2587e 100644
--- a/receiver/githubreceiver/internal/scraper/githubscraper/README.md
+++ b/receiver/githubreceiver/internal/scraper/githubscraper/README.md
@@ -6,7 +6,7 @@
 The GitHub scraper is subject to limitations found within GitHub's REST and
 GraphQL APIs. The following limitations are known:
 
 * The original creation date of a branch is not available via either of the
-  APIs. GitSCM (the tool) does provide Ref creation time, however this is not 
+  APIs. GitSCM (the tool) does provide Ref creation time, however this is not
   exposed. As such, we're forced to calculate the age by looking to see if any
   changes have been made to the branch, using that commit as the time from
   which we can grab the date. This means that age will reflect the time between
diff --git a/receiver/githubreceiver/internal/scraper/githubscraper/factory.go b/receiver/githubreceiver/internal/scraper/githubscraper/factory.go
index c6dfbc2e55c0..1a66e72b410c 100644
--- a/receiver/githubreceiver/internal/scraper/githubscraper/factory.go
+++ b/receiver/githubreceiver/internal/scraper/githubscraper/factory.go
@@ -15,7 +15,7 @@ import (
 	"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/githubreceiver/internal/metadata"
 )
 
-// This file implements the factory for the GitHub Scraper as part of the GitHub Receiver 
+// This file implements the factory for the GitHub Scraper as part of the GitHub Receiver
 
 const (
 	defaultHTTPTimeout = 15 * time.Second
diff --git a/receiver/googlecloudpubsubreceiver/README.md b/receiver/googlecloudpubsubreceiver/README.md
index 5e5ee6a2ccc9..9051bbe370fa 100644
--- a/receiver/googlecloudpubsubreceiver/README.md
+++ b/receiver/googlecloudpubsubreceiver/README.md
@@ -18,7 +18,7 @@ This receiver gets OTLP messages from a Google Cloud [Pubsub](https://cloud.goog
 The following configuration options are supported:
 
 * `project` (Optional): The Google Cloud Project the client connects to.
-* `subscription` (Required): The subscription name to receive OTLP data from. The subscription name should be a 
+* `subscription` (Required): The subscription name to receive OTLP data from. The subscription name should be a
   fully qualified resource name (eg: `projects/otel-project/subscriptions/otlp`).
 * `encoding` (Optional): The encoding that will be used to receive data from the subscription. This can either be
   `otlp_proto_trace`, `otlp_proto_metric`, `otlp_proto_log`, `cloud_logging`, or `raw_text` (see `encoding`). This will
diff --git a/receiver/mongodbatlasreceiver/logs.go b/receiver/mongodbatlasreceiver/logs.go
index 1abd297f6ae9..463fcc065551 100644
--- a/receiver/mongodbatlasreceiver/logs.go
+++ b/receiver/mongodbatlasreceiver/logs.go
@@ -40,7 +40,7 @@ type ProjectContext struct {
 	orgName string
 }
 
-// MongoDB Atlas Documentation reccommends a polling interval of 5 minutes: https://www.mongodb.com/docs/atlas/reference/api/logs/#logs
+// MongoDB Atlas Documentation recommends a polling interval of 5 minutes: https://www.mongodb.com/docs/atlas/reference/api/logs/#logs
 const collectionInterval = time.Minute * 5
 
 func newMongoDBAtlasLogsReceiver(settings rcvr.Settings, cfg *Config, consumer consumer.Logs) *logsReceiver {
diff --git a/receiver/skywalkingreceiver/internal/metrics/skywalkingproto_to_metrics.go b/receiver/skywalkingreceiver/internal/metrics/skywalkingproto_to_metrics.go
index 95a4ec66e137..3d784b5c2b31 100644
--- a/receiver/skywalkingreceiver/internal/metrics/skywalkingproto_to_metrics.go
+++ b/receiver/skywalkingreceiver/internal/metrics/skywalkingproto_to_metrics.go
@@ -79,7 +79,7 @@ func buildGCAttrs(gc *agent.GC) pcommon.Map {
 	return attrs
 }
 
-// memoryPoolMetricToMetrics translates memoryPool metrics 
+// memoryPoolMetricToMetrics translates memoryPool metrics
 func memoryPoolMetricToMetrics(timestamp int64, memoryPools []*agent.MemoryPool, sm pmetric.ScopeMetrics) {
 	PoolNameArr := []string{MemoryPoolInitName, MemoryPoolUsedName, MemoryPoolMaxName, MemoryPoolCommittedName}
 	dpsMp := make(map[string]pmetric.NumberDataPointSlice)
diff --git a/receiver/solacereceiver/README.md b/receiver/solacereceiver/README.md
index 325dbd304c6d..832fc6d7d701 100644
--- a/receiver/solacereceiver/README.md
+++ b/receiver/solacereceiver/README.md
@@ -76,7 +76,7 @@ service:
       receivers: [solace]
 ```
 
-High availability setup with SASL plain authentication (TLS enabled by default) 
+High availability setup with SASL plain authentication (TLS enabled by default)
 ```yaml
 receivers:
   solace/primary:
diff --git a/receiver/splunkhecreceiver/README.md b/receiver/splunkhecreceiver/README.md
index a4169cc8414b..993890c6153c 100644
--- a/receiver/splunkhecreceiver/README.md
+++ b/receiver/splunkhecreceiver/README.md
@@ -49,7 +49,7 @@ The following settings are optional:
 * `health_path` (default = '/services/collector/health'): The path reporting [health checks](https://docs.splunk.com/Documentation/Splunk/9.0.1/RESTREF/RESTinput#services.2Fcollector.2Fhealth).
 * `hec_metadata_to_otel_attrs/source` (default = 'com.splunk.source'): Specifies the mapping of the source field to a specific unified model attribute.
 * `hec_metadata_to_otel_attrs/sourcetype` (default = 'com.splunk.sourcetype'): Specifies the mapping of the sourcetype field to a specific unified model attribute.
-* `hec_metadata_to_otel_attrs/index` (default = 'com.splunk.index'): Specifies the mapping of the index field to a specific unified model attribute. 
+* `hec_metadata_to_otel_attrs/index` (default = 'com.splunk.index'): Specifies the mapping of the index field to a specific unified model attribute.
 * `hec_metadata_to_otel_attrs/host` (default = 'host.name'): Specifies the mapping of the host field to a specific unified model attribute.
 * `ack` (no default): Defines the ack extension to use for acknowledging events.
 * `extension` (no default): Specifies the ack extension ID the receiver should use. If left blank, ack is disabled.