From e9efb4088d7f935ba34830b96da29c1a351bfc86 Mon Sep 17 00:00:00 2001
From: Carson Ip
Date: Thu, 17 Oct 2024 15:35:59 +0100
Subject: [PATCH 01/12] [exporter/elasticsearch] Preserve attribute names and metric names on prefix conflict in OTel mapping mode (#35651)

#### Description
Metric names should be flattened and exported as is, even when one metric name is a prefix of another. The same applies to attributes on logs, metrics, and traces.

#### Link to tracking issue

#### Testing

#### Documentation

---
 ...ode-passthrough-field-prefix-conflict.yaml |  27 ++
 .../elasticsearchexporter/exporter_test.go    | 127 ++++++++++++++++++
 .../internal/objmodel/objmodel.go             |  32 ++---
 .../internal/objmodel/objmodel_test.go        |  40 ++++--
 exporter/elasticsearchexporter/model.go       |   9 +-
 5 files changed, 206 insertions(+), 29 deletions(-)
 create mode 100644 .chloggen/elasticsearchexporter_otel-mode-passthrough-field-prefix-conflict.yaml

diff --git a/.chloggen/elasticsearchexporter_otel-mode-passthrough-field-prefix-conflict.yaml b/.chloggen/elasticsearchexporter_otel-mode-passthrough-field-prefix-conflict.yaml
new file mode 100644
index 000000000000..afde47be348b
--- /dev/null
+++ b/.chloggen/elasticsearchexporter_otel-mode-passthrough-field-prefix-conflict.yaml
@@ -0,0 +1,27 @@
+# Use this changelog template to create an entry for release notes.
+
+# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix'
+change_type: bug_fix
+
+# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver)
+component: elasticsearchexporter
+
+# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`).
+note: Preserve attribute names and metric names on prefix conflict in OTel mapping mode
+
+# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists.
+issues: [35651]
+
+# (Optional) One or more lines of additional information to render under the primary note.
+# These lines will be padded with 2 spaces and then inserted directly into the document.
+# Use pipe (|) for multiline entries.
+subtext: e.g. if there are attributes "a" and "a.b", they should be sent to Elasticsearch as is, instead of "a.value" and "a.b", in OTel mapping mode
+
+# If your change doesn't affect end users or the exported elements of any package,
+# you should instead start your pull request title with [chore] or use the "Skip Changelog" label.
+# Optional: The change log or logs in which this entry should be included.
+# e.g. '[user]' or '[user, api]'
+# Include 'user' if the change is relevant to end users.
+# Include 'api' if there is a change to a library API.
+# Default: '[user]' +change_logs: [user] diff --git a/exporter/elasticsearchexporter/exporter_test.go b/exporter/elasticsearchexporter/exporter_test.go index e2871666b138..f1b455e41e1e 100644 --- a/exporter/elasticsearchexporter/exporter_test.go +++ b/exporter/elasticsearchexporter/exporter_test.go @@ -714,6 +714,35 @@ func TestExporterLogs(t *testing.T) { assert.Equal(t, `{"some.scope.attribute":["foo","bar"]}`, gjson.GetBytes(doc, `scope.attributes`).Raw) assert.Equal(t, `{"some.resource.attribute":["foo","bar"]}`, gjson.GetBytes(doc, `resource.attributes`).Raw) }) + + t.Run("otel mode attribute key prefix conflict", func(t *testing.T) { + rec := newBulkRecorder() + server := newESTestServer(t, func(docs []itemRequest) ([]itemResponse, error) { + rec.Record(docs) + return itemsAllOK(docs) + }) + + exporter := newTestLogsExporter(t, server.URL, func(cfg *Config) { + cfg.Mapping.Mode = "otel" + }) + + mustSendLogs(t, exporter, newLogsWithAttributes(map[string]any{ + "a": "a", + "a.b": "a.b", + }, map[string]any{ + "a": "a", + "a.b": "a.b", + }, map[string]any{ + "a": "a", + "a.b": "a.b", + })) + + rec.WaitItems(1) + doc := rec.Items()[0].Document + assert.JSONEq(t, `{"a":"a","a.b":"a.b"}`, gjson.GetBytes(doc, `attributes`).Raw) + assert.JSONEq(t, `{"a":"a","a.b":"a.b"}`, gjson.GetBytes(doc, `scope.attributes`).Raw) + assert.JSONEq(t, `{"a":"a","a.b":"a.b"}`, gjson.GetBytes(doc, `resource.attributes`).Raw) + }) } func TestExporterMetrics(t *testing.T) { @@ -1300,6 +1329,75 @@ func TestExporterMetrics(t *testing.T) { assertItemsEqual(t, expected, rec.Items(), false) }) + t.Run("otel mode metric name conflict", func(t *testing.T) { + rec := newBulkRecorder() + server := newESTestServer(t, func(docs []itemRequest) ([]itemResponse, error) { + rec.Record(docs) + return itemsAllOK(docs) + }) + + exporter := newTestMetricsExporter(t, server.URL, func(cfg *Config) { + cfg.Mapping.Mode = "otel" + }) + + metrics := pmetric.NewMetrics() + resourceMetric := metrics.ResourceMetrics().AppendEmpty() + scopeMetric := resourceMetric.ScopeMetrics().AppendEmpty() + + fooBarMetric := scopeMetric.Metrics().AppendEmpty() + fooBarMetric.SetName("foo.bar") + fooBarMetric.SetEmptySum().DataPoints().AppendEmpty().SetIntValue(0) + + fooMetric := scopeMetric.Metrics().AppendEmpty() + fooMetric.SetName("foo") + fooMetric.SetEmptySum().DataPoints().AppendEmpty().SetIntValue(0) + + fooBarBazMetric := scopeMetric.Metrics().AppendEmpty() + fooBarBazMetric.SetName("foo.bar.baz") + fooBarBazMetric.SetEmptySum().DataPoints().AppendEmpty().SetIntValue(0) + + mustSendMetrics(t, exporter, metrics) + + rec.WaitItems(1) + expected := []itemRequest{ + { + Action: []byte(`{"create":{"_index":"metrics-generic.otel-default","dynamic_templates":{"metrics.foo.bar":"gauge_long","metrics.foo":"gauge_long","metrics.foo.bar.baz":"gauge_long"}}}`), + Document: []byte(`{"@timestamp":"1970-01-01T00:00:00.000000000Z","data_stream":{"dataset":"generic.otel","namespace":"default","type":"metrics"},"metrics":{"foo":0,"foo.bar":0,"foo.bar.baz":0},"resource":{"dropped_attributes_count":0},"scope":{"dropped_attributes_count":0}}`), + }, + } + + assertItemsEqual(t, expected, rec.Items(), false) + }) + + t.Run("otel mode attribute key prefix conflict", func(t *testing.T) { + rec := newBulkRecorder() + server := newESTestServer(t, func(docs []itemRequest) ([]itemResponse, error) { + rec.Record(docs) + return itemsAllOK(docs) + }) + + exporter := newTestMetricsExporter(t, server.URL, func(cfg *Config) { + cfg.Mapping.Mode = "otel" + }) + + 
mustSendMetrics(t, exporter, newMetricsWithAttributes(map[string]any{ + "a": "a", + "a.b": "a.b", + }, map[string]any{ + "a": "a", + "a.b": "a.b", + }, map[string]any{ + "a": "a", + "a.b": "a.b", + })) + + rec.WaitItems(1) + doc := rec.Items()[0].Document + assert.JSONEq(t, `{"a":"a","a.b":"a.b"}`, gjson.GetBytes(doc, `attributes`).Raw) + assert.JSONEq(t, `{"a":"a","a.b":"a.b"}`, gjson.GetBytes(doc, `scope.attributes`).Raw) + assert.JSONEq(t, `{"a":"a","a.b":"a.b"}`, gjson.GetBytes(doc, `resource.attributes`).Raw) + }) + t.Run("publish summary", func(t *testing.T) { rec := newBulkRecorder() server := newESTestServer(t, func(docs []itemRequest) ([]itemResponse, error) { @@ -1600,6 +1698,35 @@ func TestExporterTraces(t *testing.T) { assert.Equal(t, `{"some.resource.attribute":["foo","bar"]}`, gjson.GetBytes(doc, `resource.attributes`).Raw) } }) + + t.Run("otel mode attribute key prefix conflict", func(t *testing.T) { + rec := newBulkRecorder() + server := newESTestServer(t, func(docs []itemRequest) ([]itemResponse, error) { + rec.Record(docs) + return itemsAllOK(docs) + }) + + exporter := newTestTracesExporter(t, server.URL, func(cfg *Config) { + cfg.Mapping.Mode = "otel" + }) + + mustSendTraces(t, exporter, newTracesWithAttributes(map[string]any{ + "a": "a", + "a.b": "a.b", + }, map[string]any{ + "a": "a", + "a.b": "a.b", + }, map[string]any{ + "a": "a", + "a.b": "a.b", + })) + + rec.WaitItems(1) + doc := rec.Items()[0].Document + assert.JSONEq(t, `{"a":"a","a.b":"a.b"}`, gjson.GetBytes(doc, `attributes`).Raw) + assert.JSONEq(t, `{"a":"a","a.b":"a.b"}`, gjson.GetBytes(doc, `scope.attributes`).Raw) + assert.JSONEq(t, `{"a":"a","a.b":"a.b"}`, gjson.GetBytes(doc, `resource.attributes`).Raw) + }) } // TestExporterAuth verifies that the Elasticsearch exporter supports diff --git a/exporter/elasticsearchexporter/internal/objmodel/objmodel.go b/exporter/elasticsearchexporter/internal/objmodel/objmodel.go index f20f9b1d213b..33a63abc794d 100644 --- a/exporter/elasticsearchexporter/internal/objmodel/objmodel.go +++ b/exporter/elasticsearchexporter/internal/objmodel/objmodel.go @@ -209,12 +209,12 @@ func (doc *Document) sort() { // The filtering only keeps the last value for a key. // // Dedup ensure that keys are sorted. -func (doc *Document) Dedup() { +func (doc *Document) Dedup(appendValueOnConflict bool) { // 1. Always ensure the fields are sorted, Dedup support requires // Fields to be sorted. doc.sort() - // 2. rename fields if a primitive value is overwritten by an object. + // 2. rename fields if a primitive value is overwritten by an object if appendValueOnConflict. // For example the pair (path.x=1, path.x.a="test") becomes: // (path.x.value=1, path.x.a="test"). // @@ -227,16 +227,18 @@ func (doc *Document) Dedup() { // field in favor of the `value` field in the document. // // This step removes potential conflicts when dedotting and serializing fields. - var renamed bool - for i := 0; i < len(doc.fields)-1; i++ { - key, nextKey := doc.fields[i].key, doc.fields[i+1].key - if len(key) < len(nextKey) && strings.HasPrefix(nextKey, key) && nextKey[len(key)] == '.' { - renamed = true - doc.fields[i].key = key + ".value" + if appendValueOnConflict { + var renamed bool + for i := 0; i < len(doc.fields)-1; i++ { + key, nextKey := doc.fields[i].key, doc.fields[i+1].key + if len(key) < len(nextKey) && strings.HasPrefix(nextKey, key) && nextKey[len(key)] == '.' { + renamed = true + doc.fields[i].key = key + ".value" + } + } + if renamed { + doc.sort() } - } - if renamed { - doc.sort() } // 3. 
mark duplicates as 'ignore' @@ -251,7 +253,7 @@ func (doc *Document) Dedup() { // 4. fix objects that might be stored in arrays for i := range doc.fields { - doc.fields[i].value.Dedup() + doc.fields[i].value.Dedup(appendValueOnConflict) } } @@ -487,13 +489,13 @@ func (v *Value) sort() { // Dedup recursively dedups keys in stored documents. // // NOTE: The value MUST be sorted. -func (v *Value) Dedup() { +func (v *Value) Dedup(appendValueOnConflict bool) { switch v.kind { case KindObject: - v.doc.Dedup() + v.doc.Dedup(appendValueOnConflict) case KindArr: for i := range v.arr { - v.arr[i].Dedup() + v.arr[i].Dedup(appendValueOnConflict) } } } diff --git a/exporter/elasticsearchexporter/internal/objmodel/objmodel_test.go b/exporter/elasticsearchexporter/internal/objmodel/objmodel_test.go index 1961f716db05..3d0a07b820d0 100644 --- a/exporter/elasticsearchexporter/internal/objmodel/objmodel_test.go +++ b/exporter/elasticsearchexporter/internal/objmodel/objmodel_test.go @@ -86,8 +86,9 @@ func TestObjectModel_CreateMap(t *testing.T) { func TestObjectModel_Dedup(t *testing.T) { tests := map[string]struct { - build func() Document - want Document + build func() Document + appendValueOnConflict bool + want Document }{ "no duplicates": { build: func() (doc Document) { @@ -95,7 +96,8 @@ func TestObjectModel_Dedup(t *testing.T) { doc.AddInt("c", 3) return doc }, - want: Document{fields: []field{{"a", IntValue(1)}, {"c", IntValue(3)}}}, + appendValueOnConflict: true, + want: Document{fields: []field{{"a", IntValue(1)}, {"c", IntValue(3)}}}, }, "duplicate keys": { build: func() (doc Document) { @@ -104,7 +106,8 @@ func TestObjectModel_Dedup(t *testing.T) { doc.AddInt("a", 2) return doc }, - want: Document{fields: []field{{"a", ignoreValue}, {"a", IntValue(2)}, {"c", IntValue(3)}}}, + appendValueOnConflict: true, + want: Document{fields: []field{{"a", ignoreValue}, {"a", IntValue(2)}, {"c", IntValue(3)}}}, }, "duplicate after flattening from map: namespace object at end": { build: func() Document { @@ -114,7 +117,8 @@ func TestObjectModel_Dedup(t *testing.T) { am.PutEmptyMap("namespace").PutInt("a", 23) return DocumentFromAttributes(am) }, - want: Document{fields: []field{{"namespace.a", ignoreValue}, {"namespace.a", IntValue(23)}, {"toplevel", StringValue("test")}}}, + appendValueOnConflict: true, + want: Document{fields: []field{{"namespace.a", ignoreValue}, {"namespace.a", IntValue(23)}, {"toplevel", StringValue("test")}}}, }, "duplicate after flattening from map: namespace object at beginning": { build: func() Document { @@ -124,7 +128,8 @@ func TestObjectModel_Dedup(t *testing.T) { am.PutStr("toplevel", "test") return DocumentFromAttributes(am) }, - want: Document{fields: []field{{"namespace.a", ignoreValue}, {"namespace.a", IntValue(42)}, {"toplevel", StringValue("test")}}}, + appendValueOnConflict: true, + want: Document{fields: []field{{"namespace.a", ignoreValue}, {"namespace.a", IntValue(42)}, {"toplevel", StringValue("test")}}}, }, "dedup in arrays": { build: func() (doc Document) { @@ -136,6 +141,7 @@ func TestObjectModel_Dedup(t *testing.T) { doc.Add("arr", ArrValue(Value{kind: KindObject, doc: embedded})) return doc }, + appendValueOnConflict: true, want: Document{fields: []field{{"arr", ArrValue(Value{kind: KindObject, doc: Document{fields: []field{ {"a", ignoreValue}, {"a", IntValue(2)}, @@ -148,7 +154,8 @@ func TestObjectModel_Dedup(t *testing.T) { doc.AddInt("namespace.a", 2) return doc }, - want: Document{fields: []field{{"namespace.a", IntValue(2)}, {"namespace.value", IntValue(1)}}}, 
+ appendValueOnConflict: true, + want: Document{fields: []field{{"namespace.a", IntValue(2)}, {"namespace.value", IntValue(1)}}}, }, "dedup removes primitive if value exists": { build: func() (doc Document) { @@ -157,14 +164,25 @@ func TestObjectModel_Dedup(t *testing.T) { doc.AddInt("namespace.value", 3) return doc }, - want: Document{fields: []field{{"namespace.a", IntValue(2)}, {"namespace.value", ignoreValue}, {"namespace.value", IntValue(3)}}}, + appendValueOnConflict: true, + want: Document{fields: []field{{"namespace.a", IntValue(2)}, {"namespace.value", ignoreValue}, {"namespace.value", IntValue(3)}}}, + }, + "dedup without append value on conflict": { + build: func() (doc Document) { + doc.AddInt("namespace", 1) + doc.AddInt("namespace.a", 2) + doc.AddInt("namespace.value", 3) + return doc + }, + appendValueOnConflict: false, + want: Document{fields: []field{{"namespace", IntValue(1)}, {"namespace.a", IntValue(2)}, {"namespace.value", IntValue(3)}}}, }, } for name, test := range tests { t.Run(name, func(t *testing.T) { doc := test.build() - doc.Dedup() + doc.Dedup(test.appendValueOnConflict) assert.Equal(t, test.want, doc) }) } @@ -282,7 +300,7 @@ func TestDocument_Serialize_Flat(t *testing.T) { m := pcommon.NewMap() assert.NoError(t, m.FromRaw(test.attrs)) doc := DocumentFromAttributes(m) - doc.Dedup() + doc.Dedup(true) err := doc.Serialize(&buf, false, false) require.NoError(t, err) @@ -343,7 +361,7 @@ func TestDocument_Serialize_Dedot(t *testing.T) { m := pcommon.NewMap() assert.NoError(t, m.FromRaw(test.attrs)) doc := DocumentFromAttributes(m) - doc.Dedup() + doc.Dedup(true) err := doc.Serialize(&buf, true, false) require.NoError(t, err) diff --git a/exporter/elasticsearchexporter/model.go b/exporter/elasticsearchexporter/model.go index 434bb1090a93..8c71df950752 100644 --- a/exporter/elasticsearchexporter/model.go +++ b/exporter/elasticsearchexporter/model.go @@ -115,7 +115,8 @@ func (m *encodeModel) encodeLog(resource pcommon.Resource, resourceSchemaURL str default: document = m.encodeLogDefaultMode(resource, record, scope) } - document.Dedup() + // For OTel mode, prefix conflicts are not a problem as otel-data has subobjects: false + document.Dedup(m.mode != MappingOTel) var buf bytes.Buffer err := document.Serialize(&buf, m.dedot, m.mode == MappingOTel) @@ -267,7 +268,8 @@ func (m *encodeModel) encodeLogECSMode(resource pcommon.Resource, record plog.Lo } func (m *encodeModel) encodeDocument(document objmodel.Document) ([]byte, error) { - document.Dedup() + // For OTel mode, prefix conflicts are not a problem as otel-data has subobjects: false + document.Dedup(m.mode != MappingOTel) var buf bytes.Buffer err := document.Serialize(&buf, m.dedot, m.mode == MappingOTel) @@ -646,7 +648,8 @@ func (m *encodeModel) encodeSpan(resource pcommon.Resource, resourceSchemaURL st default: document = m.encodeSpanDefaultMode(resource, span, scope) } - document.Dedup() + // For OTel mode, prefix conflicts are not a problem as otel-data has subobjects: false + document.Dedup(m.mode != MappingOTel) var buf bytes.Buffer err := document.Serialize(&buf, m.dedot, m.mode == MappingOTel) return buf.Bytes(), err From d8cad1c50e8a086bdeb17991d618e2ad083d1e75 Mon Sep 17 00:00:00 2001 From: Florian Bacher Date: Thu, 17 Oct 2024 18:02:29 +0200 Subject: [PATCH 02/12] [extension/opamp] Implement ReportsHealth capability (#35488) --- .chloggen/opamp-extension-reportshealth.yaml | 27 +++++++++++++ extension/opampextension/README.md | 1 + extension/opampextension/config.go | 5 +++ 
extension/opampextension/config_test.go | 41 ++++++++++++++++++++ extension/opampextension/factory.go | 1 + extension/opampextension/opamp_agent.go | 23 +++++++++++ extension/opampextension/opamp_agent_test.go | 1 + 7 files changed, 99 insertions(+) create mode 100644 .chloggen/opamp-extension-reportshealth.yaml diff --git a/.chloggen/opamp-extension-reportshealth.yaml b/.chloggen/opamp-extension-reportshealth.yaml new file mode 100644 index 000000000000..c58f185a6628 --- /dev/null +++ b/.chloggen/opamp-extension-reportshealth.yaml @@ -0,0 +1,27 @@ +# Use this changelog template to create an entry for release notes. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: enhancement + +# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver) +component: opampextension + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: Implement `ReportsHealth` capability in OpAMP extension + +# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. +issues: [35433] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: + +# If your change doesn't affect end users or the exported elements of any package, +# you should instead start your pull request title with [chore] or use the "Skip Changelog" label. +# Optional: The change log or logs in which this entry should be included. +# e.g. '[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. +# Default: '[user]' +change_logs: [] diff --git a/extension/opampextension/README.md b/extension/opampextension/README.md index 59d7284fc643..4d5227c2e276 100644 --- a/extension/opampextension/README.md +++ b/extension/opampextension/README.md @@ -43,6 +43,7 @@ The following settings are optional for both transports: instance UID remains constant across process restarts. - `capabilities`: Keys with boolean true/false values that enable a particular OpAMP capability. - `reports_effective_config`: Whether to enable the OpAMP ReportsEffectiveConfig capability. Default is `true`. + - `reports_health`: Whether to enable the OpAMP ReportsHealth capability. Default is `true`. - `agent_description`: Setting that modifies the agent description reported to the OpAMP server. - `non_identifying_attributes`: A map of key value pairs that will be added to the [non-identifying attributes](https://github.com/open-telemetry/opamp-spec/blob/main/specification.md#agentdescriptionnon_identifying_attributes) reported to the OpAMP server. If an attribute collides with the default non-identifying attributes that are automatically added, the ones specified here take precedence. - `ppid`: An optional process ID to monitor. When this process is no longer running, the extension will emit a fatal error, causing the collector to exit. This is meant to be set by the Supervisor or some other parent process, and should not be configured manually. 
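
For illustration, here is a hedged sketch of how the new setting could be toggled in a collector configuration. The endpoint value and the `server::ws` transport block are assumptions for this example, not part of the patch:

```yaml
extensions:
  opamp:
    server:
      ws:
        endpoint: wss://opamp.example.com/v1/opamp  # illustrative endpoint
    capabilities:
      reports_effective_config: true
      reports_health: true  # added by this patch; defaults to true
```
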
diff --git a/extension/opampextension/config.go b/extension/opampextension/config.go index a06adee4fab8..e47ae1894ed0 100644 --- a/extension/opampextension/config.go +++ b/extension/opampextension/config.go @@ -54,6 +54,8 @@ type AgentDescription struct { type Capabilities struct { // ReportsEffectiveConfig enables the OpAMP ReportsEffectiveConfig Capability. (default: true) ReportsEffectiveConfig bool `mapstructure:"reports_effective_config"` + // ReportsHealth enables the OpAMP ReportsHealth Capability. (default: true) + ReportsHealth bool `mapstructure:"reports_health"` } func (caps Capabilities) toAgentCapabilities() protobufs.AgentCapabilities { @@ -63,6 +65,9 @@ func (caps Capabilities) toAgentCapabilities() protobufs.AgentCapabilities { if caps.ReportsEffectiveConfig { agentCapabilities |= protobufs.AgentCapabilities_AgentCapabilities_ReportsEffectiveConfig } + if caps.ReportsHealth { + agentCapabilities |= protobufs.AgentCapabilities_AgentCapabilities_ReportsHealth + } return agentCapabilities } diff --git a/extension/opampextension/config_test.go b/extension/opampextension/config_test.go index bbccff4d91aa..7f50970f3184 100644 --- a/extension/opampextension/config_test.go +++ b/extension/opampextension/config_test.go @@ -8,6 +8,7 @@ import ( "testing" "time" + "github.com/open-telemetry/opamp-go/protobufs" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "go.opentelemetry.io/collector/config/configopaque" @@ -39,6 +40,7 @@ func TestUnmarshalConfig(t *testing.T) { InstanceUID: "01BX5ZZKBKACTAV9WEVGEMMVRZ", Capabilities: Capabilities{ ReportsEffectiveConfig: true, + ReportsHealth: true, }, PPIDPollInterval: 5 * time.Second, }, cfg) @@ -63,6 +65,7 @@ func TestUnmarshalHttpConfig(t *testing.T) { InstanceUID: "01BX5ZZKBKACTAV9WEVGEMMVRZ", Capabilities: Capabilities{ ReportsEffectiveConfig: true, + ReportsHealth: true, }, PPIDPollInterval: 5 * time.Second, }, cfg) @@ -286,3 +289,41 @@ func TestConfig_Validate(t *testing.T) { }) } } + +func TestCapabilities_toAgentCapabilities(t *testing.T) { + type fields struct { + ReportsEffectiveConfig bool + ReportsHealth bool + } + tests := []struct { + name string + fields fields + want protobufs.AgentCapabilities + }{ + { + name: "default capabilities", + fields: fields{ + ReportsEffectiveConfig: false, + ReportsHealth: false, + }, + want: protobufs.AgentCapabilities_AgentCapabilities_ReportsStatus, + }, + { + name: "all supported capabilities enabled", + fields: fields{ + ReportsEffectiveConfig: true, + ReportsHealth: true, + }, + want: protobufs.AgentCapabilities_AgentCapabilities_ReportsStatus | protobufs.AgentCapabilities_AgentCapabilities_ReportsEffectiveConfig | protobufs.AgentCapabilities_AgentCapabilities_ReportsHealth, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + caps := Capabilities{ + ReportsEffectiveConfig: tt.fields.ReportsEffectiveConfig, + ReportsHealth: tt.fields.ReportsHealth, + } + assert.Equalf(t, tt.want, caps.toAgentCapabilities(), "toAgentCapabilities()") + }) + } +} diff --git a/extension/opampextension/factory.go b/extension/opampextension/factory.go index ea4ea23a22d6..868c3bc85c65 100644 --- a/extension/opampextension/factory.go +++ b/extension/opampextension/factory.go @@ -27,6 +27,7 @@ func createDefaultConfig() component.Config { Server: &OpAMPServer{}, Capabilities: Capabilities{ ReportsEffectiveConfig: true, + ReportsHealth: true, }, PPIDPollInterval: 5 * time.Second, } diff --git a/extension/opampextension/opamp_agent.go 
b/extension/opampextension/opamp_agent.go
index db1ef789e738..f984974c6c88 100644
--- a/extension/opampextension/opamp_agent.go
+++ b/extension/opampextension/opamp_agent.go
@@ -32,6 +32,8 @@ import (
 	"github.com/open-telemetry/opentelemetry-collector-contrib/extension/opampcustommessages"
 )
 
+var _ extensioncapabilities.PipelineWatcher = (*opampAgent)(nil)
+
 type opampAgent struct {
 	cfg    *Config
 	logger *zap.Logger
@@ -121,6 +123,8 @@ func (o *opampAgent) Start(ctx context.Context, host component.Host) error {
 		return err
 	}
 
+	o.setHealth(&protobufs.ComponentHealth{Healthy: false})
+
 	o.logger.Debug("Starting OpAMP client...")
 
 	if err := o.opampClient.Start(context.Background(), settings); err != nil {
@@ -141,6 +145,7 @@ func (o *opampAgent) Shutdown(ctx context.Context) error {
 	if o.opampClient == nil {
 		return nil
 	}
+
 	o.logger.Debug("Stopping OpAMP client...")
 	err := o.opampClient.Stop(ctx)
 	// Opamp-go considers this an error, but the collector does not.
@@ -178,6 +183,16 @@ func (o *opampAgent) Register(capability string, opts ...opampcustommessages.Cus
 	return o.customCapabilityRegistry.Register(capability, opts...)
 }
 
+func (o *opampAgent) Ready() error {
+	o.setHealth(&protobufs.ComponentHealth{Healthy: true})
+	return nil
+}
+
+func (o *opampAgent) NotReady() error {
+	o.setHealth(&protobufs.ComponentHealth{Healthy: false})
+	return nil
+}
+
 func (o *opampAgent) updateEffectiveConfig(conf *confmap.Conf) {
 	o.eclk.Lock()
 	defer o.eclk.Unlock()
@@ -344,3 +359,11 @@ func (o *opampAgent) onMessage(_ context.Context, msg *types.MessageData) {
 		o.customCapabilityRegistry.ProcessMessage(msg.CustomMessage)
 	}
 }
+
+func (o *opampAgent) setHealth(ch *protobufs.ComponentHealth) {
+	if o.capabilities.ReportsHealth && o.opampClient != nil {
+		if err := o.opampClient.SetHealth(ch); err != nil {
+			o.logger.Error("Could not report health to OpAMP server", zap.Error(err))
+		}
+	}
+}
diff --git a/extension/opampextension/opamp_agent_test.go b/extension/opampextension/opamp_agent_test.go
index e2013d1d45eb..fd72d346492c 100644
--- a/extension/opampextension/opamp_agent_test.go
+++ b/extension/opampextension/opamp_agent_test.go
@@ -31,6 +31,7 @@ func TestNewOpampAgent(t *testing.T) {
 	assert.Equal(t, "test version", o.agentVersion)
 	assert.NotEmpty(t, o.instanceID.String())
 	assert.True(t, o.capabilities.ReportsEffectiveConfig)
+	assert.True(t, o.capabilities.ReportsHealth)
 	assert.Empty(t, o.effectiveConfig)
 	assert.Nil(t, o.agentDescription)
 }

From 7d02d77ecab748221f3d2339581678c27841180e Mon Sep 17 00:00:00 2001
From: Stefan Kurek
Date: Thu, 17 Oct 2024 12:28:24 -0400
Subject: [PATCH 03/12] Adds replication metrics to versions of MySQL older than 8.0.22 (#35776)

#### Description
Versions of MySQL older than 8.0.22 use the `show slave status` view rather than `show replica status` to retrieve replication metrics. This change allows both the newer and older views to work, retrieving data for the two replica metrics actually available (`mysql.replica.time_behind_source` and `mysql.replica.sql_delay`).
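
To make the version gating concrete, here is a hedged sketch of the selection logic. `pickReplicaStatusQuery` is a hypothetical helper invented for this example; the authoritative implementation lives in `client.go` below, whose MariaDB check this mirrors:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/hashicorp/go-version"
)

// pickReplicaStatusQuery chooses the replication-status statement based on
// the reported server version string (hypothetical helper; sketch only).
func pickReplicaStatusQuery(serverVersion string) (string, error) {
	v, err := version.NewVersion(serverVersion)
	if err != nil {
		return "", err
	}
	cutoff := version.Must(version.NewVersion("8.0.22"))
	// MariaDB and MySQL older than 8.0.22 only support the legacy statement.
	if strings.Contains(v.String(), "MariaDB") || v.LessThan(cutoff) {
		return "SHOW SLAVE STATUS", nil
	}
	return "SHOW REPLICA STATUS", nil
}

func main() {
	for _, s := range []string{"8.0.37", "8.0.21", "10.5.26-MariaDB"} {
		q, _ := pickReplicaStatusQuery(s)
		fmt.Printf("%s -> %s\n", s, q)
	}
}
```
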
#### Link to tracking issue Fixes #35217 #### Testing Manual testing against MySQL versions 8.0, 8.0.37, & MariaDB Ver 15.1 Distrib 10.5.26-MariaDB #### Documentation None needed --- ...eceiver_older_replica_metrics_support.yaml | 27 ++ receiver/mysqlreceiver/client.go | 255 +++++++++++++----- receiver/mysqlreceiver/go.mod | 1 + receiver/mysqlreceiver/go.sum | 2 + receiver/mysqlreceiver/scraper_test.go | 6 +- 5 files changed, 218 insertions(+), 73 deletions(-) create mode 100644 .chloggen/mysqlreceiver_older_replica_metrics_support.yaml diff --git a/.chloggen/mysqlreceiver_older_replica_metrics_support.yaml b/.chloggen/mysqlreceiver_older_replica_metrics_support.yaml new file mode 100644 index 000000000000..aecdf987b857 --- /dev/null +++ b/.chloggen/mysqlreceiver_older_replica_metrics_support.yaml @@ -0,0 +1,27 @@ +# Use this changelog template to create an entry for release notes. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: bug_fix + +# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver) +component: mysqlreceiver + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: Add replica metric support for versions of MySQL earlier than 8.0.22. + +# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. +issues: [35217] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: + +# If your change doesn't affect end users or the exported elements of any package, +# you should instead start your pull request title with [chore] or use the "Skip Changelog" label. +# Optional: The change log or logs in which this entry should be included. +# e.g. '[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. 
+# Default: '[user]' +change_logs: [] diff --git a/receiver/mysqlreceiver/client.go b/receiver/mysqlreceiver/client.go index 736257e6a623..3aeabf1b777b 100644 --- a/receiver/mysqlreceiver/client.go +++ b/receiver/mysqlreceiver/client.go @@ -12,11 +12,12 @@ import ( // registers the mysql driver "github.com/go-sql-driver/mysql" + "github.com/hashicorp/go-version" ) type client interface { Connect() error - getVersion() (string, error) + getVersion() (*version.Version, error) getGlobalStats() (map[string]string, error) getInnodbStats() (map[string]string, error) getTableStats() ([]TableStats, error) @@ -110,66 +111,83 @@ type tableLockWaitEventStats struct { } type ReplicaStatusStats struct { - replicaIOState string - sourceHost string - sourceUser string - sourcePort int64 - connectRetry int64 - sourceLogFile string - readSourceLogPos int64 - relayLogFile string - relayLogPos int64 - relaySourceLogFile string - replicaIORunning string - replicaSQLRunning string - replicateDoDB string - replicateIgnoreDB string - replicateDoTable string - replicateIgnoreTable string - replicateWildDoTable string - replicateWildIgnoreTable string - lastErrno int64 - lastError string - skipCounter int64 - execSourceLogPos int64 - relayLogSpace int64 - untilCondition string - untilLogFile string - untilLogPos string - sourceSSLAllowed string - sourceSSLCAFile string - sourceSSLCAPath string - sourceSSLCert string - sourceSSLCipher string - sourceSSLKey string - secondsBehindSource sql.NullInt64 - sourceSSLVerifyServerCert string - lastIOErrno int64 - lastIOError string - lastSQLErrno int64 - lastSQLError string - replicateIgnoreServerIDs string - sourceServerID int64 - sourceUUID string - sourceInfoFile string - sqlDelay int64 - sqlRemainingDelay sql.NullInt64 - replicaSQLRunningState string - sourceRetryCount int64 - sourceBind string - lastIOErrorTimestamp string - lastSQLErrorTimestamp string - sourceSSLCrl string - sourceSSLCrlpath string - retrievedGtidSet string - executedGtidSet string - autoPosition string - replicateRewriteDB string - channelName string - sourceTLSVersion string - sourcePublicKeyPath string - getSourcePublicKey int64 - networkNamespace string + replicaIOState string + sourceHost string + sourceUser string + sourcePort int64 + connectRetry int64 + sourceLogFile string + readSourceLogPos int64 + relayLogFile string + relayLogPos int64 + relaySourceLogFile string + replicaIORunning string + replicaSQLRunning string + replicateDoDB string + replicateIgnoreDB string + replicateDoTable string + replicateIgnoreTable string + replicateWildDoTable string + replicateWildIgnoreTable string + lastErrno int64 + lastError string + skipCounter int64 + execSourceLogPos int64 + relayLogSpace int64 + untilCondition string + untilLogFile string + untilLogPos string + sourceSSLAllowed string + sourceSSLCAFile string + sourceSSLCAPath string + sourceSSLCert string + sourceSSLCipher string + sourceSSLKey string + secondsBehindSource sql.NullInt64 + sourceSSLVerifyServerCert string + lastIOErrno int64 + lastIOError string + lastSQLErrno int64 + lastSQLError string + replicateIgnoreServerIDs string + sourceServerID int64 + sourceUUID string + sourceInfoFile string + sqlDelay int64 + sqlRemainingDelay sql.NullInt64 + replicaSQLRunningState string + sourceRetryCount int64 + sourceBind string + lastIOErrorTimestamp string + lastSQLErrorTimestamp string + sourceSSLCrl string + sourceSSLCrlpath string + retrievedGtidSet string + executedGtidSet string + autoPosition string + replicateRewriteDB string + channelName 
string + sourceTLSVersion string + sourcePublicKeyPath string + getSourcePublicKey int64 + networkNamespace string + usingGtid string + gtidIoPos string + slaveDdlGroups int64 + slaveNonTransactionalGroups int64 + slaveTransactionalGroups int64 + retriedTransactions int64 + maxRelayLogSize int64 + executedLogEntries int64 + slaveReceivedHeartbeats int64 + slaveHeartbeatPeriod int64 + gtidSlavePos string + masterLastEventTime string + slaveLastEventTime string + masterSlaveTimeDiff string + parallelMode string + replicateDoDomainIDs string + replicateIgnoreDomainIDs string } var _ client = (*mySQLClient)(nil) @@ -218,15 +236,15 @@ func (c *mySQLClient) Connect() error { } // getVersion queries the db for the version. -func (c *mySQLClient) getVersion() (string, error) { +func (c *mySQLClient) getVersion() (*version.Version, error) { query := "SELECT VERSION();" - var version string - err := c.client.QueryRow(query).Scan(&version) + var versionStr string + err := c.client.QueryRow(query).Scan(&versionStr) if err != nil { - return "", err + return nil, err } - - return version, nil + version, err := version.NewVersion(versionStr) + return version, err } // getGlobalStats queries the db for global status metrics. @@ -397,16 +415,19 @@ func (c *mySQLClient) getTableLockWaitEventStats() ([]tableLockWaitEventStats, e } func (c *mySQLClient) getReplicaStatusStats() ([]ReplicaStatusStats, error) { - version, err := c.getVersion() + mysqlVersion, err := c.getVersion() if err != nil { return nil, err } - if version < "8.0.22" { - return nil, nil + query := "SHOW REPLICA STATUS" + minMysqlVersion, _ := version.NewVersion("8.0.22") + if strings.Contains(mysqlVersion.String(), "MariaDB") { + query = "SHOW SLAVE STATUS" + } else if mysqlVersion.LessThan(minMysqlVersion) { + query = "SHOW SLAVE STATUS" } - query := "SHOW REPLICA STATUS" rows, err := c.client.Query(query) if err != nil { @@ -427,28 +448,46 @@ func (c *mySQLClient) getReplicaStatusStats() ([]ReplicaStatusStats, error) { switch strings.ToLower(col) { case "replica_io_state": dest = append(dest, &s.replicaIOState) + case "slave_io_state": + dest = append(dest, &s.replicaIOState) case "source_host": dest = append(dest, &s.sourceHost) + case "master_host": + dest = append(dest, &s.sourceHost) case "source_user": dest = append(dest, &s.sourceUser) + case "master_user": + dest = append(dest, &s.sourceUser) case "source_port": dest = append(dest, &s.sourcePort) + case "master_port": + dest = append(dest, &s.sourcePort) case "connect_retry": dest = append(dest, &s.connectRetry) case "source_log_file": dest = append(dest, &s.sourceLogFile) + case "master_log_file": + dest = append(dest, &s.sourceLogFile) case "read_source_log_pos": dest = append(dest, &s.readSourceLogPos) + case "read_master_log_pos": + dest = append(dest, &s.readSourceLogPos) case "relay_log_file": dest = append(dest, &s.relayLogFile) case "relay_log_pos": dest = append(dest, &s.relayLogPos) case "relay_source_log_file": dest = append(dest, &s.relaySourceLogFile) + case "relay_master_log_file": + dest = append(dest, &s.relaySourceLogFile) case "replica_io_running": dest = append(dest, &s.replicaIORunning) + case "slave_io_running": + dest = append(dest, &s.replicaIORunning) case "replica_sql_running": dest = append(dest, &s.replicaSQLRunning) + case "slave_sql_running": + dest = append(dest, &s.replicaSQLRunning) case "replicate_do_db": dest = append(dest, &s.replicateDoDB) case "replicate_ignore_db": @@ -469,6 +508,8 @@ func (c *mySQLClient) getReplicaStatusStats() 
([]ReplicaStatusStats, error) { dest = append(dest, &s.skipCounter) case "exec_source_log_pos": dest = append(dest, &s.execSourceLogPos) + case "exec_master_log_pos": + dest = append(dest, &s.execSourceLogPos) case "relay_log_space": dest = append(dest, &s.relayLogSpace) case "until_condition": @@ -479,20 +520,36 @@ func (c *mySQLClient) getReplicaStatusStats() ([]ReplicaStatusStats, error) { dest = append(dest, &s.untilLogPos) case "source_ssl_allowed": dest = append(dest, &s.sourceSSLAllowed) + case "master_ssl_allowed": + dest = append(dest, &s.sourceSSLAllowed) case "source_ssl_ca_file": dest = append(dest, &s.sourceSSLCAFile) + case "master_ssl_ca_file": + dest = append(dest, &s.sourceSSLCAFile) case "source_ssl_ca_path": dest = append(dest, &s.sourceSSLCAPath) + case "master_ssl_ca_path": + dest = append(dest, &s.sourceSSLCAPath) case "source_ssl_cert": dest = append(dest, &s.sourceSSLCert) + case "master_ssl_cert": + dest = append(dest, &s.sourceSSLCert) case "source_ssl_cipher": dest = append(dest, &s.sourceSSLCipher) + case "master_ssl_cipher": + dest = append(dest, &s.sourceSSLCipher) case "source_ssl_key": dest = append(dest, &s.sourceSSLKey) + case "master_ssl_key": + dest = append(dest, &s.sourceSSLKey) case "seconds_behind_source": dest = append(dest, &s.secondsBehindSource) + case "seconds_behind_master": + dest = append(dest, &s.secondsBehindSource) case "source_ssl_verify_server_cert": dest = append(dest, &s.sourceSSLVerifyServerCert) + case "master_ssl_verify_server_cert": + dest = append(dest, &s.sourceSSLVerifyServerCert) case "last_io_errno": dest = append(dest, &s.lastIOErrno) case "last_io_error": @@ -505,28 +562,44 @@ func (c *mySQLClient) getReplicaStatusStats() ([]ReplicaStatusStats, error) { dest = append(dest, &s.replicateIgnoreServerIDs) case "source_server_id": dest = append(dest, &s.sourceServerID) + case "master_server_id": + dest = append(dest, &s.sourceServerID) case "source_uuid": dest = append(dest, &s.sourceUUID) + case "master_uuid": + dest = append(dest, &s.sourceUUID) case "source_info_file": dest = append(dest, &s.sourceInfoFile) + case "master_info_file": + dest = append(dest, &s.sourceInfoFile) case "sql_delay": dest = append(dest, &s.sqlDelay) case "sql_remaining_delay": dest = append(dest, &s.sqlRemainingDelay) case "replica_sql_running_state": dest = append(dest, &s.replicaSQLRunningState) + case "slave_sql_running_state": + dest = append(dest, &s.replicaSQLRunningState) case "source_retry_count": dest = append(dest, &s.sourceRetryCount) + case "master_retry_count": + dest = append(dest, &s.sourceRetryCount) case "source_bind": dest = append(dest, &s.sourceBind) + case "master_bind": + dest = append(dest, &s.sourceBind) case "last_io_error_timestamp": dest = append(dest, &s.lastIOErrorTimestamp) case "last_sql_error_timestamp": dest = append(dest, &s.lastSQLErrorTimestamp) case "source_ssl_crl": dest = append(dest, &s.sourceSSLCrl) + case "master_ssl_crl": + dest = append(dest, &s.sourceSSLCrl) case "source_ssl_crlpath": dest = append(dest, &s.sourceSSLCrlpath) + case "master_ssl_crlpath": + dest = append(dest, &s.sourceSSLCrlpath) case "retrieved_gtid_set": dest = append(dest, &s.retrievedGtidSet) case "executed_gtid_set": @@ -539,12 +612,52 @@ func (c *mySQLClient) getReplicaStatusStats() ([]ReplicaStatusStats, error) { dest = append(dest, &s.channelName) case "source_tls_version": dest = append(dest, &s.sourceTLSVersion) + case "master_tls_version": + dest = append(dest, &s.sourceTLSVersion) case "source_public_key_path": dest = append(dest, 
&s.sourcePublicKeyPath) + case "master_public_key_path": + dest = append(dest, &s.sourcePublicKeyPath) case "get_source_public_key": dest = append(dest, &s.getSourcePublicKey) + case "get_master_public_key": + dest = append(dest, &s.getSourcePublicKey) case "network_namespace": dest = append(dest, &s.networkNamespace) + case "using_gtid": + dest = append(dest, &s.usingGtid) + case "gtid_io_pos": + dest = append(dest, &s.gtidIoPos) + case "slave_ddl_groups": + dest = append(dest, &s.slaveDdlGroups) + case "slave_non_transactional_groups": + dest = append(dest, &s.slaveNonTransactionalGroups) + case "slave_transactional_groups": + dest = append(dest, &s.slaveTransactionalGroups) + case "retried_transactions": + dest = append(dest, &s.retriedTransactions) + case "max_relay_log_size": + dest = append(dest, &s.maxRelayLogSize) + case "executed_log_entries": + dest = append(dest, &s.executedLogEntries) + case "slave_received_heartbeats": + dest = append(dest, &s.slaveReceivedHeartbeats) + case "slave_heartbeat_period": + dest = append(dest, &s.slaveHeartbeatPeriod) + case "gtid_slave_pos": + dest = append(dest, &s.gtidSlavePos) + case "master_last_event_time": + dest = append(dest, &s.masterLastEventTime) + case "slave_last_event_time": + dest = append(dest, &s.slaveLastEventTime) + case "master_slave_time_diff": + dest = append(dest, &s.masterSlaveTimeDiff) + case "parallel_mode": + dest = append(dest, &s.parallelMode) + case "replicate_do_domain_ids": + dest = append(dest, &s.replicateDoDomainIDs) + case "replicate_ignore_domain_ids": + dest = append(dest, &s.replicateIgnoreDomainIDs) default: return nil, fmt.Errorf("unknown column name %s for replica status", col) } diff --git a/receiver/mysqlreceiver/go.mod b/receiver/mysqlreceiver/go.mod index 44c73fe67420..9f70a56d04ab 100644 --- a/receiver/mysqlreceiver/go.mod +++ b/receiver/mysqlreceiver/go.mod @@ -48,6 +48,7 @@ require ( github.com/go-viper/mapstructure/v2 v2.1.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/google/uuid v1.6.0 // indirect + github.com/hashicorp/go-version v1.7.0 github.com/json-iterator/go v1.1.12 // indirect github.com/klauspost/compress v1.17.9 // indirect github.com/knadh/koanf/maps v0.1.1 // indirect diff --git a/receiver/mysqlreceiver/go.sum b/receiver/mysqlreceiver/go.sum index cf30c4b18167..a054b1a95e2a 100644 --- a/receiver/mysqlreceiver/go.sum +++ b/receiver/mysqlreceiver/go.sum @@ -58,6 +58,8 @@ github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 h1:YBftPWNWd4WwGqtY2yeZL2ef8rHAxPBD8KFhJpmcqms= github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg= +github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY= +github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= diff --git a/receiver/mysqlreceiver/scraper_test.go b/receiver/mysqlreceiver/scraper_test.go index 01dc4dd11840..8753752e948a 100644 --- a/receiver/mysqlreceiver/scraper_test.go +++ b/receiver/mysqlreceiver/scraper_test.go @@ -12,6 +12,7 @@ import ( "strings" "testing" + 
"github.com/hashicorp/go-version" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "go.opentelemetry.io/collector/config/confignet" @@ -158,8 +159,9 @@ func (c *mockClient) Connect() error { return nil } -func (c *mockClient) getVersion() (string, error) { - return "8.0.27", nil +func (c *mockClient) getVersion() (*version.Version, error) { + version, _ := version.NewVersion("8.0.27") + return version, nil } func (c *mockClient) getGlobalStats() (map[string]string, error) { From 5b1b4d42c63ae90199d5d14144bf846b18f94726 Mon Sep 17 00:00:00 2001 From: Carson Ip Date: Thu, 17 Oct 2024 19:01:10 +0100 Subject: [PATCH 04/12] [exporter/elasticsearch] Deprecate retry::max_requests in favor of retry::max_retries (#35571) **Description:** The new retry::max_retries will be exactly retry::max_requests - 1, but it will be much more intuitive to the end user. Deprecate retry::max_requests. **Link to tracking Issue:** Fixes #32344 **Testing:** **Documentation:** --- ...hexporter_deprecate-retry-maxrequests.yaml | 27 ++++++++++++ exporter/elasticsearchexporter/README.md | 3 +- exporter/elasticsearchexporter/bulkindexer.go | 43 ++++++++----------- exporter/elasticsearchexporter/config.go | 24 ++++++++++- exporter/elasticsearchexporter/config_test.go | 14 ++++-- exporter/elasticsearchexporter/esclient.go | 17 +++----- .../elasticsearchexporter/exporter_test.go | 10 ++--- exporter/elasticsearchexporter/factory.go | 8 ++-- .../testdata/config.yaml | 6 +-- 9 files changed, 97 insertions(+), 55 deletions(-) create mode 100644 .chloggen/elasticsearchexporter_deprecate-retry-maxrequests.yaml diff --git a/.chloggen/elasticsearchexporter_deprecate-retry-maxrequests.yaml b/.chloggen/elasticsearchexporter_deprecate-retry-maxrequests.yaml new file mode 100644 index 000000000000..80bb0eac5fb8 --- /dev/null +++ b/.chloggen/elasticsearchexporter_deprecate-retry-maxrequests.yaml @@ -0,0 +1,27 @@ +# Use this changelog template to create an entry for release notes. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: deprecation + +# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver) +component: elasticsearchexporter + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: Deprecate retry::max_requests in favor of retry::max_retries + +# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. +issues: [32344] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: retry::max_retries will be exactly retry::max_requests - 1 + +# If your change doesn't affect end users or the exported elements of any package, +# you should instead start your pull request title with [chore] or use the "Skip Changelog" label. +# Optional: The change log or logs in which this entry should be included. +# e.g. '[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. 
+# Default: '[user]' +change_logs: [user] diff --git a/exporter/elasticsearchexporter/README.md b/exporter/elasticsearchexporter/README.md index 5ec203f13674..b620b81158e9 100644 --- a/exporter/elasticsearchexporter/README.md +++ b/exporter/elasticsearchexporter/README.md @@ -202,7 +202,8 @@ The behaviour of this bulk indexing can be configured with the following setting - `interval` (default=30s): Write buffer flush time limit. - `retry`: Elasticsearch bulk request retry settings - `enabled` (default=true): Enable/Disable request retry on error. Failed requests are retried with exponential backoff. - - `max_requests` (default=3): Number of HTTP request retries. + - `max_requests` (DEPRECATED, use retry::max_retries instead): Number of HTTP request retries including the initial attempt. If used, `retry::max_retries` will be set to `max_requests - 1`. + - `max_retries` (default=2): Number of HTTP request retries. To disable retries, set `retry::enabled` to `false` instead of setting `max_retries` to `0`. - `initial_interval` (default=100ms): Initial waiting time if a HTTP request failed. - `max_interval` (default=1m): Max waiting time if a HTTP request failed. - `retry_on_status` (default=[429]): Status codes that trigger request or document level retries. Request level retry and document level retry status codes are shared and cannot be configured separately. To avoid duplicates, it defaults to `[429]`. diff --git a/exporter/elasticsearchexporter/bulkindexer.go b/exporter/elasticsearchexporter/bulkindexer.go index 471ddc2dc7b9..62bc329a26f8 100644 --- a/exporter/elasticsearchexporter/bulkindexer.go +++ b/exporter/elasticsearchexporter/bulkindexer.go @@ -51,6 +51,8 @@ type bulkIndexerSession interface { Flush(context.Context) error } +const defaultMaxRetries = 2 + func newBulkIndexer(logger *zap.Logger, client *elasticsearch.Client, config *Config) (bulkIndexer, error) { if config.Batcher.Enabled != nil { return newSyncBulkIndexer(logger, client, config), nil @@ -58,20 +60,25 @@ func newBulkIndexer(logger *zap.Logger, client *elasticsearch.Client, config *Co return newAsyncBulkIndexer(logger, client, config) } -func newSyncBulkIndexer(logger *zap.Logger, client *elasticsearch.Client, config *Config) *syncBulkIndexer { - var maxDocRetry int +func bulkIndexerConfig(client *elasticsearch.Client, config *Config) docappender.BulkIndexerConfig { + var maxDocRetries int if config.Retry.Enabled { - // max_requests includes initial attempt - // See https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/32344 - maxDocRetry = config.Retry.MaxRequests - 1 + maxDocRetries = defaultMaxRetries + if config.Retry.MaxRetries != 0 { + maxDocRetries = config.Retry.MaxRetries + } } + return docappender.BulkIndexerConfig{ + Client: client, + MaxDocumentRetries: maxDocRetries, + Pipeline: config.Pipeline, + RetryOnDocumentStatus: config.Retry.RetryOnStatus, + } +} + +func newSyncBulkIndexer(logger *zap.Logger, client *elasticsearch.Client, config *Config) *syncBulkIndexer { return &syncBulkIndexer{ - config: docappender.BulkIndexerConfig{ - Client: client, - MaxDocumentRetries: maxDocRetry, - Pipeline: config.Pipeline, - RetryOnDocumentStatus: config.Retry.RetryOnStatus, - }, + config: bulkIndexerConfig(client, config), flushTimeout: config.Timeout, retryConfig: config.Retry, logger: logger, @@ -165,13 +172,6 @@ func newAsyncBulkIndexer(logger *zap.Logger, client *elasticsearch.Client, confi flushBytes = 5e+6 } - var maxDocRetry int - if config.Retry.Enabled { - // max_requests includes initial 
attempt - // See https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/32344 - maxDocRetry = config.Retry.MaxRequests - 1 - } - pool := &asyncBulkIndexer{ wg: sync.WaitGroup{}, items: make(chan docappender.BulkIndexerItem, config.NumWorkers), @@ -180,12 +180,7 @@ func newAsyncBulkIndexer(logger *zap.Logger, client *elasticsearch.Client, confi pool.wg.Add(numWorkers) for i := 0; i < numWorkers; i++ { - bi, err := docappender.NewBulkIndexer(docappender.BulkIndexerConfig{ - Client: client, - MaxDocumentRetries: maxDocRetry, - Pipeline: config.Pipeline, - RetryOnDocumentStatus: config.Retry.RetryOnStatus, - }) + bi, err := docappender.NewBulkIndexer(bulkIndexerConfig(client, config)) if err != nil { return nil, err } diff --git a/exporter/elasticsearchexporter/config.go b/exporter/elasticsearchexporter/config.go index 072bd725c6fe..fe794d6db430 100644 --- a/exporter/elasticsearchexporter/config.go +++ b/exporter/elasticsearchexporter/config.go @@ -169,9 +169,13 @@ type RetrySettings struct { // Enabled allows users to disable retry without having to comment out all settings. Enabled bool `mapstructure:"enabled"` - // MaxRequests configures how often an HTTP request is retried before it is assumed to be failed. + // MaxRequests configures how often an HTTP request is attempted before it is assumed to be failed. + // Deprecated: use MaxRetries instead. MaxRequests int `mapstructure:"max_requests"` + // MaxRetries configures how many times an HTTP request is retried. + MaxRetries int `mapstructure:"max_retries"` + // InitialInterval configures the initial waiting time if a request failed. InitialInterval time.Duration `mapstructure:"initial_interval"` @@ -273,6 +277,17 @@ func (cfg *Config) Validate() error { // TODO support confighttp.ClientConfig.Compression return errors.New("compression is not currently configurable") } + + if cfg.Retry.MaxRequests != 0 && cfg.Retry.MaxRetries != 0 { + return errors.New("must not specify both retry::max_requests and retry::max_retries") + } + if cfg.Retry.MaxRequests < 0 { + return errors.New("retry::max_requests should be non-negative") + } + if cfg.Retry.MaxRetries < 0 { + return errors.New("retry::max_retries should be non-negative") + } + return nil } @@ -355,11 +370,16 @@ func (cfg *Config) MappingMode() MappingMode { return mappingModes[cfg.Mapping.Mode] } -func logConfigDeprecationWarnings(cfg *Config, logger *zap.Logger) { +func handleDeprecatedConfig(cfg *Config, logger *zap.Logger) { if cfg.Mapping.Dedup != nil { logger.Warn("dedup is deprecated, and is always enabled") } if cfg.Mapping.Dedot && cfg.MappingMode() != MappingECS || !cfg.Mapping.Dedot && cfg.MappingMode() == MappingECS { logger.Warn("dedot has been deprecated: in the future, dedotting will always be performed in ECS mode only") } + if cfg.Retry.MaxRequests != 0 { + cfg.Retry.MaxRetries = cfg.Retry.MaxRequests - 1 + // Do not set cfg.Retry.Enabled = false if cfg.Retry.MaxRequest = 1 to avoid breaking change on behavior + logger.Warn("retry::max_requests has been deprecated, and will be removed in a future version. 
Use retry::max_retries instead.") + } } diff --git a/exporter/elasticsearchexporter/config_test.go b/exporter/elasticsearchexporter/config_test.go index a27a28ccfe6e..9934dbb7365b 100644 --- a/exporter/elasticsearchexporter/config_test.go +++ b/exporter/elasticsearchexporter/config_test.go @@ -94,7 +94,7 @@ func TestConfig(t *testing.T) { }, Retry: RetrySettings{ Enabled: true, - MaxRequests: 5, + MaxRetries: 5, InitialInterval: 100 * time.Millisecond, MaxInterval: 1 * time.Minute, RetryOnStatus: []int{http.StatusTooManyRequests, http.StatusInternalServerError}, @@ -164,7 +164,7 @@ func TestConfig(t *testing.T) { }, Retry: RetrySettings{ Enabled: true, - MaxRequests: 5, + MaxRetries: 5, InitialInterval: 100 * time.Millisecond, MaxInterval: 1 * time.Minute, RetryOnStatus: []int{http.StatusTooManyRequests, http.StatusInternalServerError}, @@ -234,7 +234,7 @@ func TestConfig(t *testing.T) { }, Retry: RetrySettings{ Enabled: true, - MaxRequests: 5, + MaxRetries: 5, InitialInterval: 100 * time.Millisecond, MaxInterval: 1 * time.Minute, RetryOnStatus: []int{http.StatusTooManyRequests, http.StatusInternalServerError}, @@ -391,6 +391,14 @@ func TestConfig_Validate(t *testing.T) { }), err: `compression is not currently configurable`, }, + "both max_retries and max_requests specified": { + config: withDefaultConfig(func(cfg *Config) { + cfg.Endpoints = []string{"http://test:9200"} + cfg.Retry.MaxRetries = 1 + cfg.Retry.MaxRequests = 1 + }), + err: `must not specify both retry::max_requests and retry::max_retries`, + }, } for name, tt := range tests { diff --git a/exporter/elasticsearchexporter/esclient.go b/exporter/elasticsearchexporter/esclient.go index 23c2d48bb9ef..556718242bbf 100644 --- a/exporter/elasticsearchexporter/esclient.go +++ b/exporter/elasticsearchexporter/esclient.go @@ -90,16 +90,6 @@ func newElasticsearchClient( headers := make(http.Header) headers.Set("User-Agent", userAgent) - // maxRetries configures the maximum number of event publishing attempts, - // including the first send and additional retries. - - maxRetries := config.Retry.MaxRequests - 1 - retryDisabled := !config.Retry.Enabled || maxRetries <= 0 - - if retryDisabled { - maxRetries = 0 - } - // endpoints converts Config.Endpoints, Config.CloudID, // and Config.ClientConfig.Endpoint to a list of addresses. 
endpoints, err := config.endpoints() @@ -113,6 +103,11 @@ func newElasticsearchClient( logResponseBody: config.LogResponseBody, } + maxRetries := defaultMaxRetries + if config.Retry.MaxRetries != 0 { + maxRetries = config.Retry.MaxRetries + } + return elasticsearch.NewClient(elasticsearch.Config{ Transport: httpClient.Transport, @@ -125,7 +120,7 @@ func newElasticsearchClient( // configure retry behavior RetryOnStatus: config.Retry.RetryOnStatus, - DisableRetry: retryDisabled, + DisableRetry: !config.Retry.Enabled, EnableRetryOnTimeout: config.Retry.Enabled, //RetryOnError: retryOnError, // should be used from esclient version 8 onwards MaxRetries: maxRetries, diff --git a/exporter/elasticsearchexporter/exporter_test.go b/exporter/elasticsearchexporter/exporter_test.go index f1b455e41e1e..9a9b86a5be6f 100644 --- a/exporter/elasticsearchexporter/exporter_test.go +++ b/exporter/elasticsearchexporter/exporter_test.go @@ -540,14 +540,10 @@ func TestExporterLogs(t *testing.T) { t.Run("no retry", func(t *testing.T) { configurations := map[string]func(*Config){ - "max_requests limited": func(cfg *Config) { - cfg.Retry.MaxRequests = 1 - cfg.Retry.InitialInterval = 1 * time.Millisecond - cfg.Retry.MaxInterval = 10 * time.Millisecond - }, "retry.enabled is false": func(cfg *Config) { cfg.Retry.Enabled = false - cfg.Retry.MaxRequests = 10 + cfg.Retry.RetryOnStatus = []int{429} + cfg.Retry.MaxRetries = 10 cfg.Retry.InitialInterval = 1 * time.Millisecond cfg.Retry.MaxInterval = 10 * time.Millisecond }, @@ -557,7 +553,7 @@ func TestExporterLogs(t *testing.T) { "fail http request": func(attempts *atomic.Int64) bulkHandler { return func([]itemRequest) ([]itemResponse, error) { attempts.Add(1) - return nil, &httpTestError{message: "oops"} + return nil, &httpTestError{message: "oops", status: 429} } }, "fail item": func(attempts *atomic.Int64) bulkHandler { diff --git a/exporter/elasticsearchexporter/factory.go b/exporter/elasticsearchexporter/factory.go index 3f48ca1e2ec7..61af38d5cee6 100644 --- a/exporter/elasticsearchexporter/factory.go +++ b/exporter/elasticsearchexporter/factory.go @@ -63,7 +63,7 @@ func createDefaultConfig() component.Config { }, Retry: RetrySettings{ Enabled: true, - MaxRequests: 3, + MaxRetries: 0, // default is set in exporter code InitialInterval: 100 * time.Millisecond, MaxInterval: 1 * time.Minute, RetryOnStatus: []int{ @@ -110,7 +110,7 @@ func createLogsExporter( set.Logger.Warn("index option are deprecated and replaced with logs_index and traces_index.") index = cf.Index } - logConfigDeprecationWarnings(cf, set.Logger) + handleDeprecatedConfig(cf, set.Logger) exporter := newExporter(cf, set, index, cf.LogsDynamicIndex.Enabled) @@ -129,7 +129,7 @@ func createMetricsExporter( cfg component.Config, ) (exporter.Metrics, error) { cf := cfg.(*Config) - logConfigDeprecationWarnings(cf, set.Logger) + handleDeprecatedConfig(cf, set.Logger) exporter := newExporter(cf, set, cf.MetricsIndex, cf.MetricsDynamicIndex.Enabled) @@ -147,7 +147,7 @@ func createTracesExporter(ctx context.Context, cfg component.Config, ) (exporter.Traces, error) { cf := cfg.(*Config) - logConfigDeprecationWarnings(cf, set.Logger) + handleDeprecatedConfig(cf, set.Logger) exporter := newExporter(cf, set, cf.TracesIndex, cf.TracesDynamicIndex.Enabled) diff --git a/exporter/elasticsearchexporter/testdata/config.yaml b/exporter/elasticsearchexporter/testdata/config.yaml index 6f614399b579..d76d300a51c1 100644 --- a/exporter/elasticsearchexporter/testdata/config.yaml +++ 
b/exporter/elasticsearchexporter/testdata/config.yaml @@ -17,7 +17,7 @@ elasticsearch/trace: flush: bytes: 10485760 retry: - max_requests: 5 + max_retries: 5 retry_on_status: - 429 - 500 @@ -38,7 +38,7 @@ elasticsearch/metric: flush: bytes: 10485760 retry: - max_requests: 5 + max_retries: 5 retry_on_status: - 429 - 500 @@ -61,7 +61,7 @@ elasticsearch/log: flush: bytes: 10485760 retry: - max_requests: 5 + max_retries: 5 retry_on_status: - 429 - 500 From 2c349181a2e7f0eab0f115f92e15753a1fffe787 Mon Sep 17 00:00:00 2001 From: Yifan Yang Date: Fri, 18 Oct 2024 06:12:38 +0800 Subject: [PATCH 05/12] [chore][docs][pkg/stanza] Fix typo in field.md link to entry.md (#35854) #### Description Fixed a typo in the pkg/stanza/docs/types/field.md documentation. The link was incorrectly pointing to ../types/field.md and is now corrected to ../types/entry.md. #### Testing #### Documentation Signed-off-by: YifanYang6 --- pkg/stanza/docs/types/field.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/stanza/docs/types/field.md b/pkg/stanza/docs/types/field.md index 35f54be3bc9b..b6fb313b8c6d 100644 --- a/pkg/stanza/docs/types/field.md +++ b/pkg/stanza/docs/types/field.md @@ -1,6 +1,6 @@ ## Fields -A _Field_ is a reference to a value in a log [entry](../types/field.md). +A _Field_ is a reference to a value in a log [entry](../types/entry.md). Many [operators](../operators/README.md) use fields in their configurations. For example, parsers use fields to specify which value to parse and where to write a new value. From d5c641a07c2bca9d5802ab61710bb72dc3ab2f9e Mon Sep 17 00:00:00 2001 From: Roger Coll Date: Fri, 18 Oct 2024 05:34:21 +0200 Subject: [PATCH 06/12] [exporter/opensearch] chore: remove redundant config Validate call (#35233) **Description:** Configuration validation is already performed during the collector's startup, so calling Validate again inside a component's own logic is redundant. This PR removes the Validate call from the exporter constructors.
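To illustrate the contract this relies on, here is a minimal, hypothetical sketch (these are not the actual opensearchexporter types): the collector validates every component config once at startup, so constructors may assume a valid config and no longer need an error return.

```go
package main

import (
	"errors"
	"fmt"
)

// Config stands in for an exporter configuration with a Validate method;
// the real opensearchexporter Config has many more fields.
type Config struct {
	Endpoint string
}

// Validate reports whether the configuration is usable.
func (c *Config) Validate() error {
	if c.Endpoint == "" {
		return errors.New("endpoint must be specified")
	}
	return nil
}

type exporter struct{ endpoint string }

// newExporter trusts its input and can no longer fail on a bad config,
// mirroring the constructor simplification in this patch.
func newExporter(cfg *Config) *exporter {
	return &exporter{endpoint: cfg.Endpoint}
}

// startup plays the role of the collector, which validates every
// component config exactly once before any constructor runs.
func startup(cfg *Config) (*exporter, error) {
	if err := cfg.Validate(); err != nil {
		return nil, fmt.Errorf("invalid configuration: %w", err)
	}
	return newExporter(cfg), nil
}

func main() {
	if _, err := startup(&Config{}); err != nil {
		fmt.Println(err) // invalid configuration: endpoint must be specified
	}
}
```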
**Link to tracking Issue:** https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/33498 (Last component, will close the issue) **Testing:** Added default config use case (validate error) **Documentation:** --- exporter/opensearchexporter/config_test.go | 7 ++++++ exporter/opensearchexporter/factory.go | 16 +++++-------- exporter/opensearchexporter/factory_test.go | 24 ------------------- .../opensearchexporter/sso_log_exporter.go | 8 ++----- .../opensearchexporter/sso_trace_exporter.go | 8 ++----- .../opensearchexporter/testdata/config.yaml | 2 ++ 6 files changed, 19 insertions(+), 46 deletions(-) diff --git a/exporter/opensearchexporter/config_test.go b/exporter/opensearchexporter/config_test.go index d6840945c588..921ce1a00428 100644 --- a/exporter/opensearchexporter/config_test.go +++ b/exporter/opensearchexporter/config_test.go @@ -44,6 +44,13 @@ func TestLoadConfig(t *testing.T) { expected: sampleCfg, configValidateAssert: assert.NoError, }, + { + id: component.NewIDWithName(metadata.Type, "default"), + expected: withDefaultConfig(), + configValidateAssert: func(t assert.TestingT, err error, _ ...any) bool { + return assert.ErrorContains(t, err, "endpoint must be specified") + }, + }, { id: component.NewIDWithName(metadata.Type, "trace"), expected: &Config{ diff --git a/exporter/opensearchexporter/factory.go b/exporter/opensearchexporter/factory.go index a10073ca04ae..418987167a32 100644 --- a/exporter/opensearchexporter/factory.go +++ b/exporter/opensearchexporter/factory.go @@ -41,12 +41,10 @@ func newDefaultConfig() component.Config { func createTracesExporter(ctx context.Context, set exporter.Settings, - cfg component.Config) (exporter.Traces, error) { + cfg component.Config, +) (exporter.Traces, error) { c := cfg.(*Config) - te, e := newSSOTracesExporter(c, set) - if e != nil { - return nil, e - } + te := newSSOTracesExporter(c, set) return exporterhelper.NewTracesExporter(ctx, set, cfg, te.pushTraceData, @@ -58,12 +56,10 @@ func createTracesExporter(ctx context.Context, func createLogsExporter(ctx context.Context, set exporter.Settings, - cfg component.Config) (exporter.Logs, error) { + cfg component.Config, +) (exporter.Logs, error) { c := cfg.(*Config) - le, e := newLogExporter(c, set) - if e != nil { - return nil, e - } + le := newLogExporter(c, set) return exporterhelper.NewLogsExporter(ctx, set, cfg, le.pushLogData, diff --git a/exporter/opensearchexporter/factory_test.go b/exporter/opensearchexporter/factory_test.go index f64dd285231a..1f3ab8ccc31d 100644 --- a/exporter/opensearchexporter/factory_test.go +++ b/exporter/opensearchexporter/factory_test.go @@ -20,30 +20,6 @@ func TestCreateDefaultConfig(t *testing.T) { assert.NoError(t, componenttest.CheckConfigStruct(cfg)) } -func TestFactory_CreateMetricsExporter_Fail(t *testing.T) { - factory := NewFactory() - cfg := factory.CreateDefaultConfig() - params := exportertest.NewNopSettings() - _, err := factory.CreateMetricsExporter(context.Background(), params, cfg) - require.Error(t, err, "expected an error when creating a metrics exporter") -} - -func TestFactory_CreateTracesExporter_Fail(t *testing.T) { - factory := NewFactory() - cfg := factory.CreateDefaultConfig() - params := exportertest.NewNopSettings() - _, err := factory.CreateTracesExporter(context.Background(), params, cfg) - require.Error(t, err, "expected an error when creating a traces exporter") -} - -func TestFactory_CreateLogsExporter_Fail(t *testing.T) { - factory := NewFactory() - cfg := factory.CreateDefaultConfig() - params := 
exportertest.NewNopSettings() - _, err := factory.CreateLogsExporter(context.Background(), params, cfg) - require.Error(t, err, "expected an error when creating a logs exporter") -} - func TestFactory_CreateTracesExporter(t *testing.T) { factory := NewFactory() cfg := withDefaultConfig(func(cfg *Config) { diff --git a/exporter/opensearchexporter/sso_log_exporter.go b/exporter/opensearchexporter/sso_log_exporter.go index bfa34d90d1f1..fe3584f3e0f0 100644 --- a/exporter/opensearchexporter/sso_log_exporter.go +++ b/exporter/opensearchexporter/sso_log_exporter.go @@ -23,11 +23,7 @@ type logExporter struct { telemetry component.TelemetrySettings } -func newLogExporter(cfg *Config, set exporter.Settings) (*logExporter, error) { - if err := cfg.Validate(); err != nil { - return nil, err - } - +func newLogExporter(cfg *Config, set exporter.Settings) *logExporter { model := &encodeModel{ dedup: cfg.Dedup, dedot: cfg.Dedot, @@ -45,7 +41,7 @@ func newLogExporter(cfg *Config, set exporter.Settings) (*logExporter, error) { bulkAction: cfg.BulkAction, httpSettings: cfg.ClientConfig, model: model, - }, nil + } } func (l *logExporter) Start(ctx context.Context, host component.Host) error { diff --git a/exporter/opensearchexporter/sso_trace_exporter.go b/exporter/opensearchexporter/sso_trace_exporter.go index fdc6dab67e87..3c0f2e4b2211 100644 --- a/exporter/opensearchexporter/sso_trace_exporter.go +++ b/exporter/opensearchexporter/sso_trace_exporter.go @@ -25,11 +25,7 @@ type ssoTracesExporter struct { telemetry component.TelemetrySettings } -func newSSOTracesExporter(cfg *Config, set exporter.Settings) (*ssoTracesExporter, error) { - if err := cfg.Validate(); err != nil { - return nil, err - } - +func newSSOTracesExporter(cfg *Config, set exporter.Settings) *ssoTracesExporter { model := &encodeModel{ dataset: cfg.Dataset, namespace: cfg.Namespace, @@ -42,7 +38,7 @@ func newSSOTracesExporter(cfg *Config, set exporter.Settings) (*ssoTracesExporte bulkAction: cfg.BulkAction, model: model, httpSettings: cfg.ClientConfig, - }, nil + } } func (s *ssoTracesExporter) Start(ctx context.Context, host component.Host) error { diff --git a/exporter/opensearchexporter/testdata/config.yaml b/exporter/opensearchexporter/testdata/config.yaml index a187af23318e..2f967002ed96 100644 --- a/exporter/opensearchexporter/testdata/config.yaml +++ b/exporter/opensearchexporter/testdata/config.yaml @@ -7,6 +7,8 @@ opensearch: http: endpoint: https://opensearch.example.com:9200 +opensearch/default: + opensearch/empty_namespace: dataset: ngnix namespace: "" From 17afe149c35348922df127d2e9263b60236ec501 Mon Sep 17 00:00:00 2001 From: Antoine Toulme Date: Thu, 17 Oct 2024 22:53:05 -0700 Subject: [PATCH 07/12] [receiver/ntp] add initial implementation (#35850) #### Description Adds initial implementation of ntpreceiver. 
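The receiver is essentially a thin wrapper around a single beevik/ntp query. A standalone sketch of the call the scraper performs is below; the 5-second timeout is an arbitrary placeholder (the receiver itself takes the timeout from the scraper controller config), while the endpoint and version match the receiver defaults.

```go
package main

import (
	"fmt"
	"time"

	"github.com/beevik/ntp"
)

func main() {
	// Query the NTP server and report the clock offset in nanoseconds,
	// which is what the ntp.offset gauge records.
	opts := ntp.QueryOptions{Version: 4, Timeout: 5 * time.Second}
	resp, err := ntp.QueryWithOptions("pool.ntp.org:123", opts)
	if err != nil {
		panic(err)
	}
	fmt.Printf("ntp.offset: %d ns\n", resp.ClockOffset.Nanoseconds())
}
```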
#### Link to tracking issue #34375 --- receiver/ntpreceiver/config.go | 19 ++++++ receiver/ntpreceiver/config_test.go | 65 +++++++++++++++++++ receiver/ntpreceiver/documentation.md | 4 +- receiver/ntpreceiver/factory.go | 24 ++++++- receiver/ntpreceiver/factory_test.go | 18 +++++ .../ntpreceiver/generated_component_test.go | 51 +++++++++++++++ receiver/ntpreceiver/go.mod | 3 +- receiver/ntpreceiver/go.sum | 2 + .../internal/metadata/generated_metrics.go | 4 +- .../metadata/generated_metrics_test.go | 4 +- receiver/ntpreceiver/metadata.yaml | 8 +-- receiver/ntpreceiver/receiver.go | 46 +++++++++++++ 12 files changed, 233 insertions(+), 15 deletions(-) create mode 100644 receiver/ntpreceiver/config_test.go create mode 100644 receiver/ntpreceiver/factory_test.go create mode 100644 receiver/ntpreceiver/receiver.go diff --git a/receiver/ntpreceiver/config.go b/receiver/ntpreceiver/config.go index b5f78740c498..934d3df77021 100644 --- a/receiver/ntpreceiver/config.go +++ b/receiver/ntpreceiver/config.go @@ -4,6 +4,11 @@ package ntpreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/ntpreceiver" import ( + "errors" + "fmt" + "net" + "time" + "go.opentelemetry.io/collector/receiver/scraperhelper" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/ntpreceiver/internal/metadata" @@ -13,5 +18,19 @@ import ( type Config struct { scraperhelper.ControllerConfig `mapstructure:",squash"` metadata.MetricsBuilderConfig `mapstructure:",squash"` + Version int `mapstructure:"version"` Endpoint string `mapstructure:"endpoint"` } + +func (c *Config) Validate() error { + var errs []error + _, _, err := net.SplitHostPort(c.Endpoint) + if err != nil { + errs = append(errs, err) + } + // respect terms of service https://www.pool.ntp.org/tos.html + if c.ControllerConfig.CollectionInterval < 30*time.Minute { + errs = append(errs, fmt.Errorf("collection interval %v is less than minimum 30m", c.ControllerConfig.CollectionInterval)) + } + return errors.Join(errs...) 
+} diff --git a/receiver/ntpreceiver/config_test.go b/receiver/ntpreceiver/config_test.go new file mode 100644 index 000000000000..1ba68eec7f26 --- /dev/null +++ b/receiver/ntpreceiver/config_test.go @@ -0,0 +1,65 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package ntpreceiver + +import ( + "testing" + "time" + + "github.com/stretchr/testify/require" + "go.opentelemetry.io/collector/receiver/scraperhelper" +) + +func TestValidate(t *testing.T) { + for _, tt := range []struct { + name string + c *Config + errorExpected string + }{ + { + name: "no host", + c: &Config{ + Version: 4, + Endpoint: "", + ControllerConfig: scraperhelper.ControllerConfig{CollectionInterval: 45 * time.Minute}, + }, + errorExpected: "missing port in address", + }, + { + name: "no port", + c: &Config{ + Version: 4, + Endpoint: "pool.ntp.org", + ControllerConfig: scraperhelper.ControllerConfig{CollectionInterval: 45 * time.Minute}, + }, + errorExpected: "address pool.ntp.org: missing port in address", + }, + { + name: "valid", + c: &Config{ + Version: 4, + Endpoint: "pool.ntp.org:123", + ControllerConfig: scraperhelper.ControllerConfig{CollectionInterval: 45 * time.Minute}, + }, + }, + { + name: "interval too small", + c: &Config{ + Version: 4, + Endpoint: "pool.ntp.org:123", + ControllerConfig: scraperhelper.ControllerConfig{CollectionInterval: 29 * time.Minute}, + }, + errorExpected: "collection interval 29m0s is less than minimum 30m", + }, + } { + t.Run(tt.name, func(t *testing.T) { + err := tt.c.Validate() + if tt.errorExpected == "" { + require.NoError(t, err) + } else { + require.EqualError(t, err, tt.errorExpected) + } + }) + } +} diff --git a/receiver/ntpreceiver/documentation.md b/receiver/ntpreceiver/documentation.md index 182193963883..01463c1de0cf 100644 --- a/receiver/ntpreceiver/documentation.md +++ b/receiver/ntpreceiver/documentation.md @@ -14,11 +14,11 @@ metrics: ### ntp.offset -Time difference between local and NTP server clocks in seconds. 
+Time difference between local and NTP server clocks | Unit | Metric Type | Value Type | | ---- | ----------- | ---------- | -| s | Gauge | Int | +| ns | Gauge | Int | ## Resource Attributes diff --git a/receiver/ntpreceiver/factory.go b/receiver/ntpreceiver/factory.go index 07b3a9a7534f..7ff2d5a4be22 100644 --- a/receiver/ntpreceiver/factory.go +++ b/receiver/ntpreceiver/factory.go @@ -5,6 +5,7 @@ package ntpreceiver // import "github.com/open-telemetry/opentelemetry-collector import ( "context" + "time" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/consumer" @@ -23,12 +24,29 @@ func NewFactory() receiver.Factory { } func createDefaultConfig() component.Config { + scraperConfig := scraperhelper.NewDefaultControllerConfig() + scraperConfig.CollectionInterval = 30 * time.Minute return &Config{ - ControllerConfig: scraperhelper.NewDefaultControllerConfig(), + ControllerConfig: scraperConfig, MetricsBuilderConfig: metadata.DefaultMetricsBuilderConfig(), + Version: 4, + Endpoint: "pool.ntp.org:123", } } -func createMetricsReceiver(_ context.Context, _ receiver.Settings, _ component.Config, _ consumer.Metrics) (receiver.Metrics, error) { - return nil, nil +func createMetricsReceiver(_ context.Context, settings receiver.Settings, cfg component.Config, consumer consumer.Metrics) (receiver.Metrics, error) { + rCfg := cfg.(*Config) + mp := newScraper(rCfg, settings) + s, err := scraperhelper.NewScraper(metadata.Type, mp.scrape) + if err != nil { + return nil, err + } + opt := scraperhelper.AddScraper(s) + + return scraperhelper.NewScraperControllerReceiver( + &rCfg.ControllerConfig, + settings, + consumer, + opt, + ) } diff --git a/receiver/ntpreceiver/factory_test.go b/receiver/ntpreceiver/factory_test.go new file mode 100644 index 000000000000..718ced896c44 --- /dev/null +++ b/receiver/ntpreceiver/factory_test.go @@ -0,0 +1,18 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package ntpreceiver + +import ( + "testing" + "time" + + "github.com/stretchr/testify/require" +) + +func TestCreateDefaultConfig(t *testing.T) { + c := createDefaultConfig().(*Config) + require.Equal(t, 4, c.Version) + require.Equal(t, "pool.ntp.org:123", c.Endpoint) + require.Equal(t, 30*time.Minute, c.CollectionInterval) +} diff --git a/receiver/ntpreceiver/generated_component_test.go b/receiver/ntpreceiver/generated_component_test.go index 5e8b22347941..a454a725fcfc 100644 --- a/receiver/ntpreceiver/generated_component_test.go +++ b/receiver/ntpreceiver/generated_component_test.go @@ -3,10 +3,16 @@ package ntpreceiver import ( + "context" "testing" "github.com/stretchr/testify/require" + "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/component/componenttest" + "go.opentelemetry.io/collector/confmap/confmaptest" + "go.opentelemetry.io/collector/consumer/consumertest" + "go.opentelemetry.io/collector/receiver" + "go.opentelemetry.io/collector/receiver/receivertest" ) func TestComponentFactoryType(t *testing.T) { @@ -16,3 +22,48 @@ func TestComponentFactoryType(t *testing.T) { func TestComponentConfigStruct(t *testing.T) { require.NoError(t, componenttest.CheckConfigStruct(NewFactory().CreateDefaultConfig())) } + +func TestComponentLifecycle(t *testing.T) { + factory := NewFactory() + + tests := []struct { + name string + createFn func(ctx context.Context, set receiver.Settings, cfg component.Config) (component.Component, error) + }{ + + { + name: "metrics", + createFn: func(ctx context.Context, set receiver.Settings, cfg 
component.Config) (component.Component, error) { + return factory.CreateMetrics(ctx, set, cfg, consumertest.NewNop()) + }, + }, + } + + cm, err := confmaptest.LoadConf("metadata.yaml") + require.NoError(t, err) + cfg := factory.CreateDefaultConfig() + sub, err := cm.Sub("tests::config") + require.NoError(t, err) + require.NoError(t, sub.Unmarshal(&cfg)) + + for _, tt := range tests { + t.Run(tt.name+"-shutdown", func(t *testing.T) { + c, err := tt.createFn(context.Background(), receivertest.NewNopSettings(), cfg) + require.NoError(t, err) + err = c.Shutdown(context.Background()) + require.NoError(t, err) + }) + t.Run(tt.name+"-lifecycle", func(t *testing.T) { + firstRcvr, err := tt.createFn(context.Background(), receivertest.NewNopSettings(), cfg) + require.NoError(t, err) + host := componenttest.NewNopHost() + require.NoError(t, err) + require.NoError(t, firstRcvr.Start(context.Background(), host)) + require.NoError(t, firstRcvr.Shutdown(context.Background())) + secondRcvr, err := tt.createFn(context.Background(), receivertest.NewNopSettings(), cfg) + require.NoError(t, err) + require.NoError(t, secondRcvr.Start(context.Background(), host)) + require.NoError(t, secondRcvr.Shutdown(context.Background())) + }) + } +} diff --git a/receiver/ntpreceiver/go.mod b/receiver/ntpreceiver/go.mod index 71d67ef93dd2..2d20374b13e1 100644 --- a/receiver/ntpreceiver/go.mod +++ b/receiver/ntpreceiver/go.mod @@ -3,11 +3,13 @@ module github.com/open-telemetry/opentelemetry-collector-contrib/receiver/ntprec go 1.22.0 require ( + github.com/beevik/ntp v1.4.3 github.com/google/go-cmp v0.6.0 github.com/stretchr/testify v1.9.0 go.opentelemetry.io/collector/component v0.111.1-0.20241008154146-ea48c09c31ae go.opentelemetry.io/collector/confmap v1.17.1-0.20241008154146-ea48c09c31ae go.opentelemetry.io/collector/consumer v0.111.1-0.20241008154146-ea48c09c31ae + go.opentelemetry.io/collector/consumer/consumertest v0.111.1-0.20241008154146-ea48c09c31ae go.opentelemetry.io/collector/filter v0.111.1-0.20241008154146-ea48c09c31ae go.opentelemetry.io/collector/pdata v1.17.1-0.20241008154146-ea48c09c31ae go.opentelemetry.io/collector/receiver v0.111.1-0.20241008154146-ea48c09c31ae @@ -34,7 +36,6 @@ require ( github.com/rogpeppe/go-internal v1.12.0 // indirect go.opentelemetry.io/collector/config/configtelemetry v0.111.1-0.20241008154146-ea48c09c31ae // indirect go.opentelemetry.io/collector/consumer/consumerprofiles v0.111.1-0.20241008154146-ea48c09c31ae // indirect - go.opentelemetry.io/collector/consumer/consumertest v0.111.1-0.20241008154146-ea48c09c31ae // indirect go.opentelemetry.io/collector/internal/globalsignal v0.111.1-0.20241008154146-ea48c09c31ae // indirect go.opentelemetry.io/collector/pdata/pprofile v0.111.1-0.20241008154146-ea48c09c31ae // indirect go.opentelemetry.io/collector/pipeline v0.111.1-0.20241008154146-ea48c09c31ae // indirect diff --git a/receiver/ntpreceiver/go.sum b/receiver/ntpreceiver/go.sum index 65f0a94a89a6..7c55d9505af5 100644 --- a/receiver/ntpreceiver/go.sum +++ b/receiver/ntpreceiver/go.sum @@ -1,3 +1,5 @@ +github.com/beevik/ntp v1.4.3 h1:PlbTvE5NNy4QHmA4Mg57n7mcFTmr1W1j3gcK7L1lqho= +github.com/beevik/ntp v1.4.3/go.mod h1:Unr8Zg+2dRn7d8bHFuehIMSvvUYssHMxW3Q5Nx4RW5Q= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= diff --git 
a/receiver/ntpreceiver/internal/metadata/generated_metrics.go b/receiver/ntpreceiver/internal/metadata/generated_metrics.go index afc023c251d6..0b77e5ddfea6 100644 --- a/receiver/ntpreceiver/internal/metadata/generated_metrics.go +++ b/receiver/ntpreceiver/internal/metadata/generated_metrics.go @@ -21,8 +21,8 @@ type metricNtpOffset struct { // init fills ntp.offset metric with initial data. func (m *metricNtpOffset) init() { m.data.SetName("ntp.offset") - m.data.SetDescription("Time difference between local and NTP server clocks in seconds.") - m.data.SetUnit("s") + m.data.SetDescription("Time difference between local and NTP server clocks") + m.data.SetUnit("ns") m.data.SetEmptyGauge() } diff --git a/receiver/ntpreceiver/internal/metadata/generated_metrics_test.go b/receiver/ntpreceiver/internal/metadata/generated_metrics_test.go index 4ea82d6f71ac..4f92fc0d4c33 100644 --- a/receiver/ntpreceiver/internal/metadata/generated_metrics_test.go +++ b/receiver/ntpreceiver/internal/metadata/generated_metrics_test.go @@ -101,8 +101,8 @@ func TestMetricsBuilder(t *testing.T) { validatedMetrics["ntp.offset"] = true assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Time difference between local and NTP server clocks in seconds.", ms.At(i).Description()) - assert.Equal(t, "s", ms.At(i).Unit()) + assert.Equal(t, "Time difference between local and NTP server clocks", ms.At(i).Description()) + assert.Equal(t, "ns", ms.At(i).Unit()) dp := ms.At(i).Gauge().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) assert.Equal(t, ts, dp.Timestamp()) diff --git a/receiver/ntpreceiver/metadata.yaml b/receiver/ntpreceiver/metadata.yaml index c6536b0a05c9..f54de24bafdd 100644 --- a/receiver/ntpreceiver/metadata.yaml +++ b/receiver/ntpreceiver/metadata.yaml @@ -16,12 +16,10 @@ resource_attributes: metrics: ntp.offset: - description: Time difference between local and NTP server clocks in seconds. 
- unit: "s" + description: Time difference between local and NTP server clocks + unit: "ns" gauge: value_type: int enabled: true -tests: - skip_lifecycle: true - skip_shutdown: true \ No newline at end of file +tests: \ No newline at end of file diff --git a/receiver/ntpreceiver/receiver.go b/receiver/ntpreceiver/receiver.go new file mode 100644 index 000000000000..d52605431b46 --- /dev/null +++ b/receiver/ntpreceiver/receiver.go @@ -0,0 +1,46 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package ntpreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/ntpreceiver" + +import ( + "context" + "time" + + "github.com/beevik/ntp" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/receiver" + "go.uber.org/zap" + + "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/ntpreceiver/internal/metadata" +) + +type scraper struct { + logger *zap.Logger + mb *metadata.MetricsBuilder + version int + timeout time.Duration + endpoint string +} + +func (s *scraper) scrape(context.Context) (pmetric.Metrics, error) { + options := ntp.QueryOptions{Version: s.version, Timeout: s.timeout} + response, err := ntp.QueryWithOptions(s.endpoint, options) + if err != nil { + return pmetric.Metrics{}, err + } + s.mb.RecordNtpOffsetDataPoint(pcommon.NewTimestampFromTime(time.Now()), response.ClockOffset.Nanoseconds()) + s.mb.NewResourceBuilder().SetNtpHost(s.endpoint) + return s.mb.Emit(), nil +} + +func newScraper(cfg *Config, settings receiver.Settings) *scraper { + return &scraper{ + logger: settings.TelemetrySettings.Logger, + mb: metadata.NewMetricsBuilder(cfg.MetricsBuilderConfig, settings), + version: cfg.Version, + timeout: cfg.ControllerConfig.Timeout, + endpoint: cfg.Endpoint, + } +} From 1fab9bbe819594e0b2ae50ae9e6aed057031f7e5 Mon Sep 17 00:00:00 2001 From: Carson Ip Date: Fri, 18 Oct 2024 07:18:24 +0100 Subject: [PATCH 08/12] [exporter/elasticsearch] Make OTel mapping mode send to data streams only (#35839) #### Description Make OTel mapping mode use RequireDataStream in docappender, which means it will only send to data streams. This prevents auto creating regular indices in OTel mapping mode due to a race condition in Elasticsearch where otel-data index templates are not ready. #### Link to tracking issue #### Testing #### Documentation --- ...xporter_otel-mode-require-data-stream.yaml | 27 +++++++++ exporter/elasticsearchexporter/bulkindexer.go | 1 + .../elasticsearchexporter/bulkindexer_test.go | 55 +++++++++++++++++++ 3 files changed, 83 insertions(+) create mode 100644 .chloggen/elasticsearchexporter_otel-mode-require-data-stream.yaml diff --git a/.chloggen/elasticsearchexporter_otel-mode-require-data-stream.yaml b/.chloggen/elasticsearchexporter_otel-mode-require-data-stream.yaml new file mode 100644 index 000000000000..e7aebbbff1b9 --- /dev/null +++ b/.chloggen/elasticsearchexporter_otel-mode-require-data-stream.yaml @@ -0,0 +1,27 @@ +# Use this changelog template to create an entry for release notes. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: bug_fix + +# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver) +component: elasticsearchexporter + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). 
+note: Make OTel mapping mode send to data streams only + +# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. +issues: [35839] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: This prevents auto creating regular indices in OTel mapping mode due to a race condition in Elasticsearch where otel-data index templates are not ready. + +# If your change doesn't affect end users or the exported elements of any package, +# you should instead start your pull request title with [chore] or use the "Skip Changelog" label. +# Optional: The change log or logs in which this entry should be included. +# e.g. '[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. +# Default: '[user]' +change_logs: [user] diff --git a/exporter/elasticsearchexporter/bulkindexer.go b/exporter/elasticsearchexporter/bulkindexer.go index 62bc329a26f8..21b48814914d 100644 --- a/exporter/elasticsearchexporter/bulkindexer.go +++ b/exporter/elasticsearchexporter/bulkindexer.go @@ -73,6 +73,7 @@ func bulkIndexerConfig(client *elasticsearch.Client, config *Config) docappender MaxDocumentRetries: maxDocRetries, Pipeline: config.Pipeline, RetryOnDocumentStatus: config.Retry.RetryOnStatus, + RequireDataStream: config.MappingMode() == MappingOTel, } } diff --git a/exporter/elasticsearchexporter/bulkindexer_test.go b/exporter/elasticsearchexporter/bulkindexer_test.go index b417942734d6..7a75c6f5a0f1 100644 --- a/exporter/elasticsearchexporter/bulkindexer_test.go +++ b/exporter/elasticsearchexporter/bulkindexer_test.go @@ -115,6 +115,61 @@ func TestAsyncBulkIndexer_flush(t *testing.T) { } } +func TestAsyncBulkIndexer_requireDataStream(t *testing.T) { + tests := []struct { + name string + config Config + wantRequireDataStream bool + }{ + { + name: "ecs", + config: Config{ + NumWorkers: 1, + Mapping: MappingsSettings{Mode: MappingECS.String()}, + }, + wantRequireDataStream: false, + }, + { + name: "otel", + config: Config{ + NumWorkers: 1, + Mapping: MappingsSettings{Mode: MappingOTel.String()}, + }, + wantRequireDataStream: true, + }, + } + + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + requireDataStreamCh := make(chan bool, 1) + client, err := elasticsearch.NewClient(elasticsearch.Config{Transport: &mockTransport{ + RoundTripFunc: func(r *http.Request) (*http.Response, error) { + if r.URL.Path == "/_bulk" { + requireDataStreamCh <- r.URL.Query().Get("require_data_stream") == "true" + } + return &http.Response{ + Header: http.Header{"X-Elastic-Product": []string{"Elasticsearch"}}, + Body: io.NopCloser(strings.NewReader(successResp)), + }, nil + }, + }}) + require.NoError(t, err) + + bulkIndexer, err := newAsyncBulkIndexer(zap.NewNop(), client, &tt.config) + require.NoError(t, err) + session, err := bulkIndexer.StartSession(context.Background()) + require.NoError(t, err) + + assert.NoError(t, session.Add(context.Background(), "foo", strings.NewReader(`{"foo": "bar"}`), nil)) + assert.NoError(t, bulkIndexer.Close(context.Background())) + + assert.Equal(t, tt.wantRequireDataStream, <-requireDataStreamCh) + }) + } +} + func TestAsyncBulkIndexer_flush_error(t *testing.T) { tests := []struct { name string From e6936f2736b8df672545ee9278f1c61517a911e4 Mon Sep 17 00:00:00 2001 
From: VihasMakwana <121151420+VihasMakwana@users.noreply.github.com> Date: Fri, 18 Oct 2024 15:03:41 +0530 Subject: [PATCH 09/12] [chore][receiver/loki] follow receiver contract (#35327) **Description:** Follow receiver contract for `loki`. This also includes an internal errorutil package which will be used by other network receivers as well. **Link to tracking Issue:** https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/5909 **Testing:** Added --- internal/coreinternal/errorutil/grpc.go | 25 ++++++++ internal/coreinternal/go.mod | 2 +- receiver/lokireceiver/go.mod | 2 +- receiver/lokireceiver/loki.go | 8 +++ receiver/lokireceiver/loki_test.go | 76 +++++++++++++++++++++++++ 5 files changed, 111 insertions(+), 2 deletions(-) create mode 100644 internal/coreinternal/errorutil/grpc.go diff --git a/internal/coreinternal/errorutil/grpc.go b/internal/coreinternal/errorutil/grpc.go new file mode 100644 index 000000000000..08b75990f0fc --- /dev/null +++ b/internal/coreinternal/errorutil/grpc.go @@ -0,0 +1,25 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package errorutil // import "github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/errorutil" + +import ( + "go.opentelemetry.io/collector/consumer/consumererror" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +func GrpcError(err error) error { + s, ok := status.FromError(err) + if !ok { + // Default to a retryable error + // https://github.com/open-telemetry/opentelemetry-proto/blob/main/docs/specification.md#failures + code := codes.Unavailable + if consumererror.IsPermanent(err) { + // non-retryable error + code = codes.Unknown + } + s = status.New(code, err.Error()) + } + return s.Err() +} diff --git a/internal/coreinternal/go.mod b/internal/coreinternal/go.mod index ed4f490e9bcb..8675c13635fe 100644 --- a/internal/coreinternal/go.mod +++ b/internal/coreinternal/go.mod @@ -23,6 +23,7 @@ require ( go.uber.org/multierr v1.11.0 go.uber.org/zap v1.27.0 golang.org/x/text v0.19.0 + google.golang.org/grpc v1.67.1 ) require ( @@ -87,7 +88,6 @@ require ( golang.org/x/sys v0.25.0 // indirect golang.org/x/tools v0.23.0 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240822170219-fc7c04adadcd // indirect - google.golang.org/grpc v1.67.1 // indirect google.golang.org/protobuf v1.35.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/receiver/lokireceiver/go.mod b/receiver/lokireceiver/go.mod index 1c5b291f8fc3..d5be9e59e979 100644 --- a/receiver/lokireceiver/go.mod +++ b/receiver/lokireceiver/go.mod @@ -9,7 +9,7 @@ require ( github.com/grafana/loki/pkg/push v0.0.0-20240514112848-a1b1eeb09583 github.com/json-iterator/go v1.1.12 github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.111.0 - github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.111.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.111.0 github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.111.0 github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.111.0 // indirect github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/loki v0.111.0 diff --git a/receiver/lokireceiver/loki.go b/receiver/lokireceiver/loki.go index 1d4cc5c3f0c1..a994c0ae121e 100644 --- a/receiver/lokireceiver/loki.go +++ b/receiver/lokireceiver/loki.go @@ -21,6 +21,7 @@ import ( "go.uber.org/zap" "google.golang.org/grpc" + 
"github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/errorutil" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/loki" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/lokireceiver/internal" ) @@ -163,6 +164,9 @@ func (r *lokiReceiver) Push(ctx context.Context, pushRequest *push.PushRequest) logRecordCount := logs.LogRecordCount() err = r.nextConsumer.ConsumeLogs(ctx, logs) r.obsrepGRPC.EndLogsOp(ctx, "protobuf", logRecordCount, err) + if err != nil { + return &push.PushResponse{}, errorutil.GrpcError(err) + } return &push.PushResponse{}, nil } @@ -219,6 +223,10 @@ func handleLogs(resp http.ResponseWriter, req *http.Request, r *lokiReceiver) { logRecordCount := logs.LogRecordCount() err = r.nextConsumer.ConsumeLogs(ctx, logs) r.obsrepHTTP.EndLogsOp(ctx, "json", logRecordCount, err) + if err != nil { + errorutil.HTTPError(resp, err) + return + } resp.WriteHeader(http.StatusNoContent) } diff --git a/receiver/lokireceiver/loki_test.go b/receiver/lokireceiver/loki_test.go index bf208b780d39..1b17bdc11314 100644 --- a/receiver/lokireceiver/loki_test.go +++ b/receiver/lokireceiver/loki_test.go @@ -8,6 +8,7 @@ import ( "compress/gzip" "compress/zlib" "context" + "errors" "fmt" "net" "net/http" @@ -23,6 +24,7 @@ import ( "go.opentelemetry.io/collector/config/configgrpc" "go.opentelemetry.io/collector/config/confighttp" "go.opentelemetry.io/collector/config/confignet" + "go.opentelemetry.io/collector/consumer/consumererror" "go.opentelemetry.io/collector/consumer/consumertest" "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/plog" @@ -362,6 +364,80 @@ func TestSendingPushRequestToGRPCEndpoint(t *testing.T) { } } +func TestExpectedStatus(t *testing.T) { + + testcases := []struct { + name string + err error + expectedGrpcError string + expectedHTTPError string + }{ + { + name: "permanent-error", + err: consumererror.NewPermanent(errors.New("permanent")), + expectedGrpcError: "rpc error: code = Unknown desc = Permanent error: permanent", + expectedHTTPError: "failed to upload logs; HTTP status code: 400", + }, + { + name: "non-permanent-error", + err: errors.New("non-permanent"), + expectedGrpcError: "rpc error: code = Unavailable desc = non-permanent", + expectedHTTPError: "failed to upload logs; HTTP status code: 503", + }, + } + for _, tt := range testcases { + t.Run(tt.name, func(t *testing.T) { + httpAddr := testutil.GetAvailableLocalAddress(t) + config := &Config{ + Protocols: Protocols{ + GRPC: &configgrpc.ServerConfig{ + NetAddr: confignet.AddrConfig{ + Endpoint: testutil.GetAvailableLocalAddress(t), + Transport: confignet.TransportTypeTCP, + }, + }, + HTTP: &confighttp.ServerConfig{ + Endpoint: httpAddr, + }, + }, + KeepTimestamp: true, + } + + consumer := consumertest.NewErr(tt.err) + lr, err := newLokiReceiver(config, consumer, receivertest.NewNopSettings()) + require.NoError(t, err) + + require.NoError(t, lr.Start(context.Background(), componenttest.NewNopHost())) + t.Cleanup(func() { require.NoError(t, lr.Shutdown(context.Background())) }) + conn, err := grpc.NewClient(config.GRPC.NetAddr.Endpoint, grpc.WithTransportCredentials(insecure.NewCredentials())) + require.NoError(t, err) + defer conn.Close() + grpcClient := push.NewPusherClient(conn) + + body := &push.PushRequest{ + Streams: []push.Stream{ + { + Labels: "{foo=\"bar\"}", + Entries: []push.Entry{ + { + Timestamp: time.Unix(0, 1676888496000000000), + Line: "logline 1", + }, + }, + }, + }, + } + + _, err = 
grpcClient.Push(context.Background(), body) + require.EqualError(t, err, tt.expectedGrpcError) + + _, port, _ := net.SplitHostPort(httpAddr) + collectorAddr := fmt.Sprintf("http://localhost:%s/loki/api/v1/push", port) + require.EqualError(t, sendToCollector(collectorAddr, "application/json", "", []byte(`{"streams": [{"stream": {"foo": "bar"},"values": [[ "1676888496000000000", "logline 1" ]]}]}`)), tt.expectedHTTPError) + }) + } +} + type Log struct { Timestamp int64 Body pcommon.Value From 2b9ec0282937fc7aea8db948fe794a1273f322c5 Mon Sep 17 00:00:00 2001 From: Andrzej Stencel Date: Fri, 18 Oct 2024 13:35:41 +0200 Subject: [PATCH 10/12] [chore][receiver/filelog] docs: fix docs on setting `header` to `false` (#35864) Setting `header` to `false` results in an error when starting the collector. The value of `header` must either be a map or `null`. --- receiver/filelogreceiver/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/receiver/filelogreceiver/README.md b/receiver/filelogreceiver/README.md index c6c4a5fe9c4a..c34df2508a0d 100644 --- a/receiver/filelogreceiver/README.md +++ b/receiver/filelogreceiver/README.md @@ -45,7 +45,7 @@ Tails and parses logs from files. | `resource` | {} | A map of `key: value` pairs to add to the entry's resource. | | `operators` | [] | An array of [operators](../../pkg/stanza/docs/operators/README.md#what-operators-are-available). See below for more details. | | `storage` | none | The ID of a storage extension to be used to store file offsets. File offsets allow the receiver to pick up where it left off in the case of a collector restart. If no storage extension is used, the receiver will manage offsets in memory only. | -| `header` | nil | Specifies options for parsing header metadata. Requires that the `filelog.allowHeaderMetadataParsing` feature gate is enabled. See below for details. Must be `false` when `start_at` is set to `end`. | +| `header` | nil | Specifies options for parsing header metadata. Requires that the `filelog.allowHeaderMetadataParsing` feature gate is enabled. See below for details. Must not be set when `start_at` is set to `end`. | | `header.pattern` | required for header metadata parsing | A regex that matches every header line. | | `header.metadata_operators` | required for header metadata parsing | A list of operators used to parse metadata from the header. | | `retry_on_failure.enabled` | `false` | If `true`, the receiver will pause reading a file and attempt to resend the current batch of logs if it encounters an error from downstream components. | From 633ed51c3ca552c255130e2387ea7382433fd61e Mon Sep 17 00:00:00 2001 From: sh0rez Date: Fri, 18 Oct 2024 17:32:52 +0200 Subject: [PATCH 11/12] processor/deltatocumulative: golden tests (#35562) **Description:** Rewrites most tests to use a pkg/golden-like approach: - tests are directories under `testdata/` - tests consist of multiple stages, run in series - each stage is a [txtar](https://pkg.go.dev/golang.org/x/tools/txtar), containing: - `in`: pmetric yaml of the `ConsumeMetrics` input - `out`: expected output on the sink after calling `ConsumeMetrics` The multi-stage setup makes it possible to exercise multiple requests in one test. Using txtar makes it easy to co-locate the common yaml. I plan to add metric assertions in there later too.
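For reference, a stage file is plain txtar. A minimal sketch (using the same golang.org/x/tools/txtar package the new testar helper builds on) of how such a file splits into its named sections:

```go
package main

import (
	"fmt"

	"golang.org/x/tools/txtar"
)

func main() {
	// Sections are delimited by "-- name --" markers; the testar helper
	// added below maps them onto struct fields tagged `testar:"in"` and
	// `testar:"out"`. The YAML here is a trivial stand-in.
	ar := txtar.Parse([]byte(`-- in --
resourceMetrics: []
-- out --
resourceMetrics: []
`))
	for _, f := range ar.Files {
		fmt.Printf("%s: %q\n", f.Name, f.Data)
	}
}
```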
**Link to tracking Issue:** none **Testing:** Tests were rewritten **Documentation:** not needed --- processor/deltatocumulativeprocessor/go.mod | 8 +- processor/deltatocumulativeprocessor/go.sum | 10 +- .../internal/testar/decode.go | 112 ++++++ .../internal/testar/read_test.go | 58 +++ .../internal/testdata/random/random.go | 24 +- .../deltatocumulativeprocessor/linear.go | 2 +- .../processor_test.go | 336 ++++------------ .../testdata/limit/1.test | 47 +++ .../testdata/limit/2.test | 49 +++ .../testdata/limit/config.yaml | 1 + .../testdata/notemporality-ignored/1.test | 57 +++ .../testdata/notemporality-ignored/in.yaml | 27 -- .../testdata/notemporality-ignored/out.yaml | 27 -- .../testdata/timestamps/1.test | 36 ++ .../testdata/tracking/1.test | 376 ++++++++++++++++++ 15 files changed, 841 insertions(+), 329 deletions(-) create mode 100644 processor/deltatocumulativeprocessor/internal/testar/decode.go create mode 100644 processor/deltatocumulativeprocessor/internal/testar/read_test.go create mode 100644 processor/deltatocumulativeprocessor/testdata/limit/1.test create mode 100644 processor/deltatocumulativeprocessor/testdata/limit/2.test create mode 100644 processor/deltatocumulativeprocessor/testdata/limit/config.yaml create mode 100644 processor/deltatocumulativeprocessor/testdata/notemporality-ignored/1.test delete mode 100644 processor/deltatocumulativeprocessor/testdata/notemporality-ignored/in.yaml delete mode 100644 processor/deltatocumulativeprocessor/testdata/notemporality-ignored/out.yaml create mode 100644 processor/deltatocumulativeprocessor/testdata/timestamps/1.test create mode 100644 processor/deltatocumulativeprocessor/testdata/tracking/1.test diff --git a/processor/deltatocumulativeprocessor/go.mod b/processor/deltatocumulativeprocessor/go.mod index 1c4e225bd097..f6e8f201aa3e 100644 --- a/processor/deltatocumulativeprocessor/go.mod +++ b/processor/deltatocumulativeprocessor/go.mod @@ -5,7 +5,6 @@ go 1.22.0 require ( github.com/google/go-cmp v0.6.0 github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.111.0 - github.com/open-telemetry/opentelemetry-collector-contrib/pkg/golden v0.111.0 github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.111.0 github.com/stretchr/testify v1.9.0 go.opentelemetry.io/collector/component v0.111.1-0.20241008154146-ea48c09c31ae @@ -21,6 +20,8 @@ require ( go.opentelemetry.io/otel/trace v1.31.0 go.uber.org/goleak v1.3.0 go.uber.org/zap v1.27.0 + golang.org/x/tools v0.25.0 + gopkg.in/yaml.v3 v3.0.1 ) require ( @@ -50,13 +51,12 @@ require ( go.opentelemetry.io/collector/processor/processorprofiles v0.111.1-0.20241008154146-ea48c09c31ae // indirect go.opentelemetry.io/otel/sdk v1.31.0 // indirect go.uber.org/multierr v1.11.0 // indirect - golang.org/x/net v0.28.0 // indirect + golang.org/x/net v0.29.0 // indirect golang.org/x/sys v0.26.0 // indirect - golang.org/x/text v0.17.0 // indirect + golang.org/x/text v0.18.0 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240822170219-fc7c04adadcd // indirect google.golang.org/grpc v1.67.1 // indirect google.golang.org/protobuf v1.35.1 // indirect - gopkg.in/yaml.v3 v3.0.1 // indirect ) replace github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil => ../../pkg/pdatautil diff --git a/processor/deltatocumulativeprocessor/go.sum b/processor/deltatocumulativeprocessor/go.sum index 0b4ad58ccdf4..a60aff0b0442 100644 --- a/processor/deltatocumulativeprocessor/go.sum +++ b/processor/deltatocumulativeprocessor/go.sum @@ -103,8 
+103,8 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE= -golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg= +golang.org/x/net v0.29.0 h1:5ORfpBpCs4HzDYoodCDBbwHzdR5UrLBZ3sOnUJmFoHo= +golang.org/x/net v0.29.0/go.mod h1:gLkgy8jTGERgjzMic6DS9+SP0ajcu6Xu3Orq/SpETg0= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -115,12 +115,14 @@ golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo= golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc= -golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224= +golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.25.0 h1:oFU9pkj/iJgs+0DT+VMHrx+oBKs/LJMV+Uvg78sl+fE= +golang.org/x/tools v0.25.0/go.mod h1:/vtpO8WL1N9cQC3FN5zPqb//fRXskFHbLKk4OW1Q7rg= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/processor/deltatocumulativeprocessor/internal/testar/decode.go b/processor/deltatocumulativeprocessor/internal/testar/decode.go new file mode 100644 index 000000000000..5141df958fbd --- /dev/null +++ b/processor/deltatocumulativeprocessor/internal/testar/decode.go @@ -0,0 +1,112 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// testar is a textual archive (based on [golang.org/x/tools/txtar]) to define +// test fixtures. +// +// Archive data is read into struct fields, optionally calling parsers for field +// types other than string or []byte: +// +// type T struct { +// Literal string `testar:"file1"` +// Parsed int `testar:"file2,myparser"` +// } +// +// var into T +// err := Read(data, &into) +// +// See [Read] and [Parser] for examples. 
+package testar // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/testar" + +import ( + "fmt" + "io/fs" + "reflect" + "strings" + + "golang.org/x/tools/txtar" +) + +// Read archive data into the fields of struct *T +func Read[T any](data []byte, into *T, parsers ...Format) error { + ar := txtar.Parse(data) + return Decode(ar, into, parsers...) +} + +func ReadFile[T any](file string, into *T, parsers ...Format) error { + ar, err := txtar.ParseFile(file) + if err != nil { + return err + } + return Decode(ar, into, parsers...) +} + +func Decode[T any](ar *txtar.Archive, into *T, parsers ...Format) error { + arfs, err := txtar.FS(ar) + if err != nil { + return err + } + + pv := reflect.ValueOf(into) + if pv.Kind() != reflect.Pointer { + return fmt.Errorf("into must be pointer") + } + sv := pv.Elem() + + for i := range sv.NumField() { + f := sv.Type().Field(i) + tag := f.Tag.Get("testar") + if tag == "" { + continue + } + + name, format, _ := strings.Cut(tag, ",") + data, err := fs.ReadFile(arfs, name) + if err != nil { + return fmt.Errorf("%s: %w", name, err) + } + + err = formats(parsers).Parse(format, data, sv.Field(i).Addr().Interface()) + if err != nil { + return fmt.Errorf("%s: %w", name, err) + } + } + return nil +} + +type formats []Format + +func (fmts formats) Parse(name string, data []byte, into any) error { + if name == "" { + return LiteralParser(data, into) + } + + for _, f := range fmts { + if f.name == name { + return f.parse(data, into) + } + } + return fmt.Errorf("no such format: %q", name) +} + +type Format struct { + name string + parse func(file []byte, into any) error +} + +func Parser(name string, fn func(data []byte, into any) error) Format { + return Format{name: name, parse: fn} +} + +// LiteralParser sets data unaltered into a []byte or string +func LiteralParser(data []byte, into any) error { + switch ptr := into.(type) { + case *[]byte: + *ptr = append([]byte(nil), data...) + case *string: + *ptr = string(data) + default: + return fmt.Errorf("pass *[]byte, *string or use a parser. 
got %T", into) + } + return nil +} diff --git a/processor/deltatocumulativeprocessor/internal/testar/read_test.go b/processor/deltatocumulativeprocessor/internal/testar/read_test.go new file mode 100644 index 000000000000..6279ac35527e --- /dev/null +++ b/processor/deltatocumulativeprocessor/internal/testar/read_test.go @@ -0,0 +1,58 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package testar + +import ( + "fmt" + "strconv" + "strings" +) + +func ExampleRead() { + data := []byte(` +-- foo -- +hello + +-- bar -- +world +`) + + var into struct { + Foo string `testar:"foo"` + Bar []byte `testar:"bar"` + } + + _ = Read(data, &into) + fmt.Printf("foo: %T(%q)\n", into.Foo, into.Foo) + fmt.Printf("bar: %T(%q)\n", into.Bar, into.Bar) + + // Output: + // foo: string("hello\n\n") + // bar: []uint8("world\n") +} + +func ExampleParser() { + data := []byte(` +-- foobar -- +377927 +`) + + var into struct { + Foobar int `testar:"foobar,atoi"` + } + + _ = Read(data, &into, Parser("atoi", func(file []byte, into any) error { + n, err := strconv.Atoi(strings.TrimSpace(string(file))) + if err != nil { + return err + } + *(into.(*int)) = n + return nil + })) + + fmt.Printf("foobar: %T(%d)\n", into.Foobar, into.Foobar) + + // Output: + // foobar: int(377927) +} diff --git a/processor/deltatocumulativeprocessor/internal/testdata/random/random.go b/processor/deltatocumulativeprocessor/internal/testdata/random/random.go index e205fa358882..ca0642cf8795 100644 --- a/processor/deltatocumulativeprocessor/internal/testdata/random/random.go +++ b/processor/deltatocumulativeprocessor/internal/testdata/random/random.go @@ -68,23 +68,35 @@ func (m Metric[P]) Stream() (streams.Ident, P) { } func Resource() pcommon.Resource { + return ResourceN(10) +} + +func ResourceN(n int) pcommon.Resource { res := pcommon.NewResource() - for i := 0; i < 10; i++ { - res.Attributes().PutStr(randStr(), randStr()) - } + Attributes(n).MoveTo(res.Attributes()) return res } func Scope() pcommon.InstrumentationScope { + return ScopeN(3) +} + +func ScopeN(n int) pcommon.InstrumentationScope { scope := pcommon.NewInstrumentationScope() scope.SetName(randStr()) scope.SetVersion(randStr()) - for i := 0; i < 3; i++ { - scope.Attributes().PutStr(randStr(), randStr()) - } + Attributes(n).MoveTo(scope.Attributes()) return scope } +func Attributes(n int) pcommon.Map { + m := pcommon.NewMap() + for i := 0; i < n; i++ { + m.PutStr(randStr(), randStr()) + } + return m +} + func randStr() string { return strconv.FormatInt(randInt(), 16) } diff --git a/processor/deltatocumulativeprocessor/linear.go b/processor/deltatocumulativeprocessor/linear.go index b333ab851627..34edcd377eb2 100644 --- a/processor/deltatocumulativeprocessor/linear.go +++ b/processor/deltatocumulativeprocessor/linear.go @@ -110,7 +110,7 @@ func (p *Linear) ConsumeMetrics(ctx context.Context, md pmetric.Metrics) error { acc, err := func() (data.Number, error) { if !ok { // new stream: there is no existing aggregation, so start new with current dp - return dp, nil + return dp.Clone(), nil } // tracked stream: add incoming delta dp to existing cumulative aggregation return acc, delta.AccumulateInto(acc, dp) diff --git a/processor/deltatocumulativeprocessor/processor_test.go b/processor/deltatocumulativeprocessor/processor_test.go index a2cbf5957ac6..12d4452e621f 100644 --- a/processor/deltatocumulativeprocessor/processor_test.go +++ b/processor/deltatocumulativeprocessor/processor_test.go @@ -5,308 +5,124 @@ package deltatocumulativeprocessor import ( 
"context" + "encoding/json" + "errors" + "io/fs" "math" - "math/rand" + "os" "path/filepath" - "strconv" "testing" - "time" - "github.com/google/go-cmp/cmp/cmpopts" "github.com/stretchr/testify/require" + "go.opentelemetry.io/collector/confmap/confmaptest" "go.opentelemetry.io/collector/consumer/consumertest" - "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/pmetric" "go.opentelemetry.io/collector/processor" "go.opentelemetry.io/collector/processor/processortest" "go.opentelemetry.io/otel/sdk/metric/metricdata" + "gopkg.in/yaml.v3" - "github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics/identity" - "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/golden" - "github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/data" "github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/data/datatest/compare" - "github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/metrics" - "github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/streams" - "github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/testdata/random" + "github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/testar" ) -func setup(t *testing.T, cfg *Config) (processor.Metrics, *consumertest.MetricsSink) { - t.Helper() - - next := &consumertest.MetricsSink{} - if cfg == nil { - cfg = &Config{MaxStale: 0, MaxStreams: math.MaxInt} - } - - proc, err := NewFactory().CreateMetrics( - context.Background(), - processortest.NewNopSettings(), - cfg, - next, - ) +func TestProcessor(t *testing.T) { + fis, err := os.ReadDir("testdata") require.NoError(t, err) - return proc, next -} - -// TestAccumulation verifies stream identification works correctly by writing -// 100 random dps spread across 10 different streams. -// Processor output is compared against a manual aggregation on a per-stream basis. 
-// -// Uses Sum datatype for testing, as we are not testing actual aggregation (see -// internal/data for tests), but proper stream separation -func TestAccumulation(t *testing.T) { - proc, sink := setup(t, nil) - - sum := random.Sum() + for _, fi := range fis { + if !fi.IsDir() { + continue + } - // create 10 distinct streams - const N = 10 - sbs := make([]SumBuilder, N) - for i := range sbs { - _, base := sum.Stream() - sbs[i] = SumBuilder{Metric: sum, base: base} - } + type Stage struct { + In pmetric.Metrics `testar:"in,pmetric"` + Out pmetric.Metrics `testar:"out,pmetric"` + } - // init manual aggregation state - want := make(map[identity.Stream]data.Number) - for _, s := range sbs { - id := s.id(pmetric.AggregationTemporalityCumulative) - want[id] = s.point(0, 0, 0) - } + read := func(file string, into *Stage) error { + return testar.ReadFile(file, into, + testar.Parser("pmetric", unmarshalMetrics), + ) + } - for i := 0; i < 100; i++ { - s := sbs[rand.Intn(N)] + dir := fi.Name() + t.Run(dir, func(t *testing.T) { + file := func(f string) string { + return filepath.Join("testdata", dir, f) + } - v := int64(rand.Intn(255)) - ts := pcommon.Timestamp(i) + ctx := context.Background() + cfg := config(t, file("config.yaml")) + proc, sink := setup(t, cfg) - // write to processor - in := s.delta(s.point(0, ts, v)) - rms := s.resourceMetrics(in) - err := proc.ConsumeMetrics(context.Background(), rms) - require.NoError(t, err) + stages, _ := filepath.Glob(file("*.test")) + for _, file := range stages { + var stage Stage + err := read(file, &stage) + require.NoError(t, err) - // aggregate manually - wantv := want[s.id(pmetric.AggregationTemporalityCumulative)] - wantv.SetIntValue(wantv.IntValue() + v) - wantv.SetTimestamp(ts) - } + sink.Reset() + err = proc.ConsumeMetrics(ctx, stage.In) + require.NoError(t, err) - // get the final processor output for each stream - got := make(map[identity.Stream]data.Number) - for _, md := range sink.AllMetrics() { - metrics.All(md)(func(m metrics.Metric) bool { - sum := metrics.Sum(m) - streams.Datapoints(sum)(func(id identity.Stream, dp data.Number) bool { - got[id] = dp - return true - }) - return true + out := []pmetric.Metrics{stage.Out} + if diff := compare.Diff(out, sink.AllMetrics()); diff != "" { + t.Fatal(diff) + } + } }) - } - sort := cmpopts.SortMaps(func(a, b identity.Stream) bool { - return a.Hash().Sum64() < b.Hash().Sum64() - }) - if diff := compare.Diff(want, got, sort); diff != "" { - t.Fatal(diff) } } -// TestTimestamp verifies timestamp handling, most notably: -// - Timestamp() keeps getting advanced -// - StartTimestamp() stays the same -func TestTimestamps(t *testing.T) { - proc, sink := setup(t, nil) - - sb := stream() - point := func(start, last pcommon.Timestamp) data.Number { - return sb.point(start, last, 0) +func config(t *testing.T, file string) *Config { + cfg := NewFactory().CreateDefaultConfig().(*Config) + cm, err := confmaptest.LoadConf(file) + if errors.Is(err, fs.ErrNotExist) { + return cfg } + require.NoError(t, err) - cases := []struct { - in data.Number - out data.Number - drop bool - }{{ - // first: take as-is - in: point(1000, 1100), - out: point(1000, 1100), - }, { - // subsequent: take, but keep start-ts - in: point(1100, 1200), - out: point(1000, 1200), - }, { - // gap: take - in: point(1300, 1400), - out: point(1000, 1400), - }, { - // out of order - in: point(1200, 1300), - drop: true, - }, { - // older start - in: point(500, 550), - drop: true, - }} - - for i, cs := range cases { - t.Run(strconv.Itoa(i), func(t 
*testing.T) { - sink.Reset() - - in := sb.resourceMetrics(sb.delta(cs.in)) - want := make([]pmetric.Metrics, 0) - if !cs.drop { - want = []pmetric.Metrics{sb.resourceMetrics(sb.cumul(cs.out))} - } - - err := proc.ConsumeMetrics(context.Background(), in) - require.NoError(t, err) - - out := sink.AllMetrics() - if diff := compare.Diff(want, out); diff != "" { - t.Fatal(diff) - } - }) - } + err = cm.Unmarshal(cfg) + require.NoError(t, err) + return cfg } -func TestStreamLimit(t *testing.T) { - proc, sink := setup(t, &Config{MaxStale: 5 * time.Minute, MaxStreams: 10}) - - good := make([]SumBuilder, 10) - for i := range good { - good[i] = stream() - } - bad := stream() - _ = bad - - diff := func(want, got []pmetric.Metrics) { - t.Helper() - if diff := compare.Diff(want, got); diff != "" { - t.Fatal(diff) - } - } - - writeGood := func(ts pcommon.Timestamp) { - for i, sb := range good { - in := sb.resourceMetrics(sb.delta(sb.point(0, ts+pcommon.Timestamp(i), 0))) - want := sb.resourceMetrics(sb.cumul(sb.point(0, ts+pcommon.Timestamp(i), 0))) - - err := proc.ConsumeMetrics(context.Background(), in) - require.NoError(t, err) +func setup(t *testing.T, cfg *Config) (processor.Metrics, *consumertest.MetricsSink) { + t.Helper() - diff([]pmetric.Metrics{want}, sink.AllMetrics()) - sink.Reset() - } + next := &consumertest.MetricsSink{} + if cfg == nil { + cfg = &Config{MaxStale: 0, MaxStreams: math.MaxInt} } - // write up to limit must work - writeGood(0) - - // extra stream must be dropped, nothing written - in := bad.resourceMetrics(bad.delta(bad.point(0, 0, 0))) - err := proc.ConsumeMetrics(context.Background(), in) + proc, err := NewFactory().CreateMetrics( + context.Background(), + processortest.NewNopSettings(), + cfg, + next, + ) require.NoError(t, err) - diff([]pmetric.Metrics{}, sink.AllMetrics()) - sink.Reset() - - // writing existing streams must still work - writeGood(100) -} - -type copyable interface { - CopyTo(pmetric.Metric) -} - -func (s SumBuilder) resourceMetrics(metrics ...copyable) pmetric.Metrics { - md := pmetric.NewMetrics() - - rm := md.ResourceMetrics().AppendEmpty() - s.Resource().CopyTo(rm.Resource()) - - sm := rm.ScopeMetrics().AppendEmpty() - s.Scope().CopyTo(sm.Scope()) - - for _, m := range metrics { - m.CopyTo(sm.Metrics().AppendEmpty()) - } - return md -} -type SumBuilder struct { - random.Metric[data.Number] - base data.Number + return proc, next } -func (s SumBuilder) with(dps ...data.Number) pmetric.Metric { - m := pmetric.NewMetric() - s.Metric.CopyTo(m) - - for _, dp := range dps { - dp.NumberDataPoint.CopyTo(m.Sum().DataPoints().AppendEmpty()) +func unmarshalMetrics(data []byte, into any) error { + var tmp any + if err := yaml.Unmarshal(data, &tmp); err != nil { + return err } - - return m -} - -func (s SumBuilder) delta(dps ...data.Number) pmetric.Metric { - m := s.with(dps...) - m.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) - return m -} - -func (s SumBuilder) cumul(dps ...data.Number) pmetric.Metric { - m := s.with(dps...) 
- m.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) - return m -} - -func (s SumBuilder) id(temp pmetric.AggregationTemporality) identity.Stream { - m := s.with(s.base) - m.Sum().SetAggregationTemporality(temp) - - mid := identity.OfMetric(s.Ident().Scope(), m) - return identity.OfStream(mid, s.base) -} - -func (s SumBuilder) point(start, ts pcommon.Timestamp, value int64) data.Number { - dp := s.base.Clone() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetIntValue(value) - return dp -} - -func stream() SumBuilder { - sum := random.Sum() - _, base := sum.Stream() - return SumBuilder{Metric: sum, base: base} -} - -func TestIgnore(t *testing.T) { - proc, sink := setup(t, nil) - - dir := "./testdata/notemporality-ignored" - open := func(file string) pmetric.Metrics { - t.Helper() - md, err := golden.ReadMetrics(filepath.Join(dir, file)) - require.NoError(t, err) - return md + data, err := json.Marshal(tmp) + if err != nil { + return err } - - in := open("in.yaml") - out := open("out.yaml") - - ctx := context.Background() - - err := proc.ConsumeMetrics(ctx, in) - require.NoError(t, err) - - if diff := compare.Diff([]pmetric.Metrics{out}, sink.AllMetrics()); diff != "" { - t.Fatal(diff) + md, err := (&pmetric.JSONUnmarshaler{}).UnmarshalMetrics(data) + if err != nil { + return err } + *(into.(*pmetric.Metrics)) = md + return nil } func TestTelemetry(t *testing.T) { diff --git a/processor/deltatocumulativeprocessor/testdata/limit/1.test b/processor/deltatocumulativeprocessor/testdata/limit/1.test new file mode 100644 index 000000000000..0acad04bf3c6 --- /dev/null +++ b/processor/deltatocumulativeprocessor/testdata/limit/1.test @@ -0,0 +1,47 @@ +-- in -- +resourceMetrics: + - schemaUrl: https://test.com/resource + scopeMetrics: + - schemaUrl: https://test.com/scope + scope: + name: Test + version: 1.2.3 + metrics: + - name: sum + sum: + aggregationTemporality: 1 # delta + dataPoints: + - {timeUnixNano: 1, asDouble: 1, attributes: [{key: series, value: {stringValue: "0"}}]} + - {timeUnixNano: 1, asDouble: 1, attributes: [{key: series, value: {stringValue: "1"}}]} + - {timeUnixNano: 1, asDouble: 1, attributes: [{key: series, value: {stringValue: "2"}}]} + - {timeUnixNano: 1, asDouble: 1, attributes: [{key: series, value: {stringValue: "3"}}]} + - {timeUnixNano: 1, asDouble: 1, attributes: [{key: series, value: {stringValue: "4"}}]} + - {timeUnixNano: 1, asDouble: 1, attributes: [{key: series, value: {stringValue: "5"}}]} + - {timeUnixNano: 1, asDouble: 1, attributes: [{key: series, value: {stringValue: "6"}}]} + - {timeUnixNano: 1, asDouble: 1, attributes: [{key: series, value: {stringValue: "7"}}]} + - {timeUnixNano: 1, asDouble: 1, attributes: [{key: series, value: {stringValue: "8"}}]} + - {timeUnixNano: 1, asDouble: 1, attributes: [{key: series, value: {stringValue: "9"}}]} + +-- out -- +resourceMetrics: + - schemaUrl: https://test.com/resource + scopeMetrics: + - schemaUrl: https://test.com/scope + scope: + name: Test + version: 1.2.3 + metrics: + - name: sum + sum: + aggregationTemporality: 2 # cumulative + dataPoints: + - {timeUnixNano: 1, asDouble: 1, attributes: [{key: series, value: {stringValue: "0"}}]} + - {timeUnixNano: 1, asDouble: 1, attributes: [{key: series, value: {stringValue: "1"}}]} + - {timeUnixNano: 1, asDouble: 1, attributes: [{key: series, value: {stringValue: "2"}}]} + - {timeUnixNano: 1, asDouble: 1, attributes: [{key: series, value: {stringValue: "3"}}]} + - {timeUnixNano: 1, asDouble: 1, attributes: [{key: series, value: 
{stringValue: "4"}}]} + - {timeUnixNano: 1, asDouble: 1, attributes: [{key: series, value: {stringValue: "5"}}]} + - {timeUnixNano: 1, asDouble: 1, attributes: [{key: series, value: {stringValue: "6"}}]} + - {timeUnixNano: 1, asDouble: 1, attributes: [{key: series, value: {stringValue: "7"}}]} + - {timeUnixNano: 1, asDouble: 1, attributes: [{key: series, value: {stringValue: "8"}}]} + - {timeUnixNano: 1, asDouble: 1, attributes: [{key: series, value: {stringValue: "9"}}]} diff --git a/processor/deltatocumulativeprocessor/testdata/limit/2.test b/processor/deltatocumulativeprocessor/testdata/limit/2.test new file mode 100644 index 000000000000..20cd03a7db41 --- /dev/null +++ b/processor/deltatocumulativeprocessor/testdata/limit/2.test @@ -0,0 +1,49 @@ +-- in -- +resourceMetrics: + - schemaUrl: https://test.com/resource + scopeMetrics: + - schemaUrl: https://test.com/scope + scope: + name: Test + version: 1.2.3 + metrics: + - name: sum + sum: + aggregationTemporality: 1 # delta + dataPoints: + - {timeUnixNano: 2, asDouble: 1, attributes: [{key: series, value: {stringValue: "0"}}]} + - {timeUnixNano: 2, asDouble: 1, attributes: [{key: series, value: {stringValue: "1"}}]} + - {timeUnixNano: 2, asDouble: 1, attributes: [{key: series, value: {stringValue: "2"}}]} + - {timeUnixNano: 2, asDouble: 1, attributes: [{key: series, value: {stringValue: "3"}}]} + - {timeUnixNano: 2, asDouble: 1, attributes: [{key: series, value: {stringValue: "4"}}]} + - {timeUnixNano: 2, asDouble: 1, attributes: [{key: series, value: {stringValue: "5"}}]} + - {timeUnixNano: 2, asDouble: 1, attributes: [{key: series, value: {stringValue: "6"}}]} + - {timeUnixNano: 2, asDouble: 1, attributes: [{key: series, value: {stringValue: "7"}}]} + - {timeUnixNano: 2, asDouble: 1, attributes: [{key: series, value: {stringValue: "8"}}]} + - {timeUnixNano: 2, asDouble: 1, attributes: [{key: series, value: {stringValue: "9"}}]} + - {timeUnixNano: 2, asDouble: 1, attributes: [{key: series, value: {stringValue: "x"}}]} # will exceed limit + +-- out -- +resourceMetrics: + - schemaUrl: https://test.com/resource + scopeMetrics: + - schemaUrl: https://test.com/scope + scope: + name: Test + version: 1.2.3 + metrics: + - name: sum + sum: + aggregationTemporality: 2 # cumulative + dataPoints: + - {timeUnixNano: 2, asDouble: 2, attributes: [{key: series, value: {stringValue: "0"}}]} + - {timeUnixNano: 2, asDouble: 2, attributes: [{key: series, value: {stringValue: "1"}}]} + - {timeUnixNano: 2, asDouble: 2, attributes: [{key: series, value: {stringValue: "2"}}]} + - {timeUnixNano: 2, asDouble: 2, attributes: [{key: series, value: {stringValue: "3"}}]} + - {timeUnixNano: 2, asDouble: 2, attributes: [{key: series, value: {stringValue: "4"}}]} + - {timeUnixNano: 2, asDouble: 2, attributes: [{key: series, value: {stringValue: "5"}}]} + - {timeUnixNano: 2, asDouble: 2, attributes: [{key: series, value: {stringValue: "6"}}]} + - {timeUnixNano: 2, asDouble: 2, attributes: [{key: series, value: {stringValue: "7"}}]} + - {timeUnixNano: 2, asDouble: 2, attributes: [{key: series, value: {stringValue: "8"}}]} + - {timeUnixNano: 2, asDouble: 2, attributes: [{key: series, value: {stringValue: "9"}}]} + # - {timeUnixNano: 2, asDouble: 2, attributes: [{key: series, value: {stringValue: "x"}}]} # dropped diff --git a/processor/deltatocumulativeprocessor/testdata/limit/config.yaml b/processor/deltatocumulativeprocessor/testdata/limit/config.yaml new file mode 100644 index 000000000000..b8642c6cd928 --- /dev/null +++ 
b/processor/deltatocumulativeprocessor/testdata/limit/config.yaml @@ -0,0 +1 @@ +max_streams: 10 diff --git a/processor/deltatocumulativeprocessor/testdata/notemporality-ignored/1.test b/processor/deltatocumulativeprocessor/testdata/notemporality-ignored/1.test new file mode 100644 index 000000000000..c7c743bcde30 --- /dev/null +++ b/processor/deltatocumulativeprocessor/testdata/notemporality-ignored/1.test @@ -0,0 +1,57 @@ +-- in -- +resourceMetrics: + - schemaUrl: https://test.com/resource + resource: + attributes: + - key: resattr + value: { stringValue: stringoo } + scopeMetrics: + - schemaUrl: https://test.com/scope + scope: + name: Test + version: 1.2.3 + attributes: + - key: scopeattr + value: { stringValue: string } + metrics: + - name: test.gauge + gauge: + dataPoints: + - timeUnixNano: 1 + asDouble: 1 + - name: test.summary + summary: + dataPoints: + - timeUnixNano: 1 + quantileValues: + - quantile: 0.25 + value: 25 + +-- out -- +resourceMetrics: + - schemaUrl: https://test.com/resource + resource: + attributes: + - key: resattr + value: { stringValue: stringoo } + scopeMetrics: + - schemaUrl: https://test.com/scope + scope: + name: Test + version: 1.2.3 + attributes: + - key: scopeattr + value: { stringValue: string } + metrics: + - name: test.gauge + gauge: + dataPoints: + - timeUnixNano: 1 + asDouble: 1 + - name: test.summary + summary: + dataPoints: + - timeUnixNano: 1 + quantileValues: + - quantile: 0.25 + value: 25 diff --git a/processor/deltatocumulativeprocessor/testdata/notemporality-ignored/in.yaml b/processor/deltatocumulativeprocessor/testdata/notemporality-ignored/in.yaml deleted file mode 100644 index 095de3947de0..000000000000 --- a/processor/deltatocumulativeprocessor/testdata/notemporality-ignored/in.yaml +++ /dev/null @@ -1,27 +0,0 @@ -resourceMetrics: - - schemaUrl: https://test.com/resource - resource: - attributes: - - key: resattr - value: { stringValue: stringoo } - scopeMetrics: - - schemaUrl: https://test.com/scope - scope: - name: Test - version: 1.2.3 - attributes: - - key: scopeattr - value: { stringValue: string } - metrics: - - name: test.gauge - gauge: - dataPoints: - - timeUnixNano: 1 - asDouble: 1 - - name: test.summary - summary: - dataPoints: - - timeUnixNano: 1 - quantileValues: - - quantile: 0.25 - value: 25 diff --git a/processor/deltatocumulativeprocessor/testdata/notemporality-ignored/out.yaml b/processor/deltatocumulativeprocessor/testdata/notemporality-ignored/out.yaml deleted file mode 100644 index 095de3947de0..000000000000 --- a/processor/deltatocumulativeprocessor/testdata/notemporality-ignored/out.yaml +++ /dev/null @@ -1,27 +0,0 @@ -resourceMetrics: - - schemaUrl: https://test.com/resource - resource: - attributes: - - key: resattr - value: { stringValue: stringoo } - scopeMetrics: - - schemaUrl: https://test.com/scope - scope: - name: Test - version: 1.2.3 - attributes: - - key: scopeattr - value: { stringValue: string } - metrics: - - name: test.gauge - gauge: - dataPoints: - - timeUnixNano: 1 - asDouble: 1 - - name: test.summary - summary: - dataPoints: - - timeUnixNano: 1 - quantileValues: - - quantile: 0.25 - value: 25 diff --git a/processor/deltatocumulativeprocessor/testdata/timestamps/1.test b/processor/deltatocumulativeprocessor/testdata/timestamps/1.test new file mode 100644 index 000000000000..4f6d48c54e36 --- /dev/null +++ b/processor/deltatocumulativeprocessor/testdata/timestamps/1.test @@ -0,0 +1,36 @@ +-- in -- +resourceMetrics: + - schemaUrl: https://test.com/resource + scopeMetrics: + - schemaUrl: 
https://test.com/scope + scope: + name: Test + version: 1.2.3 + metrics: + - name: sum + sum: + aggregationTemporality: 1 # delta + dataPoints: + - {startTimeUnixNano: 1000, timeUnixNano: 1100, asDouble: 0} + - {startTimeUnixNano: 1100, timeUnixNano: 1200, asDouble: 0} + # - {startTimeUnixNano: 1200, timeUnixNano: 1300, asDouble: 0} + - {startTimeUnixNano: 1300, timeUnixNano: 1400, asDouble: 0} # gap (previous sample missing): accept + - {startTimeUnixNano: 1200, timeUnixNano: 1300, asDouble: 0} # out of order: drop + - {startTimeUnixNano: 500, timeUnixNano: 550, asDouble: 0} # belongs to older series: drop + +-- out -- +resourceMetrics: + - schemaUrl: https://test.com/resource + scopeMetrics: + - schemaUrl: https://test.com/scope + scope: + name: Test + version: 1.2.3 + metrics: + - name: sum + sum: + aggregationTemporality: 2 # cumulative + dataPoints: + - {startTimeUnixNano: 1000, timeUnixNano: 1100, asDouble: 0} + - {startTimeUnixNano: 1000, timeUnixNano: 1200, asDouble: 0} + - {startTimeUnixNano: 1000, timeUnixNano: 1400, asDouble: 0} diff --git a/processor/deltatocumulativeprocessor/testdata/tracking/1.test b/processor/deltatocumulativeprocessor/testdata/tracking/1.test new file mode 100644 index 000000000000..76ab437989c2 --- /dev/null +++ b/processor/deltatocumulativeprocessor/testdata/tracking/1.test @@ -0,0 +1,376 @@ +-- in -- +resourceMetrics: + - resource: + attributes: + - {key: "21f4", value: {stringValue: "42c8"}} + - {key: "7e7", value: {stringValue: "4b13"}} + scopeMetrics: + - metrics: + - description: 754d + name: 29c9 + sum: + aggregationTemporality: 1 + dataPoints: + - {attributes: [{key: "10d", value: {stringValue: "68c6"}}], startTimeUnixNano: "1000", timeUnixNano: "1100", asInt: "124"} + - {attributes: [{key: "7861", value: {stringValue: "3d13"}}], startTimeUnixNano: "1000", timeUnixNano: "1700", asInt: "22"} + - {attributes: [{key: "7861", value: {stringValue: "3d13"}}], startTimeUnixNano: "1000", timeUnixNano: "2400", asInt: "27"} + - {attributes: [{key: "10d", value: {stringValue: "68c6"}}], startTimeUnixNano: "1000", timeUnixNano: "3100", asInt: "115"} + - {attributes: [{key: "10d", value: {stringValue: "68c6"}}], startTimeUnixNano: "1000", timeUnixNano: "4600", asInt: "47"} + - {attributes: [{key: "10d", value: {stringValue: "68c6"}}], startTimeUnixNano: "1000", timeUnixNano: "5200", asInt: "34"} + - {attributes: [{key: "10d", value: {stringValue: "68c6"}}], startTimeUnixNano: "1000", timeUnixNano: "6400", asInt: "98"} + - {attributes: [{key: "7861", value: {stringValue: "3d13"}}], startTimeUnixNano: "1000", timeUnixNano: "6800", asInt: "36"} + - {attributes: [{key: "7861", value: {stringValue: "3d13"}}], startTimeUnixNano: "1000", timeUnixNano: "7100", asInt: "48"} + - {attributes: [{key: "7861", value: {stringValue: "3d13"}}], startTimeUnixNano: "1000", timeUnixNano: "8200", asInt: "118"} + - {attributes: [{key: "7861", value: {stringValue: "3d13"}}], startTimeUnixNano: "1000", timeUnixNano: "8300", asInt: "62"} + - {attributes: [{key: "7861", value: {stringValue: "3d13"}}], startTimeUnixNano: "1000", timeUnixNano: "8700", asInt: "118"} + - {attributes: [{key: "10d", value: {stringValue: "68c6"}}], startTimeUnixNano: "1000", timeUnixNano: "9000", asInt: "51"} + - {attributes: [{key: "10d", value: {stringValue: "68c6"}}], startTimeUnixNano: "1000", timeUnixNano: "9400", asInt: "49"} + - {attributes: [{key: "10d", value: {stringValue: "68c6"}}], startTimeUnixNano: "1000", timeUnixNano: "10200", asInt: "97"} + - {attributes: [{key: "7861", value: {stringValue: 
"3d13"}}], startTimeUnixNano: "1000", timeUnixNano: "10400", asInt: "21"} + unit: "7337" + - description: 611d + name: "5e04" + sum: + aggregationTemporality: 1 + dataPoints: + - {attributes: [{key: "1f1d", value: {stringValue: "762b"}}], startTimeUnixNano: "1000", timeUnixNano: "1400", asInt: "84"} + - {attributes: [{key: "1f1d", value: {stringValue: "762b"}}], startTimeUnixNano: "1000", timeUnixNano: "1800", asInt: "53"} + - {attributes: [{key: "6eeb", value: {stringValue: "e44"}}], startTimeUnixNano: "1000", timeUnixNano: "3400", asInt: "122"} + - {attributes: [{key: "6eeb", value: {stringValue: "e44"}}], startTimeUnixNano: "1000", timeUnixNano: "3600", asInt: "104"} + - {attributes: [{key: "6eeb", value: {stringValue: "e44"}}], startTimeUnixNano: "1000", timeUnixNano: "4100", asInt: "91"} + - {attributes: [{key: "6eeb", value: {stringValue: "e44"}}], startTimeUnixNano: "1000", timeUnixNano: "5300", asInt: "123"} + - {attributes: [{key: "6eeb", value: {stringValue: "e44"}}], startTimeUnixNano: "1000", timeUnixNano: "5600", asInt: "112"} + - {attributes: [{key: "1f1d", value: {stringValue: "762b"}}], startTimeUnixNano: "1000", timeUnixNano: "7600", asInt: "83"} + - {attributes: [{key: "1f1d", value: {stringValue: "762b"}}], startTimeUnixNano: "1000", timeUnixNano: "7700", asInt: "8"} + - {attributes: [{key: "6eeb", value: {stringValue: "e44"}}], startTimeUnixNano: "1000", timeUnixNano: "8500", asInt: "12"} + - {attributes: [{key: "6eeb", value: {stringValue: "e44"}}], startTimeUnixNano: "1000", timeUnixNano: "9100", asInt: "57"} + unit: 6db0 + scope: + attributes: + - {key: "553", value: {stringValue: "144a"}} + - {key: "5ab6", value: {stringValue: "9a8"}} + name: "7715" + version: 7bfb + - metrics: + - description: 52f4 + name: 476d + sum: + aggregationTemporality: 1 + dataPoints: + - {attributes: [{key: "19b0", value: {stringValue: "114f"}}], startTimeUnixNano: "1000", timeUnixNano: "2100", asInt: "66"} + - {attributes: [{key: "18d7", value: {stringValue: "6b33"}}], startTimeUnixNano: "1000", timeUnixNano: "2900", asInt: "112"} + - {attributes: [{key: "19b0", value: {stringValue: "114f"}}], startTimeUnixNano: "1000", timeUnixNano: "3000", asInt: "63"} + - {attributes: [{key: "18d7", value: {stringValue: "6b33"}}], startTimeUnixNano: "1000", timeUnixNano: "3900", asInt: "86"} + - {attributes: [{key: "18d7", value: {stringValue: "6b33"}}], startTimeUnixNano: "1000", timeUnixNano: "4000", asInt: "11"} + - {attributes: [{key: "18d7", value: {stringValue: "6b33"}}], startTimeUnixNano: "1000", timeUnixNano: "4200", asInt: "116"} + - {attributes: [{key: "18d7", value: {stringValue: "6b33"}}], startTimeUnixNano: "1000", timeUnixNano: "4300", asInt: "69"} + - {attributes: [{key: "18d7", value: {stringValue: "6b33"}}], startTimeUnixNano: "1000", timeUnixNano: "5400", asInt: "31"} + - {attributes: [{key: "18d7", value: {stringValue: "6b33"}}], startTimeUnixNano: "1000", timeUnixNano: "6500", asInt: "31"} + - {attributes: [{key: "19b0", value: {stringValue: "114f"}}], startTimeUnixNano: "1000", timeUnixNano: "7300", asInt: "78"} + - {attributes: [{key: "19b0", value: {stringValue: "114f"}}], startTimeUnixNano: "1000", timeUnixNano: "7900", asInt: "3"} + - {attributes: [{key: "18d7", value: {stringValue: "6b33"}}], startTimeUnixNano: "1000", timeUnixNano: "8000", asInt: "73"} + - {attributes: [{key: "18d7", value: {stringValue: "6b33"}}], startTimeUnixNano: "1000", timeUnixNano: "8900", asInt: "30"} + - {attributes: [{key: "18d7", value: {stringValue: "6b33"}}], startTimeUnixNano: "1000", 
timeUnixNano: "9500", asInt: "85"} + - {attributes: [{key: "18d7", value: {stringValue: "6b33"}}], startTimeUnixNano: "1000", timeUnixNano: "9600", asInt: "52"} + - {attributes: [{key: "19b0", value: {stringValue: "114f"}}], startTimeUnixNano: "1000", timeUnixNano: "9900", asInt: "66"} + - {attributes: [{key: "19b0", value: {stringValue: "114f"}}], startTimeUnixNano: "1000", timeUnixNano: "10000", asInt: "43"} + unit: 331a + - description: "3985" + name: "2128" + sum: + aggregationTemporality: 1 + dataPoints: + - {attributes: [{key: "b0", value: {stringValue: "3b97"}}], startTimeUnixNano: "1000", timeUnixNano: "1500", asInt: "63"} + - {attributes: [{key: "6c73", value: {stringValue: "2fc5"}}], startTimeUnixNano: "1000", timeUnixNano: "1600", asInt: "26"} + - {attributes: [{key: "6c73", value: {stringValue: "2fc5"}}], startTimeUnixNano: "1000", timeUnixNano: "2500", asInt: "105"} + - {attributes: [{key: "6c73", value: {stringValue: "2fc5"}}], startTimeUnixNano: "1000", timeUnixNano: "3200", asInt: "68"} + - {attributes: [{key: "6c73", value: {stringValue: "2fc5"}}], startTimeUnixNano: "1000", timeUnixNano: "5500", asInt: "8"} + - {attributes: [{key: "6c73", value: {stringValue: "2fc5"}}], startTimeUnixNano: "1000", timeUnixNano: "5800", asInt: "16"} + - {attributes: [{key: "b0", value: {stringValue: "3b97"}}], startTimeUnixNano: "1000", timeUnixNano: "6100", asInt: "5"} + - {attributes: [{key: "6c73", value: {stringValue: "2fc5"}}], startTimeUnixNano: "1000", timeUnixNano: "6700", asInt: "78"} + - {attributes: [{key: "6c73", value: {stringValue: "2fc5"}}], startTimeUnixNano: "1000", timeUnixNano: "7000", asInt: "1"} + - {attributes: [{key: "6c73", value: {stringValue: "2fc5"}}], startTimeUnixNano: "1000", timeUnixNano: "7500", asInt: "10"} + - {attributes: [{key: "6c73", value: {stringValue: "2fc5"}}], startTimeUnixNano: "1000", timeUnixNano: "9800", asInt: "119"} + - {attributes: [{key: "b0", value: {stringValue: "3b97"}}], startTimeUnixNano: "1000", timeUnixNano: "10300", asInt: "80"} + - {attributes: [{key: "b0", value: {stringValue: "3b97"}}], startTimeUnixNano: "1000", timeUnixNano: "10500", asInt: "49"} + - {attributes: [{key: "6c73", value: {stringValue: "2fc5"}}], startTimeUnixNano: "1000", timeUnixNano: "10600", asInt: "62"} + - {attributes: [{key: "b0", value: {stringValue: "3b97"}}], startTimeUnixNano: "1000", timeUnixNano: "10700", asInt: "87"} + unit: 164b + scope: + attributes: + - {key: "6c70", value: {stringValue: "2773"}} + - {key: "766b", value: {stringValue: "370c"}} + name: 6c45 + version: 74f9 + - resource: + attributes: + - {key: "2aa", value: {stringValue: "7f34"}} + - {key: "261e", value: {stringValue: "3076"}} + scopeMetrics: + - metrics: + - description: 2d50 + name: "5863" + sum: + aggregationTemporality: 1 + dataPoints: + - {attributes: [{key: "27bd", value: {stringValue: "21a"}}], startTimeUnixNano: "1000", timeUnixNano: "2200", asInt: "52"} + - {attributes: [{key: "27bd", value: {stringValue: "21a"}}], startTimeUnixNano: "1000", timeUnixNano: "2700", asInt: "46"} + - {attributes: [{key: "33cf", value: {stringValue: "23b0"}}], startTimeUnixNano: "1000", timeUnixNano: "4400", asInt: "84"} + - {attributes: [{key: "27bd", value: {stringValue: "21a"}}], startTimeUnixNano: "1000", timeUnixNano: "4700", asInt: "23"} + - {attributes: [{key: "33cf", value: {stringValue: "23b0"}}], startTimeUnixNano: "1000", timeUnixNano: "5100", asInt: "21"} + - {attributes: [{key: "33cf", value: {stringValue: "23b0"}}], startTimeUnixNano: "1000", timeUnixNano: "5900", asInt: "54"} + - 
{attributes: [{key: "33cf", value: {stringValue: "23b0"}}], startTimeUnixNano: "1000", timeUnixNano: "6300", asInt: "120"} + - {attributes: [{key: "27bd", value: {stringValue: "21a"}}], startTimeUnixNano: "1000", timeUnixNano: "7800", asInt: "112"} + - {attributes: [{key: "27bd", value: {stringValue: "21a"}}], startTimeUnixNano: "1000", timeUnixNano: "9300", asInt: "57"} + - {attributes: [{key: "33cf", value: {stringValue: "23b0"}}], startTimeUnixNano: "1000", timeUnixNano: "10800", asInt: "55"} + - {attributes: [{key: "33cf", value: {stringValue: "23b0"}}], startTimeUnixNano: "1000", timeUnixNano: "10900", asInt: "29"} + unit: 541a + - description: 6b2c + name: 430c + sum: + aggregationTemporality: 1 + dataPoints: + - {attributes: [{key: "1ac7", value: {stringValue: "5b1f"}}], startTimeUnixNano: "1000", timeUnixNano: "1200", asInt: "51"} + - {attributes: [{key: "50bf", value: {stringValue: "4e9a"}}], startTimeUnixNano: "1000", timeUnixNano: "1900", asInt: "50"} + - {attributes: [{key: "50bf", value: {stringValue: "4e9a"}}], startTimeUnixNano: "1000", timeUnixNano: "2800", asInt: "86"} + - {attributes: [{key: "50bf", value: {stringValue: "4e9a"}}], startTimeUnixNano: "1000", timeUnixNano: "3300", asInt: "79"} + - {attributes: [{key: "50bf", value: {stringValue: "4e9a"}}], startTimeUnixNano: "1000", timeUnixNano: "3500", asInt: "97"} + - {attributes: [{key: "50bf", value: {stringValue: "4e9a"}}], startTimeUnixNano: "1000", timeUnixNano: "6200", asInt: "58"} + - {attributes: [{key: "50bf", value: {stringValue: "4e9a"}}], startTimeUnixNano: "1000", timeUnixNano: "6900", asInt: "96"} + - {attributes: [{key: "50bf", value: {stringValue: "4e9a"}}], startTimeUnixNano: "1000", timeUnixNano: "7200", asInt: "34"} + - {attributes: [{key: "50bf", value: {stringValue: "4e9a"}}], startTimeUnixNano: "1000", timeUnixNano: "7400", asInt: "5"} + - {attributes: [{key: "50bf", value: {stringValue: "4e9a"}}], startTimeUnixNano: "1000", timeUnixNano: "8800", asInt: "117"} + unit: 785e + scope: + attributes: + - {key: "509", value: {stringValue: "2bcd"}} + - {key: "2aec", value: {stringValue: "390b"}} + name: "6812" + version: 17ce + - metrics: + - description: c6a + name: 3ccc + sum: + aggregationTemporality: 1 + dataPoints: + - {attributes: [{key: "75d9", value: {stringValue: "4b59"}}], startTimeUnixNano: "1000", timeUnixNano: "1300", asInt: "77"} + - {attributes: [{key: "75d9", value: {stringValue: "4b59"}}], startTimeUnixNano: "1000", timeUnixNano: "2300", asInt: "8"} + - {attributes: [{key: "306c", value: {stringValue: "3c61"}}], startTimeUnixNano: "1000", timeUnixNano: "3800", asInt: "41"} + - {attributes: [{key: "306c", value: {stringValue: "3c61"}}], startTimeUnixNano: "1000", timeUnixNano: "5700", asInt: "31"} + - {attributes: [{key: "75d9", value: {stringValue: "4b59"}}], startTimeUnixNano: "1000", timeUnixNano: "6600", asInt: "38"} + - {attributes: [{key: "75d9", value: {stringValue: "4b59"}}], startTimeUnixNano: "1000", timeUnixNano: "8400", asInt: "13"} + - {attributes: [{key: "75d9", value: {stringValue: "4b59"}}], startTimeUnixNano: "1000", timeUnixNano: "8600", asInt: "106"} + - {attributes: [{key: "306c", value: {stringValue: "3c61"}}], startTimeUnixNano: "1000", timeUnixNano: "9200", asInt: "49"} + - {attributes: [{key: "75d9", value: {stringValue: "4b59"}}], startTimeUnixNano: "1000", timeUnixNano: "9700", asInt: "0"} + unit: 1adc + - description: 20fb + name: 61b6 + sum: + aggregationTemporality: 1 + dataPoints: + - {attributes: [{key: "2fad", value: {stringValue: "3ea1"}}], 
startTimeUnixNano: "1000", timeUnixNano: "1000", asInt: "45"} + - {attributes: [{key: "2afe", value: {stringValue: "22f1"}}], startTimeUnixNano: "1000", timeUnixNano: "2000", asInt: "53"} + - {attributes: [{key: "2fad", value: {stringValue: "3ea1"}}], startTimeUnixNano: "1000", timeUnixNano: "2600", asInt: "47"} + - {attributes: [{key: "2fad", value: {stringValue: "3ea1"}}], startTimeUnixNano: "1000", timeUnixNano: "3700", asInt: "61"} + - {attributes: [{key: "2fad", value: {stringValue: "3ea1"}}], startTimeUnixNano: "1000", timeUnixNano: "4500", asInt: "27"} + - {attributes: [{key: "2fad", value: {stringValue: "3ea1"}}], startTimeUnixNano: "1000", timeUnixNano: "4800", asInt: "37"} + - {attributes: [{key: "2fad", value: {stringValue: "3ea1"}}], startTimeUnixNano: "1000", timeUnixNano: "4900", asInt: "100"} + - {attributes: [{key: "2fad", value: {stringValue: "3ea1"}}], startTimeUnixNano: "1000", timeUnixNano: "5000", asInt: "73"} + - {attributes: [{key: "2afe", value: {stringValue: "22f1"}}], startTimeUnixNano: "1000", timeUnixNano: "6000", asInt: "71"} + - {attributes: [{key: "2afe", value: {stringValue: "22f1"}}], startTimeUnixNano: "1000", timeUnixNano: "8100", asInt: "55"} + - {attributes: [{key: "2afe", value: {stringValue: "22f1"}}], startTimeUnixNano: "1000", timeUnixNano: "10100", asInt: "71"} + unit: "5679" + scope: + attributes: + - {key: "14cf", value: {stringValue: "64ea"}} + - {key: "67ef", value: {stringValue: "4299"}} + name: 58a7 + version: 1cd0 +-- out -- +resourceMetrics: + - resource: + attributes: + - {key: "21f4", value: {stringValue: "42c8"}} + - {key: "7e7", value: {stringValue: "4b13"}} + scopeMetrics: + - metrics: + - description: 754d + name: 29c9 + sum: + aggregationTemporality: 2 + dataPoints: + - {attributes: [{key: "10d", value: {stringValue: "68c6"}}], startTimeUnixNano: "1000", timeUnixNano: "1100", asInt: "124"} + - {attributes: [{key: "7861", value: {stringValue: "3d13"}}], startTimeUnixNano: "1000", timeUnixNano: "1700", asInt: "22"} + - {attributes: [{key: "7861", value: {stringValue: "3d13"}}], startTimeUnixNano: "1000", timeUnixNano: "2400", asInt: "49"} + - {attributes: [{key: "10d", value: {stringValue: "68c6"}}], startTimeUnixNano: "1000", timeUnixNano: "3100", asInt: "239"} + - {attributes: [{key: "10d", value: {stringValue: "68c6"}}], startTimeUnixNano: "1000", timeUnixNano: "4600", asInt: "286"} + - {attributes: [{key: "10d", value: {stringValue: "68c6"}}], startTimeUnixNano: "1000", timeUnixNano: "5200", asInt: "320"} + - {attributes: [{key: "10d", value: {stringValue: "68c6"}}], startTimeUnixNano: "1000", timeUnixNano: "6400", asInt: "418"} + - {attributes: [{key: "7861", value: {stringValue: "3d13"}}], startTimeUnixNano: "1000", timeUnixNano: "6800", asInt: "85"} + - {attributes: [{key: "7861", value: {stringValue: "3d13"}}], startTimeUnixNano: "1000", timeUnixNano: "7100", asInt: "133"} + - {attributes: [{key: "7861", value: {stringValue: "3d13"}}], startTimeUnixNano: "1000", timeUnixNano: "8200", asInt: "251"} + - {attributes: [{key: "7861", value: {stringValue: "3d13"}}], startTimeUnixNano: "1000", timeUnixNano: "8300", asInt: "313"} + - {attributes: [{key: "7861", value: {stringValue: "3d13"}}], startTimeUnixNano: "1000", timeUnixNano: "8700", asInt: "431"} + - {attributes: [{key: "10d", value: {stringValue: "68c6"}}], startTimeUnixNano: "1000", timeUnixNano: "9000", asInt: "469"} + - {attributes: [{key: "10d", value: {stringValue: "68c6"}}], startTimeUnixNano: "1000", timeUnixNano: "9400", asInt: "518"} + - {attributes: [{key: "10d", 
value: {stringValue: "68c6"}}], startTimeUnixNano: "1000", timeUnixNano: "10200", asInt: "615"} + - {attributes: [{key: "7861", value: {stringValue: "3d13"}}], startTimeUnixNano: "1000", timeUnixNano: "10400", asInt: "452"} + unit: "7337" + - description: 611d + name: "5e04" + sum: + aggregationTemporality: 2 + dataPoints: + - {attributes: [{key: "1f1d", value: {stringValue: "762b"}}], startTimeUnixNano: "1000", timeUnixNano: "1400", asInt: "84"} + - {attributes: [{key: "1f1d", value: {stringValue: "762b"}}], startTimeUnixNano: "1000", timeUnixNano: "1800", asInt: "137"} + - {attributes: [{key: "6eeb", value: {stringValue: "e44"}}], startTimeUnixNano: "1000", timeUnixNano: "3400", asInt: "122"} + - {attributes: [{key: "6eeb", value: {stringValue: "e44"}}], startTimeUnixNano: "1000", timeUnixNano: "3600", asInt: "226"} + - {attributes: [{key: "6eeb", value: {stringValue: "e44"}}], startTimeUnixNano: "1000", timeUnixNano: "4100", asInt: "317"} + - {attributes: [{key: "6eeb", value: {stringValue: "e44"}}], startTimeUnixNano: "1000", timeUnixNano: "5300", asInt: "440"} + - {attributes: [{key: "6eeb", value: {stringValue: "e44"}}], startTimeUnixNano: "1000", timeUnixNano: "5600", asInt: "552"} + - {attributes: [{key: "1f1d", value: {stringValue: "762b"}}], startTimeUnixNano: "1000", timeUnixNano: "7600", asInt: "220"} + - {attributes: [{key: "1f1d", value: {stringValue: "762b"}}], startTimeUnixNano: "1000", timeUnixNano: "7700", asInt: "228"} + - {attributes: [{key: "6eeb", value: {stringValue: "e44"}}], startTimeUnixNano: "1000", timeUnixNano: "8500", asInt: "564"} + - {attributes: [{key: "6eeb", value: {stringValue: "e44"}}], startTimeUnixNano: "1000", timeUnixNano: "9100", asInt: "621"} + unit: 6db0 + scope: + attributes: + - {key: "553", value: {stringValue: "144a"}} + - {key: "5ab6", value: {stringValue: "9a8"}} + name: "7715" + version: 7bfb + - metrics: + - description: 52f4 + name: 476d + sum: + aggregationTemporality: 2 + dataPoints: + - {attributes: [{key: "19b0", value: {stringValue: "114f"}}], startTimeUnixNano: "1000", timeUnixNano: "2100", asInt: "66"} + - {attributes: [{key: "18d7", value: {stringValue: "6b33"}}], startTimeUnixNano: "1000", timeUnixNano: "2900", asInt: "112"} + - {attributes: [{key: "19b0", value: {stringValue: "114f"}}], startTimeUnixNano: "1000", timeUnixNano: "3000", asInt: "129"} + - {attributes: [{key: "18d7", value: {stringValue: "6b33"}}], startTimeUnixNano: "1000", timeUnixNano: "3900", asInt: "198"} + - {attributes: [{key: "18d7", value: {stringValue: "6b33"}}], startTimeUnixNano: "1000", timeUnixNano: "4000", asInt: "209"} + - {attributes: [{key: "18d7", value: {stringValue: "6b33"}}], startTimeUnixNano: "1000", timeUnixNano: "4200", asInt: "325"} + - {attributes: [{key: "18d7", value: {stringValue: "6b33"}}], startTimeUnixNano: "1000", timeUnixNano: "4300", asInt: "394"} + - {attributes: [{key: "18d7", value: {stringValue: "6b33"}}], startTimeUnixNano: "1000", timeUnixNano: "5400", asInt: "425"} + - {attributes: [{key: "18d7", value: {stringValue: "6b33"}}], startTimeUnixNano: "1000", timeUnixNano: "6500", asInt: "456"} + - {attributes: [{key: "19b0", value: {stringValue: "114f"}}], startTimeUnixNano: "1000", timeUnixNano: "7300", asInt: "207"} + - {attributes: [{key: "19b0", value: {stringValue: "114f"}}], startTimeUnixNano: "1000", timeUnixNano: "7900", asInt: "210"} + - {attributes: [{key: "18d7", value: {stringValue: "6b33"}}], startTimeUnixNano: "1000", timeUnixNano: "8000", asInt: "529"} + - {attributes: [{key: "18d7", value: {stringValue: 
"6b33"}}], startTimeUnixNano: "1000", timeUnixNano: "8900", asInt: "559"} + - {attributes: [{key: "18d7", value: {stringValue: "6b33"}}], startTimeUnixNano: "1000", timeUnixNano: "9500", asInt: "644"} + - {attributes: [{key: "18d7", value: {stringValue: "6b33"}}], startTimeUnixNano: "1000", timeUnixNano: "9600", asInt: "696"} + - {attributes: [{key: "19b0", value: {stringValue: "114f"}}], startTimeUnixNano: "1000", timeUnixNano: "9900", asInt: "276"} + - {attributes: [{key: "19b0", value: {stringValue: "114f"}}], startTimeUnixNano: "1000", timeUnixNano: "10000", asInt: "319"} + unit: 331a + - description: "3985" + name: "2128" + sum: + aggregationTemporality: 2 + dataPoints: + - {attributes: [{key: "b0", value: {stringValue: "3b97"}}], startTimeUnixNano: "1000", timeUnixNano: "1500", asInt: "63"} + - {attributes: [{key: "6c73", value: {stringValue: "2fc5"}}], startTimeUnixNano: "1000", timeUnixNano: "1600", asInt: "26"} + - {attributes: [{key: "6c73", value: {stringValue: "2fc5"}}], startTimeUnixNano: "1000", timeUnixNano: "2500", asInt: "131"} + - {attributes: [{key: "6c73", value: {stringValue: "2fc5"}}], startTimeUnixNano: "1000", timeUnixNano: "3200", asInt: "199"} + - {attributes: [{key: "6c73", value: {stringValue: "2fc5"}}], startTimeUnixNano: "1000", timeUnixNano: "5500", asInt: "207"} + - {attributes: [{key: "6c73", value: {stringValue: "2fc5"}}], startTimeUnixNano: "1000", timeUnixNano: "5800", asInt: "223"} + - {attributes: [{key: "b0", value: {stringValue: "3b97"}}], startTimeUnixNano: "1000", timeUnixNano: "6100", asInt: "68"} + - {attributes: [{key: "6c73", value: {stringValue: "2fc5"}}], startTimeUnixNano: "1000", timeUnixNano: "6700", asInt: "301"} + - {attributes: [{key: "6c73", value: {stringValue: "2fc5"}}], startTimeUnixNano: "1000", timeUnixNano: "7000", asInt: "302"} + - {attributes: [{key: "6c73", value: {stringValue: "2fc5"}}], startTimeUnixNano: "1000", timeUnixNano: "7500", asInt: "312"} + - {attributes: [{key: "6c73", value: {stringValue: "2fc5"}}], startTimeUnixNano: "1000", timeUnixNano: "9800", asInt: "431"} + - {attributes: [{key: "b0", value: {stringValue: "3b97"}}], startTimeUnixNano: "1000", timeUnixNano: "10300", asInt: "148"} + - {attributes: [{key: "b0", value: {stringValue: "3b97"}}], startTimeUnixNano: "1000", timeUnixNano: "10500", asInt: "197"} + - {attributes: [{key: "6c73", value: {stringValue: "2fc5"}}], startTimeUnixNano: "1000", timeUnixNano: "10600", asInt: "493"} + - {attributes: [{key: "b0", value: {stringValue: "3b97"}}], startTimeUnixNano: "1000", timeUnixNano: "10700", asInt: "284"} + unit: 164b + scope: + attributes: + - {key: "6c70", value: {stringValue: "2773"}} + - {key: "766b", value: {stringValue: "370c"}} + name: 6c45 + version: 74f9 + - resource: + attributes: + - {key: "2aa", value: {stringValue: "7f34"}} + - {key: "261e", value: {stringValue: "3076"}} + scopeMetrics: + - metrics: + - description: 2d50 + name: "5863" + sum: + aggregationTemporality: 2 + dataPoints: + - {attributes: [{key: "27bd", value: {stringValue: "21a"}}], startTimeUnixNano: "1000", timeUnixNano: "2200", asInt: "52"} + - {attributes: [{key: "27bd", value: {stringValue: "21a"}}], startTimeUnixNano: "1000", timeUnixNano: "2700", asInt: "98"} + - {attributes: [{key: "33cf", value: {stringValue: "23b0"}}], startTimeUnixNano: "1000", timeUnixNano: "4400", asInt: "84"} + - {attributes: [{key: "27bd", value: {stringValue: "21a"}}], startTimeUnixNano: "1000", timeUnixNano: "4700", asInt: "121"} + - {attributes: [{key: "33cf", value: {stringValue: "23b0"}}], 
startTimeUnixNano: "1000", timeUnixNano: "5100", asInt: "105"} + - {attributes: [{key: "33cf", value: {stringValue: "23b0"}}], startTimeUnixNano: "1000", timeUnixNano: "5900", asInt: "159"} + - {attributes: [{key: "33cf", value: {stringValue: "23b0"}}], startTimeUnixNano: "1000", timeUnixNano: "6300", asInt: "279"} + - {attributes: [{key: "27bd", value: {stringValue: "21a"}}], startTimeUnixNano: "1000", timeUnixNano: "7800", asInt: "233"} + - {attributes: [{key: "27bd", value: {stringValue: "21a"}}], startTimeUnixNano: "1000", timeUnixNano: "9300", asInt: "290"} + - {attributes: [{key: "33cf", value: {stringValue: "23b0"}}], startTimeUnixNano: "1000", timeUnixNano: "10800", asInt: "334"} + - {attributes: [{key: "33cf", value: {stringValue: "23b0"}}], startTimeUnixNano: "1000", timeUnixNano: "10900", asInt: "363"} + unit: 541a + - description: 6b2c + name: 430c + sum: + aggregationTemporality: 2 + dataPoints: + - {attributes: [{key: "1ac7", value: {stringValue: "5b1f"}}], startTimeUnixNano: "1000", timeUnixNano: "1200", asInt: "51"} + - {attributes: [{key: "50bf", value: {stringValue: "4e9a"}}], startTimeUnixNano: "1000", timeUnixNano: "1900", asInt: "50"} + - {attributes: [{key: "50bf", value: {stringValue: "4e9a"}}], startTimeUnixNano: "1000", timeUnixNano: "2800", asInt: "136"} + - {attributes: [{key: "50bf", value: {stringValue: "4e9a"}}], startTimeUnixNano: "1000", timeUnixNano: "3300", asInt: "215"} + - {attributes: [{key: "50bf", value: {stringValue: "4e9a"}}], startTimeUnixNano: "1000", timeUnixNano: "3500", asInt: "312"} + - {attributes: [{key: "50bf", value: {stringValue: "4e9a"}}], startTimeUnixNano: "1000", timeUnixNano: "6200", asInt: "370"} + - {attributes: [{key: "50bf", value: {stringValue: "4e9a"}}], startTimeUnixNano: "1000", timeUnixNano: "6900", asInt: "466"} + - {attributes: [{key: "50bf", value: {stringValue: "4e9a"}}], startTimeUnixNano: "1000", timeUnixNano: "7200", asInt: "500"} + - {attributes: [{key: "50bf", value: {stringValue: "4e9a"}}], startTimeUnixNano: "1000", timeUnixNano: "7400", asInt: "505"} + - {attributes: [{key: "50bf", value: {stringValue: "4e9a"}}], startTimeUnixNano: "1000", timeUnixNano: "8800", asInt: "622"} + unit: 785e + scope: + attributes: + - {key: "509", value: {stringValue: "2bcd"}} + - {key: "2aec", value: {stringValue: "390b"}} + name: "6812" + version: 17ce + - metrics: + - description: c6a + name: 3ccc + sum: + aggregationTemporality: 2 + dataPoints: + - {attributes: [{key: "75d9", value: {stringValue: "4b59"}}], startTimeUnixNano: "1000", timeUnixNano: "1300", asInt: "77"} + - {attributes: [{key: "75d9", value: {stringValue: "4b59"}}], startTimeUnixNano: "1000", timeUnixNano: "2300", asInt: "85"} + - {attributes: [{key: "306c", value: {stringValue: "3c61"}}], startTimeUnixNano: "1000", timeUnixNano: "3800", asInt: "41"} + - {attributes: [{key: "306c", value: {stringValue: "3c61"}}], startTimeUnixNano: "1000", timeUnixNano: "5700", asInt: "72"} + - {attributes: [{key: "75d9", value: {stringValue: "4b59"}}], startTimeUnixNano: "1000", timeUnixNano: "6600", asInt: "123"} + - {attributes: [{key: "75d9", value: {stringValue: "4b59"}}], startTimeUnixNano: "1000", timeUnixNano: "8400", asInt: "136"} + - {attributes: [{key: "75d9", value: {stringValue: "4b59"}}], startTimeUnixNano: "1000", timeUnixNano: "8600", asInt: "242"} + - {attributes: [{key: "306c", value: {stringValue: "3c61"}}], startTimeUnixNano: "1000", timeUnixNano: "9200", asInt: "121"} + - {attributes: [{key: "75d9", value: {stringValue: "4b59"}}], startTimeUnixNano: "1000", 
timeUnixNano: "9700", asInt: "242"} + unit: 1adc + - description: 20fb + name: 61b6 + sum: + aggregationTemporality: 2 + dataPoints: + - {attributes: [{key: "2fad", value: {stringValue: "3ea1"}}], startTimeUnixNano: "1000", timeUnixNano: "1000", asInt: "45"} + - {attributes: [{key: "2afe", value: {stringValue: "22f1"}}], startTimeUnixNano: "1000", timeUnixNano: "2000", asInt: "53"} + - {attributes: [{key: "2fad", value: {stringValue: "3ea1"}}], startTimeUnixNano: "1000", timeUnixNano: "2600", asInt: "92"} + - {attributes: [{key: "2fad", value: {stringValue: "3ea1"}}], startTimeUnixNano: "1000", timeUnixNano: "3700", asInt: "153"} + - {attributes: [{key: "2fad", value: {stringValue: "3ea1"}}], startTimeUnixNano: "1000", timeUnixNano: "4500", asInt: "180"} + - {attributes: [{key: "2fad", value: {stringValue: "3ea1"}}], startTimeUnixNano: "1000", timeUnixNano: "4800", asInt: "217"} + - {attributes: [{key: "2fad", value: {stringValue: "3ea1"}}], startTimeUnixNano: "1000", timeUnixNano: "4900", asInt: "317"} + - {attributes: [{key: "2fad", value: {stringValue: "3ea1"}}], startTimeUnixNano: "1000", timeUnixNano: "5000", asInt: "390"} + - {attributes: [{key: "2afe", value: {stringValue: "22f1"}}], startTimeUnixNano: "1000", timeUnixNano: "6000", asInt: "124"} + - {attributes: [{key: "2afe", value: {stringValue: "22f1"}}], startTimeUnixNano: "1000", timeUnixNano: "8100", asInt: "179"} + - {attributes: [{key: "2afe", value: {stringValue: "22f1"}}], startTimeUnixNano: "1000", timeUnixNano: "10100", asInt: "250"} + unit: "5679" + scope: + attributes: + - {key: "14cf", value: {stringValue: "64ea"}} + - {key: "67ef", value: {stringValue: "4299"}} + name: 58a7 + version: 1cd0 From 29d0174a544b655647c47ffcc70cbb505c8becf9 Mon Sep 17 00:00:00 2001 From: Alex Boten <223565+codeboten@users.noreply.github.com> Date: Fri, 18 Oct 2024 09:18:14 -0700 Subject: [PATCH 12/12] [chore] update test to use Contains (#35875) This follows the recommendations from the new lint version Signed-off-by: Alex Boten <223565+codeboten@users.noreply.github.com> --- receiver/snmpreceiver/client_test.go | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/receiver/snmpreceiver/client_test.go b/receiver/snmpreceiver/client_test.go index 8cfb26ad1f09..5bc38d09ad29 100644 --- a/receiver/snmpreceiver/client_test.go +++ b/receiver/snmpreceiver/client_test.go @@ -8,7 +8,6 @@ import ( "fmt" "math" "strconv" - "strings" "testing" "github.com/gosnmp/gosnmp" @@ -83,9 +82,9 @@ func TestNewClient(t *testing.T) { func compareConfigToClient(t *testing.T, client *snmpClient, cfg *Config) { t.Helper() - require.True(t, strings.Contains(cfg.Endpoint, client.client.GetTarget())) - require.True(t, strings.Contains(cfg.Endpoint, strconv.FormatInt(int64(client.client.GetPort()), 10))) - require.True(t, strings.Contains(cfg.Endpoint, client.client.GetTransport())) + require.Contains(t, cfg.Endpoint, client.client.GetTarget()) + require.Contains(t, cfg.Endpoint, strconv.FormatInt(int64(client.client.GetPort()), 10)) + require.Contains(t, cfg.Endpoint, client.client.GetTransport()) switch cfg.Version { case "v1": require.Equal(t, gosnmp.Version1, client.client.GetVersion())