diff --git a/.chloggen/metricsgeneration_match_attrs.yaml b/.chloggen/metricsgeneration_match_attrs.yaml new file mode 100644 index 000000000000..c3b844844b95 --- /dev/null +++ b/.chloggen/metricsgeneration_match_attrs.yaml @@ -0,0 +1,28 @@ +# Use this changelog template to create an entry for release notes. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: enhancement + +# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver) +component: metricsgenerationprocessor + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: Introduce functionality to only do metric calculations on data points whose attributes match + +# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. +issues: [35425] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: | + This functionality can be enabled by the `metricsgeneration.MatchAttributes` feature gate, which is disabled by default. + +# If your change doesn't affect end users or the exported elements of any package, +# you should instead start your pull request title with [chore] or use the "Skip Changelog" label. +# Optional: The change log or logs in which this entry should be included. +# e.g. '[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. +# Default: '[user]' +change_logs: [] diff --git a/processor/metricsgenerationprocessor/README.md b/processor/metricsgenerationprocessor/README.md index 42498a5ca312..13203d06c107 100644 --- a/processor/metricsgenerationprocessor/README.md +++ b/processor/metricsgenerationprocessor/README.md @@ -16,13 +16,24 @@ ## Description -The metrics generation processor (`experimental_metricsgenerationprocessor`) can be used to create new metrics using existing metrics following a given rule. This processor currently supports the following two approaches for creating a new metric. +The metrics generation processor (`experimental_metricsgenerationprocessor`) can be used to create new metrics using existing metrics following a given rule. This processor currently supports the following two rule types for creating a new metric. -1. It can create a new metric from two existing metrics by applying one of the following arithmetic operations: add, subtract, multiply, divide, or percent. One use case is to calculate the `pod.memory.utilization` metric like the following equation- +1. `calculate`: It can create a new metric from two existing metrics by applying one of the following arithmetic operations: add, subtract, multiply, divide, or percent. One use case is to calculate the `pod.memory.utilization` metric like the following equation- `pod.memory.utilization` = (`pod.memory.usage.bytes` / `node.memory.limit`) -1. It can create a new metric by scaling the value of an existing metric with a given constant number. One use case is to convert `pod.memory.usage` metric values from Megabytes to Bytes (multiply the existing metric's value by 1,048,576) +1. `scale`: It can create a new metric by scaling the value of an existing metric with a given constant number. 
One use case is to convert `pod.memory.usage` metric values from Megabytes to Bytes (multiply the existing metric's value by 1,048,576) -Note: The created metric's type is inherited from the metric configured as `metric1`. +## `calculate` Rule Functionality + +There are some specific behaviors of the `calculate` metric generation rule that users may want to be aware of: + +- The created metric will have the same type as the metric configured as `metric1`. +- If no valid data points are calculated for the metric being created, it will not be created. + This ensures the processor is not emitting new metrics that are empty. +- Users may want to have metric calculations done on data points whose overlapping attributes match. To enable this + behavior, please enable the feature gate `metricsgeneration.MatchAttributes`. This feature gate is disabled + by default, meaning the value used for `metric2` during the calculations is simply the first data point's value. + Refer to [documentation](https://github.com/open-telemetry/opentelemetry-collector/blob/main/featuregate/README.md) + for more information on how to enable and disable feature gates. ## Configuration diff --git a/processor/metricsgenerationprocessor/go.mod b/processor/metricsgenerationprocessor/go.mod index 3e5da49fa0d7..0dc2cfa6aa14 100644 --- a/processor/metricsgenerationprocessor/go.mod +++ b/processor/metricsgenerationprocessor/go.mod @@ -10,6 +10,7 @@ require ( go.opentelemetry.io/collector/confmap v1.17.1-0.20241008154146-ea48c09c31ae go.opentelemetry.io/collector/consumer v0.111.1-0.20241008154146-ea48c09c31ae go.opentelemetry.io/collector/consumer/consumertest v0.111.1-0.20241008154146-ea48c09c31ae + go.opentelemetry.io/collector/featuregate v1.17.1-0.20241008154146-ea48c09c31ae go.opentelemetry.io/collector/pdata v1.17.1-0.20241008154146-ea48c09c31ae go.opentelemetry.io/collector/processor v0.111.1-0.20241008154146-ea48c09c31ae go.uber.org/goleak v1.3.0 @@ -24,6 +25,7 @@ require ( github.com/go-viper/mapstructure/v2 v2.1.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/google/uuid v1.6.0 // indirect + github.com/hashicorp/go-version v1.7.0 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/knadh/koanf/maps v0.1.1 // indirect github.com/knadh/koanf/providers/confmap v0.1.0 // indirect diff --git a/processor/metricsgenerationprocessor/go.sum b/processor/metricsgenerationprocessor/go.sum index bc841962267c..6000616b83e4 100644 --- a/processor/metricsgenerationprocessor/go.sum +++ b/processor/metricsgenerationprocessor/go.sum @@ -17,6 +17,8 @@ github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeN github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY= +github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= @@ -64,6 +66,8 @@ go.opentelemetry.io/collector/consumer/consumerprofiles v0.111.1-0.2024100815414 go.opentelemetry.io/collector/consumer/consumerprofiles 
v0.111.1-0.20241008154146-ea48c09c31ae/go.mod h1:GK0QMMiRBWl4IhIF/7ZKgzBlR9SdRSpRlqyNInN4ZoU= go.opentelemetry.io/collector/consumer/consumertest v0.111.1-0.20241008154146-ea48c09c31ae h1:HFj6D19fJYm3KV8QidQmMApmLjzoNkzh8El5OkTGySo= go.opentelemetry.io/collector/consumer/consumertest v0.111.1-0.20241008154146-ea48c09c31ae/go.mod h1:UDZRrSgaFAwWO6I34fj0KjabVAuBCAnmizsleyIe3I4= +go.opentelemetry.io/collector/featuregate v1.17.1-0.20241008154146-ea48c09c31ae h1:pUq/CeF1eZVO6rjwYqMniDmTyYByqQTWfsyt3m1lmD0= +go.opentelemetry.io/collector/featuregate v1.17.1-0.20241008154146-ea48c09c31ae/go.mod h1:47xrISO71vJ83LSMm8+yIDsUbKktUp48Ovt7RR6VbRs= go.opentelemetry.io/collector/internal/globalsignal v0.111.1-0.20241008154146-ea48c09c31ae h1:fublc0EO06p79/OWw2jWVPSPNBMiBcB+0QpLes587DU= go.opentelemetry.io/collector/internal/globalsignal v0.111.1-0.20241008154146-ea48c09c31ae/go.mod h1:GqMXodPWOxK5uqpX8MaMXC2389y2XJTa5nPwf8FYDK8= go.opentelemetry.io/collector/pdata v1.17.1-0.20241008154146-ea48c09c31ae h1:PcwZe1RD8tC4SZExhf0f5HqK+ZuWGsowHaBBU4PiUv0= diff --git a/processor/metricsgenerationprocessor/processor.go b/processor/metricsgenerationprocessor/processor.go index a058533af1e1..425bef0bf70d 100644 --- a/processor/metricsgenerationprocessor/processor.go +++ b/processor/metricsgenerationprocessor/processor.go @@ -5,12 +5,22 @@ package metricsgenerationprocessor // import "github.com/open-telemetry/opentele import ( "context" + "fmt" "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/featuregate" "go.opentelemetry.io/collector/pdata/pmetric" "go.uber.org/zap" ) +var matchAttributes = featuregate.GlobalRegistry().MustRegister( + "metricsgeneration.MatchAttributes", + featuregate.StageAlpha, + featuregate.WithRegisterDescription("When enabled, the metric calculations will only be done between data points whose attributes match."), + featuregate.WithRegisterReferenceURL("https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/35425"), + featuregate.WithRegisterFromVersion("v0.112.0"), +) + type metricsGenerationProcessor struct { rules []internalRule logger *zap.Logger @@ -47,25 +57,41 @@ func (mgp *metricsGenerationProcessor) processMetrics(_ context.Context, md pmet nameToMetricMap := getNameToMetricMap(rm) for _, rule := range mgp.rules { - operand2 := float64(0) _, ok := nameToMetricMap[rule.metric1] if !ok { mgp.logger.Debug("Missing first metric", zap.String("metric_name", rule.metric1)) continue } - if rule.ruleType == string(calculate) { + switch rule.ruleType { + case string(calculate): + // Operation type is validated during config validation, but this adds extra validation as a safety net + ot := OperationType(rule.operation) + if !ot.isValid() { + mgp.logger.Debug(fmt.Sprintf("Invalid operation type '%s' specified for rule: %s. This rule is skipped.", rule.operation, rule.name)) + continue + } + metric2, ok := nameToMetricMap[rule.metric2] if !ok { mgp.logger.Debug("Missing second metric", zap.String("metric_name", rule.metric2)) continue } - operand2 = getMetricValue(metric2) - } else if rule.ruleType == string(scale) { - operand2 = rule.scaleBy + if matchAttributes.IsEnabled() { + generateCalculatedMetrics(rm, metric2, rule, mgp.logger) + } else { + // When matching metric attributes isn't required the value of the first data point of metric2 is + // used for all calculations. The resulting logic is the same as generating a new metric from + // a scalar. 
+ generateScalarMetrics(rm, getMetricValue(metric2), rule, mgp.logger) + } + case string(scale): + generateScalarMetrics(rm, rule.scaleBy, rule, mgp.logger) + default: + mgp.logger.Error(fmt.Sprintf("Invalid rule type configured: '%s'. This rule is skipped.", rule.ruleType)) + continue } - generateMetrics(rm, operand2, rule, mgp.logger) } } return md, nil diff --git a/processor/metricsgenerationprocessor/processor_test.go b/processor/metricsgenerationprocessor/processor_test.go index 5f9f7f5e867f..7004f3b42977 100644 --- a/processor/metricsgenerationprocessor/processor_test.go +++ b/processor/metricsgenerationprocessor/processor_test.go @@ -14,6 +14,7 @@ import ( "github.com/stretchr/testify/require" "go.opentelemetry.io/collector/confmap/confmaptest" "go.opentelemetry.io/collector/consumer/consumertest" + "go.opentelemetry.io/collector/featuregate" "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/pmetric" "go.opentelemetry.io/collector/processor/processortest" @@ -392,130 +393,173 @@ func getOutputForIntGaugeTest() pmetric.Metrics { } type goldenTestCases struct { - name string - testDir string + name string + testDir string + matchAttributesFlagEnabled bool } func TestGoldenFileMetrics(t *testing.T) { // Test description by test data directory: - // input_metric_types: These tests are to ensure calculations can be done on both sums and gauges - // result_metric_types: These tests are to ensure the created metric's type is correct - // metric2_zero_value: These tests are to ensure metrics are created properly when the second metric's (metric2) - // value is 0. + // input_metric_types: These tests are to ensure calculations can be done on both sums and gauges + // result_metric_types: These tests are to ensure the created metric's type is correct + // metric2_zero_value: These tests are to ensure metrics are created properly when the second metric's (metric2) + // value is 0. + // match_attributes: These tests are to ensure the correct data points are generated when the + // match attributes feature gate is enabled. testCaseNames := []goldenTestCases{ { - name: "sum_gauge_metric", - testDir: "input_metric_types", + // Keep this test case to show that existing behavior has remained unchanged when + // feature gate is disabled. 
+ name: "sum_gauge_metric", + testDir: "input_metric_types", + matchAttributesFlagEnabled: false, }, { - name: "add_sum_sum", - testDir: "result_metric_types", + name: "sum_gauge_metric_match_attrs", + testDir: "input_metric_types", + matchAttributesFlagEnabled: true, }, { - name: "add_gauge_gauge", - testDir: "result_metric_types", + name: "add_sum_sum", + testDir: "result_metric_types", + matchAttributesFlagEnabled: true, }, { - name: "add_gauge_sum", - testDir: "result_metric_types", + name: "add_gauge_gauge", + testDir: "result_metric_types", + matchAttributesFlagEnabled: true, }, { - name: "add_sum_gauge", - testDir: "result_metric_types", + name: "add_gauge_sum", + testDir: "result_metric_types", + matchAttributesFlagEnabled: true, }, { - name: "multiply_gauge_sum", - testDir: "result_metric_types", + name: "add_sum_gauge", + testDir: "result_metric_types", + matchAttributesFlagEnabled: true, }, { - name: "multiply_sum_gauge", - testDir: "result_metric_types", + name: "multiply_gauge_sum", + testDir: "result_metric_types", + matchAttributesFlagEnabled: true, }, { - name: "divide_gauge_sum", - testDir: "result_metric_types", + name: "multiply_sum_gauge", + testDir: "result_metric_types", + matchAttributesFlagEnabled: true, }, { - name: "divide_sum_gauge", - testDir: "result_metric_types", + name: "divide_gauge_sum", + testDir: "result_metric_types", + matchAttributesFlagEnabled: true, }, { - name: "subtract_gauge_sum", - testDir: "result_metric_types", + name: "divide_sum_gauge", + testDir: "result_metric_types", + matchAttributesFlagEnabled: true, }, { - name: "subtract_sum_gauge", - testDir: "result_metric_types", + name: "subtract_gauge_sum", + testDir: "result_metric_types", + matchAttributesFlagEnabled: true, }, { - name: "percent_sum_gauge", - testDir: "result_metric_types", + name: "subtract_sum_gauge", + testDir: "result_metric_types", + matchAttributesFlagEnabled: true, }, { - name: "percent_gauge_sum", - testDir: "result_metric_types", + name: "percent_sum_gauge", + testDir: "result_metric_types", + matchAttributesFlagEnabled: true, }, { - name: "metric2_zero_add", - testDir: "metric2_zero_value", + name: "percent_gauge_sum", + testDir: "result_metric_types", + matchAttributesFlagEnabled: true, }, { - name: "metric2_zero_subtract", - testDir: "metric2_zero_value", + name: "metric2_zero_add", + testDir: "metric2_zero_value", + matchAttributesFlagEnabled: true, }, { - name: "metric2_zero_multiply", - testDir: "metric2_zero_value", + name: "metric2_zero_subtract", + testDir: "metric2_zero_value", + matchAttributesFlagEnabled: true, }, { - name: "metric2_zero_divide", - testDir: "metric2_zero_value", + name: "metric2_zero_multiply", + testDir: "metric2_zero_value", + matchAttributesFlagEnabled: true, }, { - name: "metric2_zero_percent", - testDir: "metric2_zero_value", + name: "metric2_zero_divide", + testDir: "metric2_zero_value", + matchAttributesFlagEnabled: true, + }, + { + name: "metric2_zero_percent", + testDir: "metric2_zero_value", + matchAttributesFlagEnabled: true, + }, + { + name: "match_attributes_disabled", + testDir: "match_attributes", + matchAttributesFlagEnabled: false, + }, + { + name: "match_attributes_enabled", + testDir: "match_attributes", + matchAttributesFlagEnabled: true, }, } for _, testCase := range testCaseNames { - cm, err := confmaptest.LoadConf(filepath.Join("testdata", testCase.testDir, "config.yaml")) - assert.NoError(t, err) - - next := new(consumertest.MetricsSink) - factory := NewFactory() - cfg := factory.CreateDefaultConfig() - - sub, err := 
cm.Sub(fmt.Sprintf("%s/%s", "experimental_metricsgeneration", testCase.name)) - require.NoError(t, err) - require.NoError(t, sub.Unmarshal(cfg)) - - mgp, err := factory.CreateMetrics( - context.Background(), - processortest.NewNopSettings(), - cfg, - next, - ) - assert.NotNil(t, mgp) - assert.NoError(t, err) - - assert.True(t, mgp.Capabilities().MutatesData) - require.NoError(t, mgp.Start(context.Background(), nil)) - - inputMetrics, err := golden.ReadMetrics(filepath.Join("testdata", testCase.testDir, "metrics_input.yaml")) - assert.NoError(t, err) - - err = mgp.ConsumeMetrics(context.Background(), inputMetrics) - assert.NoError(t, err) - - got := next.AllMetrics() - // golden.WriteMetrics(t, filepath.Join("testdata", testCase.testDir, fmt.Sprintf("%s_%s", testCase.name, "expected.yaml")), got[0]) - expected, err := golden.ReadMetrics(filepath.Join("testdata", testCase.testDir, fmt.Sprintf("%s_%s", testCase.name, "expected.yaml"))) - assert.NoError(t, err) - assert.Len(t, got, 1) - err = pmetrictest.CompareMetrics(expected, got[0], - pmetrictest.IgnoreMetricDataPointsOrder(), - pmetrictest.IgnoreStartTimestamp(), - pmetrictest.IgnoreTimestamp()) - assert.NoError(t, err) + t.Run(testCase.name, func(t *testing.T) { + require.NoError(t, featuregate.GlobalRegistry().Set(matchAttributes.ID(), testCase.matchAttributesFlagEnabled)) + + cm, err := confmaptest.LoadConf(filepath.Join("testdata", testCase.testDir, "config.yaml")) + assert.NoError(t, err) + + next := new(consumertest.MetricsSink) + factory := NewFactory() + cfg := factory.CreateDefaultConfig() + + sub, err := cm.Sub(fmt.Sprintf("%s/%s", "experimental_metricsgeneration", testCase.name)) + require.NoError(t, err) + require.NoError(t, sub.Unmarshal(cfg)) + + mgp, err := factory.CreateMetrics( + context.Background(), + processortest.NewNopSettings(), + cfg, + next, + ) + assert.NotNil(t, mgp) + assert.NoError(t, err) + + assert.True(t, mgp.Capabilities().MutatesData) + require.NoError(t, mgp.Start(context.Background(), nil)) + + inputMetrics, err := golden.ReadMetrics(filepath.Join("testdata", testCase.testDir, "metrics_input.yaml")) + assert.NoError(t, err) + + err = mgp.ConsumeMetrics(context.Background(), inputMetrics) + assert.NoError(t, err) + + got := next.AllMetrics() + expectedFilePath := filepath.Join("testdata", testCase.testDir, fmt.Sprintf("%s_%s", testCase.name, "expected.yaml")) + // golden.WriteMetrics(t, expectedFilePath, got[0]) + expected, err := golden.ReadMetrics(expectedFilePath) + assert.NoError(t, err) + assert.Len(t, got, 1) + err = pmetrictest.CompareMetrics(expected, got[0], + pmetrictest.IgnoreMetricDataPointsOrder(), + pmetrictest.IgnoreStartTimestamp(), + pmetrictest.IgnoreTimestamp()) + assert.NoError(t, err) + }) } } diff --git a/processor/metricsgenerationprocessor/testdata/input_metric_types/config.yaml b/processor/metricsgenerationprocessor/testdata/input_metric_types/config.yaml index fad7d6fda678..8bfd66adc7e2 100644 --- a/processor/metricsgenerationprocessor/testdata/input_metric_types/config.yaml +++ b/processor/metricsgenerationprocessor/testdata/input_metric_types/config.yaml @@ -6,3 +6,11 @@ experimental_metricsgeneration/sum_gauge_metric: metric1: system.filesystem.usage metric2: system.filesystem.utilization operation: divide +experimental_metricsgeneration/sum_gauge_metric_match_attrs: + rules: + - name: system.filesystem.capacity + unit: bytes + type: calculate + metric1: system.filesystem.usage + metric2: system.filesystem.utilization + operation: divide diff --git 
a/processor/metricsgenerationprocessor/testdata/input_metric_types/sum_gauge_metric_match_attrs_expected.yaml b/processor/metricsgenerationprocessor/testdata/input_metric_types/sum_gauge_metric_match_attrs_expected.yaml new file mode 100644 index 000000000000..fcab25f3aa2e --- /dev/null +++ b/processor/metricsgenerationprocessor/testdata/input_metric_types/sum_gauge_metric_match_attrs_expected.yaml @@ -0,0 +1,1148 @@ +resourceMetrics: + - resource: {} + schemaUrl: https://opentelemetry.io/schemas/1.9.0 + scopeMetrics: + - metrics: + - description: FileSystem inodes used. + name: system.filesystem.inodes.usage + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "2183953600" + attributes: + - key: device + value: + stringValue: /dev/disk1s1 + - key: mode + value: + stringValue: rw + - key: mountpoint + value: + stringValue: /System/Volumes/Data + - key: state + value: + stringValue: free + - key: type + value: + stringValue: apfs + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "4770142" + attributes: + - key: device + value: + stringValue: /dev/disk1s1 + - key: mode + value: + stringValue: rw + - key: mountpoint + value: + stringValue: /System/Volumes/Data + - key: state + value: + stringValue: used + - key: type + value: + stringValue: apfs + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "2183953600" + attributes: + - key: device + value: + stringValue: /dev/disk1s2 + - key: mode + value: + stringValue: rw + - key: mountpoint + value: + stringValue: /System/Volumes/Preboot + - key: state + value: + stringValue: free + - key: type + value: + stringValue: apfs + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "1813" + attributes: + - key: device + value: + stringValue: /dev/disk1s2 + - key: mode + value: + stringValue: rw + - key: mountpoint + value: + stringValue: /System/Volumes/Preboot + - key: state + value: + stringValue: used + - key: type + value: + stringValue: apfs + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "2183953600" + attributes: + - key: device + value: + stringValue: /dev/disk1s4s1 + - key: mode + value: + stringValue: ro + - key: mountpoint + value: + stringValue: / + - key: state + value: + stringValue: free + - key: type + value: + stringValue: apfs + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "404475" + attributes: + - key: device + value: + stringValue: /dev/disk1s4s1 + - key: mode + value: + stringValue: ro + - key: mountpoint + value: + stringValue: / + - key: state + value: + stringValue: used + - key: type + value: + stringValue: apfs + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "2183953600" + attributes: + - key: device + value: + stringValue: /dev/disk1s5 + - key: mode + value: + stringValue: rw + - key: mountpoint + value: + stringValue: /System/Volumes/Update + - key: state + value: + stringValue: free + - key: type + value: + stringValue: apfs + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "24" + attributes: + - key: device + value: + stringValue: /dev/disk1s5 + - key: mode + value: + stringValue: rw + - key: mountpoint + value: + stringValue: /System/Volumes/Update + - key: state + value: + stringValue: used + - key: type + value: + stringValue: apfs + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "2183953600" + attributes: + - key: device + value: + stringValue: /dev/disk1s6 + - key: mode + value: + stringValue: rw + - key: mountpoint + value: + stringValue: /System/Volumes/VM + - key: 
state + value: + stringValue: free + - key: type + value: + stringValue: apfs + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "4" + attributes: + - key: device + value: + stringValue: /dev/disk1s6 + - key: mode + value: + stringValue: rw + - key: mountpoint + value: + stringValue: /System/Volumes/VM + - key: state + value: + stringValue: used + - key: type + value: + stringValue: apfs + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "0" + attributes: + - key: device + value: + stringValue: devfs + - key: mode + value: + stringValue: rw + - key: mountpoint + value: + stringValue: /dev + - key: state + value: + stringValue: free + - key: type + value: + stringValue: devfs + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "666" + attributes: + - key: device + value: + stringValue: devfs + - key: mode + value: + stringValue: rw + - key: mountpoint + value: + stringValue: /dev + - key: state + value: + stringValue: used + - key: type + value: + stringValue: devfs + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "0" + attributes: + - key: device + value: + stringValue: map auto_home + - key: mode + value: + stringValue: rw + - key: mountpoint + value: + stringValue: /System/Volumes/Data/home + - key: state + value: + stringValue: free + - key: type + value: + stringValue: autofs + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "0" + attributes: + - key: device + value: + stringValue: map auto_home + - key: mode + value: + stringValue: rw + - key: mountpoint + value: + stringValue: /System/Volumes/Data/home + - key: state + value: + stringValue: used + - key: type + value: + stringValue: autofs + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: '{inodes}' + - description: Filesystem bytes used. 
+ name: system.filesystem.usage + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "223636848640" + attributes: + - key: device + value: + stringValue: /dev/disk1s1 + - key: mode + value: + stringValue: rw + - key: mountpoint + value: + stringValue: /System/Volumes/Data + - key: state + value: + stringValue: free + - key: type + value: + stringValue: apfs + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "0" + attributes: + - key: device + value: + stringValue: /dev/disk1s1 + - key: mode + value: + stringValue: rw + - key: mountpoint + value: + stringValue: /System/Volumes/Data + - key: state + value: + stringValue: reserved + - key: type + value: + stringValue: apfs + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "276326326272" + attributes: + - key: device + value: + stringValue: /dev/disk1s1 + - key: mode + value: + stringValue: rw + - key: mountpoint + value: + stringValue: /System/Volumes/Data + - key: state + value: + stringValue: used + - key: type + value: + stringValue: apfs + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "223636848640" + attributes: + - key: device + value: + stringValue: /dev/disk1s2 + - key: mode + value: + stringValue: rw + - key: mountpoint + value: + stringValue: /System/Volumes/Preboot + - key: state + value: + stringValue: free + - key: type + value: + stringValue: apfs + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "0" + attributes: + - key: device + value: + stringValue: /dev/disk1s2 + - key: mode + value: + stringValue: rw + - key: mountpoint + value: + stringValue: /System/Volumes/Preboot + - key: state + value: + stringValue: reserved + - key: type + value: + stringValue: apfs + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "276326326272" + attributes: + - key: device + value: + stringValue: /dev/disk1s2 + - key: mode + value: + stringValue: rw + - key: mountpoint + value: + stringValue: /System/Volumes/Preboot + - key: state + value: + stringValue: used + - key: type + value: + stringValue: apfs + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "223636848640" + attributes: + - key: device + value: + stringValue: /dev/disk1s4s1 + - key: mode + value: + stringValue: ro + - key: mountpoint + value: + stringValue: / + - key: state + value: + stringValue: free + - key: type + value: + stringValue: apfs + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "0" + attributes: + - key: device + value: + stringValue: /dev/disk1s4s1 + - key: mode + value: + stringValue: ro + - key: mountpoint + value: + stringValue: / + - key: state + value: + stringValue: reserved + - key: type + value: + stringValue: apfs + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "276326326272" + attributes: + - key: device + value: + stringValue: /dev/disk1s4s1 + - key: mode + value: + stringValue: ro + - key: mountpoint + value: + stringValue: / + - key: state + value: + stringValue: used + - key: type + value: + stringValue: apfs + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "223636848640" + attributes: + - key: device + value: + stringValue: /dev/disk1s5 + - key: mode + value: + stringValue: rw + - key: mountpoint + value: + stringValue: /System/Volumes/Update + - key: state + value: + stringValue: free + - key: type + value: + stringValue: apfs + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "0" + attributes: + - key: device + value: + stringValue: /dev/disk1s5 + - key: mode + value: 
+ stringValue: rw + - key: mountpoint + value: + stringValue: /System/Volumes/Update + - key: state + value: + stringValue: reserved + - key: type + value: + stringValue: apfs + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "276326326272" + attributes: + - key: device + value: + stringValue: /dev/disk1s5 + - key: mode + value: + stringValue: rw + - key: mountpoint + value: + stringValue: /System/Volumes/Update + - key: state + value: + stringValue: used + - key: type + value: + stringValue: apfs + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "223636848640" + attributes: + - key: device + value: + stringValue: /dev/disk1s6 + - key: mode + value: + stringValue: rw + - key: mountpoint + value: + stringValue: /System/Volumes/VM + - key: state + value: + stringValue: free + - key: type + value: + stringValue: apfs + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "0" + attributes: + - key: device + value: + stringValue: /dev/disk1s6 + - key: mode + value: + stringValue: rw + - key: mountpoint + value: + stringValue: /System/Volumes/VM + - key: state + value: + stringValue: reserved + - key: type + value: + stringValue: apfs + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "276326326272" + attributes: + - key: device + value: + stringValue: /dev/disk1s6 + - key: mode + value: + stringValue: rw + - key: mountpoint + value: + stringValue: /System/Volumes/VM + - key: state + value: + stringValue: used + - key: type + value: + stringValue: apfs + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "0" + attributes: + - key: device + value: + stringValue: devfs + - key: mode + value: + stringValue: rw + - key: mountpoint + value: + stringValue: /dev + - key: state + value: + stringValue: free + - key: type + value: + stringValue: devfs + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "0" + attributes: + - key: device + value: + stringValue: devfs + - key: mode + value: + stringValue: rw + - key: mountpoint + value: + stringValue: /dev + - key: state + value: + stringValue: reserved + - key: type + value: + stringValue: devfs + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "197120" + attributes: + - key: device + value: + stringValue: devfs + - key: mode + value: + stringValue: rw + - key: mountpoint + value: + stringValue: /dev + - key: state + value: + stringValue: used + - key: type + value: + stringValue: devfs + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "0" + attributes: + - key: device + value: + stringValue: map auto_home + - key: mode + value: + stringValue: rw + - key: mountpoint + value: + stringValue: /System/Volumes/Data/home + - key: state + value: + stringValue: free + - key: type + value: + stringValue: autofs + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "0" + attributes: + - key: device + value: + stringValue: map auto_home + - key: mode + value: + stringValue: rw + - key: mountpoint + value: + stringValue: /System/Volumes/Data/home + - key: state + value: + stringValue: reserved + - key: type + value: + stringValue: autofs + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "0" + attributes: + - key: device + value: + stringValue: map auto_home + - key: mode + value: + stringValue: rw + - key: mountpoint + value: + stringValue: /System/Volumes/Data/home + - key: state + value: + stringValue: used + - key: type + value: + stringValue: autofs + startTimeUnixNano: "1000000" + timeUnixNano: 
"2000000" + unit: By + - description: Fraction of filesystem bytes used. + gauge: + dataPoints: + - asDouble: 0.5526933585071281 + attributes: + - key: device + value: + stringValue: /dev/disk1s1 + - key: mode + value: + stringValue: rw + - key: mountpoint + value: + stringValue: /System/Volumes/Data + - key: type + value: + stringValue: apfs + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asDouble: 0.5526933585071281 + attributes: + - key: device + value: + stringValue: /dev/disk1s2 + - key: mode + value: + stringValue: rw + - key: mountpoint + value: + stringValue: /System/Volumes/Preboot + - key: type + value: + stringValue: apfs + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asDouble: 0.5526933585071281 + attributes: + - key: device + value: + stringValue: /dev/disk1s4s1 + - key: mode + value: + stringValue: ro + - key: mountpoint + value: + stringValue: / + - key: type + value: + stringValue: apfs + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asDouble: 0.5526933585071281 + attributes: + - key: device + value: + stringValue: /dev/disk1s5 + - key: mode + value: + stringValue: rw + - key: mountpoint + value: + stringValue: /System/Volumes/Update + - key: type + value: + stringValue: apfs + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asDouble: 0.5526933585071281 + attributes: + - key: device + value: + stringValue: /dev/disk1s6 + - key: mode + value: + stringValue: rw + - key: mountpoint + value: + stringValue: /System/Volumes/VM + - key: type + value: + stringValue: apfs + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asDouble: 1 + attributes: + - key: device + value: + stringValue: devfs + - key: mode + value: + stringValue: rw + - key: mountpoint + value: + stringValue: /dev + - key: type + value: + stringValue: devfs + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asDouble: 0 + attributes: + - key: device + value: + stringValue: map auto_home + - key: mode + value: + stringValue: rw + - key: mountpoint + value: + stringValue: /System/Volumes/Data/home + - key: type + value: + stringValue: autofs + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: system.filesystem.utilization + unit: "1" + - name: system.filesystem.capacity + sum: + dataPoints: + - asDouble: 4.046309679639759e+11 + attributes: + - key: device + value: + stringValue: /dev/disk1s1 + - key: mode + value: + stringValue: rw + - key: mountpoint + value: + stringValue: /System/Volumes/Data + - key: state + value: + stringValue: free + - key: type + value: + stringValue: apfs + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asDouble: 0 + attributes: + - key: device + value: + stringValue: /dev/disk1s1 + - key: mode + value: + stringValue: rw + - key: mountpoint + value: + stringValue: /System/Volumes/Data + - key: state + value: + stringValue: reserved + - key: type + value: + stringValue: apfs + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asDouble: 4.99963174912e+11 + attributes: + - key: device + value: + stringValue: /dev/disk1s1 + - key: mode + value: + stringValue: rw + - key: mountpoint + value: + stringValue: /System/Volumes/Data + - key: state + value: + stringValue: used + - key: type + value: + stringValue: apfs + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asDouble: 4.046309679639759e+11 + attributes: + - key: device + value: + stringValue: /dev/disk1s2 + - key: mode + value: + stringValue: rw + - key: mountpoint + value: + stringValue: /System/Volumes/Preboot + - key: state + 
value: + stringValue: free + - key: type + value: + stringValue: apfs + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asDouble: 0 + attributes: + - key: device + value: + stringValue: /dev/disk1s2 + - key: mode + value: + stringValue: rw + - key: mountpoint + value: + stringValue: /System/Volumes/Preboot + - key: state + value: + stringValue: reserved + - key: type + value: + stringValue: apfs + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asDouble: 4.99963174912e+11 + attributes: + - key: device + value: + stringValue: /dev/disk1s2 + - key: mode + value: + stringValue: rw + - key: mountpoint + value: + stringValue: /System/Volumes/Preboot + - key: state + value: + stringValue: used + - key: type + value: + stringValue: apfs + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asDouble: 4.046309679639759e+11 + attributes: + - key: device + value: + stringValue: /dev/disk1s4s1 + - key: mode + value: + stringValue: ro + - key: mountpoint + value: + stringValue: / + - key: state + value: + stringValue: free + - key: type + value: + stringValue: apfs + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asDouble: 0 + attributes: + - key: device + value: + stringValue: /dev/disk1s4s1 + - key: mode + value: + stringValue: ro + - key: mountpoint + value: + stringValue: / + - key: state + value: + stringValue: reserved + - key: type + value: + stringValue: apfs + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asDouble: 4.99963174912e+11 + attributes: + - key: device + value: + stringValue: /dev/disk1s4s1 + - key: mode + value: + stringValue: ro + - key: mountpoint + value: + stringValue: / + - key: state + value: + stringValue: used + - key: type + value: + stringValue: apfs + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asDouble: 4.046309679639759e+11 + attributes: + - key: device + value: + stringValue: /dev/disk1s5 + - key: mode + value: + stringValue: rw + - key: mountpoint + value: + stringValue: /System/Volumes/Update + - key: state + value: + stringValue: free + - key: type + value: + stringValue: apfs + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asDouble: 0 + attributes: + - key: device + value: + stringValue: /dev/disk1s5 + - key: mode + value: + stringValue: rw + - key: mountpoint + value: + stringValue: /System/Volumes/Update + - key: state + value: + stringValue: reserved + - key: type + value: + stringValue: apfs + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asDouble: 4.99963174912e+11 + attributes: + - key: device + value: + stringValue: /dev/disk1s5 + - key: mode + value: + stringValue: rw + - key: mountpoint + value: + stringValue: /System/Volumes/Update + - key: state + value: + stringValue: used + - key: type + value: + stringValue: apfs + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asDouble: 4.046309679639759e+11 + attributes: + - key: device + value: + stringValue: /dev/disk1s6 + - key: mode + value: + stringValue: rw + - key: mountpoint + value: + stringValue: /System/Volumes/VM + - key: state + value: + stringValue: free + - key: type + value: + stringValue: apfs + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asDouble: 0 + attributes: + - key: device + value: + stringValue: /dev/disk1s6 + - key: mode + value: + stringValue: rw + - key: mountpoint + value: + stringValue: /System/Volumes/VM + - key: state + value: + stringValue: reserved + - key: type + value: + stringValue: apfs + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asDouble: 
4.99963174912e+11 + attributes: + - key: device + value: + stringValue: /dev/disk1s6 + - key: mode + value: + stringValue: rw + - key: mountpoint + value: + stringValue: /System/Volumes/VM + - key: state + value: + stringValue: used + - key: type + value: + stringValue: apfs + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asDouble: 0 + attributes: + - key: device + value: + stringValue: devfs + - key: mode + value: + stringValue: rw + - key: mountpoint + value: + stringValue: /dev + - key: state + value: + stringValue: free + - key: type + value: + stringValue: devfs + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asDouble: 0 + attributes: + - key: device + value: + stringValue: devfs + - key: mode + value: + stringValue: rw + - key: mountpoint + value: + stringValue: /dev + - key: state + value: + stringValue: reserved + - key: type + value: + stringValue: devfs + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asDouble: 197120 + attributes: + - key: device + value: + stringValue: devfs + - key: mode + value: + stringValue: rw + - key: mountpoint + value: + stringValue: /dev + - key: state + value: + stringValue: used + - key: type + value: + stringValue: devfs + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: bytes + scope: + name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper + version: 0.110.0-dev diff --git a/processor/metricsgenerationprocessor/testdata/match_attributes/config.yaml b/processor/metricsgenerationprocessor/testdata/match_attributes/config.yaml new file mode 100644 index 000000000000..d9a0a13c3696 --- /dev/null +++ b/processor/metricsgenerationprocessor/testdata/match_attributes/config.yaml @@ -0,0 +1,14 @@ +experimental_metricsgeneration/match_attributes_disabled: + rules: + - name: new_metric + metric1: capacity.total + metric2: capacity.used + operation: add + type: calculate +experimental_metricsgeneration/match_attributes_enabled: + rules: + - name: new_metric + metric1: capacity.total + metric2: capacity.used + operation: add + type: calculate diff --git a/processor/metricsgenerationprocessor/testdata/match_attributes/match_attributes_disabled_expected.yaml b/processor/metricsgenerationprocessor/testdata/match_attributes/match_attributes_disabled_expected.yaml new file mode 100644 index 000000000000..1e76f78ed0d7 --- /dev/null +++ b/processor/metricsgenerationprocessor/testdata/match_attributes/match_attributes_disabled_expected.yaml @@ -0,0 +1,121 @@ +resourceMetrics: + - resource: {} + schemaUrl: https://opentelemetry.io/schemas/1.9.0 + scopeMetrics: + - metrics: + - description: total capacity + name: capacity.total + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "1000" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "2000" + attributes: + - key: device + value: + stringValue: /dev/disk1 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "3000" + attributes: + - key: device + value: + stringValue: /dev/disk2 + - key: encrypted + value: + boolValue: true + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: used capacity + gauge: + dataPoints: + - asInt: "10" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "20" + attributes: + - key: device + value: + stringValue: /dev/disk1 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "30" + attributes: + - key: device + value: + stringValue: /dev/disk4 + startTimeUnixNano: 
"1000000" + timeUnixNano: "2000000" + - asInt: "50" + attributes: + - key: device + value: + stringValue: /dev/disk2 + - key: encrypted + value: + boolValue: false + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "40" + attributes: + - key: device + value: + stringValue: /dev/disk2 + - key: encrypted + value: + boolValue: true + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "60" + attributes: + - key: bool_test + value: + boolValue: true + - key: device + value: + stringValue: /dev/disk1 + - key: double_test + value: + doubleValue: 200 + - key: encrypted + value: + boolValue: true + - key: int_test + value: + intValue: "100" + - key: owner + value: + stringValue: foo + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: capacity.used + unit: By + - name: new_metric + sum: + dataPoints: + - asDouble: 1010 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asDouble: 2010 + attributes: + - key: device + value: + stringValue: /dev/disk1 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asDouble: 3010 + attributes: + - key: device + value: + stringValue: /dev/disk2 + - key: encrypted + value: + boolValue: true + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + scope: + name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper + version: latest diff --git a/processor/metricsgenerationprocessor/testdata/match_attributes/match_attributes_enabled_expected.yaml b/processor/metricsgenerationprocessor/testdata/match_attributes/match_attributes_enabled_expected.yaml new file mode 100644 index 000000000000..8fc488baa758 --- /dev/null +++ b/processor/metricsgenerationprocessor/testdata/match_attributes/match_attributes_enabled_expected.yaml @@ -0,0 +1,216 @@ +resourceMetrics: + - resource: {} + schemaUrl: https://opentelemetry.io/schemas/1.9.0 + scopeMetrics: + - metrics: + - description: total capacity + name: capacity.total + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "1000" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "2000" + attributes: + - key: device + value: + stringValue: /dev/disk1 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "3000" + attributes: + - key: device + value: + stringValue: /dev/disk2 + - key: encrypted + value: + boolValue: true + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: used capacity + gauge: + dataPoints: + - asInt: "10" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "20" + attributes: + - key: device + value: + stringValue: /dev/disk1 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "30" + attributes: + - key: device + value: + stringValue: /dev/disk4 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "50" + attributes: + - key: device + value: + stringValue: /dev/disk2 + - key: encrypted + value: + boolValue: false + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "40" + attributes: + - key: device + value: + stringValue: /dev/disk2 + - key: encrypted + value: + boolValue: true + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "60" + attributes: + - key: bool_test + value: + boolValue: true + - key: device + value: + stringValue: /dev/disk1 + - key: double_test + value: + doubleValue: 200 + - key: encrypted + value: + boolValue: true + - key: int_test + value: + intValue: "100" + - key: owner + value: + stringValue: foo + 
startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: capacity.used + unit: By + - name: new_metric + sum: + dataPoints: + - asDouble: 1010 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asDouble: 1020 + attributes: + - key: device + value: + stringValue: /dev/disk1 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asDouble: 2010 + attributes: + - key: device + value: + stringValue: /dev/disk1 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asDouble: 2020 + attributes: + - key: device + value: + stringValue: /dev/disk1 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asDouble: 1030 + attributes: + - key: device + value: + stringValue: /dev/disk4 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asDouble: 1050 + attributes: + - key: device + value: + stringValue: /dev/disk2 + - key: encrypted + value: + boolValue: false + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asDouble: 1040 + attributes: + - key: device + value: + stringValue: /dev/disk2 + - key: encrypted + value: + boolValue: true + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asDouble: 3010 + attributes: + - key: device + value: + stringValue: /dev/disk2 + - key: encrypted + value: + boolValue: true + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asDouble: 3040 + attributes: + - key: device + value: + stringValue: /dev/disk2 + - key: encrypted + value: + boolValue: true + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asDouble: 1060 + attributes: + - key: bool_test + value: + boolValue: true + - key: device + value: + stringValue: /dev/disk1 + - key: double_test + value: + doubleValue: 200 + - key: encrypted + value: + boolValue: true + - key: int_test + value: + intValue: "100" + - key: owner + value: + stringValue: foo + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asDouble: 2060 + attributes: + - key: bool_test + value: + boolValue: true + - key: device + value: + stringValue: /dev/disk1 + - key: double_test + value: + doubleValue: 200 + - key: encrypted + value: + boolValue: true + - key: int_test + value: + intValue: "100" + - key: owner + value: + stringValue: foo + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + scope: + name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper + version: latest diff --git a/processor/metricsgenerationprocessor/testdata/match_attributes/metrics_input.yaml b/processor/metricsgenerationprocessor/testdata/match_attributes/metrics_input.yaml new file mode 100644 index 000000000000..33fb6465dbcf --- /dev/null +++ b/processor/metricsgenerationprocessor/testdata/match_attributes/metrics_input.yaml @@ -0,0 +1,100 @@ +resourceMetrics: + - resource: {} + schemaUrl: https://opentelemetry.io/schemas/1.9.0 + scopeMetrics: + - metrics: + - description: total capacity + name: capacity.total + sum: + aggregationTemporality: 2 + dataPoints: + # No attributes to ensure it matches with everything + - asInt: "1000" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "2000" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + attributes: + - key: device + value: + stringValue: "/dev/disk1" + - asInt: "3000" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + attributes: + - key: device + value: + stringValue: "/dev/disk2" + - key: encrypted + value: + boolValue: true + unit: By + - description: used capacity + name: capacity.used + gauge: + aggregationTemporality: 2 + 
dataPoints: + - asInt: "10" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "20" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + attributes: + - key: device + value: + stringValue: "/dev/disk1" + - asInt: "30" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + attributes: + - key: device + value: + stringValue: "/dev/disk4" + - asInt: "40" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + attributes: + - key: device + value: + stringValue: "/dev/disk2" + - key: encrypted + value: + boolValue: true + - asInt: "50" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + attributes: + - key: device + value: + stringValue: "/dev/disk2" + - key: encrypted + value: + boolValue: false + - asInt: "60" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + attributes: + - key: device + value: + stringValue: "/dev/disk1" + - key: encrypted + value: + boolValue: true + - key: owner + value: + stringValue: "foo" + - key: bool_test + value: + boolValue: true + - key: int_test + value: + intValue: 100 + - key: double_test + value: + doubleValue: 200 + unit: By + scope: + name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper + version: latest diff --git a/processor/metricsgenerationprocessor/testdata/metric2_zero_value/config.yaml b/processor/metricsgenerationprocessor/testdata/metric2_zero_value/config.yaml index af80af4f110a..3148c6c1b79b 100644 --- a/processor/metricsgenerationprocessor/testdata/metric2_zero_value/config.yaml +++ b/processor/metricsgenerationprocessor/testdata/metric2_zero_value/config.yaml @@ -4,27 +4,32 @@ experimental_metricsgeneration/metric2_zero_add: metric1: capacity.total metric2: capacity.used operation: add + type: calculate experimental_metricsgeneration/metric2_zero_subtract: rules: - name: new_metric metric1: capacity.total metric2: capacity.used operation: subtract + type: calculate experimental_metricsgeneration/metric2_zero_multiply: rules: - name: new_metric metric1: capacity.total metric2: capacity.used operation: multiply + type: calculate experimental_metricsgeneration/metric2_zero_divide: rules: - name: new_metric metric1: capacity.total metric2: capacity.used operation: divide + type: calculate experimental_metricsgeneration/metric2_zero_percent: rules: - name: new_metric metric1: capacity.total metric2: capacity.used operation: percent + type: calculate diff --git a/processor/metricsgenerationprocessor/utils.go b/processor/metricsgenerationprocessor/utils.go index a47f98f223c8..72fab9ca7f6e 100644 --- a/processor/metricsgenerationprocessor/utils.go +++ b/processor/metricsgenerationprocessor/utils.go @@ -6,6 +6,7 @@ package metricsgenerationprocessor // import "github.com/open-telemetry/opentele import ( "fmt" + "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/pmetric" "go.uber.org/zap" ) @@ -50,36 +51,130 @@ func getMetricValue(metric pmetric.Metric) float64 { return 0 } -// generateMetrics creates a new metric based on the given rule and add it to the Resource Metric. +// generateCalculatedMetrics creates a new metric based on the given rule and adds it to the scope metric. // The value for newly calculated metrics is always a floating point number. -func generateMetrics(rm pmetric.ResourceMetrics, operand2 float64, rule internalRule, logger *zap.Logger) { +// Note: This method assumes the matchAttributes feature flag is enabled. 
+func generateCalculatedMetrics(rm pmetric.ResourceMetrics, metric2 pmetric.Metric, rule internalRule, logger *zap.Logger) { ilms := rm.ScopeMetrics() for i := 0; i < ilms.Len(); i++ { ilm := ilms.At(i) metricSlice := ilm.Metrics() for j := 0; j < metricSlice.Len(); j++ { metric := metricSlice.At(j) + if metric.Name() == rule.metric1 { - newMetric := generateMetric(metric, operand2, rule.operation, logger) - - dataPointCount := 0 - switch newMetric.Type() { - case pmetric.MetricTypeSum: - dataPointCount = newMetric.Sum().DataPoints().Len() - case pmetric.MetricTypeGauge: - dataPointCount = newMetric.Gauge().DataPoints().Len() - } + newMetric := generateMetricFromMatchingAttributes(metric, metric2, rule, logger) + appendNewMetric(ilm, newMetric, rule.name, rule.unit) + } + } + } +} + +// Calculates a new metric based on the calculation-type rule specified. New data points will be generated for each +// calculation of the input metrics where overlapping attributes have matching values. +func generateMetricFromMatchingAttributes(metric1 pmetric.Metric, metric2 pmetric.Metric, rule internalRule, logger *zap.Logger) pmetric.Metric { + var metric1DataPoints pmetric.NumberDataPointSlice + var toDataPoints pmetric.NumberDataPointSlice + to := pmetric.NewMetric() - // Only create a new metric if valid data points were calculated successfully - if dataPointCount > 0 { - appendMetric(ilm, newMetric, rule.name, rule.unit) + // Setup to metric and get metric1 data points + switch metricType := metric1.Type(); metricType { + case pmetric.MetricTypeGauge: + to.SetEmptyGauge() + metric1DataPoints = metric1.Gauge().DataPoints() + toDataPoints = to.Gauge().DataPoints() + case pmetric.MetricTypeSum: + to.SetEmptySum() + metric1DataPoints = metric1.Sum().DataPoints() + toDataPoints = to.Sum().DataPoints() + default: + logger.Debug(fmt.Sprintf("Calculations are only supported on gauge or sum metric types. Given metric '%s' is of type `%s`", metric1.Name(), metricType.String())) + return pmetric.NewMetric() + } + + // Get metric2 data points + var metric2DataPoints pmetric.NumberDataPointSlice + switch metricType := metric2.Type(); metricType { + case pmetric.MetricTypeGauge: + metric2DataPoints = metric2.Gauge().DataPoints() + case pmetric.MetricTypeSum: + metric2DataPoints = metric2.Sum().DataPoints() + default: + logger.Debug(fmt.Sprintf("Calculations are only supported on gauge or sum metric types. 
Given metric '%s' is of type `%s`", metric2.Name(), metricType.String())) + return pmetric.NewMetric() + } + + for i := 0; i < metric1DataPoints.Len(); i++ { + metric1DP := metric1DataPoints.At(i) + + for j := 0; j < metric2DataPoints.Len(); j++ { + metric2DP := metric2DataPoints.At(j) + if dataPointAttributesMatch(metric1DP, metric2DP) { + val, err := calculateValue(dataPointValue(metric1DP), dataPointValue(metric2DP), rule.operation, rule.name) + + if err != nil { + logger.Debug(err.Error()) + } else { + newDP := toDataPoints.AppendEmpty() + metric1DP.CopyTo(newDP) + newDP.SetDoubleValue(val) + + metric2DP.Attributes().Range(func(k string, v pcommon.Value) bool { + v.CopyTo(newDP.Attributes().PutEmpty(k)) + // Always return true to ensure iteration over all attributes + return true + }) } } } } + + return to } -func generateMetric(from pmetric.Metric, operand2 float64, operation string, logger *zap.Logger) pmetric.Metric { +func dataPointValue(dp pmetric.NumberDataPoint) float64 { + switch dp.ValueType() { + case pmetric.NumberDataPointValueTypeDouble: + return dp.DoubleValue() + case pmetric.NumberDataPointValueTypeInt: + return float64(dp.IntValue()) + default: + return 0 + } +} + +func dataPointAttributesMatch(dp1, dp2 pmetric.NumberDataPoint) bool { + attributesMatch := true + dp1.Attributes().Range(func(key string, dp1Val pcommon.Value) bool { + dp1Val.Type() + if dp2Val, keyExists := dp2.Attributes().Get(key); keyExists && dp1Val.AsRaw() != dp2Val.AsRaw() { + attributesMatch = false + return false + } + return true + }) + + return attributesMatch +} + +// generateScalarMetrics creates a new metric based on a scalar type rule and adds it to the scope metric. +// The value for newly calculated metrics is always a floating point number. +func generateScalarMetrics(rm pmetric.ResourceMetrics, operand2 float64, rule internalRule, logger *zap.Logger) { + ilms := rm.ScopeMetrics() + for i := 0; i < ilms.Len(); i++ { + ilm := ilms.At(i) + metricSlice := ilm.Metrics() + for j := 0; j < metricSlice.Len(); j++ { + metric := metricSlice.At(j) + if metric.Name() == rule.metric1 { + newMetric := generateMetricFromOperand(metric, operand2, rule.operation, logger) + appendNewMetric(ilm, newMetric, rule.name, rule.unit) + } + } + } +} + +func generateMetricFromOperand(from pmetric.Metric, operand2 float64, operation string, logger *zap.Logger) pmetric.Metric { var dataPoints pmetric.NumberDataPointSlice to := pmetric.NewMetric() @@ -126,15 +221,25 @@ func generateMetric(from pmetric.Metric, operand2 float64, operation string, log return to } -// Append the scope metrics with the new metric -func appendMetric(ilm pmetric.ScopeMetrics, newMetric pmetric.Metric, name, unit string) pmetric.Metric { - metric := ilm.Metrics().AppendEmpty() - newMetric.MoveTo(metric) +// Append the new metric to the scope metrics. This will only append the new metric if it +// has data points. 
+func appendNewMetric(ilm pmetric.ScopeMetrics, newMetric pmetric.Metric, name, unit string) { + dataPointCount := 0 + switch newMetric.Type() { + case pmetric.MetricTypeSum: + dataPointCount = newMetric.Sum().DataPoints().Len() + case pmetric.MetricTypeGauge: + dataPointCount = newMetric.Gauge().DataPoints().Len() + } - metric.SetUnit(unit) - metric.SetName(name) + // Only create a new metric if valid data points were calculated successfully + if dataPointCount > 0 { + metric := ilm.Metrics().AppendEmpty() + newMetric.MoveTo(metric) - return metric + metric.SetUnit(unit) + metric.SetName(name) + } } func calculateValue(operand1 float64, operand2 float64, operation string, metricName string) (float64, error) {
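The attribute-matching behavior this change introduces (see `dataPointAttributesMatch` in `utils.go` above) pairs a data point of `metric1` with a data point of `metric2` only when every attribute key the two points share carries the same value; keys present on just one of the points are ignored, so a data point with no attributes pairs with every data point of the other metric. Below is a minimal, self-contained sketch of that rule using plain Go string maps in place of `pcommon.Map`; the `overlappingAttributesMatch` helper and its inputs are illustrative only and not part of the processor's code.

```go
package main

import "fmt"

// attrs stands in for a data point's attribute map (pcommon.Map in the
// real processor); this type and the helper below are illustrative only.
type attrs map[string]string

// overlappingAttributesMatch mirrors the rule behind the
// metricsgeneration.MatchAttributes feature gate: two data points are
// paired when every attribute key they both carry has the same value.
// Keys present on only one point are ignored, so a point with no
// attributes pairs with every data point of the other metric.
func overlappingAttributesMatch(a, b attrs) bool {
	for key, aVal := range a {
		if bVal, ok := b[key]; ok && aVal != bVal {
			return false
		}
	}
	return true
}

func main() {
	totalDisk1 := attrs{"device": "/dev/disk1"}                 // metric1 data point
	usedDisk1 := attrs{"device": "/dev/disk1", "state": "used"} // metric2 data point
	usedDisk2 := attrs{"device": "/dev/disk2", "state": "used"} // metric2 data point
	noAttrs := attrs{}                                          // metric1 data point

	fmt.Println(overlappingAttributesMatch(totalDisk1, usedDisk1)) // true: shared key "device" agrees
	fmt.Println(overlappingAttributesMatch(totalDisk1, usedDisk2)) // false: "device" differs, pair skipped
	fmt.Println(overlappingAttributesMatch(noAttrs, usedDisk2))    // true: no shared keys, matches everything
}
```

This is also why, in `match_attributes_enabled_expected.yaml` above, the attribute-less `capacity.total` data point produces a calculated data point against every `capacity.used` data point, while the `/dev/disk1` data point only pairs with the `/dev/disk1` points. The gate itself is enabled at collector start-up with the standard `--feature-gates=metricsgeneration.MatchAttributes` flag, as described in the feature gate documentation linked from the README.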