From dca11d494a895b5d603eafe2cf0130b1af72a64e Mon Sep 17 00:00:00 2001 From: Daniel Jaglowski Date: Fri, 25 Oct 2024 14:18:10 -0400 Subject: [PATCH 1/4] [connector/routing] Add ability to route log records based on OTTL log context (#35939) This PR resolves https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/35948 by adding a `context` field to routing table items. The default value of `context` is `resource`, so that existing users will not see a difference. `log` context may also be used. Each routing table item may have a different context. `match_once` must be `true` in order to use `log` context. --- .chloggen/split-log-records.yaml | 27 ++++ connector/routingconnector/README.md | 100 ++++++++++++- connector/routingconnector/config.go | 14 ++ connector/routingconnector/config_test.go | 31 ++++ connector/routingconnector/logs.go | 40 ++--- connector/routingconnector/logs_test.go | 10 ++ connector/routingconnector/metrics.go | 2 +- connector/routingconnector/router.go | 96 ++++++++---- .../all_match_first_only/config.yaml | 13 ++ .../all_match_first_only/input.yaml | 141 ++++++++++++++++++ .../all_match_first_only/sink_0.yaml | 141 ++++++++++++++++++ .../all_match_last_only/config.yaml | 13 ++ .../all_match_last_only/input.yaml | 141 ++++++++++++++++++ .../all_match_last_only/sink_1.yaml | 141 ++++++++++++++++++ .../match_none_with_default/config.yaml | 13 ++ .../match_none_with_default/input.yaml | 141 ++++++++++++++++++ .../match_none_with_default/sink_default.yaml | 141 ++++++++++++++++++ .../match_none_without_default/config.yaml | 12 ++ .../match_none_without_default/input.yaml | 141 ++++++++++++++++++ .../some_match_each_route/config.yaml | 13 ++ .../some_match_each_route/input.yaml | 141 ++++++++++++++++++ .../some_match_each_route/sink_0.yaml | 53 +++++++ .../some_match_each_route/sink_1.yaml | 53 +++++++ .../some_match_each_route/sink_default.yaml | 105 +++++++++++++ .../config.yaml | 13 ++ .../input.yaml | 141 ++++++++++++++++++ .../sink_0.yaml | 41 +++++ .../sink_default.yaml | 111 ++++++++++++++ .../with_resource_condition/config.yaml | 13 ++ .../with_resource_condition/input.yaml | 141 ++++++++++++++++++ .../with_resource_condition/sink_0.yaml | 71 +++++++++ .../with_resource_condition/sink_default.yaml | 71 +++++++++ .../with_scope_condition/config.yaml | 13 ++ .../with_scope_condition/input.yaml | 141 ++++++++++++++++++ .../with_scope_condition/sink_0.yaml | 81 ++++++++++ .../with_scope_condition/sink_default.yaml | 81 ++++++++++ .../match_logs_then_resource/config.yaml | 13 ++ .../match_logs_then_resource/input.yaml | 141 ++++++++++++++++++ .../match_logs_then_resource/sink_0.yaml | 105 +++++++++++++ .../match_logs_then_resource/sink_1.yaml | 53 +++++++ .../sink_default.yaml | 53 +++++++ .../match_resource_then_logs/config.yaml | 13 ++ .../match_resource_then_logs/input.yaml | 141 ++++++++++++++++++ .../match_resource_then_logs/sink_0.yaml | 71 +++++++++ .../match_resource_then_logs/sink_1.yaml | 53 +++++++ .../sink_default.yaml | 53 +++++++ connector/routingconnector/traces.go | 2 +- 47 files changed, 3289 insertions(+), 50 deletions(-) create mode 100644 .chloggen/split-log-records.yaml create mode 100644 connector/routingconnector/testdata/logs/log_context/all_match_first_only/config.yaml create mode 100644 connector/routingconnector/testdata/logs/log_context/all_match_first_only/input.yaml create mode 100644 connector/routingconnector/testdata/logs/log_context/all_match_first_only/sink_0.yaml create mode 100644 
connector/routingconnector/testdata/logs/log_context/all_match_last_only/config.yaml create mode 100644 connector/routingconnector/testdata/logs/log_context/all_match_last_only/input.yaml create mode 100644 connector/routingconnector/testdata/logs/log_context/all_match_last_only/sink_1.yaml create mode 100644 connector/routingconnector/testdata/logs/log_context/match_none_with_default/config.yaml create mode 100644 connector/routingconnector/testdata/logs/log_context/match_none_with_default/input.yaml create mode 100644 connector/routingconnector/testdata/logs/log_context/match_none_with_default/sink_default.yaml create mode 100644 connector/routingconnector/testdata/logs/log_context/match_none_without_default/config.yaml create mode 100644 connector/routingconnector/testdata/logs/log_context/match_none_without_default/input.yaml create mode 100644 connector/routingconnector/testdata/logs/log_context/some_match_each_route/config.yaml create mode 100644 connector/routingconnector/testdata/logs/log_context/some_match_each_route/input.yaml create mode 100644 connector/routingconnector/testdata/logs/log_context/some_match_each_route/sink_0.yaml create mode 100644 connector/routingconnector/testdata/logs/log_context/some_match_each_route/sink_1.yaml create mode 100644 connector/routingconnector/testdata/logs/log_context/some_match_each_route/sink_default.yaml create mode 100644 connector/routingconnector/testdata/logs/log_context/with_resource_and_scope_conditions/config.yaml create mode 100644 connector/routingconnector/testdata/logs/log_context/with_resource_and_scope_conditions/input.yaml create mode 100644 connector/routingconnector/testdata/logs/log_context/with_resource_and_scope_conditions/sink_0.yaml create mode 100644 connector/routingconnector/testdata/logs/log_context/with_resource_and_scope_conditions/sink_default.yaml create mode 100644 connector/routingconnector/testdata/logs/log_context/with_resource_condition/config.yaml create mode 100644 connector/routingconnector/testdata/logs/log_context/with_resource_condition/input.yaml create mode 100644 connector/routingconnector/testdata/logs/log_context/with_resource_condition/sink_0.yaml create mode 100644 connector/routingconnector/testdata/logs/log_context/with_resource_condition/sink_default.yaml create mode 100644 connector/routingconnector/testdata/logs/log_context/with_scope_condition/config.yaml create mode 100644 connector/routingconnector/testdata/logs/log_context/with_scope_condition/input.yaml create mode 100644 connector/routingconnector/testdata/logs/log_context/with_scope_condition/sink_0.yaml create mode 100644 connector/routingconnector/testdata/logs/log_context/with_scope_condition/sink_default.yaml create mode 100644 connector/routingconnector/testdata/logs/mixed_context/match_logs_then_resource/config.yaml create mode 100644 connector/routingconnector/testdata/logs/mixed_context/match_logs_then_resource/input.yaml create mode 100644 connector/routingconnector/testdata/logs/mixed_context/match_logs_then_resource/sink_0.yaml create mode 100644 connector/routingconnector/testdata/logs/mixed_context/match_logs_then_resource/sink_1.yaml create mode 100644 connector/routingconnector/testdata/logs/mixed_context/match_logs_then_resource/sink_default.yaml create mode 100644 connector/routingconnector/testdata/logs/mixed_context/match_resource_then_logs/config.yaml create mode 100644 connector/routingconnector/testdata/logs/mixed_context/match_resource_then_logs/input.yaml create mode 100644 
connector/routingconnector/testdata/logs/mixed_context/match_resource_then_logs/sink_0.yaml create mode 100644 connector/routingconnector/testdata/logs/mixed_context/match_resource_then_logs/sink_1.yaml create mode 100644 connector/routingconnector/testdata/logs/mixed_context/match_resource_then_logs/sink_default.yaml diff --git a/.chloggen/split-log-records.yaml b/.chloggen/split-log-records.yaml new file mode 100644 index 000000000000..db61dd67953a --- /dev/null +++ b/.chloggen/split-log-records.yaml @@ -0,0 +1,27 @@ +# Use this changelog template to create an entry for release notes. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: enhancement + +# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver) +component: connector/routing + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: Add ability to route log records individually using OTTL log record context. + +# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. +issues: [19738] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: + +# If your change doesn't affect end users or the exported elements of any package, +# you should instead start your pull request title with [chore] or use the "Skip Changelog" label. +# Optional: The change log or logs in which this entry should be included. +# e.g. '[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. +# Default: '[user]' +change_logs: [] diff --git a/connector/routingconnector/README.md b/connector/routingconnector/README.md index 8366bcfbb53f..72ebf8a4afe0 100644 --- a/connector/routingconnector/README.md +++ b/connector/routingconnector/README.md @@ -33,14 +33,17 @@ If you are not already familiar with connectors, you may find it helpful to firs The following settings are available: - `table (required)`: the routing table for this connector. +- `table.context (optional, default: resource)`: the [OTTL Context] in which the statement will be evaluated. Currently, only `resource` and `log` are supported. - `table.statement`: the routing condition provided as the [OTTL] statement. Required if `table.condition` is not provided. - `table.condition`: the routing condition provided as the [OTTL] condition. Required if `table.statement` is not provided. - `table.pipelines (required)`: the list of pipelines to use when the routing condition is met. - `default_pipelines (optional)`: contains the list of pipelines to use when a record does not meet any of the specified conditions. - `error_mode (optional)`: determines how errors returned from OTTL statements are handled. Valid values are `propagate`, `ignore` and `silent`. If `ignore` or `silent` is used and a statement's condition has an error, then the payload will be routed to the default pipelines. When `silent` is used, the error is not logged. If not supplied, `propagate` is used. -- `match_once (optional, default: false)`: determines whether the connector matches multiple statements or not. If enabled, the payload will be routed to the first pipeline in the `table` whose routing condition is met. 
+- `match_once (optional, default: false)`: determines whether the connector matches multiple statements or not. If enabled, the payload will be routed to the first pipeline in the `table` whose routing condition is met. May only be `false` when used with `resource` context. -Example: +### Examples + +Route traces based on an attribute: ```yaml receivers: @@ -91,6 +94,92 @@ service: exporters: [jaeger/ecorp] ``` +Route logs based on region: + +```yaml +receivers: + otlp: + +exporters: + file/other: + path: ./other.log + file/east: + path: ./east.log + file/west: + path: ./west.log + +connectors: + routing: + match_once: true + default_pipelines: [logs/other] + table: + - context: log + condition: attributes["region"] == "east" + pipelines: [logs/east] + - context: log + condition: attributes["region"] == "west" + pipelines: [logs/west] + +service: + pipelines: + logs/in: + receivers: [otlp] + exporters: [routing] + logs/east: + receivers: [routing] + exporters: [file/east] + logs/west: + receivers: [routing] + exporters: [file/west] + logs/other: + receivers: [routing] + exporters: [file/other] +``` + +Route all low-level logs to cheap storage. Route the remainder based on service name: + +```yaml +receivers: + otlp: + +exporters: + file/cheap: + path: ./cheap.log + file/service1: + path: ./service1-important.log + file/service2: + path: ./service2-important.log + +connectors: + routing: + match_once: true + table: + - context: log + condition: severity_number < SEVERITY_NUMBER_ERROR + pipelines: [logs/cheap] + - context: resource + condition: attributes["service.name"] == "service1" + pipelines: [logs/service1] + - context: resource + condition: attributes["service.name"] == "service2" + pipelines: [logs/service2] + +service: + pipelines: + logs/in: + receivers: [otlp] + exporters: [routing] + logs/cheap: + receivers: [routing] + exporters: [file/cheap] + logs/service1: + receivers: [routing] + exporters: [file/service1] + logs/service2: + receivers: [routing] + exporters: [file/service2] +``` + A signal may match the routing conditions of more than one routing table entry. In this case, the signal is routed to all pipelines of the matching routes. If none of the routing conditions are met, the signal is routed to the default pipelines. 
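For example, with the default `match_once: false`, a record that satisfies two routes is delivered to both of their pipelines. A minimal sketch of this behavior (the `env` attribute and the pipeline names here are illustrative, not part of this patch):

```yaml
connectors:
  routing:
    # match_once defaults to false: a resource whose "env" attribute
    # is "prod" satisfies both conditions below, so it is duplicated
    # into logs/archive and logs/audit.
    default_pipelines: [logs/default]
    table:
      - condition: attributes["env"] == "prod"
        pipelines: [logs/archive]
      - condition: attributes["env"] != nil
        pipelines: [logs/audit]
```

With `match_once: true`, the same resource would be routed only to `logs/archive`, the first route in the table whose condition it satisfies.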
@@ -109,10 +198,11 @@ If none of the routing conditions are met, the signal is routed to The full list of settings exposed for this connector is documented [here](./config.go) with detailed sample configuration files: -- [logs](./testdata/config_logs.yaml) -- [metrics](./testdata/config_metrics.yaml) -- [traces](./testdata/config_traces.yaml) +- [logs](./testdata/config/logs.yaml) +- [metrics](./testdata/config/metrics.yaml) +- [traces](./testdata/config/traces.yaml) [Connectors README]:https://github.com/open-telemetry/opentelemetry-collector/blob/main/connector/README.md [OTTL]: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/pkg/ottl/README.md +[OTTL Context]: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/pkg/ottl/LANGUAGE.md#contexts diff --git a/connector/routingconnector/config.go b/connector/routingconnector/config.go index 8c868e152a00..3824c577c5cc 100644 --- a/connector/routingconnector/config.go +++ b/connector/routingconnector/config.go @@ -67,6 +67,16 @@ func (c *Config) Validate() error { if len(item.Pipelines) == 0 { return errNoPipelines } + + switch item.Context { + case "", "resource": // ok + case "log": + if !c.MatchOnce { + return errors.New("log context is not supported with match_once: false") + } + default: + return errors.New("invalid context: " + item.Context) + } } return nil @@ -74,6 +84,10 @@ func (c *Config) Validate() error { // RoutingTableItem specifies how data should be routed to the different pipelines type RoutingTableItem struct { + // One of "resource" or "log" (other OTTL contexts will be added in the future) + // Optional. Default "resource". + Context string `mapstructure:"context"` + // Statement is an OTTL statement used for making a routing decision. // One of 'Statement' or 'Condition' must be provided. 
Statement string `mapstructure:"statement"` diff --git a/connector/routingconnector/config_test.go b/connector/routingconnector/config_test.go index 5f3514f76e4d..aa36ad885e16 100644 --- a/connector/routingconnector/config_test.go +++ b/connector/routingconnector/config_test.go @@ -203,6 +203,37 @@ func TestValidateConfig(t *testing.T) { }, error: "invalid route: both condition and statement provided", }, + { + name: "invalid context", + config: &Config{ + Table: []RoutingTableItem{ + { + Context: "invalid", + Statement: `route() where attributes["attr"] == "acme"`, + Pipelines: []pipeline.ID{ + pipeline.NewIDWithName(pipeline.SignalTraces, "otlp"), + }, + }, + }, + }, + error: "invalid context: invalid", + }, + { + name: "log context with match_once false", + config: &Config{ + MatchOnce: false, + Table: []RoutingTableItem{ + { + Context: "log", + Statement: `route() where attributes["attr"] == "acme"`, + Pipelines: []pipeline.ID{ + pipeline.NewIDWithName(pipeline.SignalTraces, "otlp"), + }, + }, + }, + }, + error: "log context is not supported with match_once: false", + }, } for _, tt := range tests { diff --git a/connector/routingconnector/logs.go b/connector/routingconnector/logs.go index dca421b74f9d..7fb0f92f65da 100644 --- a/connector/routingconnector/logs.go +++ b/connector/routingconnector/logs.go @@ -15,6 +15,7 @@ import ( "github.com/open-telemetry/opentelemetry-collector-contrib/connector/routingconnector/internal/plogutil" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl" + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottllog" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlresource" ) @@ -74,29 +75,36 @@ func (c *logsConnector) switchLogs(ctx context.Context, ld plog.Logs) error { var errs error for _, route := range c.router.routeSlice { matchedLogs := plog.NewLogs() - - plogutil.MoveResourcesIf(ld, matchedLogs, - func(rl plog.ResourceLogs) bool { - rtx := ottlresource.NewTransformContext(rl.Resource(), rl) - _, isMatch, err := route.statement.Execute(ctx, rtx) - errs = errors.Join(errs, err) - return isMatch - }, - ) - + switch route.statementContext { + case "", "resource": + plogutil.MoveResourcesIf(ld, matchedLogs, + func(rl plog.ResourceLogs) bool { + rtx := ottlresource.NewTransformContext(rl.Resource(), rl) + _, isMatch, err := route.resourceStatement.Execute(ctx, rtx) + errs = errors.Join(errs, err) + return isMatch + }, + ) + case "log": + plogutil.MoveRecordsWithContextIf(ld, matchedLogs, + func(rl plog.ResourceLogs, sl plog.ScopeLogs, lr plog.LogRecord) bool { + ltx := ottllog.NewTransformContext(lr, sl.Scope(), rl.Resource(), sl, rl) + _, isMatch, err := route.logStatement.Execute(ctx, ltx) + errs = errors.Join(errs, err) + return isMatch + }, + ) + } if errs != nil { if c.config.ErrorMode == ottl.PropagateError { return errs } groupAll(groups, c.router.defaultConsumer, matchedLogs) - } groupAll(groups, route.consumer, matchedLogs) } - // anything left wasn't matched by any route. Send to default consumer groupAll(groups, c.router.defaultConsumer, ld) - for consumer, group := range groups { errs = errors.Join(errs, consumer.ConsumeLogs(ctx, group)) } @@ -110,14 +118,12 @@ func (c *logsConnector) matchAll(ctx context.Context, ld plog.Logs) error { // higher CPU usage. 
groups := make(map[consumer.Logs]plog.Logs) var errs error - for i := 0; i < ld.ResourceLogs().Len(); i++ { rlogs := ld.ResourceLogs().At(i) rtx := ottlresource.NewTransformContext(rlogs.Resource(), rlogs) - noRoutesMatch := true for _, route := range c.router.routeSlice { - _, isMatch, err := route.statement.Execute(ctx, rtx) + _, isMatch, err := route.resourceStatement.Execute(ctx, rtx) if err != nil { if c.config.ErrorMode == ottl.PropagateError { return err @@ -129,9 +135,7 @@ func (c *logsConnector) matchAll(ctx context.Context, ld plog.Logs) error { noRoutesMatch = false group(groups, route.consumer, rlogs) } - } - if noRoutesMatch { // no route conditions are matched, add resource logs to default exporters group group(groups, c.router.defaultConsumer, rlogs) diff --git a/connector/routingconnector/logs_test.go b/connector/routingconnector/logs_test.go index 344aa01e07ba..480d60443a61 100644 --- a/connector/routingconnector/logs_test.go +++ b/connector/routingconnector/logs_test.go @@ -481,6 +481,16 @@ func TestLogsConnectorDetailed(t *testing.T) { filepath.Join("testdata", "logs", "resource_context", "each_matches_one"), filepath.Join("testdata", "logs", "resource_context", "match_none_with_default"), filepath.Join("testdata", "logs", "resource_context", "match_none_without_default"), + filepath.Join("testdata", "logs", "log_context", "all_match_first_only"), + filepath.Join("testdata", "logs", "log_context", "all_match_last_only"), + filepath.Join("testdata", "logs", "log_context", "match_none_with_default"), + filepath.Join("testdata", "logs", "log_context", "match_none_without_default"), + filepath.Join("testdata", "logs", "log_context", "some_match_each_route"), + filepath.Join("testdata", "logs", "log_context", "with_resource_condition"), + filepath.Join("testdata", "logs", "log_context", "with_scope_condition"), + filepath.Join("testdata", "logs", "log_context", "with_resource_and_scope_conditions"), + filepath.Join("testdata", "logs", "mixed_context", "match_resource_then_logs"), + filepath.Join("testdata", "logs", "mixed_context", "match_logs_then_resource"), } for _, tt := range testCases { diff --git a/connector/routingconnector/metrics.go b/connector/routingconnector/metrics.go index 42c362cbe1dd..6bf9508a6ef2 100644 --- a/connector/routingconnector/metrics.go +++ b/connector/routingconnector/metrics.go @@ -73,7 +73,7 @@ func (c *metricsConnector) ConsumeMetrics(ctx context.Context, md pmetric.Metric noRoutesMatch := true for _, route := range c.router.routeSlice { - _, isMatch, err := route.statement.Execute(ctx, rtx) + _, isMatch, err := route.resourceStatement.Execute(ctx, rtx) if err != nil { if c.config.ErrorMode == ottl.PropagateError { return err diff --git a/connector/routingconnector/router.go b/connector/routingconnector/router.go index d9cc906b3564..aac7bb1b2324 100644 --- a/connector/routingconnector/router.go +++ b/connector/routingconnector/router.go @@ -14,6 +14,7 @@ import ( "github.com/open-telemetry/opentelemetry-collector-contrib/connector/routingconnector/internal/common" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl" + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottllog" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlresource" ) @@ -28,8 +29,9 @@ type consumerProvider[C any] func(...pipeline.ID) (C, error) // parameter C is expected to be one of: consumer.Traces, consumer.Metrics, or // consumer.Logs. 
type router[C any] struct { - logger *zap.Logger - parser ottl.Parser[ottlresource.TransformContext] + logger *zap.Logger + resourceParser ottl.Parser[ottlresource.TransformContext] + logParser ottl.Parser[ottllog.TransformContext] table []RoutingTableItem routes map[string]routingItem[C] @@ -47,23 +49,17 @@ func newRouter[C any]( provider consumerProvider[C], settings component.TelemetrySettings, ) (*router[C], error) { - parser, err := ottlresource.NewParser( - common.Functions[ottlresource.TransformContext](), - settings, - ) - - if err != nil { - return nil, err - } - r := &router[C]{ logger: settings.Logger, - parser: parser, table: table, routes: make(map[string]routingItem[C]), consumerProvider: provider, } + if err := r.buildParsers(table, settings); err != nil { + return nil, err + } + if err := r.registerConsumers(defaultPipelineIDs); err != nil { return nil, err } @@ -72,8 +68,48 @@ func newRouter[C any]( } type routingItem[C any] struct { - consumer C - statement *ottl.Statement[ottlresource.TransformContext] + consumer C + statementContext string + + resourceStatement *ottl.Statement[ottlresource.TransformContext] + logStatement *ottl.Statement[ottllog.TransformContext] +} + +func (r *router[C]) buildParsers(table []RoutingTableItem, settings component.TelemetrySettings) error { + var buildResource, buildLog bool + for _, item := range table { + switch item.Context { + case "", "resource": + buildResource = true + case "log": + buildLog = true + } + } + + var errs error + if buildResource { + parser, err := ottlresource.NewParser( + common.Functions[ottlresource.TransformContext](), + settings, + ) + if err == nil { + r.resourceParser = parser + } else { + errs = errors.Join(errs, err) + } + } + if buildLog { + parser, err := ottllog.NewParser( + common.Functions[ottllog.TransformContext](), + settings, + ) + if err == nil { + r.logParser = parser + } else { + errs = errors.Join(errs, err) + } + } + return errs } func (r *router[C]) registerConsumers(defaultPipelineIDs []pipeline.ID) error { @@ -94,8 +130,7 @@ func (r *router[C]) registerConsumers(defaultPipelineIDs []pipeline.ID) error { return nil } -// registerDefaultConsumer registers a consumer for the default -// pipelines configured +// registerDefaultConsumer registers a consumer for the default pipelines configured func (r *router[C]) registerDefaultConsumer(pipelineIDs []pipeline.ID) error { if len(pipelineIDs) == 0 { return nil @@ -121,18 +156,26 @@ func (r *router[C]) normalizeConditions() { } } -// registerRouteConsumers registers a consumer for the pipelines configured -// for each route +// registerRouteConsumers registers a consumer for the pipelines configured for each route func (r *router[C]) registerRouteConsumers() error { for _, item := range r.table { - statement, err := r.parser.ParseStatement(item.Statement) - if err != nil { - return err - } - route, ok := r.routes[key(item)] if !ok { - route.statement = statement + route.statementContext = item.Context + switch item.Context { + case "", "resource": + statement, err := r.resourceParser.ParseStatement(item.Statement) + if err != nil { + return err + } + route.resourceStatement = statement + case "log": + statement, err := r.logParser.ParseStatement(item.Statement) + if err != nil { + return err + } + route.logStatement = statement + } } else { pipelineNames := []string{} for _, pipeline := range item.Pipelines { @@ -157,5 +200,8 @@ func (r *router[C]) registerRouteConsumers() error { } func key(entry RoutingTableItem) string { - return entry.Statement 
+ if entry.Context == "" || entry.Context == "resource" { + return entry.Statement + } + return "[" + entry.Context + "] " + entry.Statement } diff --git a/connector/routingconnector/testdata/logs/log_context/all_match_first_only/config.yaml b/connector/routingconnector/testdata/logs/log_context/all_match_first_only/config.yaml new file mode 100644 index 000000000000..e890865e53dc --- /dev/null +++ b/connector/routingconnector/testdata/logs/log_context/all_match_first_only/config.yaml @@ -0,0 +1,13 @@ +routing: + match_once: true + default_pipelines: + - logs/default + table: + - context: log + condition: attributes["logName"] != nil + pipelines: + - logs/0 + - context: log + condition: attributes["logName"] == "logY" + pipelines: + - logs/1 diff --git a/connector/routingconnector/testdata/logs/log_context/all_match_first_only/input.yaml b/connector/routingconnector/testdata/logs/log_context/all_match_first_only/input.yaml new file mode 100644 index 000000000000..63c6eada6cf9 --- /dev/null +++ b/connector/routingconnector/testdata/logs/log_context/all_match_first_only/input.yaml @@ -0,0 +1,141 @@ +resourceLogs: + - resource: + attributes: + - key: resourceName + value: + stringValue: resourceA + - key: resourceNameAgain + value: + stringValue: resourceA + schemaUrl: https://opentelemetry.io/schemas/1.6.1 + scopeLogs: + - attributes: + - key: scopeName + value: + stringValue: scopeA + - key: scopeNameAgain + value: + stringValue: scopeA + logRecords: + - attributes: + - key: logName + value: + stringValue: logA + - key: logNameAgain + value: + stringValue: logA + body: + stringValue: logA + - attributes: + - key: logName + value: + stringValue: logB + - key: logNameAgain + value: + stringValue: logB + body: + stringValue: logB + schemaUrl: https://opentelemetry.io/schemas/1.6.1 + scope: + name: scopeA + version: v0.1.0 + - attributes: + - key: scopeName + value: + stringValue: scopeB + - key: scopeNameAgain + value: + stringValue: scopeB + logRecords: + - attributes: + - key: logName + value: + stringValue: logA + - key: logNameAgain + value: + stringValue: logA + body: + stringValue: logA + - attributes: + - key: logName + value: + stringValue: logB + - key: logNameAgain + value: + stringValue: logB + body: + stringValue: logB + schemaUrl: https://opentelemetry.io/schemas/1.6.1 + scope: + name: scopeB + version: v0.1.0 + - resource: + attributes: + - key: resourceName + value: + stringValue: resourceB + - key: resourceNameAgain + value: + stringValue: resourceB + schemaUrl: https://opentelemetry.io/schemas/1.6.1 + scopeLogs: + - attributes: + - key: scopeName + value: + stringValue: scopeA + - key: scopeNameAgain + value: + stringValue: scopeA + logRecords: + - attributes: + - key: logName + value: + stringValue: logA + - key: logNameAgain + value: + stringValue: logA + body: + stringValue: logA + - attributes: + - key: logName + value: + stringValue: logB + - key: logNameAgain + value: + stringValue: logB + body: + stringValue: logB + schemaUrl: https://opentelemetry.io/schemas/1.6.1 + scope: + name: scopeA + version: v0.1.0 + - attributes: + - key: scopeName + value: + stringValue: scopeB + - key: scopeNameAgain + value: + stringValue: scopeB + logRecords: + - attributes: + - key: logName + value: + stringValue: logA + - key: logNameAgain + value: + stringValue: logA + body: + stringValue: logA + - attributes: + - key: logName + value: + stringValue: logB + - key: logNameAgain + value: + stringValue: logB + body: + stringValue: logB + schemaUrl: https://opentelemetry.io/schemas/1.6.1 + 
scope: + name: scopeB + version: v0.1.0 diff --git a/connector/routingconnector/testdata/logs/log_context/all_match_first_only/sink_0.yaml b/connector/routingconnector/testdata/logs/log_context/all_match_first_only/sink_0.yaml new file mode 100644 index 000000000000..63c6eada6cf9 --- /dev/null +++ b/connector/routingconnector/testdata/logs/log_context/all_match_first_only/sink_0.yaml @@ -0,0 +1,141 @@ +resourceLogs: + - resource: + attributes: + - key: resourceName + value: + stringValue: resourceA + - key: resourceNameAgain + value: + stringValue: resourceA + schemaUrl: https://opentelemetry.io/schemas/1.6.1 + scopeLogs: + - attributes: + - key: scopeName + value: + stringValue: scopeA + - key: scopeNameAgain + value: + stringValue: scopeA + logRecords: + - attributes: + - key: logName + value: + stringValue: logA + - key: logNameAgain + value: + stringValue: logA + body: + stringValue: logA + - attributes: + - key: logName + value: + stringValue: logB + - key: logNameAgain + value: + stringValue: logB + body: + stringValue: logB + schemaUrl: https://opentelemetry.io/schemas/1.6.1 + scope: + name: scopeA + version: v0.1.0 + - attributes: + - key: scopeName + value: + stringValue: scopeB + - key: scopeNameAgain + value: + stringValue: scopeB + logRecords: + - attributes: + - key: logName + value: + stringValue: logA + - key: logNameAgain + value: + stringValue: logA + body: + stringValue: logA + - attributes: + - key: logName + value: + stringValue: logB + - key: logNameAgain + value: + stringValue: logB + body: + stringValue: logB + schemaUrl: https://opentelemetry.io/schemas/1.6.1 + scope: + name: scopeB + version: v0.1.0 + - resource: + attributes: + - key: resourceName + value: + stringValue: resourceB + - key: resourceNameAgain + value: + stringValue: resourceB + schemaUrl: https://opentelemetry.io/schemas/1.6.1 + scopeLogs: + - attributes: + - key: scopeName + value: + stringValue: scopeA + - key: scopeNameAgain + value: + stringValue: scopeA + logRecords: + - attributes: + - key: logName + value: + stringValue: logA + - key: logNameAgain + value: + stringValue: logA + body: + stringValue: logA + - attributes: + - key: logName + value: + stringValue: logB + - key: logNameAgain + value: + stringValue: logB + body: + stringValue: logB + schemaUrl: https://opentelemetry.io/schemas/1.6.1 + scope: + name: scopeA + version: v0.1.0 + - attributes: + - key: scopeName + value: + stringValue: scopeB + - key: scopeNameAgain + value: + stringValue: scopeB + logRecords: + - attributes: + - key: logName + value: + stringValue: logA + - key: logNameAgain + value: + stringValue: logA + body: + stringValue: logA + - attributes: + - key: logName + value: + stringValue: logB + - key: logNameAgain + value: + stringValue: logB + body: + stringValue: logB + schemaUrl: https://opentelemetry.io/schemas/1.6.1 + scope: + name: scopeB + version: v0.1.0 diff --git a/connector/routingconnector/testdata/logs/log_context/all_match_last_only/config.yaml b/connector/routingconnector/testdata/logs/log_context/all_match_last_only/config.yaml new file mode 100644 index 000000000000..87873dd530db --- /dev/null +++ b/connector/routingconnector/testdata/logs/log_context/all_match_last_only/config.yaml @@ -0,0 +1,13 @@ +routing: + match_once: true + default_pipelines: + - logs/default + table: + - context: log + condition: attributes["logName"] == "logX" + pipelines: + - logs/0 + - context: log + condition: attributes["logName"] != nil + pipelines: + - logs/1 diff --git 
a/connector/routingconnector/testdata/logs/log_context/all_match_last_only/input.yaml b/connector/routingconnector/testdata/logs/log_context/all_match_last_only/input.yaml new file mode 100644 index 000000000000..63c6eada6cf9 --- /dev/null +++ b/connector/routingconnector/testdata/logs/log_context/all_match_last_only/input.yaml @@ -0,0 +1,141 @@ +resourceLogs: + - resource: + attributes: + - key: resourceName + value: + stringValue: resourceA + - key: resourceNameAgain + value: + stringValue: resourceA + schemaUrl: https://opentelemetry.io/schemas/1.6.1 + scopeLogs: + - attributes: + - key: scopeName + value: + stringValue: scopeA + - key: scopeNameAgain + value: + stringValue: scopeA + logRecords: + - attributes: + - key: logName + value: + stringValue: logA + - key: logNameAgain + value: + stringValue: logA + body: + stringValue: logA + - attributes: + - key: logName + value: + stringValue: logB + - key: logNameAgain + value: + stringValue: logB + body: + stringValue: logB + schemaUrl: https://opentelemetry.io/schemas/1.6.1 + scope: + name: scopeA + version: v0.1.0 + - attributes: + - key: scopeName + value: + stringValue: scopeB + - key: scopeNameAgain + value: + stringValue: scopeB + logRecords: + - attributes: + - key: logName + value: + stringValue: logA + - key: logNameAgain + value: + stringValue: logA + body: + stringValue: logA + - attributes: + - key: logName + value: + stringValue: logB + - key: logNameAgain + value: + stringValue: logB + body: + stringValue: logB + schemaUrl: https://opentelemetry.io/schemas/1.6.1 + scope: + name: scopeB + version: v0.1.0 + - resource: + attributes: + - key: resourceName + value: + stringValue: resourceB + - key: resourceNameAgain + value: + stringValue: resourceB + schemaUrl: https://opentelemetry.io/schemas/1.6.1 + scopeLogs: + - attributes: + - key: scopeName + value: + stringValue: scopeA + - key: scopeNameAgain + value: + stringValue: scopeA + logRecords: + - attributes: + - key: logName + value: + stringValue: logA + - key: logNameAgain + value: + stringValue: logA + body: + stringValue: logA + - attributes: + - key: logName + value: + stringValue: logB + - key: logNameAgain + value: + stringValue: logB + body: + stringValue: logB + schemaUrl: https://opentelemetry.io/schemas/1.6.1 + scope: + name: scopeA + version: v0.1.0 + - attributes: + - key: scopeName + value: + stringValue: scopeB + - key: scopeNameAgain + value: + stringValue: scopeB + logRecords: + - attributes: + - key: logName + value: + stringValue: logA + - key: logNameAgain + value: + stringValue: logA + body: + stringValue: logA + - attributes: + - key: logName + value: + stringValue: logB + - key: logNameAgain + value: + stringValue: logB + body: + stringValue: logB + schemaUrl: https://opentelemetry.io/schemas/1.6.1 + scope: + name: scopeB + version: v0.1.0 diff --git a/connector/routingconnector/testdata/logs/log_context/all_match_last_only/sink_1.yaml b/connector/routingconnector/testdata/logs/log_context/all_match_last_only/sink_1.yaml new file mode 100644 index 000000000000..63c6eada6cf9 --- /dev/null +++ b/connector/routingconnector/testdata/logs/log_context/all_match_last_only/sink_1.yaml @@ -0,0 +1,141 @@ +resourceLogs: + - resource: + attributes: + - key: resourceName + value: + stringValue: resourceA + - key: resourceNameAgain + value: + stringValue: resourceA + schemaUrl: https://opentelemetry.io/schemas/1.6.1 + scopeLogs: + - attributes: + - key: scopeName + value: + stringValue: scopeA + - key: scopeNameAgain + value: + stringValue: scopeA + logRecords: + - 
attributes: + - key: logName + value: + stringValue: logA + - key: logNameAgain + value: + stringValue: logA + body: + stringValue: logA + - attributes: + - key: logName + value: + stringValue: logB + - key: logNameAgain + value: + stringValue: logB + body: + stringValue: logB + schemaUrl: https://opentelemetry.io/schemas/1.6.1 + scope: + name: scopeA + version: v0.1.0 + - attributes: + - key: scopeName + value: + stringValue: scopeB + - key: scopeNameAgain + value: + stringValue: scopeB + logRecords: + - attributes: + - key: logName + value: + stringValue: logA + - key: logNameAgain + value: + stringValue: logA + body: + stringValue: logA + - attributes: + - key: logName + value: + stringValue: logB + - key: logNameAgain + value: + stringValue: logB + body: + stringValue: logB + schemaUrl: https://opentelemetry.io/schemas/1.6.1 + scope: + name: scopeB + version: v0.1.0 + - resource: + attributes: + - key: resourceName + value: + stringValue: resourceB + - key: resourceNameAgain + value: + stringValue: resourceB + schemaUrl: https://opentelemetry.io/schemas/1.6.1 + scopeLogs: + - attributes: + - key: scopeName + value: + stringValue: scopeA + - key: scopeNameAgain + value: + stringValue: scopeA + logRecords: + - attributes: + - key: logName + value: + stringValue: logA + - key: logNameAgain + value: + stringValue: logA + body: + stringValue: logA + - attributes: + - key: logName + value: + stringValue: logB + - key: logNameAgain + value: + stringValue: logB + body: + stringValue: logB + schemaUrl: https://opentelemetry.io/schemas/1.6.1 + scope: + name: scopeA + version: v0.1.0 + - attributes: + - key: scopeName + value: + stringValue: scopeB + - key: scopeNameAgain + value: + stringValue: scopeB + logRecords: + - attributes: + - key: logName + value: + stringValue: logA + - key: logNameAgain + value: + stringValue: logA + body: + stringValue: logA + - attributes: + - key: logName + value: + stringValue: logB + - key: logNameAgain + value: + stringValue: logB + body: + stringValue: logB + schemaUrl: https://opentelemetry.io/schemas/1.6.1 + scope: + name: scopeB + version: v0.1.0 diff --git a/connector/routingconnector/testdata/logs/log_context/match_none_with_default/config.yaml b/connector/routingconnector/testdata/logs/log_context/match_none_with_default/config.yaml new file mode 100644 index 000000000000..dacb340e9937 --- /dev/null +++ b/connector/routingconnector/testdata/logs/log_context/match_none_with_default/config.yaml @@ -0,0 +1,13 @@ +routing: + match_once: true + default_pipelines: + - logs/default + table: + - context: log + condition: attributes["logName"] == "logX" + pipelines: + - logs/0 + - context: log + condition: attributes["logName"] == "logY" + pipelines: + - logs/1 diff --git a/connector/routingconnector/testdata/logs/log_context/match_none_with_default/input.yaml b/connector/routingconnector/testdata/logs/log_context/match_none_with_default/input.yaml new file mode 100644 index 000000000000..63c6eada6cf9 --- /dev/null +++ b/connector/routingconnector/testdata/logs/log_context/match_none_with_default/input.yaml @@ -0,0 +1,141 @@ +resourceLogs: + - resource: + attributes: + - key: resourceName + value: + stringValue: resourceA + - key: resourceNameAgain + value: + stringValue: resourceA + schemaUrl: https://opentelemetry.io/schemas/1.6.1 + scopeLogs: + - attributes: + - key: scopeName + value: + stringValue: scopeA + - key: scopeNameAgain + value: + stringValue: scopeA + logRecords: + - attributes: + - key: logName + value: + stringValue: logA + - key: logNameAgain + 
value: + stringValue: logA + body: + stringValue: logA + - attributes: + - key: logName + value: + stringValue: logB + - key: logNameAgain + value: + stringValue: logB + body: + stringValue: logB + schemaUrl: https://opentelemetry.io/schemas/1.6.1 + scope: + name: scopeA + version: v0.1.0 + - attributes: + - key: scopeName + value: + stringValue: scopeB + - key: scopeNameAgain + value: + stringValue: scopeB + logRecords: + - attributes: + - key: logName + value: + stringValue: logA + - key: logNameAgain + value: + stringValue: logA + body: + stringValue: logA + - attributes: + - key: logName + value: + stringValue: logB + - key: logNameAgain + value: + stringValue: logB + body: + stringValue: logB + schemaUrl: https://opentelemetry.io/schemas/1.6.1 + scope: + name: scopeB + version: v0.1.0 + - resource: + attributes: + - key: resourceName + value: + stringValue: resourceB + - key: resourceNameAgain + value: + stringValue: resourceB + schemaUrl: https://opentelemetry.io/schemas/1.6.1 + scopeLogs: + - attributes: + - key: scopeName + value: + stringValue: scopeA + - key: scopeNameAgain + value: + stringValue: scopeA + logRecords: + - attributes: + - key: logName + value: + stringValue: logA + - key: logNameAgain + value: + stringValue: logA + body: + stringValue: logA + - attributes: + - key: logName + value: + stringValue: logB + - key: logNameAgain + value: + stringValue: logB + body: + stringValue: logB + schemaUrl: https://opentelemetry.io/schemas/1.6.1 + scope: + name: scopeA + version: v0.1.0 + - attributes: + - key: scopeName + value: + stringValue: scopeB + - key: scopeNameAgain + value: + stringValue: scopeB + logRecords: + - attributes: + - key: logName + value: + stringValue: logA + - key: logNameAgain + value: + stringValue: logA + body: + stringValue: logA + - attributes: + - key: logName + value: + stringValue: logB + - key: logNameAgain + value: + stringValue: logB + body: + stringValue: logB + schemaUrl: https://opentelemetry.io/schemas/1.6.1 + scope: + name: scopeB + version: v0.1.0 diff --git a/connector/routingconnector/testdata/logs/log_context/match_none_with_default/sink_default.yaml b/connector/routingconnector/testdata/logs/log_context/match_none_with_default/sink_default.yaml new file mode 100644 index 000000000000..63c6eada6cf9 --- /dev/null +++ b/connector/routingconnector/testdata/logs/log_context/match_none_with_default/sink_default.yaml @@ -0,0 +1,141 @@ +resourceLogs: + - resource: + attributes: + - key: resourceName + value: + stringValue: resourceA + - key: resourceNameAgain + value: + stringValue: resourceA + schemaUrl: https://opentelemetry.io/schemas/1.6.1 + scopeLogs: + - attributes: + - key: scopeName + value: + stringValue: scopeA + - key: scopeNameAgain + value: + stringValue: scopeA + logRecords: + - attributes: + - key: logName + value: + stringValue: logA + - key: logNameAgain + value: + stringValue: logA + body: + stringValue: logA + - attributes: + - key: logName + value: + stringValue: logB + - key: logNameAgain + value: + stringValue: logB + body: + stringValue: logB + schemaUrl: https://opentelemetry.io/schemas/1.6.1 + scope: + name: scopeA + version: v0.1.0 + - attributes: + - key: scopeName + value: + stringValue: scopeB + - key: scopeNameAgain + value: + stringValue: scopeB + logRecords: + - attributes: + - key: logName + value: + stringValue: logA + - key: logNameAgain + value: + stringValue: logA + body: + stringValue: logA + - attributes: + - key: logName + value: + stringValue: logB + - key: logNameAgain + value: + stringValue: logB + 
body: + stringValue: logB + schemaUrl: https://opentelemetry.io/schemas/1.6.1 + scope: + name: scopeB + version: v0.1.0 + - resource: + attributes: + - key: resourceName + value: + stringValue: resourceB + - key: resourceNameAgain + value: + stringValue: resourceB + schemaUrl: https://opentelemetry.io/schemas/1.6.1 + scopeLogs: + - attributes: + - key: scopeName + value: + stringValue: scopeA + - key: scopeNameAgain + value: + stringValue: scopeA + logRecords: + - attributes: + - key: logName + value: + stringValue: logA + - key: logNameAgain + value: + stringValue: logA + body: + stringValue: logA + - attributes: + - key: logName + value: + stringValue: logB + - key: logNameAgain + value: + stringValue: logB + body: + stringValue: logB + schemaUrl: https://opentelemetry.io/schemas/1.6.1 + scope: + name: scopeA + version: v0.1.0 + - attributes: + - key: scopeName + value: + stringValue: scopeB + - key: scopeNameAgain + value: + stringValue: scopeB + logRecords: + - attributes: + - key: logName + value: + stringValue: logA + - key: logNameAgain + value: + stringValue: logA + body: + stringValue: logA + - attributes: + - key: logName + value: + stringValue: logB + - key: logNameAgain + value: + stringValue: logB + body: + stringValue: logB + schemaUrl: https://opentelemetry.io/schemas/1.6.1 + scope: + name: scopeB + version: v0.1.0 diff --git a/connector/routingconnector/testdata/logs/log_context/match_none_without_default/config.yaml b/connector/routingconnector/testdata/logs/log_context/match_none_without_default/config.yaml new file mode 100644 index 000000000000..74264624fc67 --- /dev/null +++ b/connector/routingconnector/testdata/logs/log_context/match_none_without_default/config.yaml @@ -0,0 +1,12 @@ +routing: + match_once: true + # no default pipelines + table: + - context: log + condition: attributes["logName"] == "logX" + pipelines: + - logs/0 + - context: log + condition: attributes["logName"] == "logY" + pipelines: + - logs/1 diff --git a/connector/routingconnector/testdata/logs/log_context/match_none_without_default/input.yaml b/connector/routingconnector/testdata/logs/log_context/match_none_without_default/input.yaml new file mode 100644 index 000000000000..63c6eada6cf9 --- /dev/null +++ b/connector/routingconnector/testdata/logs/log_context/match_none_without_default/input.yaml @@ -0,0 +1,141 @@ +resourceLogs: + - resource: + attributes: + - key: resourceName + value: + stringValue: resourceA + - key: resourceNameAgain + value: + stringValue: resourceA + schemaUrl: https://opentelemetry.io/schemas/1.6.1 + scopeLogs: + - attributes: + - key: scopeName + value: + stringValue: scopeA + - key: scopeNameAgain + value: + stringValue: scopeA + logRecords: + - attributes: + - key: logName + value: + stringValue: logA + - key: logNameAgain + value: + stringValue: logA + body: + stringValue: logA + - attributes: + - key: logName + value: + stringValue: logB + - key: logNameAgain + value: + stringValue: logB + body: + stringValue: logB + schemaUrl: https://opentelemetry.io/schemas/1.6.1 + scope: + name: scopeA + version: v0.1.0 + - attributes: + - key: scopeName + value: + stringValue: scopeB + - key: scopeNameAgain + value: + stringValue: scopeB + logRecords: + - attributes: + - key: logName + value: + stringValue: logA + - key: logNameAgain + value: + stringValue: logA + body: + stringValue: logA + - attributes: + - key: logName + value: + stringValue: logB + - key: logNameAgain + value: + stringValue: logB + body: + stringValue: logB + schemaUrl: https://opentelemetry.io/schemas/1.6.1 
+ scope: + name: scopeB + version: v0.1.0 + - resource: + attributes: + - key: resourceName + value: + stringValue: resourceB + - key: resourceNameAgain + value: + stringValue: resourceB + schemaUrl: https://opentelemetry.io/schemas/1.6.1 + scopeLogs: + - attributes: + - key: scopeName + value: + stringValue: scopeA + - key: scopeNameAgain + value: + stringValue: scopeA + logRecords: + - attributes: + - key: logName + value: + stringValue: logA + - key: logNameAgain + value: + stringValue: logA + body: + stringValue: logA + - attributes: + - key: logName + value: + stringValue: logB + - key: logNameAgain + value: + stringValue: logB + body: + stringValue: logB + schemaUrl: https://opentelemetry.io/schemas/1.6.1 + scope: + name: scopeA + version: v0.1.0 + - attributes: + - key: scopeName + value: + stringValue: scopeB + - key: scopeNameAgain + value: + stringValue: scopeB + logRecords: + - attributes: + - key: logName + value: + stringValue: logA + - key: logNameAgain + value: + stringValue: logA + body: + stringValue: logA + - attributes: + - key: logName + value: + stringValue: logB + - key: logNameAgain + value: + stringValue: logB + body: + stringValue: logB + schemaUrl: https://opentelemetry.io/schemas/1.6.1 + scope: + name: scopeB + version: v0.1.0 diff --git a/connector/routingconnector/testdata/logs/log_context/some_match_each_route/config.yaml b/connector/routingconnector/testdata/logs/log_context/some_match_each_route/config.yaml new file mode 100644 index 000000000000..b830ca7e9450 --- /dev/null +++ b/connector/routingconnector/testdata/logs/log_context/some_match_each_route/config.yaml @@ -0,0 +1,13 @@ +routing: + match_once: true + default_pipelines: + - logs/default + table: + - context: log + condition: attributes["logName"] == "logA" and resource.attributes["resourceName"] == "resourceA" + pipelines: + - logs/0 + - context: log + condition: attributes["logName"] == "logB" and resource.attributes["resourceName"] == "resourceB" + pipelines: + - logs/1 diff --git a/connector/routingconnector/testdata/logs/log_context/some_match_each_route/input.yaml b/connector/routingconnector/testdata/logs/log_context/some_match_each_route/input.yaml new file mode 100644 index 000000000000..63c6eada6cf9 --- /dev/null +++ b/connector/routingconnector/testdata/logs/log_context/some_match_each_route/input.yaml @@ -0,0 +1,141 @@ +resourceLogs: + - resource: + attributes: + - key: resourceName + value: + stringValue: resourceA + - key: resourceNameAgain + value: + stringValue: resourceA + schemaUrl: https://opentelemetry.io/schemas/1.6.1 + scopeLogs: + - attributes: + - key: scopeName + value: + stringValue: scopeA + - key: scopeNameAgain + value: + stringValue: scopeA + logRecords: + - attributes: + - key: logName + value: + stringValue: logA + - key: logNameAgain + value: + stringValue: logA + body: + stringValue: logA + - attributes: + - key: logName + value: + stringValue: logB + - key: logNameAgain + value: + stringValue: logB + body: + stringValue: logB + schemaUrl: https://opentelemetry.io/schemas/1.6.1 + scope: + name: scopeA + version: v0.1.0 + - attributes: + - key: scopeName + value: + stringValue: scopeB + - key: scopeNameAgain + value: + stringValue: scopeB + logRecords: + - attributes: + - key: logName + value: + stringValue: logA + - key: logNameAgain + value: + stringValue: logA + body: + stringValue: logA + - attributes: + - key: logName + value: + stringValue: logB + - key: logNameAgain + value: + stringValue: logB + body: + stringValue: logB + schemaUrl: 
https://opentelemetry.io/schemas/1.6.1 + scope: + name: scopeB + version: v0.1.0 + - resource: + attributes: + - key: resourceName + value: + stringValue: resourceB + - key: resourceNameAgain + value: + stringValue: resourceB + schemaUrl: https://opentelemetry.io/schemas/1.6.1 + scopeLogs: + - attributes: + - key: scopeName + value: + stringValue: scopeA + - key: scopeNameAgain + value: + stringValue: scopeA + logRecords: + - attributes: + - key: logName + value: + stringValue: logA + - key: logNameAgain + value: + stringValue: logA + body: + stringValue: logA + - attributes: + - key: logName + value: + stringValue: logB + - key: logNameAgain + value: + stringValue: logB + body: + stringValue: logB + schemaUrl: https://opentelemetry.io/schemas/1.6.1 + scope: + name: scopeA + version: v0.1.0 + - attributes: + - key: scopeName + value: + stringValue: scopeB + - key: scopeNameAgain + value: + stringValue: scopeB + logRecords: + - attributes: + - key: logName + value: + stringValue: logA + - key: logNameAgain + value: + stringValue: logA + body: + stringValue: logA + - attributes: + - key: logName + value: + stringValue: logB + - key: logNameAgain + value: + stringValue: logB + body: + stringValue: logB + schemaUrl: https://opentelemetry.io/schemas/1.6.1 + scope: + name: scopeB + version: v0.1.0 diff --git a/connector/routingconnector/testdata/logs/log_context/some_match_each_route/sink_0.yaml b/connector/routingconnector/testdata/logs/log_context/some_match_each_route/sink_0.yaml new file mode 100644 index 000000000000..539ec42b1322 --- /dev/null +++ b/connector/routingconnector/testdata/logs/log_context/some_match_each_route/sink_0.yaml @@ -0,0 +1,53 @@ +resourceLogs: + - resource: + attributes: + - key: resourceName + value: + stringValue: resourceA + - key: resourceNameAgain + value: + stringValue: resourceA + schemaUrl: https://opentelemetry.io/schemas/1.6.1 + scopeLogs: + - attributes: + - key: scopeName + value: + stringValue: scopeA + - key: scopeNameAgain + value: + stringValue: scopeA + logRecords: + - attributes: + - key: logName + value: + stringValue: logA + - key: logNameAgain + value: + stringValue: logA + body: + stringValue: logA + schemaUrl: https://opentelemetry.io/schemas/1.6.1 + scope: + name: scopeA + version: v0.1.0 + - attributes: + - key: scopeName + value: + stringValue: scopeB + - key: scopeNameAgain + value: + stringValue: scopeB + logRecords: + - attributes: + - key: logName + value: + stringValue: logA + - key: logNameAgain + value: + stringValue: logA + body: + stringValue: logA + schemaUrl: https://opentelemetry.io/schemas/1.6.1 + scope: + name: scopeB + version: v0.1.0 diff --git a/connector/routingconnector/testdata/logs/log_context/some_match_each_route/sink_1.yaml b/connector/routingconnector/testdata/logs/log_context/some_match_each_route/sink_1.yaml new file mode 100644 index 000000000000..ba61f2992fbf --- /dev/null +++ b/connector/routingconnector/testdata/logs/log_context/some_match_each_route/sink_1.yaml @@ -0,0 +1,53 @@ +resourceLogs: + - resource: + attributes: + - key: resourceName + value: + stringValue: resourceB + - key: resourceNameAgain + value: + stringValue: resourceB + schemaUrl: https://opentelemetry.io/schemas/1.6.1 + scopeLogs: + - attributes: + - key: scopeName + value: + stringValue: scopeA + - key: scopeNameAgain + value: + stringValue: scopeA + logRecords: + - attributes: + - key: logName + value: + stringValue: logB + - key: logNameAgain + value: + stringValue: logB + body: + stringValue: logB + schemaUrl: 
https://opentelemetry.io/schemas/1.6.1 + scope: + name: scopeA + version: v0.1.0 + - attributes: + - key: scopeName + value: + stringValue: scopeB + - key: scopeNameAgain + value: + stringValue: scopeB + logRecords: + - attributes: + - key: logName + value: + stringValue: logB + - key: logNameAgain + value: + stringValue: logB + body: + stringValue: logB + schemaUrl: https://opentelemetry.io/schemas/1.6.1 + scope: + name: scopeB + version: v0.1.0 diff --git a/connector/routingconnector/testdata/logs/log_context/some_match_each_route/sink_default.yaml b/connector/routingconnector/testdata/logs/log_context/some_match_each_route/sink_default.yaml new file mode 100644 index 000000000000..ee4340abb2cc --- /dev/null +++ b/connector/routingconnector/testdata/logs/log_context/some_match_each_route/sink_default.yaml @@ -0,0 +1,105 @@ +resourceLogs: + - resource: + attributes: + - key: resourceName + value: + stringValue: resourceA + - key: resourceNameAgain + value: + stringValue: resourceA + schemaUrl: https://opentelemetry.io/schemas/1.6.1 + scopeLogs: + - attributes: + - key: scopeName + value: + stringValue: scopeA + - key: scopeNameAgain + value: + stringValue: scopeA + logRecords: + - attributes: + - key: logName + value: + stringValue: logB + - key: logNameAgain + value: + stringValue: logB + body: + stringValue: logB + schemaUrl: https://opentelemetry.io/schemas/1.6.1 + scope: + name: scopeA + version: v0.1.0 + - attributes: + - key: scopeName + value: + stringValue: scopeB + - key: scopeNameAgain + value: + stringValue: scopeB + logRecords: + - attributes: + - key: logName + value: + stringValue: logB + - key: logNameAgain + value: + stringValue: logB + body: + stringValue: logB + schemaUrl: https://opentelemetry.io/schemas/1.6.1 + scope: + name: scopeB + version: v0.1.0 + - resource: + attributes: + - key: resourceName + value: + stringValue: resourceB + - key: resourceNameAgain + value: + stringValue: resourceB + schemaUrl: https://opentelemetry.io/schemas/1.6.1 + scopeLogs: + - attributes: + - key: scopeName + value: + stringValue: scopeA + - key: scopeNameAgain + value: + stringValue: scopeA + logRecords: + - attributes: + - key: logName + value: + stringValue: logA + - key: logNameAgain + value: + stringValue: logA + body: + stringValue: logA + schemaUrl: https://opentelemetry.io/schemas/1.6.1 + scope: + name: scopeA + version: v0.1.0 + - attributes: + - key: scopeName + value: + stringValue: scopeB + - key: scopeNameAgain + value: + stringValue: scopeB + logRecords: + - attributes: + - key: logName + value: + stringValue: logA + - key: logNameAgain + value: + stringValue: logA + body: + stringValue: logA + schemaUrl: https://opentelemetry.io/schemas/1.6.1 + scope: + name: scopeB + version: v0.1.0 diff --git a/connector/routingconnector/testdata/logs/log_context/with_resource_and_scope_conditions/config.yaml b/connector/routingconnector/testdata/logs/log_context/with_resource_and_scope_conditions/config.yaml new file mode 100644 index 000000000000..26855ab4322d --- /dev/null +++ b/connector/routingconnector/testdata/logs/log_context/with_resource_and_scope_conditions/config.yaml @@ -0,0 +1,13 @@ +routing: + match_once: true + default_pipelines: + - logs/default + table: + - context: log + condition: resource.attributes["resourceName"] == "resourceB" and instrumentation_scope.name == "scopeA" and attributes["logName"] != nil + pipelines: + - logs/0 + - context: log + condition: attributes["logName"] == "logY" + pipelines: + - logs/1 diff --git 
a/connector/routingconnector/testdata/logs/log_context/with_resource_and_scope_conditions/input.yaml b/connector/routingconnector/testdata/logs/log_context/with_resource_and_scope_conditions/input.yaml new file mode 100644 index 000000000000..63c6eada6cf9 --- /dev/null +++ b/connector/routingconnector/testdata/logs/log_context/with_resource_and_scope_conditions/input.yaml @@ -0,0 +1,141 @@ +resourceLogs: + - resource: + attributes: + - key: resourceName + value: + stringValue: resourceA + - key: resourceNameAgain + value: + stringValue: resourceA + schemaUrl: https://opentelemetry.io/schemas/1.6.1 + scopeLogs: + - attributes: + - key: scopeName + value: + stringValue: scopeA + - key: scopeNameAgain + value: + stringValue: scopeA + logRecords: + - attributes: + - key: logName + value: + stringValue: logA + - key: logNameAgain + value: + stringValue: logA + body: + stringValue: logA + - attributes: + - key: logName + value: + stringValue: logB + - key: logNameAgain + value: + stringValue: logB + body: + stringValue: logB + schemaUrl: https://opentelemetry.io/schemas/1.6.1 + scope: + name: scopeA + version: v0.1.0 + - attributes: + - key: scopeName + value: + stringValue: scopeB + - key: scopeNameAgain + value: + stringValue: scopeB + logRecords: + - attributes: + - key: logName + value: + stringValue: logA + - key: logNameAgain + value: + stringValue: logA + body: + stringValue: logA + - attributes: + - key: logName + value: + stringValue: logB + - key: logNameAgain + value: + stringValue: logB + body: + stringValue: logB + schemaUrl: https://opentelemetry.io/schemas/1.6.1 + scope: + name: scopeB + version: v0.1.0 + - resource: + attributes: + - key: resourceName + value: + stringValue: resourceB + - key: resourceNameAgain + value: + stringValue: resourceB + schemaUrl: https://opentelemetry.io/schemas/1.6.1 + scopeLogs: + - attributes: + - key: scopeName + value: + stringValue: scopeA + - key: scopeNameAgain + value: + stringValue: scopeA + logRecords: + - attributes: + - key: logName + value: + stringValue: logA + - key: logNameAgain + value: + stringValue: logA + body: + stringValue: logA + - attributes: + - key: logName + value: + stringValue: logB + - key: logNameAgain + value: + stringValue: logB + body: + stringValue: logB + schemaUrl: https://opentelemetry.io/schemas/1.6.1 + scope: + name: scopeA + version: v0.1.0 + - attributes: + - key: scopeName + value: + stringValue: scopeB + - key: scopeNameAgain + value: + stringValue: scopeB + logRecords: + - attributes: + - key: logName + value: + stringValue: logA + - key: logNameAgain + value: + stringValue: logA + body: + stringValue: logA + - attributes: + - key: logName + value: + stringValue: logB + - key: logNameAgain + value: + stringValue: logB + body: + stringValue: logB + schemaUrl: https://opentelemetry.io/schemas/1.6.1 + scope: + name: scopeB + version: v0.1.0 diff --git a/connector/routingconnector/testdata/logs/log_context/with_resource_and_scope_conditions/sink_0.yaml b/connector/routingconnector/testdata/logs/log_context/with_resource_and_scope_conditions/sink_0.yaml new file mode 100644 index 000000000000..d42bbbcd478c --- /dev/null +++ b/connector/routingconnector/testdata/logs/log_context/with_resource_and_scope_conditions/sink_0.yaml @@ -0,0 +1,41 @@ +resourceLogs: + - resource: + attributes: + - key: resourceName + value: + stringValue: resourceB + - key: resourceNameAgain + value: + stringValue: resourceB + schemaUrl: https://opentelemetry.io/schemas/1.6.1 + scopeLogs: + - attributes: + - key: scopeName + value: + 
stringValue: scopeA + - key: scopeNameAgain + value: + stringValue: scopeA + logRecords: + - attributes: + - key: logName + value: + stringValue: logA + - key: logNameAgain + value: + stringValue: logA + body: + stringValue: logA + - attributes: + - key: logName + value: + stringValue: logB + - key: logNameAgain + value: + stringValue: logB + body: + stringValue: logB + schemaUrl: https://opentelemetry.io/schemas/1.6.1 + scope: + name: scopeA + version: v0.1.0 diff --git a/connector/routingconnector/testdata/logs/log_context/with_resource_and_scope_conditions/sink_default.yaml b/connector/routingconnector/testdata/logs/log_context/with_resource_and_scope_conditions/sink_default.yaml new file mode 100644 index 000000000000..28fed8ee680e --- /dev/null +++ b/connector/routingconnector/testdata/logs/log_context/with_resource_and_scope_conditions/sink_default.yaml @@ -0,0 +1,111 @@ +resourceLogs: + - resource: + attributes: + - key: resourceName + value: + stringValue: resourceA + - key: resourceNameAgain + value: + stringValue: resourceA + schemaUrl: https://opentelemetry.io/schemas/1.6.1 + scopeLogs: + - attributes: + - key: scopeName + value: + stringValue: scopeA + - key: scopeNameAgain + value: + stringValue: scopeA + logRecords: + - attributes: + - key: logName + value: + stringValue: logA + - key: logNameAgain + value: + stringValue: logA + body: + stringValue: logA + - attributes: + - key: logName + value: + stringValue: logB + - key: logNameAgain + value: + stringValue: logB + body: + stringValue: logB + schemaUrl: https://opentelemetry.io/schemas/1.6.1 + scope: + name: scopeA + version: v0.1.0 + - attributes: + - key: scopeName + value: + stringValue: scopeB + - key: scopeNameAgain + value: + stringValue: scopeB + logRecords: + - attributes: + - key: logName + value: + stringValue: logA + - key: logNameAgain + value: + stringValue: logA + body: + stringValue: logA + - attributes: + - key: logName + value: + stringValue: logB + - key: logNameAgain + value: + stringValue: logB + body: + stringValue: logB + schemaUrl: https://opentelemetry.io/schemas/1.6.1 + scope: + name: scopeB + version: v0.1.0 + - resource: + attributes: + - key: resourceName + value: + stringValue: resourceB + - key: resourceNameAgain + value: + stringValue: resourceB + schemaUrl: https://opentelemetry.io/schemas/1.6.1 + scopeLogs: + - attributes: + - key: scopeName + value: + stringValue: scopeB + - key: scopeNameAgain + value: + stringValue: scopeB + logRecords: + - attributes: + - key: logName + value: + stringValue: logA + - key: logNameAgain + value: + stringValue: logA + body: + stringValue: logA + - attributes: + - key: logName + value: + stringValue: logB + - key: logNameAgain + value: + stringValue: logB + body: + stringValue: logB + schemaUrl: https://opentelemetry.io/schemas/1.6.1 + scope: + name: scopeB + version: v0.1.0 diff --git a/connector/routingconnector/testdata/logs/log_context/with_resource_condition/config.yaml b/connector/routingconnector/testdata/logs/log_context/with_resource_condition/config.yaml new file mode 100644 index 000000000000..1907cfd8cd0e --- /dev/null +++ b/connector/routingconnector/testdata/logs/log_context/with_resource_condition/config.yaml @@ -0,0 +1,13 @@ +routing: + match_once: true + default_pipelines: + - logs/default + table: + - context: log + condition: resource.attributes["resourceName"] == "resourceB" and attributes["logName"] != nil + pipelines: + - logs/0 + - context: log + condition: attributes["logName"] == "logY" + pipelines: + - logs/1 diff --git 
a/connector/routingconnector/testdata/logs/log_context/with_resource_condition/input.yaml b/connector/routingconnector/testdata/logs/log_context/with_resource_condition/input.yaml new file mode 100644 index 000000000000..63c6eada6cf9 --- /dev/null +++ b/connector/routingconnector/testdata/logs/log_context/with_resource_condition/input.yaml @@ -0,0 +1,141 @@ +resourceLogs: + - resource: + attributes: + - key: resourceName + value: + stringValue: resourceA + - key: resourceNameAgain + value: + stringValue: resourceA + schemaUrl: https://opentelemetry.io/schemas/1.6.1 + scopeLogs: + - attributes: + - key: scopeName + value: + stringValue: scopeA + - key: scopeNameAgain + value: + stringValue: scopeA + logRecords: + - attributes: + - key: logName + value: + stringValue: logA + - key: logNameAgain + value: + stringValue: logA + body: + stringValue: logA + - attributes: + - key: logName + value: + stringValue: logB + - key: logNameAgain + value: + stringValue: logB + body: + stringValue: logB + schemaUrl: https://opentelemetry.io/schemas/1.6.1 + scope: + name: scopeA + version: v0.1.0 + - attributes: + - key: scopeName + value: + stringValue: scopeB + - key: scopeNameAgain + value: + stringValue: scopeB + logRecords: + - attributes: + - key: logName + value: + stringValue: logA + - key: logNameAgain + value: + stringValue: logA + body: + stringValue: logA + - attributes: + - key: logName + value: + stringValue: logB + - key: logNameAgain + value: + stringValue: logB + body: + stringValue: logB + schemaUrl: https://opentelemetry.io/schemas/1.6.1 + scope: + name: scopeB + version: v0.1.0 + - resource: + attributes: + - key: resourceName + value: + stringValue: resourceB + - key: resourceNameAgain + value: + stringValue: resourceB + schemaUrl: https://opentelemetry.io/schemas/1.6.1 + scopeLogs: + - attributes: + - key: scopeName + value: + stringValue: scopeA + - key: scopeNameAgain + value: + stringValue: scopeA + logRecords: + - attributes: + - key: logName + value: + stringValue: logA + - key: logNameAgain + value: + stringValue: logA + body: + stringValue: logA + - attributes: + - key: logName + value: + stringValue: logB + - key: logNameAgain + value: + stringValue: logB + body: + stringValue: logB + schemaUrl: https://opentelemetry.io/schemas/1.6.1 + scope: + name: scopeA + version: v0.1.0 + - attributes: + - key: scopeName + value: + stringValue: scopeB + - key: scopeNameAgain + value: + stringValue: scopeB + logRecords: + - attributes: + - key: logName + value: + stringValue: logA + - key: logNameAgain + value: + stringValue: logA + body: + stringValue: logA + - attributes: + - key: logName + value: + stringValue: logB + - key: logNameAgain + value: + stringValue: logB + body: + stringValue: logB + schemaUrl: https://opentelemetry.io/schemas/1.6.1 + scope: + name: scopeB + version: v0.1.0 diff --git a/connector/routingconnector/testdata/logs/log_context/with_resource_condition/sink_0.yaml b/connector/routingconnector/testdata/logs/log_context/with_resource_condition/sink_0.yaml new file mode 100644 index 000000000000..28a5a7c8b0f5 --- /dev/null +++ b/connector/routingconnector/testdata/logs/log_context/with_resource_condition/sink_0.yaml @@ -0,0 +1,71 @@ +resourceLogs: + - resource: + attributes: + - key: resourceName + value: + stringValue: resourceB + - key: resourceNameAgain + value: + stringValue: resourceB + schemaUrl: https://opentelemetry.io/schemas/1.6.1 + scopeLogs: + - attributes: + - key: scopeName + value: + stringValue: scopeA + - key: scopeNameAgain + value: + stringValue: 
scopeA + logRecords: + - attributes: + - key: logName + value: + stringValue: logA + - key: logNameAgain + value: + stringValue: logA + body: + stringValue: logA + - attributes: + - key: logName + value: + stringValue: logB + - key: logNameAgain + value: + stringValue: logB + body: + stringValue: logB + schemaUrl: https://opentelemetry.io/schemas/1.6.1 + scope: + name: scopeA + version: v0.1.0 + - attributes: + - key: scopeName + value: + stringValue: scopeB + - key: scopeNameAgain + value: + stringValue: scopeB + logRecords: + - attributes: + - key: logName + value: + stringValue: logA + - key: logNameAgain + value: + stringValue: logA + body: + stringValue: logA + - attributes: + - key: logName + value: + stringValue: logB + - key: logNameAgain + value: + stringValue: logB + body: + stringValue: logB + schemaUrl: https://opentelemetry.io/schemas/1.6.1 + scope: + name: scopeB + version: v0.1.0 diff --git a/connector/routingconnector/testdata/logs/log_context/with_resource_condition/sink_default.yaml b/connector/routingconnector/testdata/logs/log_context/with_resource_condition/sink_default.yaml new file mode 100644 index 000000000000..72f617672bf3 --- /dev/null +++ b/connector/routingconnector/testdata/logs/log_context/with_resource_condition/sink_default.yaml @@ -0,0 +1,71 @@ +resourceLogs: + - resource: + attributes: + - key: resourceName + value: + stringValue: resourceA + - key: resourceNameAgain + value: + stringValue: resourceA + schemaUrl: https://opentelemetry.io/schemas/1.6.1 + scopeLogs: + - attributes: + - key: scopeName + value: + stringValue: scopeA + - key: scopeNameAgain + value: + stringValue: scopeA + logRecords: + - attributes: + - key: logName + value: + stringValue: logA + - key: logNameAgain + value: + stringValue: logA + body: + stringValue: logA + - attributes: + - key: logName + value: + stringValue: logB + - key: logNameAgain + value: + stringValue: logB + body: + stringValue: logB + schemaUrl: https://opentelemetry.io/schemas/1.6.1 + scope: + name: scopeA + version: v0.1.0 + - attributes: + - key: scopeName + value: + stringValue: scopeB + - key: scopeNameAgain + value: + stringValue: scopeB + logRecords: + - attributes: + - key: logName + value: + stringValue: logA + - key: logNameAgain + value: + stringValue: logA + body: + stringValue: logA + - attributes: + - key: logName + value: + stringValue: logB + - key: logNameAgain + value: + stringValue: logB + body: + stringValue: logB + schemaUrl: https://opentelemetry.io/schemas/1.6.1 + scope: + name: scopeB + version: v0.1.0 diff --git a/connector/routingconnector/testdata/logs/log_context/with_scope_condition/config.yaml b/connector/routingconnector/testdata/logs/log_context/with_scope_condition/config.yaml new file mode 100644 index 000000000000..1b6eeba670b4 --- /dev/null +++ b/connector/routingconnector/testdata/logs/log_context/with_scope_condition/config.yaml @@ -0,0 +1,13 @@ +routing: + match_once: true + default_pipelines: + - logs/default + table: + - context: log + condition: instrumentation_scope.name == "scopeB" and attributes["logName"] != nil + pipelines: + - logs/0 + - context: log + condition: attributes["logName"] == "logY" + pipelines: + - logs/1 diff --git a/connector/routingconnector/testdata/logs/log_context/with_scope_condition/input.yaml b/connector/routingconnector/testdata/logs/log_context/with_scope_condition/input.yaml new file mode 100644 index 000000000000..63c6eada6cf9 --- /dev/null +++ b/connector/routingconnector/testdata/logs/log_context/with_scope_condition/input.yaml @@ -0,0 
+1,141 @@ +resourceLogs: + - resource: + attributes: + - key: resourceName + value: + stringValue: resourceA + - key: resourceNameAgain + value: + stringValue: resourceA + schemaUrl: https://opentelemetry.io/schemas/1.6.1 + scopeLogs: + - attributes: + - key: scopeName + value: + stringValue: scopeA + - key: scopeNameAgain + value: + stringValue: scopeA + logRecords: + - attributes: + - key: logName + value: + stringValue: logA + - key: logNameAgain + value: + stringValue: logA + body: + stringValue: logA + - attributes: + - key: logName + value: + stringValue: logB + - key: logNameAgain + value: + stringValue: logB + body: + stringValue: logB + schemaUrl: https://opentelemetry.io/schemas/1.6.1 + scope: + name: scopeA + version: v0.1.0 + - attributes: + - key: scopeName + value: + stringValue: scopeB + - key: scopeNameAgain + value: + stringValue: scopeB + logRecords: + - attributes: + - key: logName + value: + stringValue: logA + - key: logNameAgain + value: + stringValue: logA + body: + stringValue: logA + - attributes: + - key: logName + value: + stringValue: logB + - key: logNameAgain + value: + stringValue: logB + body: + stringValue: logB + schemaUrl: https://opentelemetry.io/schemas/1.6.1 + scope: + name: scopeB + version: v0.1.0 + - resource: + attributes: + - key: resourceName + value: + stringValue: resourceB + - key: resourceNameAgain + value: + stringValue: resourceB + schemaUrl: https://opentelemetry.io/schemas/1.6.1 + scopeLogs: + - attributes: + - key: scopeName + value: + stringValue: scopeA + - key: scopeNameAgain + value: + stringValue: scopeA + logRecords: + - attributes: + - key: logName + value: + stringValue: logA + - key: logNameAgain + value: + stringValue: logA + body: + stringValue: logA + - attributes: + - key: logName + value: + stringValue: logB + - key: logNameAgain + value: + stringValue: logB + body: + stringValue: logB + schemaUrl: https://opentelemetry.io/schemas/1.6.1 + scope: + name: scopeA + version: v0.1.0 + - attributes: + - key: scopeName + value: + stringValue: scopeB + - key: scopeNameAgain + value: + stringValue: scopeB + logRecords: + - attributes: + - key: logName + value: + stringValue: logA + - key: logNameAgain + value: + stringValue: logA + body: + stringValue: logA + - attributes: + - key: logName + value: + stringValue: logB + - key: logNameAgain + value: + stringValue: logB + body: + stringValue: logB + schemaUrl: https://opentelemetry.io/schemas/1.6.1 + scope: + name: scopeB + version: v0.1.0 diff --git a/connector/routingconnector/testdata/logs/log_context/with_scope_condition/sink_0.yaml b/connector/routingconnector/testdata/logs/log_context/with_scope_condition/sink_0.yaml new file mode 100644 index 000000000000..8c8745158e33 --- /dev/null +++ b/connector/routingconnector/testdata/logs/log_context/with_scope_condition/sink_0.yaml @@ -0,0 +1,81 @@ +resourceLogs: + - resource: + attributes: + - key: resourceName + value: + stringValue: resourceA + - key: resourceNameAgain + value: + stringValue: resourceA + schemaUrl: https://opentelemetry.io/schemas/1.6.1 + scopeLogs: + - attributes: + - key: scopeName + value: + stringValue: scopeB + - key: scopeNameAgain + value: + stringValue: scopeB + logRecords: + - attributes: + - key: logName + value: + stringValue: logA + - key: logNameAgain + value: + stringValue: logA + body: + stringValue: logA + - attributes: + - key: logName + value: + stringValue: logB + - key: logNameAgain + value: + stringValue: logB + body: + stringValue: logB + schemaUrl: https://opentelemetry.io/schemas/1.6.1 + 
scope: + name: scopeB + version: v0.1.0 + - resource: + attributes: + - key: resourceName + value: + stringValue: resourceB + - key: resourceNameAgain + value: + stringValue: resourceB + schemaUrl: https://opentelemetry.io/schemas/1.6.1 + scopeLogs: + - attributes: + - key: scopeName + value: + stringValue: scopeB + - key: scopeNameAgain + value: + stringValue: scopeB + logRecords: + - attributes: + - key: logName + value: + stringValue: logA + - key: logNameAgain + value: + stringValue: logA + body: + stringValue: logA + - attributes: + - key: logName + value: + stringValue: logB + - key: logNameAgain + value: + stringValue: logB + body: + stringValue: logB + schemaUrl: https://opentelemetry.io/schemas/1.6.1 + scope: + name: scopeB + version: v0.1.0 diff --git a/connector/routingconnector/testdata/logs/log_context/with_scope_condition/sink_default.yaml b/connector/routingconnector/testdata/logs/log_context/with_scope_condition/sink_default.yaml new file mode 100644 index 000000000000..71ca02311582 --- /dev/null +++ b/connector/routingconnector/testdata/logs/log_context/with_scope_condition/sink_default.yaml @@ -0,0 +1,81 @@ +resourceLogs: + - resource: + attributes: + - key: resourceName + value: + stringValue: resourceA + - key: resourceNameAgain + value: + stringValue: resourceA + schemaUrl: https://opentelemetry.io/schemas/1.6.1 + scopeLogs: + - attributes: + - key: scopeName + value: + stringValue: scopeA + - key: scopeNameAgain + value: + stringValue: scopeA + logRecords: + - attributes: + - key: logName + value: + stringValue: logA + - key: logNameAgain + value: + stringValue: logA + body: + stringValue: logA + - attributes: + - key: logName + value: + stringValue: logB + - key: logNameAgain + value: + stringValue: logB + body: + stringValue: logB + schemaUrl: https://opentelemetry.io/schemas/1.6.1 + scope: + name: scopeA + version: v0.1.0 + - resource: + attributes: + - key: resourceName + value: + stringValue: resourceB + - key: resourceNameAgain + value: + stringValue: resourceB + schemaUrl: https://opentelemetry.io/schemas/1.6.1 + scopeLogs: + - attributes: + - key: scopeName + value: + stringValue: scopeA + - key: scopeNameAgain + value: + stringValue: scopeA + logRecords: + - attributes: + - key: logName + value: + stringValue: logA + - key: logNameAgain + value: + stringValue: logA + body: + stringValue: logA + - attributes: + - key: logName + value: + stringValue: logB + - key: logNameAgain + value: + stringValue: logB + body: + stringValue: logB + schemaUrl: https://opentelemetry.io/schemas/1.6.1 + scope: + name: scopeA + version: v0.1.0 diff --git a/connector/routingconnector/testdata/logs/mixed_context/match_logs_then_resource/config.yaml b/connector/routingconnector/testdata/logs/mixed_context/match_logs_then_resource/config.yaml new file mode 100644 index 000000000000..14fc87417bf2 --- /dev/null +++ b/connector/routingconnector/testdata/logs/mixed_context/match_logs_then_resource/config.yaml @@ -0,0 +1,13 @@ +routing: + match_once: true + default_pipelines: + - logs/default + table: + - context: log + condition: attributes["logName"] == "logA" + pipelines: + - logs/0 + - context: resource + condition: attributes["resourceName"] == "resourceB" + pipelines: + - logs/1 diff --git a/connector/routingconnector/testdata/logs/mixed_context/match_logs_then_resource/input.yaml b/connector/routingconnector/testdata/logs/mixed_context/match_logs_then_resource/input.yaml new file mode 100644 index 000000000000..63c6eada6cf9 --- /dev/null +++ 
b/connector/routingconnector/testdata/logs/mixed_context/match_logs_then_resource/input.yaml @@ -0,0 +1,141 @@ +resourceLogs: + - resource: + attributes: + - key: resourceName + value: + stringValue: resourceA + - key: resourceNameAgain + value: + stringValue: resourceA + schemaUrl: https://opentelemetry.io/schemas/1.6.1 + scopeLogs: + - attributes: + - key: scopeName + value: + stringValue: scopeA + - key: scopeNameAgain + value: + stringValue: scopeA + logRecords: + - attributes: + - key: logName + value: + stringValue: logA + - key: logNameAgain + value: + stringValue: logA + body: + stringValue: logA + - attributes: + - key: logName + value: + stringValue: logB + - key: logNameAgain + value: + stringValue: logB + body: + stringValue: logB + schemaUrl: https://opentelemetry.io/schemas/1.6.1 + scope: + name: scopeA + version: v0.1.0 + - attributes: + - key: scopeName + value: + stringValue: scopeB + - key: scopeNameAgain + value: + stringValue: scopeB + logRecords: + - attributes: + - key: logName + value: + stringValue: logA + - key: logNameAgain + value: + stringValue: logA + body: + stringValue: logA + - attributes: + - key: logName + value: + stringValue: logB + - key: logNameAgain + value: + stringValue: logB + body: + stringValue: logB + schemaUrl: https://opentelemetry.io/schemas/1.6.1 + scope: + name: scopeB + version: v0.1.0 + - resource: + attributes: + - key: resourceName + value: + stringValue: resourceB + - key: resourceNameAgain + value: + stringValue: resourceB + schemaUrl: https://opentelemetry.io/schemas/1.6.1 + scopeLogs: + - attributes: + - key: scopeName + value: + stringValue: scopeA + - key: scopeNameAgain + value: + stringValue: scopeA + logRecords: + - attributes: + - key: logName + value: + stringValue: logA + - key: logNameAgain + value: + stringValue: logA + body: + stringValue: logA + - attributes: + - key: logName + value: + stringValue: logB + - key: logNameAgain + value: + stringValue: logB + body: + stringValue: logB + schemaUrl: https://opentelemetry.io/schemas/1.6.1 + scope: + name: scopeA + version: v0.1.0 + - attributes: + - key: scopeName + value: + stringValue: scopeB + - key: scopeNameAgain + value: + stringValue: scopeB + logRecords: + - attributes: + - key: logName + value: + stringValue: logA + - key: logNameAgain + value: + stringValue: logA + body: + stringValue: logA + - attributes: + - key: logName + value: + stringValue: logB + - key: logNameAgain + value: + stringValue: logB + body: + stringValue: logB + schemaUrl: https://opentelemetry.io/schemas/1.6.1 + scope: + name: scopeB + version: v0.1.0 diff --git a/connector/routingconnector/testdata/logs/mixed_context/match_logs_then_resource/sink_0.yaml b/connector/routingconnector/testdata/logs/mixed_context/match_logs_then_resource/sink_0.yaml new file mode 100644 index 000000000000..39604c6017d6 --- /dev/null +++ b/connector/routingconnector/testdata/logs/mixed_context/match_logs_then_resource/sink_0.yaml @@ -0,0 +1,105 @@ +resourceLogs: + - resource: + attributes: + - key: resourceName + value: + stringValue: resourceA + - key: resourceNameAgain + value: + stringValue: resourceA + schemaUrl: https://opentelemetry.io/schemas/1.6.1 + scopeLogs: + - attributes: + - key: scopeName + value: + stringValue: scopeA + - key: scopeNameAgain + value: + stringValue: scopeA + logRecords: + - attributes: + - key: logName + value: + stringValue: logA + - key: logNameAgain + value: + stringValue: logA + body: + stringValue: logA + schemaUrl: https://opentelemetry.io/schemas/1.6.1 + scope: + name: scopeA + 
version: v0.1.0 + - attributes: + - key: scopeName + value: + stringValue: scopeB + - key: scopeNameAgain + value: + stringValue: scopeB + logRecords: + - attributes: + - key: logName + value: + stringValue: logA + - key: logNameAgain + value: + stringValue: logA + body: + stringValue: logA + schemaUrl: https://opentelemetry.io/schemas/1.6.1 + scope: + name: scopeB + version: v0.1.0 + - resource: + attributes: + - key: resourceName + value: + stringValue: resourceB + - key: resourceNameAgain + value: + stringValue: resourceB + schemaUrl: https://opentelemetry.io/schemas/1.6.1 + scopeLogs: + - attributes: + - key: scopeName + value: + stringValue: scopeA + - key: scopeNameAgain + value: + stringValue: scopeA + logRecords: + - attributes: + - key: logName + value: + stringValue: logA + - key: logNameAgain + value: + stringValue: logA + body: + stringValue: logA + schemaUrl: https://opentelemetry.io/schemas/1.6.1 + scope: + name: scopeA + version: v0.1.0 + - attributes: + - key: scopeName + value: + stringValue: scopeB + - key: scopeNameAgain + value: + stringValue: scopeB + logRecords: + - attributes: + - key: logName + value: + stringValue: logA + - key: logNameAgain + value: + stringValue: logA + body: + stringValue: logA + schemaUrl: https://opentelemetry.io/schemas/1.6.1 + scope: + name: scopeB + version: v0.1.0 diff --git a/connector/routingconnector/testdata/logs/mixed_context/match_logs_then_resource/sink_1.yaml b/connector/routingconnector/testdata/logs/mixed_context/match_logs_then_resource/sink_1.yaml new file mode 100644 index 000000000000..ba61f2992fbf --- /dev/null +++ b/connector/routingconnector/testdata/logs/mixed_context/match_logs_then_resource/sink_1.yaml @@ -0,0 +1,53 @@ +resourceLogs: + - resource: + attributes: + - key: resourceName + value: + stringValue: resourceB + - key: resourceNameAgain + value: + stringValue: resourceB + schemaUrl: https://opentelemetry.io/schemas/1.6.1 + scopeLogs: + - attributes: + - key: scopeName + value: + stringValue: scopeA + - key: scopeNameAgain + value: + stringValue: scopeA + logRecords: + - attributes: + - key: logName + value: + stringValue: logB + - key: logNameAgain + value: + stringValue: logB + body: + stringValue: logB + schemaUrl: https://opentelemetry.io/schemas/1.6.1 + scope: + name: scopeA + version: v0.1.0 + - attributes: + - key: scopeName + value: + stringValue: scopeB + - key: scopeNameAgain + value: + stringValue: scopeB + logRecords: + - attributes: + - key: logName + value: + stringValue: logB + - key: logNameAgain + value: + stringValue: logB + body: + stringValue: logB + schemaUrl: https://opentelemetry.io/schemas/1.6.1 + scope: + name: scopeB + version: v0.1.0 diff --git a/connector/routingconnector/testdata/logs/mixed_context/match_logs_then_resource/sink_default.yaml b/connector/routingconnector/testdata/logs/mixed_context/match_logs_then_resource/sink_default.yaml new file mode 100644 index 000000000000..b20626892e83 --- /dev/null +++ b/connector/routingconnector/testdata/logs/mixed_context/match_logs_then_resource/sink_default.yaml @@ -0,0 +1,53 @@ +resourceLogs: + - resource: + attributes: + - key: resourceName + value: + stringValue: resourceA + - key: resourceNameAgain + value: + stringValue: resourceA + schemaUrl: https://opentelemetry.io/schemas/1.6.1 + scopeLogs: + - attributes: + - key: scopeName + value: + stringValue: scopeA + - key: scopeNameAgain + value: + stringValue: scopeA + logRecords: + - attributes: + - key: logName + value: + stringValue: logB + - key: logNameAgain + value: + stringValue: 
logB + body: + stringValue: logB + schemaUrl: https://opentelemetry.io/schemas/1.6.1 + scope: + name: scopeA + version: v0.1.0 + - attributes: + - key: scopeName + value: + stringValue: scopeB + - key: scopeNameAgain + value: + stringValue: scopeB + logRecords: + - attributes: + - key: logName + value: + stringValue: logB + - key: logNameAgain + value: + stringValue: logB + body: + stringValue: logB + schemaUrl: https://opentelemetry.io/schemas/1.6.1 + scope: + name: scopeB + version: v0.1.0 \ No newline at end of file diff --git a/connector/routingconnector/testdata/logs/mixed_context/match_resource_then_logs/config.yaml b/connector/routingconnector/testdata/logs/mixed_context/match_resource_then_logs/config.yaml new file mode 100644 index 000000000000..ab15a654b35a --- /dev/null +++ b/connector/routingconnector/testdata/logs/mixed_context/match_resource_then_logs/config.yaml @@ -0,0 +1,13 @@ +routing: + match_once: true + default_pipelines: + - logs/default + table: + - context: resource + condition: attributes["resourceName"] == "resourceA" + pipelines: + - logs/0 + - context: log + condition: attributes["logName"] == "logB" + pipelines: + - logs/1 diff --git a/connector/routingconnector/testdata/logs/mixed_context/match_resource_then_logs/input.yaml b/connector/routingconnector/testdata/logs/mixed_context/match_resource_then_logs/input.yaml new file mode 100644 index 000000000000..63c6eada6cf9 --- /dev/null +++ b/connector/routingconnector/testdata/logs/mixed_context/match_resource_then_logs/input.yaml @@ -0,0 +1,141 @@ +resourceLogs: + - resource: + attributes: + - key: resourceName + value: + stringValue: resourceA + - key: resourceNameAgain + value: + stringValue: resourceA + schemaUrl: https://opentelemetry.io/schemas/1.6.1 + scopeLogs: + - attributes: + - key: scopeName + value: + stringValue: scopeA + - key: scopeNameAgain + value: + stringValue: scopeA + logRecords: + - attributes: + - key: logName + value: + stringValue: logA + - key: logNameAgain + value: + stringValue: logA + body: + stringValue: logA + - attributes: + - key: logName + value: + stringValue: logB + - key: logNameAgain + value: + stringValue: logB + body: + stringValue: logB + schemaUrl: https://opentelemetry.io/schemas/1.6.1 + scope: + name: scopeA + version: v0.1.0 + - attributes: + - key: scopeName + value: + stringValue: scopeB + - key: scopeNameAgain + value: + stringValue: scopeB + logRecords: + - attributes: + - key: logName + value: + stringValue: logA + - key: logNameAgain + value: + stringValue: logA + body: + stringValue: logA + - attributes: + - key: logName + value: + stringValue: logB + - key: logNameAgain + value: + stringValue: logB + body: + stringValue: logB + schemaUrl: https://opentelemetry.io/schemas/1.6.1 + scope: + name: scopeB + version: v0.1.0 + - resource: + attributes: + - key: resourceName + value: + stringValue: resourceB + - key: resourceNameAgain + value: + stringValue: resourceB + schemaUrl: https://opentelemetry.io/schemas/1.6.1 + scopeLogs: + - attributes: + - key: scopeName + value: + stringValue: scopeA + - key: scopeNameAgain + value: + stringValue: scopeA + logRecords: + - attributes: + - key: logName + value: + stringValue: logA + - key: logNameAgain + value: + stringValue: logA + body: + stringValue: logA + - attributes: + - key: logName + value: + stringValue: logB + - key: logNameAgain + value: + stringValue: logB + body: + stringValue: logB + schemaUrl: https://opentelemetry.io/schemas/1.6.1 + scope: + name: scopeA + version: v0.1.0 + - attributes: + - key: scopeName + 
value: + stringValue: scopeB + - key: scopeNameAgain + value: + stringValue: scopeB + logRecords: + - attributes: + - key: logName + value: + stringValue: logA + - key: logNameAgain + value: + stringValue: logA + body: + stringValue: logA + - attributes: + - key: logName + value: + stringValue: logB + - key: logNameAgain + value: + stringValue: logB + body: + stringValue: logB + schemaUrl: https://opentelemetry.io/schemas/1.6.1 + scope: + name: scopeB + version: v0.1.0 diff --git a/connector/routingconnector/testdata/logs/mixed_context/match_resource_then_logs/sink_0.yaml b/connector/routingconnector/testdata/logs/mixed_context/match_resource_then_logs/sink_0.yaml new file mode 100644 index 000000000000..72f617672bf3 --- /dev/null +++ b/connector/routingconnector/testdata/logs/mixed_context/match_resource_then_logs/sink_0.yaml @@ -0,0 +1,71 @@ +resourceLogs: + - resource: + attributes: + - key: resourceName + value: + stringValue: resourceA + - key: resourceNameAgain + value: + stringValue: resourceA + schemaUrl: https://opentelemetry.io/schemas/1.6.1 + scopeLogs: + - attributes: + - key: scopeName + value: + stringValue: scopeA + - key: scopeNameAgain + value: + stringValue: scopeA + logRecords: + - attributes: + - key: logName + value: + stringValue: logA + - key: logNameAgain + value: + stringValue: logA + body: + stringValue: logA + - attributes: + - key: logName + value: + stringValue: logB + - key: logNameAgain + value: + stringValue: logB + body: + stringValue: logB + schemaUrl: https://opentelemetry.io/schemas/1.6.1 + scope: + name: scopeA + version: v0.1.0 + - attributes: + - key: scopeName + value: + stringValue: scopeB + - key: scopeNameAgain + value: + stringValue: scopeB + logRecords: + - attributes: + - key: logName + value: + stringValue: logA + - key: logNameAgain + value: + stringValue: logA + body: + stringValue: logA + - attributes: + - key: logName + value: + stringValue: logB + - key: logNameAgain + value: + stringValue: logB + body: + stringValue: logB + schemaUrl: https://opentelemetry.io/schemas/1.6.1 + scope: + name: scopeB + version: v0.1.0 diff --git a/connector/routingconnector/testdata/logs/mixed_context/match_resource_then_logs/sink_1.yaml b/connector/routingconnector/testdata/logs/mixed_context/match_resource_then_logs/sink_1.yaml new file mode 100644 index 000000000000..ba61f2992fbf --- /dev/null +++ b/connector/routingconnector/testdata/logs/mixed_context/match_resource_then_logs/sink_1.yaml @@ -0,0 +1,53 @@ +resourceLogs: + - resource: + attributes: + - key: resourceName + value: + stringValue: resourceB + - key: resourceNameAgain + value: + stringValue: resourceB + schemaUrl: https://opentelemetry.io/schemas/1.6.1 + scopeLogs: + - attributes: + - key: scopeName + value: + stringValue: scopeA + - key: scopeNameAgain + value: + stringValue: scopeA + logRecords: + - attributes: + - key: logName + value: + stringValue: logB + - key: logNameAgain + value: + stringValue: logB + body: + stringValue: logB + schemaUrl: https://opentelemetry.io/schemas/1.6.1 + scope: + name: scopeA + version: v0.1.0 + - attributes: + - key: scopeName + value: + stringValue: scopeB + - key: scopeNameAgain + value: + stringValue: scopeB + logRecords: + - attributes: + - key: logName + value: + stringValue: logB + - key: logNameAgain + value: + stringValue: logB + body: + stringValue: logB + schemaUrl: https://opentelemetry.io/schemas/1.6.1 + scope: + name: scopeB + version: v0.1.0 diff --git 
a/connector/routingconnector/testdata/logs/mixed_context/match_resource_then_logs/sink_default.yaml b/connector/routingconnector/testdata/logs/mixed_context/match_resource_then_logs/sink_default.yaml new file mode 100644 index 000000000000..a2e7ef72f97e --- /dev/null +++ b/connector/routingconnector/testdata/logs/mixed_context/match_resource_then_logs/sink_default.yaml @@ -0,0 +1,53 @@ +resourceLogs: + - resource: + attributes: + - key: resourceName + value: + stringValue: resourceB + - key: resourceNameAgain + value: + stringValue: resourceB + schemaUrl: https://opentelemetry.io/schemas/1.6.1 + scopeLogs: + - attributes: + - key: scopeName + value: + stringValue: scopeA + - key: scopeNameAgain + value: + stringValue: scopeA + logRecords: + - attributes: + - key: logName + value: + stringValue: logA + - key: logNameAgain + value: + stringValue: logA + body: + stringValue: logA + schemaUrl: https://opentelemetry.io/schemas/1.6.1 + scope: + name: scopeA + version: v0.1.0 + - attributes: + - key: scopeName + value: + stringValue: scopeB + - key: scopeNameAgain + value: + stringValue: scopeB + logRecords: + - attributes: + - key: logName + value: + stringValue: logA + - key: logNameAgain + value: + stringValue: logA + body: + stringValue: logA + schemaUrl: https://opentelemetry.io/schemas/1.6.1 + scope: + name: scopeB + version: v0.1.0 diff --git a/connector/routingconnector/traces.go b/connector/routingconnector/traces.go index 73da17b7e3f1..98906fd82460 100644 --- a/connector/routingconnector/traces.go +++ b/connector/routingconnector/traces.go @@ -72,7 +72,7 @@ func (c *tracesConnector) ConsumeTraces(ctx context.Context, t ptrace.Traces) er noRoutesMatch := true for _, route := range c.router.routeSlice { - _, isMatch, err := route.statement.Execute(ctx, rtx) + _, isMatch, err := route.resourceStatement.Execute(ctx, rtx) if err != nil { if c.config.ErrorMode == ottl.PropagateError { return err From 2416e90067b973e011967285b7583c979077c180 Mon Sep 17 00:00:00 2001 From: xu0o0 Date: Sat, 26 Oct 2024 03:19:01 +0800 Subject: [PATCH 2/4] [receiver/otlpjsonfile] Add support for profiles signal (#35995) #### Description Add support for profiles signal to `otlpjsonfilereceiver`. #### Link to tracking issue Fixes #35977 #### Testing - add `TestFileProfilesReceiver` - update `TestFileMixedSignals` --- .chloggen/receiver-otlpjsonfile-profiles.yaml | 27 ++++++++ receiver/otlpjsonfilereceiver/README.md | 4 +- receiver/otlpjsonfilereceiver/file.go | 33 ++++++++-- receiver/otlpjsonfilereceiver/file_test.go | 44 +++++++++++++ receiver/otlpjsonfilereceiver/go.mod | 14 +++-- .../internal/metadata/generated_status.go | 7 ++- .../internal/metadata/generated_telemetry.go | 17 +++++ .../metadata/generated_telemetry_test.go | 63 +++++++++++++++++++ receiver/otlpjsonfilereceiver/metadata.yaml | 1 + 9 files changed, 196 insertions(+), 14 deletions(-) create mode 100644 .chloggen/receiver-otlpjsonfile-profiles.yaml create mode 100644 receiver/otlpjsonfilereceiver/internal/metadata/generated_telemetry.go create mode 100644 receiver/otlpjsonfilereceiver/internal/metadata/generated_telemetry_test.go diff --git a/.chloggen/receiver-otlpjsonfile-profiles.yaml b/.chloggen/receiver-otlpjsonfile-profiles.yaml new file mode 100644 index 000000000000..897522257bed --- /dev/null +++ b/.chloggen/receiver-otlpjsonfile-profiles.yaml @@ -0,0 +1,27 @@ +# Use this changelog template to create an entry for release notes. 
+ +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: enhancement + +# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver) +component: otlpjsonfilereceiver + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: Add support for profiles signal + +# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. +issues: [35977] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: + +# If your change doesn't affect end users or the exported elements of any package, +# you should instead start your pull request title with [chore] or use the "Skip Changelog" label. +# Optional: The change log or logs in which this entry should be included. +# e.g. '[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. +# Default: '[user]' +change_logs: [] diff --git a/receiver/otlpjsonfilereceiver/README.md b/receiver/otlpjsonfilereceiver/README.md index 6b9738bc1a4a..b5de33def89f 100644 --- a/receiver/otlpjsonfilereceiver/README.md +++ b/receiver/otlpjsonfilereceiver/README.md @@ -3,11 +3,13 @@ | Status | | | ------------- |-----------| -| Stability | [alpha]: traces, metrics, logs | +| Stability | [development]: profiles | +| | [alpha]: traces, metrics, logs | | Distributions | [contrib] | | Issues | [![Open issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aopen%20label%3Areceiver%2Fotlpjsonfile%20&label=open&color=orange&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aopen+is%3Aissue+label%3Areceiver%2Fotlpjsonfile) [![Closed issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aclosed%20label%3Areceiver%2Fotlpjsonfile%20&label=closed&color=blue&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aclosed+is%3Aissue+label%3Areceiver%2Fotlpjsonfile) | | [Code Owners](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/CONTRIBUTING.md#becoming-a-code-owner) | [@djaglowski](https://www.github.com/djaglowski), [@atoulme](https://www.github.com/atoulme) | +[development]: https://github.com/open-telemetry/opentelemetry-collector#development [alpha]: https://github.com/open-telemetry/opentelemetry-collector#alpha [contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib diff --git a/receiver/otlpjsonfilereceiver/file.go b/receiver/otlpjsonfilereceiver/file.go index a9f7541f4875..aad1ddef6561 100644 --- a/receiver/otlpjsonfilereceiver/file.go +++ b/receiver/otlpjsonfilereceiver/file.go @@ -8,11 +8,14 @@ import ( "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/consumer/consumerprofiles" "go.opentelemetry.io/collector/pdata/plog" "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/pdata/pprofile" "go.opentelemetry.io/collector/pdata/ptrace" "go.opentelemetry.io/collector/receiver" "go.opentelemetry.io/collector/receiver/receiverhelper" + 
"go.opentelemetry.io/collector/receiver/receiverprofiles" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/adapter" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer" @@ -25,12 +28,13 @@ const ( // NewFactory creates a factory for file receiver func NewFactory() receiver.Factory { - return receiver.NewFactory( + return receiverprofiles.NewFactory( metadata.Type, createDefaultConfig, - receiver.WithMetrics(createMetricsReceiver, metadata.MetricsStability), - receiver.WithLogs(createLogsReceiver, metadata.LogsStability), - receiver.WithTraces(createTracesReceiver, metadata.TracesStability)) + receiverprofiles.WithMetrics(createMetricsReceiver, metadata.MetricsStability), + receiverprofiles.WithLogs(createLogsReceiver, metadata.LogsStability), + receiverprofiles.WithTraces(createTracesReceiver, metadata.TracesStability), + receiverprofiles.WithProfiles(createProfilesReceiver, metadata.ProfilesStability)) } type Config struct { @@ -171,3 +175,24 @@ func createTracesReceiver(_ context.Context, settings receiver.Settings, configu return &otlpjsonfilereceiver{input: input, id: settings.ID, storageID: cfg.StorageID}, nil } + +func createProfilesReceiver(_ context.Context, settings receiver.Settings, configuration component.Config, profiles consumerprofiles.Profiles) (receiverprofiles.Profiles, error) { + profilesUnmarshaler := &pprofile.JSONUnmarshaler{} + cfg := configuration.(*Config) + opts := make([]fileconsumer.Option, 0) + if cfg.ReplayFile { + opts = append(opts, fileconsumer.WithNoTracking()) + } + input, err := cfg.Config.Build(settings.TelemetrySettings, func(ctx context.Context, token []byte, _ map[string]any) error { + p, _ := profilesUnmarshaler.UnmarshalProfiles(token) + if p.ResourceProfiles().Len() != 0 { + _ = profiles.ConsumeProfiles(ctx, p) + } + return nil + }, opts...) 
+ if err != nil { + return nil, err + } + + return &otlpjsonfilereceiver{input: input, id: settings.ID, storageID: cfg.StorageID}, nil +} diff --git a/receiver/otlpjsonfilereceiver/file_test.go b/receiver/otlpjsonfilereceiver/file_test.go index dc60dfbdac12..db75e5293a68 100644 --- a/receiver/otlpjsonfilereceiver/file_test.go +++ b/receiver/otlpjsonfilereceiver/file_test.go @@ -18,8 +18,10 @@ import ( "go.opentelemetry.io/collector/consumer/consumertest" "go.opentelemetry.io/collector/pdata/plog" "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/pdata/pprofile" "go.opentelemetry.io/collector/pdata/ptrace" "go.opentelemetry.io/collector/pdata/testdata" + "go.opentelemetry.io/collector/receiver/receiverprofiles" "go.opentelemetry.io/collector/receiver/receivertest" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer" @@ -35,6 +37,33 @@ func TestDefaultConfig(t *testing.T) { require.NoError(t, componenttest.CheckConfigStruct(cfg)) } +func TestFileProfilesReceiver(t *testing.T) { + tempFolder := t.TempDir() + factory := NewFactory() + cfg := createDefaultConfig().(*Config) + cfg.Config.Include = []string{filepath.Join(tempFolder, "*")} + cfg.Config.StartAt = "beginning" + sink := new(consumertest.ProfilesSink) + receiver, err := factory.(receiverprofiles.Factory).CreateProfiles(context.Background(), receivertest.NewNopSettings(), cfg, sink) + assert.NoError(t, err) + err = receiver.Start(context.Background(), nil) + require.NoError(t, err) + + pd := testdata.GenerateProfiles(5) + marshaler := &pprofile.JSONMarshaler{} + b, err := marshaler.MarshalProfiles(pd) + assert.NoError(t, err) + b = append(b, '\n') + err = os.WriteFile(filepath.Join(tempFolder, "profiles.json"), b, 0600) + assert.NoError(t, err) + time.Sleep(1 * time.Second) + + require.Len(t, sink.AllProfiles(), 1) + assert.EqualValues(t, pd, sink.AllProfiles()[0]) + err = receiver.Shutdown(context.Background()) + assert.NoError(t, err) +} + func TestFileTracesReceiver(t *testing.T) { tempFolder := t.TempDir() factory := NewFactory() @@ -213,6 +242,11 @@ func TestFileMixedSignals(t *testing.T) { assert.NoError(t, err) err = lr.Start(context.Background(), nil) assert.NoError(t, err) + ps := new(consumertest.ProfilesSink) + pr, err := factory.(receiverprofiles.Factory).CreateProfiles(context.Background(), cs, cfg, ps) + assert.NoError(t, err) + err = pr.Start(context.Background(), nil) + assert.NoError(t, err) md := testdata.GenerateMetrics(5) marshaler := &pmetric.JSONMarshaler{} @@ -226,11 +260,17 @@ func TestFileMixedSignals(t *testing.T) { lmarshaler := &plog.JSONMarshaler{} b3, err := lmarshaler.MarshalLogs(ld) assert.NoError(t, err) + pd := testdata.GenerateProfiles(5) + pmarshaler := &pprofile.JSONMarshaler{} + b4, err := pmarshaler.MarshalProfiles(pd) + assert.NoError(t, err) b = append(b, '\n') b = append(b, b2...) b = append(b, '\n') b = append(b, b3...) b = append(b, '\n') + b = append(b, b4...) 
+ b = append(b, '\n') err = os.WriteFile(filepath.Join(tempFolder, "metrics.json"), b, 0600) assert.NoError(t, err) time.Sleep(1 * time.Second) @@ -241,10 +281,14 @@ func TestFileMixedSignals(t *testing.T) { assert.EqualValues(t, td, ts.AllTraces()[0]) require.Len(t, ls.AllLogs(), 1) assert.EqualValues(t, ld, ls.AllLogs()[0]) + require.Len(t, ps.AllProfiles(), 1) + assert.EqualValues(t, pd, ps.AllProfiles()[0]) err = mr.Shutdown(context.Background()) assert.NoError(t, err) err = tr.Shutdown(context.Background()) assert.NoError(t, err) err = lr.Shutdown(context.Background()) assert.NoError(t, err) + err = pr.Shutdown(context.Background()) + assert.NoError(t, err) } diff --git a/receiver/otlpjsonfilereceiver/go.mod b/receiver/otlpjsonfilereceiver/go.mod index 38728c067e26..cbffb45d9d30 100644 --- a/receiver/otlpjsonfilereceiver/go.mod +++ b/receiver/otlpjsonfilereceiver/go.mod @@ -12,12 +12,17 @@ require ( go.opentelemetry.io/collector/pdata/testdata v0.112.0 go.opentelemetry.io/collector/receiver v0.112.0 go.opentelemetry.io/collector/semconv v0.112.0 // indirect - go.opentelemetry.io/otel/metric v1.31.0 // indirect - go.opentelemetry.io/otel/trace v1.31.0 // indirect + go.opentelemetry.io/otel/metric v1.31.0 + go.opentelemetry.io/otel/trace v1.31.0 go.uber.org/goleak v1.3.0 ) -require go.opentelemetry.io/collector/consumer/consumertest v0.112.0 +require ( + go.opentelemetry.io/collector/consumer/consumerprofiles v0.112.0 + go.opentelemetry.io/collector/consumer/consumertest v0.112.0 + go.opentelemetry.io/collector/pdata/pprofile v0.112.0 + go.opentelemetry.io/collector/receiver/receiverprofiles v0.112.0 +) require ( github.com/bmatcuk/doublestar/v4 v4.7.1 // indirect @@ -50,13 +55,10 @@ require ( github.com/valyala/fastjson v1.6.4 // indirect go.opentelemetry.io/collector/config/configtelemetry v0.112.0 // indirect go.opentelemetry.io/collector/consumer/consumererror v0.112.0 // indirect - go.opentelemetry.io/collector/consumer/consumerprofiles v0.112.0 // indirect go.opentelemetry.io/collector/extension v0.112.0 // indirect go.opentelemetry.io/collector/extension/experimental/storage v0.112.0 // indirect go.opentelemetry.io/collector/featuregate v1.18.0 // indirect - go.opentelemetry.io/collector/pdata/pprofile v0.112.0 // indirect go.opentelemetry.io/collector/pipeline v0.112.0 // indirect - go.opentelemetry.io/collector/receiver/receiverprofiles v0.112.0 // indirect go.opentelemetry.io/otel v1.31.0 // indirect go.opentelemetry.io/otel/sdk v1.31.0 // indirect go.opentelemetry.io/otel/sdk/metric v1.31.0 // indirect diff --git a/receiver/otlpjsonfilereceiver/internal/metadata/generated_status.go b/receiver/otlpjsonfilereceiver/internal/metadata/generated_status.go index b1ee70e774fd..ea5adcf7b049 100644 --- a/receiver/otlpjsonfilereceiver/internal/metadata/generated_status.go +++ b/receiver/otlpjsonfilereceiver/internal/metadata/generated_status.go @@ -12,7 +12,8 @@ var ( ) const ( - TracesStability = component.StabilityLevelAlpha - MetricsStability = component.StabilityLevelAlpha - LogsStability = component.StabilityLevelAlpha + ProfilesStability = component.StabilityLevelDevelopment + TracesStability = component.StabilityLevelAlpha + MetricsStability = component.StabilityLevelAlpha + LogsStability = component.StabilityLevelAlpha ) diff --git a/receiver/otlpjsonfilereceiver/internal/metadata/generated_telemetry.go b/receiver/otlpjsonfilereceiver/internal/metadata/generated_telemetry.go new file mode 100644 index 000000000000..48b3bd10e587 --- /dev/null +++ 
b/receiver/otlpjsonfilereceiver/internal/metadata/generated_telemetry.go @@ -0,0 +1,17 @@ +// Code generated by mdatagen. DO NOT EDIT. + +package metadata + +import ( + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/trace" +) + +func Meter(settings component.TelemetrySettings) metric.Meter { + return settings.MeterProvider.Meter("github.com/open-telemetry/opentelemetry-collector-contrib/receiver/otlpjsonfilereceiver") +} + +func Tracer(settings component.TelemetrySettings) trace.Tracer { + return settings.TracerProvider.Tracer("github.com/open-telemetry/opentelemetry-collector-contrib/receiver/otlpjsonfilereceiver") +} diff --git a/receiver/otlpjsonfilereceiver/internal/metadata/generated_telemetry_test.go b/receiver/otlpjsonfilereceiver/internal/metadata/generated_telemetry_test.go new file mode 100644 index 000000000000..3cb69319b6a5 --- /dev/null +++ b/receiver/otlpjsonfilereceiver/internal/metadata/generated_telemetry_test.go @@ -0,0 +1,63 @@ +// Code generated by mdatagen. DO NOT EDIT. + +package metadata + +import ( + "testing" + + "github.com/stretchr/testify/require" + "go.opentelemetry.io/otel/metric" + embeddedmetric "go.opentelemetry.io/otel/metric/embedded" + noopmetric "go.opentelemetry.io/otel/metric/noop" + "go.opentelemetry.io/otel/trace" + embeddedtrace "go.opentelemetry.io/otel/trace/embedded" + nooptrace "go.opentelemetry.io/otel/trace/noop" + + "go.opentelemetry.io/collector/component" +) + +type mockMeter struct { + noopmetric.Meter + name string +} +type mockMeterProvider struct { + embeddedmetric.MeterProvider +} + +func (m mockMeterProvider) Meter(name string, opts ...metric.MeterOption) metric.Meter { + return mockMeter{name: name} +} + +type mockTracer struct { + nooptrace.Tracer + name string +} + +type mockTracerProvider struct { + embeddedtrace.TracerProvider +} + +func (m mockTracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.Tracer { + return mockTracer{name: name} +} + +func TestProviders(t *testing.T) { + set := component.TelemetrySettings{ + MeterProvider: mockMeterProvider{}, + TracerProvider: mockTracerProvider{}, + } + + meter := Meter(set) + if m, ok := meter.(mockMeter); ok { + require.Equal(t, "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/otlpjsonfilereceiver", m.name) + } else { + require.Fail(t, "returned Meter not mockMeter") + } + + tracer := Tracer(set) + if m, ok := tracer.(mockTracer); ok { + require.Equal(t, "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/otlpjsonfilereceiver", m.name) + } else { + require.Fail(t, "returned Meter not mockTracer") + } +} diff --git a/receiver/otlpjsonfilereceiver/metadata.yaml b/receiver/otlpjsonfilereceiver/metadata.yaml index 63c404c5cf4c..ba56f6fe291f 100644 --- a/receiver/otlpjsonfilereceiver/metadata.yaml +++ b/receiver/otlpjsonfilereceiver/metadata.yaml @@ -4,6 +4,7 @@ status: class: receiver stability: alpha: [traces, metrics, logs] + development: [profiles] distributions: [contrib] codeowners: active: [djaglowski, atoulme] From ed099909a87ac3d51f577b9e86e15369481861a3 Mon Sep 17 00:00:00 2001 From: andreidorin-oprea <87006060+andreidorin-oprea@users.noreply.github.com> Date: Fri, 25 Oct 2024 22:32:53 +0300 Subject: [PATCH 3/4] [exporter/azuredataexplorerexporter] add azure default auth (#35835) #### Description Add the ability to use the default Azure SDK authentication for the kusto client. This enables users to use workload identity. 
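For illustration, a minimal collector configuration using the new option might look like the sketch below; the cluster URI and database name are placeholder values, and the other auth fields are intentionally omitted since exactly one auth method may be configured:

```yaml
exporters:
  azuredataexplorer:
    # Placeholder cluster URI
    cluster_uri: "https://CLUSTER.kusto.windows.net"
    db_name: "oteldb"
    # Authenticate via the Azure SDK default credential chain
    # (environment variables, workload identity, managed identity, ...)
    use_azure_auth: true
```

With this set, the exporter builds its connection string via `WithDefaultAzureCredential()`, so no `application_id`/`application_key`/`tenant_id` or `managed_identity_id` needs to be provided.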
Links: - https://learn.microsoft.com/azure/developer/go/azure-sdk-authentication?tabs=bash#2-authenticate-with-azure - https://github.com/Azure/azure-kusto-go/blob/11658efc9faad4d0300afdc4af9a19c470b0313a/azkustodata/kcsb.go#L294C1-L298C2 - https://learn.microsoft.com/en-us/azure/aks/workload-identity-overview?tabs=go #### Link to tracking issue Fixes https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/33667 #### Testing - `make` - deploy controller in kind cluster using workload identity, send dummy traces and validate that they reach the ADX cluster #### Documentation Split the authentication docs for each mechanism available. --- .chloggen/add_azure_default_auth.yaml | 27 +++++++++++++++++++ exporter/azuredataexplorerexporter/README.md | 13 ++++++--- .../azuredataexplorerexporter/adx_exporter.go | 2 ++ .../adx_exporter_test.go | 12 +++++++++ exporter/azuredataexplorerexporter/config.go | 21 ++++++++++++--- .../azuredataexplorerexporter/config_test.go | 14 +++++++++- .../testdata/config.yaml | 7 ++++- 7 files changed, 88 insertions(+), 8 deletions(-) create mode 100644 .chloggen/add_azure_default_auth.yaml diff --git a/.chloggen/add_azure_default_auth.yaml b/.chloggen/add_azure_default_auth.yaml new file mode 100644 index 000000000000..d9f279fc7991 --- /dev/null +++ b/.chloggen/add_azure_default_auth.yaml @@ -0,0 +1,27 @@ +# Use this changelog template to create an entry for release notes. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: enhancement + +# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver) +component: azuredataexplorerexporter + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: Add new configuration option `use_default_auth` to enable default authentication for Azure Data Explorer. This option allows users to leverage workload identity for authentication. + +# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. +issues: [33667] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: + +# If your change doesn't affect end users or the exported elements of any package, +# you should instead start your pull request title with [chore] or use the "Skip Changelog" label. +# Optional: The change log or logs in which this entry should be included. +# e.g. '[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. +# Default: '[user]' +change_logs: [user] diff --git a/exporter/azuredataexplorerexporter/README.md b/exporter/azuredataexplorerexporter/README.md index b0024e90d34a..5f355f878930 100644 --- a/exporter/azuredataexplorerexporter/README.md +++ b/exporter/azuredataexplorerexporter/README.md @@ -22,9 +22,16 @@ This exporter sends metrics, logs and trace data to The following settings are required: - `cluster_uri` (no default): The cluster name of the provisioned ADX cluster to ingest the data. -- `application_id` (no default): The client id to connect to the cluster and ingest data. -- `application_key` (no default): The cluster secret corresponding to the client id. -- `tenant_id` (no default): The tenant id where the application_id is referenced from. 
+ +Exactly one authentication method is required: +- Service principal: + - `application_id` (no default): The client id to connect to the cluster and ingest data. + - `application_key` (no default): The cluster secret corresponding to the client id. + - `tenant_id` (no default): The tenant id where the application_id is referenced from. +- Managed identity: + - `managed_identity_id` (no default): The managed identity id to authenticate with. Set to "system" for system-assigned managed identity. Set to the MI client ID (GUID) for user-assigned managed identity. +- Default authentication: + - `use_azure_auth` (default: false): Set to true to use the Azure [default authentication](https://learn.microsoft.com/en-us/azure/developer/go/azure-sdk-authentication?tabs=bash#2-authenticate-with-azure). The following settings can be optionally configured and have default values: > Note that the database tables are expected to be created upfront before the exporter is in operation , the definition of these are in the section [Database and Table definition scripts](#database-and-table-definition-scripts) diff --git a/exporter/azuredataexplorerexporter/adx_exporter.go b/exporter/azuredataexplorerexporter/adx_exporter.go index b7e66e13b9fb..49b3dd585b71 100644 --- a/exporter/azuredataexplorerexporter/adx_exporter.go +++ b/exporter/azuredataexplorerexporter/adx_exporter.go @@ -218,6 +218,8 @@ func createKcsb(config *Config, version string) *kusto.ConnectionStringBuilder { isSystemManagedIdentity := strings.EqualFold(strings.TrimSpace(config.ManagedIdentityID), "SYSTEM") // If the user has managed identity done, use it. For System managed identity use the MI as system switch { + case config.UseAzureAuth: + kcsb = kusto.NewConnectionStringBuilder(config.ClusterURI).WithDefaultAzureCredential() case !isManagedIdentity: kcsb = kusto.NewConnectionStringBuilder(config.ClusterURI).WithAadAppKey(config.ApplicationID, string(config.ApplicationKey), config.TenantID) case isManagedIdentity && isSystemManagedIdentity: diff --git a/exporter/azuredataexplorerexporter/adx_exporter_test.go b/exporter/azuredataexplorerexporter/adx_exporter_test.go index 186a8839e700..f76389ee1851 100644 --- a/exporter/azuredataexplorerexporter/adx_exporter_test.go +++ b/exporter/azuredataexplorerexporter/adx_exporter_test.go @@ -178,6 +178,7 @@ func TestCreateKcsb(t *testing.T) { name string // name of the test config Config // config for the test isMsi bool // is MSI enabled + isAzureAuth bool // is azure authentication enabled applicationID string // application id managedIdentityID string // managed identity id }{ @@ -216,6 +217,15 @@ func TestCreateKcsb(t *testing.T) { managedIdentityID: "636d798f-b005-41c9-9809-81a5e5a12b2e", applicationID: "", }, + { + name: "azure auth", + config: Config{ + ClusterURI: "https://CLUSTER.kusto.windows.net", + Database: "tests", + UseAzureAuth: true, + }, + isAzureAuth: true, + }, } for i := range tests { tt := tests[i] @@ -229,6 +239,8 @@ func TestCreateKcsb(t *testing.T) { wantManagedID := tt.managedIdentityID assert.Equal(t, wantManagedID, gotKcsb.ManagedServiceIdentity) assert.Equal(t, "https://CLUSTER.kusto.windows.net", gotKcsb.DataSource) + wantIsAzure := tt.isAzureAuth + assert.Equal(t, wantIsAzure, gotKcsb.DefaultAuth) }) } } diff --git a/exporter/azuredataexplorerexporter/config.go b/exporter/azuredataexplorerexporter/config.go index 082add3ac618..e90100b65122 100644 --- a/exporter/azuredataexplorerexporter/config.go +++ b/exporter/azuredataexplorerexporter/config.go @@ -24,6 +24,7 @@ type Config
struct { ApplicationKey configopaque.String `mapstructure:"application_key"` TenantID string `mapstructure:"tenant_id"` ManagedIdentityID string `mapstructure:"managed_identity_id"` + UseAzureAuth bool `mapstructure:"use_azure_auth"` Database string `mapstructure:"db_name"` MetricTable string `mapstructure:"metrics_table_name"` LogTable string `mapstructure:"logs_table_name"` @@ -46,9 +47,23 @@ func (adxCfg *Config) Validate() error { if isClusterURIEmpty { return errors.New(`clusterURI config is mandatory`) } - // Parameters for AD App Auth or Managed Identity Auth are mandatory - if isAppAuthEmpty && isManagedAuthEmpty { - return errors.New(`either ["application_id" , "application_key" , "tenant_id"] or ["managed_identity_id"] are needed for auth`) + // Exactly one of AD App Auth, Managed Identity Auth, or Default Auth is required + authMethods := 0 + + if !isAppAuthEmpty { + authMethods++ + } + + if !isManagedAuthEmpty { + authMethods++ + } + + if adxCfg.UseAzureAuth { + authMethods++ + } + + if authMethods != 1 { + return errors.New(`either ["application_id", "application_key", "tenant_id"] or ["managed_identity_id"] or ["use_azure_auth"] must be provided for auth`) } if !(adxCfg.IngestionType == managedIngestType || adxCfg.IngestionType == queuedIngestTest || isEmpty(adxCfg.IngestionType)) { diff --git a/exporter/azuredataexplorerexporter/config_test.go b/exporter/azuredataexplorerexporter/config_test.go index 03882a8145f0..d124e01fc163 100644 --- a/exporter/azuredataexplorerexporter/config_test.go +++ b/exporter/azuredataexplorerexporter/config_test.go @@ -45,7 +45,7 @@ func TestLoadConfig(t *testing.T) { }, { id: component.NewIDWithName(metadata.Type, "2"), - errorMessage: `either ["application_id" , "application_key" , "tenant_id"] or ["managed_identity_id"] are needed for auth`, + errorMessage: `either ["application_id", "application_key", "tenant_id"] or ["managed_identity_id"] or ["use_azure_auth"] must be provided for auth`, }, { id: component.NewIDWithName(metadata.Type, "3"), @@ -111,6 +111,18 @@ }, }, }, + { + id: component.NewIDWithName(metadata.Type, "9"), + expected: &Config{ + ClusterURI: "https://CLUSTER.kusto.windows.net", + Database: "oteldb", + MetricTable: "OTELMetrics", + LogTable: "OTELLogs", + TraceTable: "OTELTraces", + UseAzureAuth: true, + IngestionType: queuedIngestTest, + }, + }, } for _, tt := range tests { diff --git a/exporter/azuredataexplorerexporter/testdata/config.yaml b/exporter/azuredataexplorerexporter/testdata/config.yaml index 0fdf9fbe0a13..33ca1d14793d 100644 --- a/exporter/azuredataexplorerexporter/testdata/config.yaml +++ b/exporter/azuredataexplorerexporter/testdata/config.yaml @@ -145,4 +145,9 @@ azuredataexplorer/8: enabled: true initial_interval: 10s max_interval: 60s - max_elapsed_time: 10m \ No newline at end of file + max_elapsed_time: 10m +azuredataexplorer/9: + # Kusto cluster uri + cluster_uri: "https://CLUSTER.kusto.windows.net" + # whether to use the default azure auth + use_azure_auth: true From 3b31e401b255814c40ab33c17a1e95d90c074372 Mon Sep 17 00:00:00 2001 From: Michael Burt Date: Mon, 28 Oct 2024 04:58:36 -0600 Subject: [PATCH 4/4] [receiver/splunkenterprisereceiver] Add Dispatch Artifact Collection (#35950) #### Description This PR adds metrics for search dispatch using the `/services/server/status/dispatch-artifacts` API.
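All six new metrics default to disabled, so collecting them requires opting in explicitly. A sketch of what that might look like (illustrative only; the `search_head` section name and basicauth wiring follow the receiver README, and the endpoint URL is a placeholder):

```yaml
receivers:
  splunkenterprise:
    search_head:
      auth:
        authenticator: basicauth/search_head
      endpoint: "https://somehost:8089"
    metrics:
      splunk.server.searchartifacts.adhoc:
        enabled: true
      splunk.server.searchartifacts.scheduled:
        enabled: true
      splunk.server.searchartifacts.completed:
        enabled: true
      splunk.server.searchartifacts.incomplete:
        enabled: true
      splunk.server.searchartifacts.invalid:
        enabled: true
      splunk.server.searchartifacts.savedsearches:
        enabled: true
```

As the metric descriptions note, these values are only gathered from the configured search head endpoint.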
#### Testing I added unit tests for these metrics --- ...nkenterprisereceiver-search-artifacts.yaml | 27 ++ .../splunkenterprisereceiver/documentation.md | 84 +++++ .../internal/metadata/generated_config.go | 24 ++ .../metadata/generated_config_test.go | 12 + .../internal/metadata/generated_metrics.go | 354 ++++++++++++++++++ .../metadata/generated_metrics_test.go | 108 ++++++ .../internal/metadata/testdata/config.yaml | 24 ++ .../splunkenterprisereceiver/metadata.yaml | 61 ++- receiver/splunkenterprisereceiver/scraper.go | 88 +++++ .../splunkenterprisereceiver/scraper_test.go | 9 + .../splunkenterprisereceiver/search_result.go | 19 + 11 files changed, 807 insertions(+), 3 deletions(-) create mode 100644 .chloggen/splunkenterprisereceiver-search-artifacts.yaml diff --git a/.chloggen/splunkenterprisereceiver-search-artifacts.yaml b/.chloggen/splunkenterprisereceiver-search-artifacts.yaml new file mode 100644 index 000000000000..0d3505e00dd6 --- /dev/null +++ b/.chloggen/splunkenterprisereceiver-search-artifacts.yaml @@ -0,0 +1,27 @@ +# Use this changelog template to create an entry for release notes. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: enhancement + +# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver) +component: splunkenterprisereceiver + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: Add new metrics for Splunk Enterprise dispatch artifacts + +# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. +issues: [35950] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: + +# If your change doesn't affect end users or the exported elements of any package, +# you should instead start your pull request title with [chore] or use the "Skip Changelog" label. +# Optional: The change log or logs in which this entry should be included. +# e.g. '[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. +# Default: '[user]' +change_logs: [user] diff --git a/receiver/splunkenterprisereceiver/documentation.md b/receiver/splunkenterprisereceiver/documentation.md index 18671d969bc2..e866054e5627 100644 --- a/receiver/splunkenterprisereceiver/documentation.md +++ b/receiver/splunkenterprisereceiver/documentation.md @@ -475,3 +475,87 @@ Gauge tracking current bytes waiting in queue. *Note:** Must be pointed at speci | Name | Description | Values | | ---- | ----------- | ------ | | splunk.queue.name | The name of the queue reporting a specific KPI | Any Str | + +### splunk.server.searchartifacts.adhoc + +Gauge tracking number of ad hoc search artifacts currently on disk. Note:* Must be pointed at specific Search Head endpoint and gathers metrics from only that Search Head. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| {search_artifacts} | Gauge | Int | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| splunk.host | The name of the splunk host | Any Str | + +### splunk.server.searchartifacts.completed + +Gauge tracking number of artifacts currently on disk that belong to finished searches. 
Note:* Must be pointed at specific Search Head endpoint and gathers metrics from only that Search Head. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| {search_artifacts} | Gauge | Int | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| splunk.host | The name of the splunk host | Any Str | + +### splunk.server.searchartifacts.incomplete + +Gauge tracking number of artifacts currently on disk that belong to unfinished/running searches. Note:* Must be pointed at specific Search Head endpoint and gathers metrics from only that Search Head. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| {search_artifacts} | Gauge | Int | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| splunk.host | The name of the splunk host | Any Str | + +### splunk.server.searchartifacts.invalid + +Gauge tracking number of artifacts currently on disk that are not in a valid state, such as missing info.csv file, etc. Note:* Must be pointed at specific Search Head endpoint and gathers metrics from only that Search Head. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| {search_artifacts} | Gauge | Int | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| splunk.host | The name of the splunk host | Any Str | + +### splunk.server.searchartifacts.savedsearches + +Gauge tracking, for the `splunk.server.searchartifacts.scheduled` number of scheduled search artifacts, how many different saved-searches they belong to. Note:* Must be pointed at specific Search Head endpoint and gathers metrics from only that Search Head. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| {search_artifacts} | Gauge | Int | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| splunk.host | The name of the splunk host | Any Str | + +### splunk.server.searchartifacts.scheduled + +Gauge tracking number of scheduled search artifacts currently on disk. Note:* Must be pointed at specific Search Head endpoint and gathers metrics from only that Search Head. 
+ +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| {search_artifacts} | Gauge | Int | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| splunk.host | The name of the splunk host | Any Str | diff --git a/receiver/splunkenterprisereceiver/internal/metadata/generated_config.go b/receiver/splunkenterprisereceiver/internal/metadata/generated_config.go index 4887e11ff0b0..9daf22b30999 100644 --- a/receiver/splunkenterprisereceiver/internal/metadata/generated_config.go +++ b/receiver/splunkenterprisereceiver/internal/metadata/generated_config.go @@ -58,6 +58,12 @@ type MetricsConfig struct { SplunkSchedulerCompletionRatio MetricConfig `mapstructure:"splunk.scheduler.completion.ratio"` SplunkServerIntrospectionQueuesCurrent MetricConfig `mapstructure:"splunk.server.introspection.queues.current"` SplunkServerIntrospectionQueuesCurrentBytes MetricConfig `mapstructure:"splunk.server.introspection.queues.current.bytes"` + SplunkServerSearchartifactsAdhoc MetricConfig `mapstructure:"splunk.server.searchartifacts.adhoc"` + SplunkServerSearchartifactsCompleted MetricConfig `mapstructure:"splunk.server.searchartifacts.completed"` + SplunkServerSearchartifactsIncomplete MetricConfig `mapstructure:"splunk.server.searchartifacts.incomplete"` + SplunkServerSearchartifactsInvalid MetricConfig `mapstructure:"splunk.server.searchartifacts.invalid"` + SplunkServerSearchartifactsSavedsearches MetricConfig `mapstructure:"splunk.server.searchartifacts.savedsearches"` + SplunkServerSearchartifactsScheduled MetricConfig `mapstructure:"splunk.server.searchartifacts.scheduled"` SplunkTypingQueueRatio MetricConfig `mapstructure:"splunk.typing.queue.ratio"` } @@ -156,6 +162,24 @@ func DefaultMetricsConfig() MetricsConfig { SplunkServerIntrospectionQueuesCurrentBytes: MetricConfig{ Enabled: false, }, + SplunkServerSearchartifactsAdhoc: MetricConfig{ + Enabled: false, + }, + SplunkServerSearchartifactsCompleted: MetricConfig{ + Enabled: false, + }, + SplunkServerSearchartifactsIncomplete: MetricConfig{ + Enabled: false, + }, + SplunkServerSearchartifactsInvalid: MetricConfig{ + Enabled: false, + }, + SplunkServerSearchartifactsSavedsearches: MetricConfig{ + Enabled: false, + }, + SplunkServerSearchartifactsScheduled: MetricConfig{ + Enabled: false, + }, SplunkTypingQueueRatio: MetricConfig{ Enabled: true, }, diff --git a/receiver/splunkenterprisereceiver/internal/metadata/generated_config_test.go b/receiver/splunkenterprisereceiver/internal/metadata/generated_config_test.go index d904af8f5963..9b1dc2b53099 100644 --- a/receiver/splunkenterprisereceiver/internal/metadata/generated_config_test.go +++ b/receiver/splunkenterprisereceiver/internal/metadata/generated_config_test.go @@ -56,6 +56,12 @@ func TestMetricsBuilderConfig(t *testing.T) { SplunkSchedulerCompletionRatio: MetricConfig{Enabled: true}, SplunkServerIntrospectionQueuesCurrent: MetricConfig{Enabled: true}, SplunkServerIntrospectionQueuesCurrentBytes: MetricConfig{Enabled: true}, + SplunkServerSearchartifactsAdhoc: MetricConfig{Enabled: true}, + SplunkServerSearchartifactsCompleted: MetricConfig{Enabled: true}, + SplunkServerSearchartifactsIncomplete: MetricConfig{Enabled: true}, + SplunkServerSearchartifactsInvalid: MetricConfig{Enabled: true}, + SplunkServerSearchartifactsSavedsearches: MetricConfig{Enabled: true}, + SplunkServerSearchartifactsScheduled: MetricConfig{Enabled: true}, SplunkTypingQueueRatio: MetricConfig{Enabled: true}, }, }, @@ -95,6 +101,12 @@ func TestMetricsBuilderConfig(t 
*testing.T) { SplunkSchedulerCompletionRatio: MetricConfig{Enabled: false}, SplunkServerIntrospectionQueuesCurrent: MetricConfig{Enabled: false}, SplunkServerIntrospectionQueuesCurrentBytes: MetricConfig{Enabled: false}, + SplunkServerSearchartifactsAdhoc: MetricConfig{Enabled: false}, + SplunkServerSearchartifactsCompleted: MetricConfig{Enabled: false}, + SplunkServerSearchartifactsIncomplete: MetricConfig{Enabled: false}, + SplunkServerSearchartifactsInvalid: MetricConfig{Enabled: false}, + SplunkServerSearchartifactsSavedsearches: MetricConfig{Enabled: false}, + SplunkServerSearchartifactsScheduled: MetricConfig{Enabled: false}, SplunkTypingQueueRatio: MetricConfig{Enabled: false}, }, }, diff --git a/receiver/splunkenterprisereceiver/internal/metadata/generated_metrics.go b/receiver/splunkenterprisereceiver/internal/metadata/generated_metrics.go index 21a081c6031c..968f9b26bed2 100644 --- a/receiver/splunkenterprisereceiver/internal/metadata/generated_metrics.go +++ b/receiver/splunkenterprisereceiver/internal/metadata/generated_metrics.go @@ -1598,6 +1598,312 @@ func newMetricSplunkServerIntrospectionQueuesCurrentBytes(cfg MetricConfig) metr return m } +type metricSplunkServerSearchartifactsAdhoc struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills splunk.server.searchartifacts.adhoc metric with initial data. +func (m *metricSplunkServerSearchartifactsAdhoc) init() { + m.data.SetName("splunk.server.searchartifacts.adhoc") + m.data.SetDescription("Gauge tracking number of ad hoc search artifacts currently on disk. Note:* Must be pointed at specific Search Head endpoint and gathers metrics from only that Search Head.") + m.data.SetUnit("{search_artifacts}") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricSplunkServerSearchartifactsAdhoc) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, splunkHostAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("splunk.host", splunkHostAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricSplunkServerSearchartifactsAdhoc) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricSplunkServerSearchartifactsAdhoc) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricSplunkServerSearchartifactsAdhoc(cfg MetricConfig) metricSplunkServerSearchartifactsAdhoc { + m := metricSplunkServerSearchartifactsAdhoc{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricSplunkServerSearchartifactsCompleted struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills splunk.server.searchartifacts.completed metric with initial data. 
+func (m *metricSplunkServerSearchartifactsCompleted) init() { + m.data.SetName("splunk.server.searchartifacts.completed") + m.data.SetDescription("Gauge tracking number of artifacts currently on disk that belong to finished searches. Note:* Must be pointed at specific Search Head endpoint and gathers metrics from only that Search Head.") + m.data.SetUnit("{search_artifacts}") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricSplunkServerSearchartifactsCompleted) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, splunkHostAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("splunk.host", splunkHostAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricSplunkServerSearchartifactsCompleted) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricSplunkServerSearchartifactsCompleted) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricSplunkServerSearchartifactsCompleted(cfg MetricConfig) metricSplunkServerSearchartifactsCompleted { + m := metricSplunkServerSearchartifactsCompleted{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricSplunkServerSearchartifactsIncomplete struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills splunk.server.searchartifacts.incomplete metric with initial data. +func (m *metricSplunkServerSearchartifactsIncomplete) init() { + m.data.SetName("splunk.server.searchartifacts.incomplete") + m.data.SetDescription("Gauge tracking number of artifacts currently on disk that belong to unfinished/running searches. Note:* Must be pointed at specific Search Head endpoint and gathers metrics from only that Search Head.") + m.data.SetUnit("{search_artifacts}") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricSplunkServerSearchartifactsIncomplete) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, splunkHostAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("splunk.host", splunkHostAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricSplunkServerSearchartifactsIncomplete) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricSplunkServerSearchartifactsIncomplete) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricSplunkServerSearchartifactsIncomplete(cfg MetricConfig) metricSplunkServerSearchartifactsIncomplete { + m := metricSplunkServerSearchartifactsIncomplete{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricSplunkServerSearchartifactsInvalid struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills splunk.server.searchartifacts.invalid metric with initial data. +func (m *metricSplunkServerSearchartifactsInvalid) init() { + m.data.SetName("splunk.server.searchartifacts.invalid") + m.data.SetDescription("Gauge tracking number of artifacts currently on disk that are not in a valid state, such as missing info.csv file, etc. Note:* Must be pointed at specific Search Head endpoint and gathers metrics from only that Search Head.") + m.data.SetUnit("{search_artifacts}") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricSplunkServerSearchartifactsInvalid) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, splunkHostAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("splunk.host", splunkHostAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricSplunkServerSearchartifactsInvalid) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricSplunkServerSearchartifactsInvalid) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricSplunkServerSearchartifactsInvalid(cfg MetricConfig) metricSplunkServerSearchartifactsInvalid { + m := metricSplunkServerSearchartifactsInvalid{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricSplunkServerSearchartifactsSavedsearches struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills splunk.server.searchartifacts.savedsearches metric with initial data. +func (m *metricSplunkServerSearchartifactsSavedsearches) init() { + m.data.SetName("splunk.server.searchartifacts.savedsearches") + m.data.SetDescription("Gauge tracking, for the `splunk.server.searchartifacts.scheduled` number of scheduled search artifacts, how many different saved-searches they belong to. 
Note:* Must be pointed at specific Search Head endpoint and gathers metrics from only that Search Head.") + m.data.SetUnit("{search_artifacts}") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricSplunkServerSearchartifactsSavedsearches) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, splunkHostAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("splunk.host", splunkHostAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricSplunkServerSearchartifactsSavedsearches) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricSplunkServerSearchartifactsSavedsearches) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricSplunkServerSearchartifactsSavedsearches(cfg MetricConfig) metricSplunkServerSearchartifactsSavedsearches { + m := metricSplunkServerSearchartifactsSavedsearches{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricSplunkServerSearchartifactsScheduled struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills splunk.server.searchartifacts.scheduled metric with initial data. +func (m *metricSplunkServerSearchartifactsScheduled) init() { + m.data.SetName("splunk.server.searchartifacts.scheduled") + m.data.SetDescription("Gauge tracking number of scheduled search artifacts currently on disk. Note:* Must be pointed at specific Search Head endpoint and gathers metrics from only that Search Head.") + m.data.SetUnit("{search_artifacts}") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricSplunkServerSearchartifactsScheduled) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, splunkHostAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("splunk.host", splunkHostAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricSplunkServerSearchartifactsScheduled) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricSplunkServerSearchartifactsScheduled) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricSplunkServerSearchartifactsScheduled(cfg MetricConfig) metricSplunkServerSearchartifactsScheduled { + m := metricSplunkServerSearchartifactsScheduled{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + type metricSplunkTypingQueueRatio struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. @@ -1688,6 +1994,12 @@ type MetricsBuilder struct { metricSplunkSchedulerCompletionRatio metricSplunkSchedulerCompletionRatio metricSplunkServerIntrospectionQueuesCurrent metricSplunkServerIntrospectionQueuesCurrent metricSplunkServerIntrospectionQueuesCurrentBytes metricSplunkServerIntrospectionQueuesCurrentBytes + metricSplunkServerSearchartifactsAdhoc metricSplunkServerSearchartifactsAdhoc + metricSplunkServerSearchartifactsCompleted metricSplunkServerSearchartifactsCompleted + metricSplunkServerSearchartifactsIncomplete metricSplunkServerSearchartifactsIncomplete + metricSplunkServerSearchartifactsInvalid metricSplunkServerSearchartifactsInvalid + metricSplunkServerSearchartifactsSavedsearches metricSplunkServerSearchartifactsSavedsearches + metricSplunkServerSearchartifactsScheduled metricSplunkServerSearchartifactsScheduled metricSplunkTypingQueueRatio metricSplunkTypingQueueRatio } @@ -1746,6 +2058,12 @@ func NewMetricsBuilder(mbc MetricsBuilderConfig, settings receiver.Settings, opt metricSplunkSchedulerCompletionRatio: newMetricSplunkSchedulerCompletionRatio(mbc.Metrics.SplunkSchedulerCompletionRatio), metricSplunkServerIntrospectionQueuesCurrent: newMetricSplunkServerIntrospectionQueuesCurrent(mbc.Metrics.SplunkServerIntrospectionQueuesCurrent), metricSplunkServerIntrospectionQueuesCurrentBytes: newMetricSplunkServerIntrospectionQueuesCurrentBytes(mbc.Metrics.SplunkServerIntrospectionQueuesCurrentBytes), + metricSplunkServerSearchartifactsAdhoc: newMetricSplunkServerSearchartifactsAdhoc(mbc.Metrics.SplunkServerSearchartifactsAdhoc), + metricSplunkServerSearchartifactsCompleted: newMetricSplunkServerSearchartifactsCompleted(mbc.Metrics.SplunkServerSearchartifactsCompleted), + metricSplunkServerSearchartifactsIncomplete: newMetricSplunkServerSearchartifactsIncomplete(mbc.Metrics.SplunkServerSearchartifactsIncomplete), + metricSplunkServerSearchartifactsInvalid: newMetricSplunkServerSearchartifactsInvalid(mbc.Metrics.SplunkServerSearchartifactsInvalid), + metricSplunkServerSearchartifactsSavedsearches: newMetricSplunkServerSearchartifactsSavedsearches(mbc.Metrics.SplunkServerSearchartifactsSavedsearches), + metricSplunkServerSearchartifactsScheduled: newMetricSplunkServerSearchartifactsScheduled(mbc.Metrics.SplunkServerSearchartifactsScheduled), metricSplunkTypingQueueRatio: newMetricSplunkTypingQueueRatio(mbc.Metrics.SplunkTypingQueueRatio), } @@ -1843,6 +2161,12 @@ func (mb *MetricsBuilder) EmitForResource(options ...ResourceMetricsOption) { mb.metricSplunkSchedulerCompletionRatio.emit(ils.Metrics()) mb.metricSplunkServerIntrospectionQueuesCurrent.emit(ils.Metrics()) mb.metricSplunkServerIntrospectionQueuesCurrentBytes.emit(ils.Metrics()) + mb.metricSplunkServerSearchartifactsAdhoc.emit(ils.Metrics()) + mb.metricSplunkServerSearchartifactsCompleted.emit(ils.Metrics()) + mb.metricSplunkServerSearchartifactsIncomplete.emit(ils.Metrics()) + 
mb.metricSplunkServerSearchartifactsInvalid.emit(ils.Metrics()) + mb.metricSplunkServerSearchartifactsSavedsearches.emit(ils.Metrics()) + mb.metricSplunkServerSearchartifactsScheduled.emit(ils.Metrics()) mb.metricSplunkTypingQueueRatio.emit(ils.Metrics()) for _, op := range options { @@ -2020,6 +2344,36 @@ func (mb *MetricsBuilder) RecordSplunkServerIntrospectionQueuesCurrentBytesDataP mb.metricSplunkServerIntrospectionQueuesCurrentBytes.recordDataPoint(mb.startTime, ts, val, splunkQueueNameAttributeValue) } +// RecordSplunkServerSearchartifactsAdhocDataPoint adds a data point to splunk.server.searchartifacts.adhoc metric. +func (mb *MetricsBuilder) RecordSplunkServerSearchartifactsAdhocDataPoint(ts pcommon.Timestamp, val int64, splunkHostAttributeValue string) { + mb.metricSplunkServerSearchartifactsAdhoc.recordDataPoint(mb.startTime, ts, val, splunkHostAttributeValue) +} + +// RecordSplunkServerSearchartifactsCompletedDataPoint adds a data point to splunk.server.searchartifacts.completed metric. +func (mb *MetricsBuilder) RecordSplunkServerSearchartifactsCompletedDataPoint(ts pcommon.Timestamp, val int64, splunkHostAttributeValue string) { + mb.metricSplunkServerSearchartifactsCompleted.recordDataPoint(mb.startTime, ts, val, splunkHostAttributeValue) +} + +// RecordSplunkServerSearchartifactsIncompleteDataPoint adds a data point to splunk.server.searchartifacts.incomplete metric. +func (mb *MetricsBuilder) RecordSplunkServerSearchartifactsIncompleteDataPoint(ts pcommon.Timestamp, val int64, splunkHostAttributeValue string) { + mb.metricSplunkServerSearchartifactsIncomplete.recordDataPoint(mb.startTime, ts, val, splunkHostAttributeValue) +} + +// RecordSplunkServerSearchartifactsInvalidDataPoint adds a data point to splunk.server.searchartifacts.invalid metric. +func (mb *MetricsBuilder) RecordSplunkServerSearchartifactsInvalidDataPoint(ts pcommon.Timestamp, val int64, splunkHostAttributeValue string) { + mb.metricSplunkServerSearchartifactsInvalid.recordDataPoint(mb.startTime, ts, val, splunkHostAttributeValue) +} + +// RecordSplunkServerSearchartifactsSavedsearchesDataPoint adds a data point to splunk.server.searchartifacts.savedsearches metric. +func (mb *MetricsBuilder) RecordSplunkServerSearchartifactsSavedsearchesDataPoint(ts pcommon.Timestamp, val int64, splunkHostAttributeValue string) { + mb.metricSplunkServerSearchartifactsSavedsearches.recordDataPoint(mb.startTime, ts, val, splunkHostAttributeValue) +} + +// RecordSplunkServerSearchartifactsScheduledDataPoint adds a data point to splunk.server.searchartifacts.scheduled metric. +func (mb *MetricsBuilder) RecordSplunkServerSearchartifactsScheduledDataPoint(ts pcommon.Timestamp, val int64, splunkHostAttributeValue string) { + mb.metricSplunkServerSearchartifactsScheduled.recordDataPoint(mb.startTime, ts, val, splunkHostAttributeValue) +} + // RecordSplunkTypingQueueRatioDataPoint adds a data point to splunk.typing.queue.ratio metric. 
func (mb *MetricsBuilder) RecordSplunkTypingQueueRatioDataPoint(ts pcommon.Timestamp, val float64, splunkHostAttributeValue string) { mb.metricSplunkTypingQueueRatio.recordDataPoint(mb.startTime, ts, val, splunkHostAttributeValue) diff --git a/receiver/splunkenterprisereceiver/internal/metadata/generated_metrics_test.go b/receiver/splunkenterprisereceiver/internal/metadata/generated_metrics_test.go index 1e225a521ac8..b2c7ede854dc 100644 --- a/receiver/splunkenterprisereceiver/internal/metadata/generated_metrics_test.go +++ b/receiver/splunkenterprisereceiver/internal/metadata/generated_metrics_test.go @@ -170,6 +170,24 @@ func TestMetricsBuilder(t *testing.T) { allMetricsCount++ mb.RecordSplunkServerIntrospectionQueuesCurrentBytesDataPoint(ts, 1, "splunk.queue.name-val") + allMetricsCount++ + mb.RecordSplunkServerSearchartifactsAdhocDataPoint(ts, 1, "splunk.host-val") + + allMetricsCount++ + mb.RecordSplunkServerSearchartifactsCompletedDataPoint(ts, 1, "splunk.host-val") + + allMetricsCount++ + mb.RecordSplunkServerSearchartifactsIncompleteDataPoint(ts, 1, "splunk.host-val") + + allMetricsCount++ + mb.RecordSplunkServerSearchartifactsInvalidDataPoint(ts, 1, "splunk.host-val") + + allMetricsCount++ + mb.RecordSplunkServerSearchartifactsSavedsearchesDataPoint(ts, 1, "splunk.host-val") + + allMetricsCount++ + mb.RecordSplunkServerSearchartifactsScheduledDataPoint(ts, 1, "splunk.host-val") + defaultMetricsCount++ allMetricsCount++ mb.RecordSplunkTypingQueueRatioDataPoint(ts, 1, "splunk.host-val") @@ -679,6 +697,96 @@ func TestMetricsBuilder(t *testing.T) { attrVal, ok := dp.Attributes().Get("splunk.queue.name") assert.True(t, ok) assert.EqualValues(t, "splunk.queue.name-val", attrVal.Str()) + case "splunk.server.searchartifacts.adhoc": + assert.False(t, validatedMetrics["splunk.server.searchartifacts.adhoc"], "Found a duplicate in the metrics slice: splunk.server.searchartifacts.adhoc") + validatedMetrics["splunk.server.searchartifacts.adhoc"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Gauge tracking number of ad hoc search artifacts currently on disk. Note:* Must be pointed at specific Search Head endpoint and gathers metrics from only that Search Head.", ms.At(i).Description()) + assert.Equal(t, "{search_artifacts}", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("splunk.host") + assert.True(t, ok) + assert.EqualValues(t, "splunk.host-val", attrVal.Str()) + case "splunk.server.searchartifacts.completed": + assert.False(t, validatedMetrics["splunk.server.searchartifacts.completed"], "Found a duplicate in the metrics slice: splunk.server.searchartifacts.completed") + validatedMetrics["splunk.server.searchartifacts.completed"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Gauge tracking number of artifacts currently on disk that belong to finished searches. 
Note:* Must be pointed at specific Search Head endpoint and gathers metrics from only that Search Head.", ms.At(i).Description()) + assert.Equal(t, "{search_artifacts}", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("splunk.host") + assert.True(t, ok) + assert.EqualValues(t, "splunk.host-val", attrVal.Str()) + case "splunk.server.searchartifacts.incomplete": + assert.False(t, validatedMetrics["splunk.server.searchartifacts.incomplete"], "Found a duplicate in the metrics slice: splunk.server.searchartifacts.incomplete") + validatedMetrics["splunk.server.searchartifacts.incomplete"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Gauge tracking number of artifacts currently on disk that belong to unfinished/running searches. Note:* Must be pointed at specific Search Head endpoint and gathers metrics from only that Search Head.", ms.At(i).Description()) + assert.Equal(t, "{search_artifacts}", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("splunk.host") + assert.True(t, ok) + assert.EqualValues(t, "splunk.host-val", attrVal.Str()) + case "splunk.server.searchartifacts.invalid": + assert.False(t, validatedMetrics["splunk.server.searchartifacts.invalid"], "Found a duplicate in the metrics slice: splunk.server.searchartifacts.invalid") + validatedMetrics["splunk.server.searchartifacts.invalid"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Gauge tracking number of artifacts currently on disk that are not in a valid state, such as missing info.csv file, etc. Note:* Must be pointed at specific Search Head endpoint and gathers metrics from only that Search Head.", ms.At(i).Description()) + assert.Equal(t, "{search_artifacts}", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("splunk.host") + assert.True(t, ok) + assert.EqualValues(t, "splunk.host-val", attrVal.Str()) + case "splunk.server.searchartifacts.savedsearches": + assert.False(t, validatedMetrics["splunk.server.searchartifacts.savedsearches"], "Found a duplicate in the metrics slice: splunk.server.searchartifacts.savedsearches") + validatedMetrics["splunk.server.searchartifacts.savedsearches"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Gauge tracking, for the `splunk.server.searchartifacts.scheduled` number of scheduled search artifacts, how many different saved-searches they belong to. 
Note:* Must be pointed at specific Search Head endpoint and gathers metrics from only that Search Head.", ms.At(i).Description()) + assert.Equal(t, "{search_artifacts}", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("splunk.host") + assert.True(t, ok) + assert.EqualValues(t, "splunk.host-val", attrVal.Str()) + case "splunk.server.searchartifacts.scheduled": + assert.False(t, validatedMetrics["splunk.server.searchartifacts.scheduled"], "Found a duplicate in the metrics slice: splunk.server.searchartifacts.scheduled") + validatedMetrics["splunk.server.searchartifacts.scheduled"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Gauge tracking number of scheduled search artifacts currently on disk. Note:* Must be pointed at specific Search Head endpoint and gathers metrics from only that Search Head.", ms.At(i).Description()) + assert.Equal(t, "{search_artifacts}", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("splunk.host") + assert.True(t, ok) + assert.EqualValues(t, "splunk.host-val", attrVal.Str()) case "splunk.typing.queue.ratio": assert.False(t, validatedMetrics["splunk.typing.queue.ratio"], "Found a duplicate in the metrics slice: splunk.typing.queue.ratio") validatedMetrics["splunk.typing.queue.ratio"] = true diff --git a/receiver/splunkenterprisereceiver/internal/metadata/testdata/config.yaml b/receiver/splunkenterprisereceiver/internal/metadata/testdata/config.yaml index c3e150597d16..449cd89b2255 100644 --- a/receiver/splunkenterprisereceiver/internal/metadata/testdata/config.yaml +++ b/receiver/splunkenterprisereceiver/internal/metadata/testdata/config.yaml @@ -63,6 +63,18 @@ all_set: enabled: true splunk.server.introspection.queues.current.bytes: enabled: true + splunk.server.searchartifacts.adhoc: + enabled: true + splunk.server.searchartifacts.completed: + enabled: true + splunk.server.searchartifacts.incomplete: + enabled: true + splunk.server.searchartifacts.invalid: + enabled: true + splunk.server.searchartifacts.savedsearches: + enabled: true + splunk.server.searchartifacts.scheduled: + enabled: true splunk.typing.queue.ratio: enabled: true none_set: @@ -129,5 +141,17 @@ none_set: enabled: false splunk.server.introspection.queues.current.bytes: enabled: false + splunk.server.searchartifacts.adhoc: + enabled: false + splunk.server.searchartifacts.completed: + enabled: false + splunk.server.searchartifacts.incomplete: + enabled: false + splunk.server.searchartifacts.invalid: + enabled: false + splunk.server.searchartifacts.savedsearches: + enabled: false + splunk.server.searchartifacts.scheduled: + enabled: false splunk.typing.queue.ratio: enabled: false diff --git a/receiver/splunkenterprisereceiver/metadata.yaml b/receiver/splunkenterprisereceiver/metadata.yaml index 4c4c23b2c8e9..e6329f500e0d 100644 --- a/receiver/splunkenterprisereceiver/metadata.yaml +++ b/receiver/splunkenterprisereceiver/metadata.yaml @@ -231,8 +231,8 @@ metrics: unit: '{buckets}' gauge: value_type: int - attributes: 
[splunk.index.name, splunk.bucket.dir] - #'services/server/introspection/queues' + attributes: [splunk.index.name, splunk.bucket.dir] + #'services/server/introspection/queues' splunk.server.introspection.queues.current: enabled: false description: Gauge tracking current length of queue. *Note:** Must be pointed at specific indexer `endpoint` and gathers metrics from only that indexer. @@ -268,7 +268,62 @@ metrics: unit: '{status}' gauge: value_type: int - attributes: [splunk.kvstore.status.value] + attributes: [splunk.kvstore.status.value] + #'services/server/status/dispatch-artifacts' + splunk.server.searchartifacts.adhoc: + enabled: false + description: Gauge tracking number of ad hoc search artifacts currently on disk. Note:* Must be pointed at specific Search Head endpoint and gathers metrics from only that Search Head. + unit: "{search_artifacts}" + gauge: + monotonic: false + aggregation_temporality: cumulative + value_type: int + attributes: [splunk.host] + splunk.server.searchartifacts.scheduled: + enabled: false + description: Gauge tracking number of scheduled search artifacts currently on disk. Note:* Must be pointed at specific Search Head endpoint and gathers metrics from only that Search Head. + unit: "{search_artifacts}" + gauge: + monotonic: false + aggregation_temporality: cumulative + value_type: int + attributes: [splunk.host] + splunk.server.searchartifacts.completed: + enabled: false + description: Gauge tracking number of artifacts currently on disk that belong to finished searches. Note:* Must be pointed at specific Search Head endpoint and gathers metrics from only that Search Head. + unit: "{search_artifacts}" + gauge: + monotonic: false + aggregation_temporality: cumulative + value_type: int + attributes: [splunk.host] + splunk.server.searchartifacts.incomplete: + enabled: false + description: Gauge tracking number of artifacts currently on disk that belong to unfinished/running searches. Note:* Must be pointed at specific Search Head endpoint and gathers metrics from only that Search Head. + unit: "{search_artifacts}" + gauge: + monotonic: false + aggregation_temporality: cumulative + value_type: int + attributes: [splunk.host] + splunk.server.searchartifacts.invalid: + enabled: false + description: Gauge tracking number of artifacts currently on disk that are not in a valid state, such as missing info.csv file, etc. Note:* Must be pointed at specific Search Head endpoint and gathers metrics from only that Search Head. + unit: "{search_artifacts}" + gauge: + monotonic: false + aggregation_temporality: cumulative + value_type: int + attributes: [splunk.host] + splunk.server.searchartifacts.savedsearches: + enabled: false + description: Gauge tracking, for the `splunk.server.searchartifacts.scheduled` number of scheduled search artifacts, how many different saved-searches they belong to. Note:* Must be pointed at specific Search Head endpoint and gathers metrics from only that Search Head. 
+ unit: "{search_artifacts}" + gauge: + monotonic: false + aggregation_temporality: cumulative + value_type: int + attributes: [splunk.host] tests: config: diff --git a/receiver/splunkenterprisereceiver/scraper.go b/receiver/splunkenterprisereceiver/scraper.go index ed731080acad..7d55727e71e4 100644 --- a/receiver/splunkenterprisereceiver/scraper.go +++ b/receiver/splunkenterprisereceiver/scraper.go @@ -102,6 +102,7 @@ func (s *splunkScraper) scrape(ctx context.Context) (pmetric.Metrics, error) { s.scrapeSchedulerRunTimeByHost, s.scrapeIndexerAvgRate, s.scrapeKVStoreStatus, + s.scrapeSearchArtifacts, } errChan := make(chan error, len(metricScrapes)) @@ -1627,3 +1628,90 @@ func (s *splunkScraper) scrapeKVStoreStatus(ctx context.Context, now pcommon.Tim } } } + +// Scrape dispatch artifacts +func (s *splunkScraper) scrapeSearchArtifacts(ctx context.Context, now pcommon.Timestamp, errs chan error) { + if !s.splunkClient.isConfigured(typeSh) { + return + } + + ctx = context.WithValue(ctx, endpointType("type"), typeSh) + var da DispatchArtifacts + + ept := apiDict[`SplunkDispatchArtifacts`] + + req, err := s.splunkClient.createAPIRequest(ctx, ept) + if err != nil { + errs <- err + return + } + + res, err := s.splunkClient.makeRequest(req) + if err != nil { + errs <- err + return + } + defer res.Body.Close() + + body, err := io.ReadAll(res.Body) + if err != nil { + errs <- err + return + } + err = json.Unmarshal(body, &da) + if err != nil { + errs <- err + return + } + + for _, f := range da.Entries { + + if s.conf.MetricsBuilderConfig.Metrics.SplunkServerSearchartifactsAdhoc.Enabled { + adhocCount, err := strconv.ParseInt(f.Content.AdhocCount, 10, 64) + if err != nil { + errs <- err + } + s.mb.RecordSplunkServerSearchartifactsAdhocDataPoint(now, adhocCount, s.conf.SHEndpoint.Endpoint) + } + + if s.conf.MetricsBuilderConfig.Metrics.SplunkServerSearchartifactsScheduled.Enabled { + scheduledCount, err := strconv.ParseInt(f.Content.ScheduledCount, 10, 64) + if err != nil { + errs <- err + } + s.mb.RecordSplunkServerSearchartifactsScheduledDataPoint(now, scheduledCount, s.conf.SHEndpoint.Endpoint) + } + + if s.conf.MetricsBuilderConfig.Metrics.SplunkServerSearchartifactsCompleted.Enabled { + completedCount, err := strconv.ParseInt(f.Content.CompletedCount, 10, 64) + if err != nil { + errs <- err + } + s.mb.RecordSplunkServerSearchartifactsCompletedDataPoint(now, completedCount, s.conf.SHEndpoint.Endpoint) + } + + if s.conf.MetricsBuilderConfig.Metrics.SplunkServerSearchartifactsIncomplete.Enabled { + incompleCount, err := strconv.ParseInt(f.Content.IncompleCount, 10, 64) + if err != nil { + errs <- err + } + s.mb.RecordSplunkServerSearchartifactsIncompleteDataPoint(now, incompleCount, s.conf.SHEndpoint.Endpoint) + } + + if s.conf.MetricsBuilderConfig.Metrics.SplunkServerSearchartifactsInvalid.Enabled { + invalidCount, err := strconv.ParseInt(f.Content.InvalidCount, 10, 64) + if err != nil { + errs <- err + } + s.mb.RecordSplunkServerSearchartifactsInvalidDataPoint(now, invalidCount, s.conf.SHEndpoint.Endpoint) + } + + if s.conf.MetricsBuilderConfig.Metrics.SplunkServerSearchartifactsSavedsearches.Enabled { + savedSearchesCount, err := strconv.ParseInt(f.Content.SsCount, 10, 64) + if err != nil { + errs <- err + } + s.mb.RecordSplunkServerSearchartifactsSavedsearchesDataPoint(now, savedSearchesCount, s.conf.SHEndpoint.Endpoint) + } + } +} diff --git a/receiver/splunkenterprisereceiver/scraper_test.go b/receiver/splunkenterprisereceiver/scraper_test.go index a4a06cc84ee1..0af346d429e2 100644 ---
a/receiver/splunkenterprisereceiver/scraper_test.go +++ b/receiver/splunkenterprisereceiver/scraper_test.go @@ -47,6 +47,13 @@ func mockIntrospectionQueues(w http.ResponseWriter, _ *http.Request) { _, _ = w.Write([]byte(`{"links":{},"origin":"https://somehost:8089/services/server/introspection/queues","updated":"2023-09-18T13:37:45+00:00","generator":{"build":"82c987350fde","version":"9.0.1"},"entry":[{"name":"AEQ","id":"https://somehost:8089/services/server/introspection/queues/AEQ","updated":"1970-01-01T00:00:00+00:00","links":{"alternate":"/services/server/introspection/queues/AEQ","list":"/services/server/introspection/queues/AEQ","edit":"/services/server/introspection/queues/AEQ"},"author":"system","acl":{"app":"","can_list":true,"can_write":true,"modifiable":false,"owner":"system","perms":{"read":["admin","splunk-system-role"],"write":["admin","splunk-system-role"]},"removable":false,"sharing":"system"},"content":{"cntr_1_lookback_time":60,"cntr_2_lookback_time":600,"cntr_3_lookback_time":900,"current_size":1,"current_size_bytes":100,"eai:acl":null,"largest_size":3,"max_size_bytes":512000,"sampling_interval":1,"smallest_size":0,"value_cntr1_size_bytes_lookback":0,"value_cntr1_size_lookback":0,"value_cntr2_size_bytes_lookback":0,"value_cntr2_size_lookback":0,"value_cntr3_size_bytes_lookback":0,"value_cntr3_size_lookback":0}}],"paging":{"total":13,"perPage":1,"offset":0},"messages":[]}`)) } +func mockDispatchArtifacts(w http.ResponseWriter, _ *http.Request) { + status := http.StatusOK + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(status) + _, _ = w.Write([]byte(`{"links":{},"origin":"https://somehost:8089/services/server/status/dispatch-artifacts","updated":"2024-10-24T04:46:47+00:00","generator":{"build":"05775df3af30","version":"9.2.2406.108"},"entry":[{"name":"result","id":"https://somehost:8089/services/server/status/dispatch-artifacts/result","updated":"1970-01-01T00:00:00+00:00","links":{"alternate":"/services/server/status/dispatch-artifacts/result","list":"/services/server/status/dispatch-artifacts/result"},"author":"system","acl":{"app":"","can_list":true,"can_write":true,"modifiable":false,"owner":"system","perms":{"read":["*"],"write":[]},"removable":false,"sharing":"system"},"content":{"adhoc_count":"7","adhoc_size_mb":"1","adhoc_subsearch_count":"0","adhoc_subsearch_size_mb":"0","cached_job_status_info_csv_size_mb":"0","cached_job_status_status_csv_size_mb":"0","cached_job_status_total_entries":"20","completed_count":"20","completed_size_mb":"2","count_summary":"1","disk_usage_MB":"2","eai:acl":null,"incomple_count":"0","incomple_size_mb":"0","invalid_count":"1","remote_count":"0","remote_mb":"0","rsa_count":"0","rsa_scheduled_count":"0","rsa_scheduled_size_mb":"0","rsa_size_mb":"0","scheduled_count":"13","scheduled_size_mb":"1","scheduled_subsearch_count":"0","scheduled_subsearch_size_mb":"0","ss_count":"7","status_cache_info_csv_size_mb":"0","status_cache_status_csv_size_mb":"0","status_cache_total_entries":"20","temp_dispatch_count":"0","temp_dispatch_size_mb":"0","top_apps":{"0":{"splunk_instrumentation":"6"},"1":{"search":"1"}},"top_named_searches":null,"top_users":{"0":{"splunk-system-user":"6"},"1":{"internal_observability":"1"}},"total_count":"7"}}],"paging":{"total":1,"perPage":30,"offset":0},"messages":[]}`)) +} + // mock server create func createMockServer() *httptest.Server { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { @@ -57,6 +64,8 @@ func createMockServer() *httptest.Server { 
mockIndexesExtended(w, r) case "/services/server/introspection/queues?output_mode=json&count=-1": mockIntrospectionQueues(w, r) + case "/services/server/status/dispatch-artifacts?output_mode=json&count=-1": + mockDispatchArtifacts(w, r) default: http.NotFoundHandler().ServeHTTP(w, r) } diff --git a/receiver/splunkenterprisereceiver/search_result.go b/receiver/splunkenterprisereceiver/search_result.go index 2a9146411ef4..69212c4de08d 100644 --- a/receiver/splunkenterprisereceiver/search_result.go +++ b/receiver/splunkenterprisereceiver/search_result.go @@ -24,6 +24,7 @@ var apiDict = map[string]string{ `SplunkDataIndexesExtended`: `/services/data/indexes-extended?output_mode=json&count=-1`, `SplunkIntrospectionQueues`: `/services/server/introspection/queues?output_mode=json&count=-1`, `SplunkKVStoreStatus`: `/services/kvstore/status?output_mode=json`, + `SplunkDispatchArtifacts`: `/services/server/status/dispatch-artifacts?output_mode=json&count=-1`, } type searchResponse struct { @@ -134,3 +135,21 @@ type KVStoreCurrent struct { ReplicationStatus string `json:"replicationStatus"` StorageEngine string `json:"storageEngine"` } + +// '/services/server/status/dispatch-artifacts' +type DispatchArtifacts struct { + Entries []DispatchArtifactEntry `json:"entry"` +} + +type DispatchArtifactEntry struct { + Content DispatchArtifactContent `json:"content"` +} + +type DispatchArtifactContent struct { + AdhocCount string `json:"adhoc_count"` + ScheduledCount string `json:"scheduled_count"` + SsCount string `json:"ss_count"` + CompletedCount string `json:"completed_count"` + IncompleCount string `json:"incomple_count"` + InvalidCount string `json:"invalid_count"` +}
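A closing note for reviewers: the dispatch-artifacts endpoint reports every count as a JSON string, which is why the scraper runs each field through `strconv.ParseInt` before recording a data point. A self-contained sketch of that round trip (not part of the patch, trimmed to a single field):

```go
package main

import (
	"encoding/json"
	"fmt"
	"strconv"
)

// Trimmed mirror of the DispatchArtifacts types added in search_result.go.
type dispatchArtifacts struct {
	Entries []struct {
		Content struct {
			AdhocCount string `json:"adhoc_count"`
		} `json:"content"`
	} `json:"entry"`
}

func main() {
	// Abridged form of the payload served by mockDispatchArtifacts in scraper_test.go.
	payload := []byte(`{"entry":[{"content":{"adhoc_count":"7"}}]}`)

	var da dispatchArtifacts
	if err := json.Unmarshal(payload, &da); err != nil {
		panic(err)
	}

	// Counts arrive as strings, so parse before recording the gauge value.
	adhoc, err := strconv.ParseInt(da.Entries[0].Content.AdhocCount, 10, 64)
	if err != nil {
		panic(err)
	}
	fmt.Println(adhoc) // prints 7
}
```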