From 6156ecb1146775807322a88137752165cb7f2976 Mon Sep 17 00:00:00 2001 From: Saumya Shah <115284013+Saumya40-codes@users.noreply.github.com> Date: Wed, 22 Jan 2025 22:04:28 +0530 Subject: [PATCH 01/12] [receiver/jaegerreceiver] Add logs when the respective server starts in jaegerreceiver (#36961) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit #### Description - This PR adds the logging related to the server starting under the [jaegerreceiver](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver/jaegerreceiver) - Added logs when: GRPC, Thrift Http, Thrift Binary and Thrift compact server starts #### Link to tracking issue Fixes https://github.com/jaegertracing/jaeger/issues/6412 #### How was change tested? Running collector with `jaegerreceiver` enabled with its all transports #### Output ``` @Saumya40-codes ➜ /workspaces/opentelemetry-collector-contrib (log-endpoints) $ ./bin/otelcontribcol_linux_amd64 --config config.yaml 2024-12-27T15:37:23.428Z info service@v0.116.1-0.20241220212031-7c2639723f67/service.go:164 Setting up own telemetry... 2024-12-27T15:37:23.428Z info telemetry/metrics.go:70 Serving metrics {"address": "localhost:8888", "metrics level": "Normal"} 2024-12-27T15:37:23.428Z info builders/builders.go:26 Development component. May change in the future. {"kind": "exporter", "data_type": "traces", "name": "debug"} 2024-12-27T15:37:23.436Z info service@v0.116.1-0.20241220212031-7c2639723f67/service.go:230 Starting otelcontribcol... {"Version": "0.116.0-dev", "NumCPU": 2} 2024-12-27T15:37:23.436Z info extensions/extensions.go:39 Starting extensions... 
2024-12-27T15:37:23.436Z info jaegerreceiver@v0.116.0/trace_receiver.go:252 Starting UDP server for Binary Thrift {"kind": "receiver", "name": "jaeger", "data_type": "traces", "endpoint": "0.0.0.0:6831"} 2024-12-27T15:37:23.436Z info jaegerreceiver@v0.116.0/trace_receiver.go:274 Starting UDP server for Compact Thrift {"kind": "receiver", "name": "jaeger", "data_type": "traces", "endpoint": "0.0.0.0:6832"} 2024-12-27T15:37:23.436Z info jaegerreceiver@v0.116.0/trace_receiver.go:398 Starting HTTP server for Jaeger Collector {"kind": "receiver", "name": "jaeger", "data_type": "traces", "endpoint": "0.0.0.0:14268"} 2024-12-27T15:37:23.436Z info jaegerreceiver@v0.116.0/trace_receiver.go:423 Starting gRPC server for Jaeger Collector {"kind": "receiver", "name": "jaeger", "data_type": "traces", "endpoint": "0.0.0.0:14250"} 2024-12-27T15:37:23.436Z info service@v0.116.1-0.20241220212031-7c2639723f67/service.go:253 Everything is ready. Begin running and processing data. ``` #### config file used ``` receivers: jaeger: protocols: grpc: endpoint: "0.0.0.0:14250" thrift_http: endpoint: "0.0.0.0:14268" thrift_binary: endpoint: "0.0.0.0:6831" thrift_compact: endpoint: "0.0.0.0:6832" exporters: debug: {} service: pipelines: traces: receivers: [jaeger] exporters: [debug] ``` cc @fatsheep9146 --------- Signed-off-by: Saumyacodes-40 Co-authored-by: Yuri Shkuro --- .chloggen/log-jaegerreceiver-endpoints.yaml | 28 +++++++++++++++++++++ receiver/jaegerreceiver/go.mod | 2 +- receiver/jaegerreceiver/trace_receiver.go | 9 +++++++ 3 files changed, 38 insertions(+), 1 deletion(-) create mode 100644 .chloggen/log-jaegerreceiver-endpoints.yaml diff --git a/.chloggen/log-jaegerreceiver-endpoints.yaml b/.chloggen/log-jaegerreceiver-endpoints.yaml new file mode 100644 index 000000000000..c2d735a0fc19 --- /dev/null +++ b/.chloggen/log-jaegerreceiver-endpoints.yaml @@ -0,0 +1,28 @@ +# Use this changelog template to create an entry for release notes. 
+ +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: enhancement + +# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver) +component: jaegerreceiver + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: Log the endpoints of different servers started by jaegerreceiver + +# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. +issues: [36961] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: | + This change logs the endpoints of different servers started by jaegerreceiver. It simplifies debugging by ensuring log messages match configuration settings. + +# If your change doesn't affect end users or the exported elements of any package, +# you should instead start your pull request title with [chore] or use the "Skip Changelog" label. +# Optional: The change log or logs in which this entry should be included. +# e.g. '[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. 
+# Default: '[user]' +change_logs: [user] diff --git a/receiver/jaegerreceiver/go.mod b/receiver/jaegerreceiver/go.mod index b1bd73ddf269..b1c9e2713ccd 100644 --- a/receiver/jaegerreceiver/go.mod +++ b/receiver/jaegerreceiver/go.mod @@ -27,6 +27,7 @@ require ( go.opentelemetry.io/collector/semconv v0.118.1-0.20250121185328-fbefb22cc2b3 go.uber.org/goleak v1.3.0 go.uber.org/multierr v1.11.0 + go.uber.org/zap v1.27.0 google.golang.org/grpc v1.69.4 ) @@ -75,7 +76,6 @@ require ( go.opentelemetry.io/otel/sdk v1.34.0 // indirect go.opentelemetry.io/otel/sdk/metric v1.34.0 // indirect go.opentelemetry.io/otel/trace v1.34.0 // indirect - go.uber.org/zap v1.27.0 // indirect golang.org/x/net v0.34.0 // indirect golang.org/x/sys v0.29.0 // indirect golang.org/x/text v0.21.0 // indirect diff --git a/receiver/jaegerreceiver/trace_receiver.go b/receiver/jaegerreceiver/trace_receiver.go index af204faa68f0..73c484e7d75d 100644 --- a/receiver/jaegerreceiver/trace_receiver.go +++ b/receiver/jaegerreceiver/trace_receiver.go @@ -30,6 +30,7 @@ import ( "go.opentelemetry.io/collector/receiver" "go.opentelemetry.io/collector/receiver/receiverhelper" "go.uber.org/multierr" + "go.uber.org/zap" "google.golang.org/grpc" jaegertranslator "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger" @@ -214,6 +215,8 @@ func (jr *jReceiver) startAgent() error { return err } jr.agentProcessors = append(jr.agentProcessors, processor) + + jr.settings.Logger.Info("Starting UDP server for Binary Thrift", zap.String("endpoint", jr.config.ThriftBinaryUDP.Endpoint)) } if jr.config.ThriftCompactUDP != nil { @@ -234,6 +237,8 @@ func (jr *jReceiver) startAgent() error { return err } jr.agentProcessors = append(jr.agentProcessors, processor) + + jr.settings.Logger.Info("Starting UDP server for Compact Thrift", zap.String("endpoint", jr.config.ThriftCompactUDP.Endpoint)) } jr.goroutines.Add(len(jr.agentProcessors)) @@ -339,6 +344,8 @@ func (jr *jReceiver) startCollector(ctx 
context.Context, host component.Host) er return err } + jr.settings.Logger.Info("Starting HTTP server for Jaeger Thrift", zap.String("endpoint", jr.config.ThriftHTTP.Endpoint)) + jr.goroutines.Add(1) go func() { defer jr.goroutines.Done() @@ -362,6 +369,8 @@ func (jr *jReceiver) startCollector(ctx context.Context, host component.Host) er api_v2.RegisterCollectorServiceServer(jr.grpc, jr) + jr.settings.Logger.Info("Starting gRPC server for Jaeger Protobuf", zap.String("endpoint", jr.config.GRPC.NetAddr.Endpoint)) + jr.goroutines.Add(1) go func() { defer jr.goroutines.Done() From 4ebb7af870f1fef25d0574b2996f4dfcbd876242 Mon Sep 17 00:00:00 2001 From: Edmo Vamerlatti Costa <11836452+edmocosta@users.noreply.github.com> Date: Wed, 22 Jan 2025 17:46:13 +0100 Subject: [PATCH 02/12] [processor/transform] Replace ParserCollection and add initial support for context inference (#37272) --- ...d-add-intial-contextinference-support.yaml | 27 + processor/transformprocessor/config_test.go | 56 +- .../internal/common/config.go | 16 + .../internal/common/logs.go | 100 +-- .../internal/common/metrics.go | 164 ++-- .../internal/common/processor.go | 147 ++-- .../internal/common/traces.go | 136 ++-- .../internal/logs/processor.go | 7 +- .../internal/logs/processor_test.go | 428 ++++++++++- .../internal/metrics/processor.go | 7 +- .../internal/metrics/processor_test.go | 712 +++++++++++++++++- .../internal/traces/processor.go | 7 +- .../internal/traces/processor_test.go | 414 +++++++++- .../transformprocessor/testdata/config.yaml | 28 + 14 files changed, 1938 insertions(+), 311 deletions(-) create mode 100644 .chloggen/replace-pc-and-add-intial-contextinference-support.yaml diff --git a/.chloggen/replace-pc-and-add-intial-contextinference-support.yaml b/.chloggen/replace-pc-and-add-intial-contextinference-support.yaml new file mode 100644 index 000000000000..9299d503395c --- /dev/null +++ b/.chloggen/replace-pc-and-add-intial-contextinference-support.yaml @@ -0,0 +1,27 @@ +# Use this 
changelog template to create an entry for release notes. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: enhancement + +# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver) +component: processor/transformprocessor + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: Replace parser collection implementations with `ottl.ParserCollection` and add initial support for expressing statement's context via path names. + +# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. +issues: [29017] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: + +# If your change doesn't affect end users or the exported elements of any package, +# you should instead start your pull request title with [chore] or use the "Skip Changelog" label. +# Optional: The change log or logs in which this entry should be included. +# e.g. '[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. 
+# Default: '[user]' +change_logs: [user] diff --git a/processor/transformprocessor/config_test.go b/processor/transformprocessor/config_test.go index fe30c4c58ac1..e736707cd706 100644 --- a/processor/transformprocessor/config_test.go +++ b/processor/transformprocessor/config_test.go @@ -147,9 +147,63 @@ func TestLoadConfig(t *testing.T) { id: component.NewIDWithName(metadata.Type, "bad_syntax_multi_signal"), errorLen: 3, }, + { + id: component.NewIDWithName(metadata.Type, "structured_configuration_with_path_context"), + expected: &Config{ + ErrorMode: ottl.PropagateError, + TraceStatements: []common.ContextStatements{ + { + Context: "span", + Statements: []string{`set(span.name, "bear") where span.attributes["http.path"] == "/animal"`}, + }, + }, + MetricStatements: []common.ContextStatements{ + { + Context: "metric", + Statements: []string{`set(metric.name, "bear") where resource.attributes["http.path"] == "/animal"`}, + }, + }, + LogStatements: []common.ContextStatements{ + { + Context: "log", + Statements: []string{`set(log.body, "bear") where log.attributes["http.path"] == "/animal"`}, + }, + }, + }, + }, + { + id: component.NewIDWithName(metadata.Type, "structured_configuration_with_inferred_context"), + expected: &Config{ + ErrorMode: ottl.PropagateError, + TraceStatements: []common.ContextStatements{ + { + Statements: []string{ + `set(span.name, "bear") where span.attributes["http.path"] == "/animal"`, + `set(resource.attributes["name"], "bear")`, + }, + }, + }, + MetricStatements: []common.ContextStatements{ + { + Statements: []string{ + `set(metric.name, "bear") where resource.attributes["http.path"] == "/animal"`, + `set(resource.attributes["name"], "bear")`, + }, + }, + }, + LogStatements: []common.ContextStatements{ + { + Statements: []string{ + `set(log.body, "bear") where log.attributes["http.path"] == "/animal"`, + `set(resource.attributes["name"], "bear")`, + }, + }, + }, + }, + }, } for _, tt := range tests { - t.Run(tt.id.String(), func(t 
*testing.T) { + t.Run(tt.id.Name(), func(t *testing.T) { cm, err := confmaptest.LoadConf(filepath.Join("testdata", "config.yaml")) assert.NoError(t, err) diff --git a/processor/transformprocessor/internal/common/config.go b/processor/transformprocessor/internal/common/config.go index c0f293457329..79087389d644 100644 --- a/processor/transformprocessor/internal/common/config.go +++ b/processor/transformprocessor/internal/common/config.go @@ -6,8 +6,12 @@ package common // import "github.com/open-telemetry/opentelemetry-collector-cont import ( "fmt" "strings" + + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl" ) +var _ ottl.StatementsGetter = (*ContextStatements)(nil) + type ContextID string const ( @@ -36,3 +40,15 @@ type ContextStatements struct { Conditions []string `mapstructure:"conditions"` Statements []string `mapstructure:"statements"` } + +func (c ContextStatements) GetStatements() []string { + return c.Statements +} + +func toContextStatements(statements any) (*ContextStatements, error) { + contextStatements, ok := statements.(ContextStatements) + if !ok { + return nil, fmt.Errorf("invalid context statements type, expected: common.ContextStatements, got: %T", statements) + } + return &contextStatements, nil +} diff --git a/processor/transformprocessor/internal/common/logs.go b/processor/transformprocessor/internal/common/logs.go index 4d9726c38260..711cbc418396 100644 --- a/processor/transformprocessor/internal/common/logs.go +++ b/processor/transformprocessor/internal/common/logs.go @@ -7,38 +7,37 @@ import ( "context" "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/plog" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/expr" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterottl" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl" 
"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottllog" - "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlresource" - "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlscope" ) -var _ consumer.Logs = &logStatements{} +type LogsConsumer interface { + Context() ContextID + ConsumeLogs(ctx context.Context, ld plog.Logs, cache *pcommon.Map) error +} type logStatements struct { ottl.StatementSequence[ottllog.TransformContext] expr.BoolExpr[ottllog.TransformContext] } -func (l logStatements) Capabilities() consumer.Capabilities { - return consumer.Capabilities{ - MutatesData: true, - } +func (l logStatements) Context() ContextID { + return Log } -func (l logStatements) ConsumeLogs(ctx context.Context, ld plog.Logs) error { +func (l logStatements) ConsumeLogs(ctx context.Context, ld plog.Logs, cache *pcommon.Map) error { for i := 0; i < ld.ResourceLogs().Len(); i++ { rlogs := ld.ResourceLogs().At(i) for j := 0; j < rlogs.ScopeLogs().Len(); j++ { slogs := rlogs.ScopeLogs().At(j) logs := slogs.LogRecords() for k := 0; k < logs.Len(); k++ { - tCtx := ottllog.NewTransformContext(logs.At(k), slogs.Scope(), rlogs.Resource(), slogs, rlogs) + tCtx := ottllog.NewTransformContext(logs.At(k), slogs.Scope(), rlogs.Resource(), slogs, rlogs, ottllog.WithCache(cache)) condition, err := l.BoolExpr.Eval(ctx, tCtx) if err != nil { return err @@ -55,76 +54,59 @@ func (l logStatements) ConsumeLogs(ctx context.Context, ld plog.Logs) error { return nil } -type LogParserCollection struct { - parserCollection - logParser ottl.Parser[ottllog.TransformContext] -} +type LogParserCollection ottl.ParserCollection[LogsConsumer] -type LogParserCollectionOption func(*LogParserCollection) error +type LogParserCollectionOption ottl.ParserCollectionOption[LogsConsumer] func WithLogParser(functions map[string]ottl.Factory[ottllog.TransformContext]) LogParserCollectionOption { - return func(lp *LogParserCollection) 
error { - logParser, err := ottllog.NewParser(functions, lp.settings) + return func(pc *ottl.ParserCollection[LogsConsumer]) error { + logParser, err := ottllog.NewParser(functions, pc.Settings, ottllog.EnablePathContextNames()) if err != nil { return err } - lp.logParser = logParser - return nil + return ottl.WithParserCollectionContext(ottllog.ContextName, &logParser, convertLogStatements)(pc) } } func WithLogErrorMode(errorMode ottl.ErrorMode) LogParserCollectionOption { - return func(lp *LogParserCollection) error { - lp.errorMode = errorMode - return nil - } + return LogParserCollectionOption(ottl.WithParserCollectionErrorMode[LogsConsumer](errorMode)) } func NewLogParserCollection(settings component.TelemetrySettings, options ...LogParserCollectionOption) (*LogParserCollection, error) { - rp, err := ottlresource.NewParser(ResourceFunctions(), settings) + pcOptions := []ottl.ParserCollectionOption[LogsConsumer]{ + withCommonContextParsers[LogsConsumer](), + } + + for _, option := range options { + pcOptions = append(pcOptions, ottl.ParserCollectionOption[LogsConsumer](option)) + } + + pc, err := ottl.NewParserCollection(settings, pcOptions...) 
if err != nil { return nil, err } - sp, err := ottlscope.NewParser(ScopeFunctions(), settings) + + lpc := LogParserCollection(*pc) + return &lpc, nil +} + +func convertLogStatements(pc *ottl.ParserCollection[LogsConsumer], _ *ottl.Parser[ottllog.TransformContext], _ string, statements ottl.StatementsGetter, parsedStatements []*ottl.Statement[ottllog.TransformContext]) (LogsConsumer, error) { + contextStatements, err := toContextStatements(statements) if err != nil { return nil, err } - lpc := &LogParserCollection{ - parserCollection: parserCollection{ - settings: settings, - resourceParser: rp, - scopeParser: sp, - }, + globalExpr, errGlobalBoolExpr := parseGlobalExpr(filterottl.NewBoolExprForLog, contextStatements.Conditions, pc.ErrorMode, pc.Settings, filterottl.StandardLogFuncs()) + if errGlobalBoolExpr != nil { + return nil, errGlobalBoolExpr } - - for _, op := range options { - err := op(lpc) - if err != nil { - return nil, err - } - } - - return lpc, nil + lStatements := ottllog.NewStatementSequence(parsedStatements, pc.Settings, ottllog.WithStatementSequenceErrorMode(pc.ErrorMode)) + return logStatements{lStatements, globalExpr}, nil } -func (pc LogParserCollection) ParseContextStatements(contextStatements ContextStatements) (consumer.Logs, error) { - switch contextStatements.Context { - case Log: - parsedStatements, err := pc.logParser.ParseStatements(contextStatements.Statements) - if err != nil { - return nil, err - } - globalExpr, errGlobalBoolExpr := parseGlobalExpr(filterottl.NewBoolExprForLog, contextStatements.Conditions, pc.parserCollection, filterottl.StandardLogFuncs()) - if errGlobalBoolExpr != nil { - return nil, errGlobalBoolExpr - } - lStatements := ottllog.NewStatementSequence(parsedStatements, pc.settings, ottllog.WithStatementSequenceErrorMode(pc.errorMode)) - return logStatements{lStatements, globalExpr}, nil - default: - statements, err := pc.parseCommonContextStatements(contextStatements) - if err != nil { - return nil, err - } - return 
statements, nil +func (lpc *LogParserCollection) ParseContextStatements(contextStatements ContextStatements) (LogsConsumer, error) { + pc := ottl.ParserCollection[LogsConsumer](*lpc) + if contextStatements.Context != "" { + return pc.ParseStatementsWithContext(string(contextStatements.Context), contextStatements, true) } + return pc.ParseStatements(contextStatements) } diff --git a/processor/transformprocessor/internal/common/metrics.go b/processor/transformprocessor/internal/common/metrics.go index f4cec79cd15e..82f5434d18e4 100644 --- a/processor/transformprocessor/internal/common/metrics.go +++ b/processor/transformprocessor/internal/common/metrics.go @@ -7,7 +7,6 @@ import ( "context" "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/pmetric" @@ -16,31 +15,30 @@ import ( "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottldatapoint" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlmetric" - "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlresource" - "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlscope" ) -var _ consumer.Metrics = &metricStatements{} +type MetricsConsumer interface { + Context() ContextID + ConsumeMetrics(ctx context.Context, md pmetric.Metrics, cache *pcommon.Map) error +} type metricStatements struct { ottl.StatementSequence[ottlmetric.TransformContext] expr.BoolExpr[ottlmetric.TransformContext] } -func (m metricStatements) Capabilities() consumer.Capabilities { - return consumer.Capabilities{ - MutatesData: true, - } +func (m metricStatements) Context() ContextID { + return Metric } -func (m metricStatements) ConsumeMetrics(ctx context.Context, md pmetric.Metrics) error { +func (m metricStatements) ConsumeMetrics(ctx context.Context, 
md pmetric.Metrics, cache *pcommon.Map) error { for i := 0; i < md.ResourceMetrics().Len(); i++ { rmetrics := md.ResourceMetrics().At(i) for j := 0; j < rmetrics.ScopeMetrics().Len(); j++ { smetrics := rmetrics.ScopeMetrics().At(j) metrics := smetrics.Metrics() for k := 0; k < metrics.Len(); k++ { - tCtx := ottlmetric.NewTransformContext(metrics.At(k), smetrics.Metrics(), smetrics.Scope(), rmetrics.Resource(), smetrics, rmetrics) + tCtx := ottlmetric.NewTransformContext(metrics.At(k), smetrics.Metrics(), smetrics.Scope(), rmetrics.Resource(), smetrics, rmetrics, ottlmetric.WithCache(cache)) condition, err := m.BoolExpr.Eval(ctx, tCtx) if err != nil { return err @@ -57,20 +55,16 @@ func (m metricStatements) ConsumeMetrics(ctx context.Context, md pmetric.Metrics return nil } -var _ consumer.Metrics = &dataPointStatements{} - type dataPointStatements struct { ottl.StatementSequence[ottldatapoint.TransformContext] expr.BoolExpr[ottldatapoint.TransformContext] } -func (d dataPointStatements) Capabilities() consumer.Capabilities { - return consumer.Capabilities{ - MutatesData: true, - } +func (d dataPointStatements) Context() ContextID { + return DataPoint } -func (d dataPointStatements) ConsumeMetrics(ctx context.Context, md pmetric.Metrics) error { +func (d dataPointStatements) ConsumeMetrics(ctx context.Context, md pmetric.Metrics, cache *pcommon.Map) error { for i := 0; i < md.ResourceMetrics().Len(); i++ { rmetrics := md.ResourceMetrics().At(i) for j := 0; j < rmetrics.ScopeMetrics().Len(); j++ { @@ -78,19 +72,20 @@ func (d dataPointStatements) ConsumeMetrics(ctx context.Context, md pmetric.Metr metrics := smetrics.Metrics() for k := 0; k < metrics.Len(); k++ { metric := metrics.At(k) + transformContextOptions := []ottldatapoint.TransformContextOption{ottldatapoint.WithCache(cache)} var err error //exhaustive:enforce switch metric.Type() { case pmetric.MetricTypeSum: - err = d.handleNumberDataPoints(ctx, metric.Sum().DataPoints(), metrics.At(k), metrics, 
smetrics.Scope(), rmetrics.Resource(), smetrics, rmetrics) + err = d.handleNumberDataPoints(ctx, metric.Sum().DataPoints(), metrics.At(k), metrics, smetrics.Scope(), rmetrics.Resource(), smetrics, rmetrics, transformContextOptions) case pmetric.MetricTypeGauge: - err = d.handleNumberDataPoints(ctx, metric.Gauge().DataPoints(), metrics.At(k), metrics, smetrics.Scope(), rmetrics.Resource(), smetrics, rmetrics) + err = d.handleNumberDataPoints(ctx, metric.Gauge().DataPoints(), metrics.At(k), metrics, smetrics.Scope(), rmetrics.Resource(), smetrics, rmetrics, transformContextOptions) case pmetric.MetricTypeHistogram: - err = d.handleHistogramDataPoints(ctx, metric.Histogram().DataPoints(), metrics.At(k), metrics, smetrics.Scope(), rmetrics.Resource(), smetrics, rmetrics) + err = d.handleHistogramDataPoints(ctx, metric.Histogram().DataPoints(), metrics.At(k), metrics, smetrics.Scope(), rmetrics.Resource(), smetrics, rmetrics, transformContextOptions) case pmetric.MetricTypeExponentialHistogram: - err = d.handleExponentialHistogramDataPoints(ctx, metric.ExponentialHistogram().DataPoints(), metrics.At(k), metrics, smetrics.Scope(), rmetrics.Resource(), smetrics, rmetrics) + err = d.handleExponentialHistogramDataPoints(ctx, metric.ExponentialHistogram().DataPoints(), metrics.At(k), metrics, smetrics.Scope(), rmetrics.Resource(), smetrics, rmetrics, transformContextOptions) case pmetric.MetricTypeSummary: - err = d.handleSummaryDataPoints(ctx, metric.Summary().DataPoints(), metrics.At(k), metrics, smetrics.Scope(), rmetrics.Resource(), smetrics, rmetrics) + err = d.handleSummaryDataPoints(ctx, metric.Summary().DataPoints(), metrics.At(k), metrics, smetrics.Scope(), rmetrics.Resource(), smetrics, rmetrics, transformContextOptions) } if err != nil { return err @@ -101,9 +96,9 @@ func (d dataPointStatements) ConsumeMetrics(ctx context.Context, md pmetric.Metr return nil } -func (d dataPointStatements) handleNumberDataPoints(ctx context.Context, dps 
pmetric.NumberDataPointSlice, metric pmetric.Metric, metrics pmetric.MetricSlice, is pcommon.InstrumentationScope, resource pcommon.Resource, scopeMetrics pmetric.ScopeMetrics, resourceMetrics pmetric.ResourceMetrics) error { +func (d dataPointStatements) handleNumberDataPoints(ctx context.Context, dps pmetric.NumberDataPointSlice, metric pmetric.Metric, metrics pmetric.MetricSlice, is pcommon.InstrumentationScope, resource pcommon.Resource, scopeMetrics pmetric.ScopeMetrics, resourceMetrics pmetric.ResourceMetrics, options []ottldatapoint.TransformContextOption) error { for i := 0; i < dps.Len(); i++ { - tCtx := ottldatapoint.NewTransformContext(dps.At(i), metric, metrics, is, resource, scopeMetrics, resourceMetrics) + tCtx := ottldatapoint.NewTransformContext(dps.At(i), metric, metrics, is, resource, scopeMetrics, resourceMetrics, options...) condition, err := d.BoolExpr.Eval(ctx, tCtx) if err != nil { return err @@ -118,9 +113,9 @@ func (d dataPointStatements) handleNumberDataPoints(ctx context.Context, dps pme return nil } -func (d dataPointStatements) handleHistogramDataPoints(ctx context.Context, dps pmetric.HistogramDataPointSlice, metric pmetric.Metric, metrics pmetric.MetricSlice, is pcommon.InstrumentationScope, resource pcommon.Resource, scopeMetrics pmetric.ScopeMetrics, resourceMetrics pmetric.ResourceMetrics) error { +func (d dataPointStatements) handleHistogramDataPoints(ctx context.Context, dps pmetric.HistogramDataPointSlice, metric pmetric.Metric, metrics pmetric.MetricSlice, is pcommon.InstrumentationScope, resource pcommon.Resource, scopeMetrics pmetric.ScopeMetrics, resourceMetrics pmetric.ResourceMetrics, options []ottldatapoint.TransformContextOption) error { for i := 0; i < dps.Len(); i++ { - tCtx := ottldatapoint.NewTransformContext(dps.At(i), metric, metrics, is, resource, scopeMetrics, resourceMetrics) + tCtx := ottldatapoint.NewTransformContext(dps.At(i), metric, metrics, is, resource, scopeMetrics, resourceMetrics, options...) 
condition, err := d.BoolExpr.Eval(ctx, tCtx) if err != nil { return err @@ -135,9 +130,9 @@ func (d dataPointStatements) handleHistogramDataPoints(ctx context.Context, dps return nil } -func (d dataPointStatements) handleExponentialHistogramDataPoints(ctx context.Context, dps pmetric.ExponentialHistogramDataPointSlice, metric pmetric.Metric, metrics pmetric.MetricSlice, is pcommon.InstrumentationScope, resource pcommon.Resource, scopeMetrics pmetric.ScopeMetrics, resourceMetrics pmetric.ResourceMetrics) error { +func (d dataPointStatements) handleExponentialHistogramDataPoints(ctx context.Context, dps pmetric.ExponentialHistogramDataPointSlice, metric pmetric.Metric, metrics pmetric.MetricSlice, is pcommon.InstrumentationScope, resource pcommon.Resource, scopeMetrics pmetric.ScopeMetrics, resourceMetrics pmetric.ResourceMetrics, options []ottldatapoint.TransformContextOption) error { for i := 0; i < dps.Len(); i++ { - tCtx := ottldatapoint.NewTransformContext(dps.At(i), metric, metrics, is, resource, scopeMetrics, resourceMetrics) + tCtx := ottldatapoint.NewTransformContext(dps.At(i), metric, metrics, is, resource, scopeMetrics, resourceMetrics, options...) 
condition, err := d.BoolExpr.Eval(ctx, tCtx) if err != nil { return err @@ -152,9 +147,9 @@ func (d dataPointStatements) handleExponentialHistogramDataPoints(ctx context.Co return nil } -func (d dataPointStatements) handleSummaryDataPoints(ctx context.Context, dps pmetric.SummaryDataPointSlice, metric pmetric.Metric, metrics pmetric.MetricSlice, is pcommon.InstrumentationScope, resource pcommon.Resource, scopeMetrics pmetric.ScopeMetrics, resourceMetrics pmetric.ResourceMetrics) error { +func (d dataPointStatements) handleSummaryDataPoints(ctx context.Context, dps pmetric.SummaryDataPointSlice, metric pmetric.Metric, metrics pmetric.MetricSlice, is pcommon.InstrumentationScope, resource pcommon.Resource, scopeMetrics pmetric.ScopeMetrics, resourceMetrics pmetric.ResourceMetrics, options []ottldatapoint.TransformContextOption) error { for i := 0; i < dps.Len(); i++ { - tCtx := ottldatapoint.NewTransformContext(dps.At(i), metric, metrics, is, resource, scopeMetrics, resourceMetrics) + tCtx := ottldatapoint.NewTransformContext(dps.At(i), metric, metrics, is, resource, scopeMetrics, resourceMetrics, options...) 
condition, err := d.BoolExpr.Eval(ctx, tCtx) if err != nil { return err @@ -169,99 +164,82 @@ func (d dataPointStatements) handleSummaryDataPoints(ctx context.Context, dps pm return nil } -type MetricParserCollection struct { - parserCollection - metricParser ottl.Parser[ottlmetric.TransformContext] - dataPointParser ottl.Parser[ottldatapoint.TransformContext] -} +type MetricParserCollection ottl.ParserCollection[MetricsConsumer] -type MetricParserCollectionOption func(*MetricParserCollection) error +type MetricParserCollectionOption ottl.ParserCollectionOption[MetricsConsumer] func WithMetricParser(functions map[string]ottl.Factory[ottlmetric.TransformContext]) MetricParserCollectionOption { - return func(mp *MetricParserCollection) error { - metricParser, err := ottlmetric.NewParser(functions, mp.settings) + return func(pc *ottl.ParserCollection[MetricsConsumer]) error { + metricParser, err := ottlmetric.NewParser(functions, pc.Settings, ottlmetric.EnablePathContextNames()) if err != nil { return err } - mp.metricParser = metricParser - return nil + return ottl.WithParserCollectionContext(ottlmetric.ContextName, &metricParser, convertMetricStatements)(pc) } } func WithDataPointParser(functions map[string]ottl.Factory[ottldatapoint.TransformContext]) MetricParserCollectionOption { - return func(mp *MetricParserCollection) error { - dataPointParser, err := ottldatapoint.NewParser(functions, mp.settings) + return func(pc *ottl.ParserCollection[MetricsConsumer]) error { + dataPointParser, err := ottldatapoint.NewParser(functions, pc.Settings, ottldatapoint.EnablePathContextNames()) if err != nil { return err } - mp.dataPointParser = dataPointParser - return nil + return ottl.WithParserCollectionContext(ottldatapoint.ContextName, &dataPointParser, convertDataPointStatements)(pc) } } func WithMetricErrorMode(errorMode ottl.ErrorMode) MetricParserCollectionOption { - return func(mp *MetricParserCollection) error { - mp.errorMode = errorMode - return nil - } + return 
MetricParserCollectionOption(ottl.WithParserCollectionErrorMode[MetricsConsumer](errorMode)) } func NewMetricParserCollection(settings component.TelemetrySettings, options ...MetricParserCollectionOption) (*MetricParserCollection, error) { - rp, err := ottlresource.NewParser(ResourceFunctions(), settings) + pcOptions := []ottl.ParserCollectionOption[MetricsConsumer]{ + withCommonContextParsers[MetricsConsumer](), + } + + for _, option := range options { + pcOptions = append(pcOptions, ottl.ParserCollectionOption[MetricsConsumer](option)) + } + + pc, err := ottl.NewParserCollection(settings, pcOptions...) if err != nil { return nil, err } - sp, err := ottlscope.NewParser(ScopeFunctions(), settings) + + mpc := MetricParserCollection(*pc) + return &mpc, nil +} + +func convertMetricStatements(pc *ottl.ParserCollection[MetricsConsumer], _ *ottl.Parser[ottlmetric.TransformContext], _ string, statements ottl.StatementsGetter, parsedStatements []*ottl.Statement[ottlmetric.TransformContext]) (MetricsConsumer, error) { + contextStatements, err := toContextStatements(statements) if err != nil { return nil, err } - mpc := &MetricParserCollection{ - parserCollection: parserCollection{ - settings: settings, - resourceParser: rp, - scopeParser: sp, - }, + globalExpr, errGlobalBoolExpr := parseGlobalExpr(filterottl.NewBoolExprForMetric, contextStatements.Conditions, pc.ErrorMode, pc.Settings, filterottl.StandardMetricFuncs()) + if errGlobalBoolExpr != nil { + return nil, errGlobalBoolExpr } + mStatements := ottlmetric.NewStatementSequence(parsedStatements, pc.Settings, ottlmetric.WithStatementSequenceErrorMode(pc.ErrorMode)) + return metricStatements{mStatements, globalExpr}, nil +} - for _, op := range options { - err := op(mpc) - if err != nil { - return nil, err - } +func convertDataPointStatements(pc *ottl.ParserCollection[MetricsConsumer], _ *ottl.Parser[ottldatapoint.TransformContext], _ string, statements ottl.StatementsGetter, parsedStatements 
[]*ottl.Statement[ottldatapoint.TransformContext]) (MetricsConsumer, error) { + contextStatements, err := toContextStatements(statements) + if err != nil { + return nil, err } - - return mpc, nil + globalExpr, errGlobalBoolExpr := parseGlobalExpr(filterottl.NewBoolExprForDataPoint, contextStatements.Conditions, pc.ErrorMode, pc.Settings, filterottl.StandardDataPointFuncs()) + if errGlobalBoolExpr != nil { + return nil, errGlobalBoolExpr + } + dpStatements := ottldatapoint.NewStatementSequence(parsedStatements, pc.Settings, ottldatapoint.WithStatementSequenceErrorMode(pc.ErrorMode)) + return dataPointStatements{dpStatements, globalExpr}, nil } -func (pc MetricParserCollection) ParseContextStatements(contextStatements ContextStatements) (consumer.Metrics, error) { - switch contextStatements.Context { - case Metric: - parseStatements, err := pc.metricParser.ParseStatements(contextStatements.Statements) - if err != nil { - return nil, err - } - globalExpr, errGlobalBoolExpr := parseGlobalExpr(filterottl.NewBoolExprForMetric, contextStatements.Conditions, pc.parserCollection, filterottl.StandardMetricFuncs()) - if errGlobalBoolExpr != nil { - return nil, errGlobalBoolExpr - } - mStatements := ottlmetric.NewStatementSequence(parseStatements, pc.settings, ottlmetric.WithStatementSequenceErrorMode(pc.errorMode)) - return metricStatements{mStatements, globalExpr}, nil - case DataPoint: - parsedStatements, err := pc.dataPointParser.ParseStatements(contextStatements.Statements) - if err != nil { - return nil, err - } - globalExpr, errGlobalBoolExpr := parseGlobalExpr(filterottl.NewBoolExprForDataPoint, contextStatements.Conditions, pc.parserCollection, filterottl.StandardDataPointFuncs()) - if errGlobalBoolExpr != nil { - return nil, errGlobalBoolExpr - } - dpStatements := ottldatapoint.NewStatementSequence(parsedStatements, pc.settings, ottldatapoint.WithStatementSequenceErrorMode(pc.errorMode)) - return dataPointStatements{dpStatements, globalExpr}, nil - default: - 
statements, err := pc.parseCommonContextStatements(contextStatements) - if err != nil { - return nil, err - } - return statements, nil +func (mpc *MetricParserCollection) ParseContextStatements(contextStatements ContextStatements) (MetricsConsumer, error) { + pc := ottl.ParserCollection[MetricsConsumer](*mpc) + if contextStatements.Context != "" { + return pc.ParseStatementsWithContext(string(contextStatements.Context), contextStatements, true) } + return pc.ParseStatements(contextStatements) } diff --git a/processor/transformprocessor/internal/common/processor.go b/processor/transformprocessor/internal/common/processor.go index 40b984ed4ee5..4bc86eb518d8 100644 --- a/processor/transformprocessor/internal/common/processor.go +++ b/processor/transformprocessor/internal/common/processor.go @@ -5,10 +5,9 @@ package common // import "github.com/open-telemetry/opentelemetry-collector-cont import ( "context" - "fmt" "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/plog" "go.opentelemetry.io/collector/pdata/pmetric" "go.opentelemetry.io/collector/pdata/ptrace" @@ -20,28 +19,21 @@ import ( "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlscope" ) -var ( - _ consumer.Traces = &resourceStatements{} - _ consumer.Metrics = &resourceStatements{} - _ consumer.Logs = &resourceStatements{} - _ baseContext = &resourceStatements{} -) +var _ baseContext = &resourceStatements{} type resourceStatements struct { ottl.StatementSequence[ottlresource.TransformContext] expr.BoolExpr[ottlresource.TransformContext] } -func (r resourceStatements) Capabilities() consumer.Capabilities { - return consumer.Capabilities{ - MutatesData: true, - } +func (r resourceStatements) Context() ContextID { + return Resource } -func (r resourceStatements) ConsumeTraces(ctx context.Context, td ptrace.Traces) error { +func (r resourceStatements) 
ConsumeTraces(ctx context.Context, td ptrace.Traces, cache *pcommon.Map) error { for i := 0; i < td.ResourceSpans().Len(); i++ { rspans := td.ResourceSpans().At(i) - tCtx := ottlresource.NewTransformContext(rspans.Resource(), rspans) + tCtx := ottlresource.NewTransformContext(rspans.Resource(), rspans, ottlresource.WithCache(cache)) condition, err := r.BoolExpr.Eval(ctx, tCtx) if err != nil { return err @@ -56,10 +48,10 @@ func (r resourceStatements) ConsumeTraces(ctx context.Context, td ptrace.Traces) return nil } -func (r resourceStatements) ConsumeMetrics(ctx context.Context, md pmetric.Metrics) error { +func (r resourceStatements) ConsumeMetrics(ctx context.Context, md pmetric.Metrics, cache *pcommon.Map) error { for i := 0; i < md.ResourceMetrics().Len(); i++ { rmetrics := md.ResourceMetrics().At(i) - tCtx := ottlresource.NewTransformContext(rmetrics.Resource(), rmetrics) + tCtx := ottlresource.NewTransformContext(rmetrics.Resource(), rmetrics, ottlresource.WithCache(cache)) condition, err := r.BoolExpr.Eval(ctx, tCtx) if err != nil { return err @@ -74,10 +66,10 @@ func (r resourceStatements) ConsumeMetrics(ctx context.Context, md pmetric.Metri return nil } -func (r resourceStatements) ConsumeLogs(ctx context.Context, ld plog.Logs) error { +func (r resourceStatements) ConsumeLogs(ctx context.Context, ld plog.Logs, cache *pcommon.Map) error { for i := 0; i < ld.ResourceLogs().Len(); i++ { rlogs := ld.ResourceLogs().At(i) - tCtx := ottlresource.NewTransformContext(rlogs.Resource(), rlogs) + tCtx := ottlresource.NewTransformContext(rlogs.Resource(), rlogs, ottlresource.WithCache(cache)) condition, err := r.BoolExpr.Eval(ctx, tCtx) if err != nil { return err @@ -92,30 +84,23 @@ func (r resourceStatements) ConsumeLogs(ctx context.Context, ld plog.Logs) error return nil } -var ( - _ consumer.Traces = &scopeStatements{} - _ consumer.Metrics = &scopeStatements{} - _ consumer.Logs = &scopeStatements{} - _ baseContext = &scopeStatements{} -) +var _ baseContext = 
&scopeStatements{} type scopeStatements struct { ottl.StatementSequence[ottlscope.TransformContext] expr.BoolExpr[ottlscope.TransformContext] } -func (s scopeStatements) Capabilities() consumer.Capabilities { - return consumer.Capabilities{ - MutatesData: true, - } +func (s scopeStatements) Context() ContextID { + return Scope } -func (s scopeStatements) ConsumeTraces(ctx context.Context, td ptrace.Traces) error { +func (s scopeStatements) ConsumeTraces(ctx context.Context, td ptrace.Traces, cache *pcommon.Map) error { for i := 0; i < td.ResourceSpans().Len(); i++ { rspans := td.ResourceSpans().At(i) for j := 0; j < rspans.ScopeSpans().Len(); j++ { sspans := rspans.ScopeSpans().At(j) - tCtx := ottlscope.NewTransformContext(sspans.Scope(), rspans.Resource(), sspans) + tCtx := ottlscope.NewTransformContext(sspans.Scope(), rspans.Resource(), sspans, ottlscope.WithCache(cache)) condition, err := s.BoolExpr.Eval(ctx, tCtx) if err != nil { return err @@ -131,12 +116,12 @@ func (s scopeStatements) ConsumeTraces(ctx context.Context, td ptrace.Traces) er return nil } -func (s scopeStatements) ConsumeMetrics(ctx context.Context, md pmetric.Metrics) error { +func (s scopeStatements) ConsumeMetrics(ctx context.Context, md pmetric.Metrics, cache *pcommon.Map) error { for i := 0; i < md.ResourceMetrics().Len(); i++ { rmetrics := md.ResourceMetrics().At(i) for j := 0; j < rmetrics.ScopeMetrics().Len(); j++ { smetrics := rmetrics.ScopeMetrics().At(j) - tCtx := ottlscope.NewTransformContext(smetrics.Scope(), rmetrics.Resource(), smetrics) + tCtx := ottlscope.NewTransformContext(smetrics.Scope(), rmetrics.Resource(), smetrics, ottlscope.WithCache(cache)) condition, err := s.BoolExpr.Eval(ctx, tCtx) if err != nil { return err @@ -152,12 +137,12 @@ func (s scopeStatements) ConsumeMetrics(ctx context.Context, md pmetric.Metrics) return nil } -func (s scopeStatements) ConsumeLogs(ctx context.Context, ld plog.Logs) error { +func (s scopeStatements) ConsumeLogs(ctx context.Context, ld 
plog.Logs, cache *pcommon.Map) error { for i := 0; i < ld.ResourceLogs().Len(); i++ { rlogs := ld.ResourceLogs().At(i) for j := 0; j < rlogs.ScopeLogs().Len(); j++ { slogs := rlogs.ScopeLogs().At(j) - tCtx := ottlscope.NewTransformContext(slogs.Scope(), rlogs.Resource(), slogs) + tCtx := ottlscope.NewTransformContext(slogs.Scope(), rlogs.Resource(), slogs, ottlscope.WithCache(cache)) condition, err := s.BoolExpr.Eval(ctx, tCtx) if err != nil { return err @@ -173,56 +158,86 @@ func (s scopeStatements) ConsumeLogs(ctx context.Context, ld plog.Logs) error { return nil } -type parserCollection struct { - settings component.TelemetrySettings - resourceParser ottl.Parser[ottlresource.TransformContext] - scopeParser ottl.Parser[ottlscope.TransformContext] - errorMode ottl.ErrorMode -} - type baseContext interface { - consumer.Traces - consumer.Metrics - consumer.Logs + TracesConsumer + MetricsConsumer + LogsConsumer } -func (pc parserCollection) parseCommonContextStatements(contextStatement ContextStatements) (baseContext, error) { - switch contextStatement.Context { - case Resource: - parsedStatements, err := pc.resourceParser.ParseStatements(contextStatement.Statements) +func withCommonContextParsers[R any]() ottl.ParserCollectionOption[R] { + return func(pc *ottl.ParserCollection[R]) error { + rp, err := ottlresource.NewParser(ResourceFunctions(), pc.Settings, ottlresource.EnablePathContextNames()) if err != nil { - return nil, err + return err } - globalExpr, errGlobalBoolExpr := parseGlobalExpr(filterottl.NewBoolExprForResource, contextStatement.Conditions, pc, filterottl.StandardResourceFuncs()) - if errGlobalBoolExpr != nil { - return nil, errGlobalBoolExpr + sp, err := ottlscope.NewParser(ScopeFunctions(), pc.Settings, ottlscope.EnablePathContextNames()) + if err != nil { + return err } - rStatements := ottlresource.NewStatementSequence(parsedStatements, pc.settings, ottlresource.WithStatementSequenceErrorMode(pc.errorMode)) - return 
resourceStatements{rStatements, globalExpr}, nil - case Scope: - parsedStatements, err := pc.scopeParser.ParseStatements(contextStatement.Statements) + + err = ottl.WithParserCollectionContext[ottlresource.TransformContext, R](ottlresource.ContextName, &rp, parseResourceContextStatements)(pc) if err != nil { - return nil, err + return err } - globalExpr, errGlobalBoolExpr := parseGlobalExpr(filterottl.NewBoolExprForScope, contextStatement.Conditions, pc, filterottl.StandardScopeFuncs()) - if errGlobalBoolExpr != nil { - return nil, errGlobalBoolExpr + + err = ottl.WithParserCollectionContext[ottlscope.TransformContext, R](ottlscope.ContextName, &sp, parseScopeContextStatements)(pc) + if err != nil { + return err } - sStatements := ottlscope.NewStatementSequence(parsedStatements, pc.settings, ottlscope.WithStatementSequenceErrorMode(pc.errorMode)) - return scopeStatements{sStatements, globalExpr}, nil - default: - return nil, fmt.Errorf("unknown context %v", contextStatement.Context) + + return nil + } +} + +func parseResourceContextStatements[R any]( + pc *ottl.ParserCollection[R], + _ *ottl.Parser[ottlresource.TransformContext], + _ string, + statements ottl.StatementsGetter, + parsedStatements []*ottl.Statement[ottlresource.TransformContext], +) (R, error) { + contextStatements, err := toContextStatements(statements) + if err != nil { + return *new(R), err + } + globalExpr, errGlobalBoolExpr := parseGlobalExpr(filterottl.NewBoolExprForResource, contextStatements.Conditions, pc.ErrorMode, pc.Settings, filterottl.StandardResourceFuncs()) + if errGlobalBoolExpr != nil { + return *new(R), errGlobalBoolExpr + } + rStatements := ottlresource.NewStatementSequence(parsedStatements, pc.Settings, ottlresource.WithStatementSequenceErrorMode(pc.ErrorMode)) + result := (baseContext)(resourceStatements{rStatements, globalExpr}) + return result.(R), nil +} + +func parseScopeContextStatements[R any]( + pc *ottl.ParserCollection[R], + _ *ottl.Parser[ottlscope.TransformContext], + 
_ string, + statements ottl.StatementsGetter, + parsedStatements []*ottl.Statement[ottlscope.TransformContext], +) (R, error) { + contextStatements, err := toContextStatements(statements) + if err != nil { + return *new(R), err + } + globalExpr, errGlobalBoolExpr := parseGlobalExpr(filterottl.NewBoolExprForScope, contextStatements.Conditions, pc.ErrorMode, pc.Settings, filterottl.StandardScopeFuncs()) + if errGlobalBoolExpr != nil { + return *new(R), errGlobalBoolExpr } + sStatements := ottlscope.NewStatementSequence(parsedStatements, pc.Settings, ottlscope.WithStatementSequenceErrorMode(pc.ErrorMode)) + result := (baseContext)(scopeStatements{sStatements, globalExpr}) + return result.(R), nil } func parseGlobalExpr[K any]( boolExprFunc func([]string, map[string]ottl.Factory[K], ottl.ErrorMode, component.TelemetrySettings) (*ottl.ConditionSequence[K], error), conditions []string, - pc parserCollection, + errorMode ottl.ErrorMode, + settings component.TelemetrySettings, standardFuncs map[string]ottl.Factory[K], ) (expr.BoolExpr[K], error) { if len(conditions) > 0 { - return boolExprFunc(conditions, standardFuncs, pc.errorMode, pc.settings) + return boolExprFunc(conditions, standardFuncs, errorMode, settings) } // By default, set the global expression to always true unless conditions are specified. 
return expr.AlwaysTrue[K](), nil diff --git a/processor/transformprocessor/internal/common/traces.go b/processor/transformprocessor/internal/common/traces.go index de03b8afe917..4b3dd117b1f0 100644 --- a/processor/transformprocessor/internal/common/traces.go +++ b/processor/transformprocessor/internal/common/traces.go @@ -7,39 +7,38 @@ import ( "context" "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/ptrace" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/expr" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterottl" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl" - "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlresource" - "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlscope" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlspan" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlspanevent" ) -var _ consumer.Traces = &traceStatements{} +type TracesConsumer interface { + Context() ContextID + ConsumeTraces(ctx context.Context, td ptrace.Traces, cache *pcommon.Map) error +} type traceStatements struct { ottl.StatementSequence[ottlspan.TransformContext] expr.BoolExpr[ottlspan.TransformContext] } -func (t traceStatements) Capabilities() consumer.Capabilities { - return consumer.Capabilities{ - MutatesData: true, - } +func (t traceStatements) Context() ContextID { + return Span } -func (t traceStatements) ConsumeTraces(ctx context.Context, td ptrace.Traces) error { +func (t traceStatements) ConsumeTraces(ctx context.Context, td ptrace.Traces, cache *pcommon.Map) error { for i := 0; i < td.ResourceSpans().Len(); i++ { rspans := td.ResourceSpans().At(i) for j := 0; j < rspans.ScopeSpans().Len(); j++ { sspans := rspans.ScopeSpans().At(j) spans := 
sspans.Spans() for k := 0; k < spans.Len(); k++ { - tCtx := ottlspan.NewTransformContext(spans.At(k), sspans.Scope(), rspans.Resource(), sspans, rspans) + tCtx := ottlspan.NewTransformContext(spans.At(k), sspans.Scope(), rspans.Resource(), sspans, rspans, ottlspan.WithCache(cache)) condition, err := t.BoolExpr.Eval(ctx, tCtx) if err != nil { return err @@ -56,20 +55,16 @@ func (t traceStatements) ConsumeTraces(ctx context.Context, td ptrace.Traces) er return nil } -var _ consumer.Traces = &spanEventStatements{} - type spanEventStatements struct { ottl.StatementSequence[ottlspanevent.TransformContext] expr.BoolExpr[ottlspanevent.TransformContext] } -func (s spanEventStatements) Capabilities() consumer.Capabilities { - return consumer.Capabilities{ - MutatesData: true, - } +func (s spanEventStatements) Context() ContextID { + return SpanEvent } -func (s spanEventStatements) ConsumeTraces(ctx context.Context, td ptrace.Traces) error { +func (s spanEventStatements) ConsumeTraces(ctx context.Context, td ptrace.Traces, cache *pcommon.Map) error { for i := 0; i < td.ResourceSpans().Len(); i++ { rspans := td.ResourceSpans().At(i) for j := 0; j < rspans.ScopeSpans().Len(); j++ { @@ -79,7 +74,7 @@ func (s spanEventStatements) ConsumeTraces(ctx context.Context, td ptrace.Traces span := spans.At(k) spanEvents := span.Events() for n := 0; n < spanEvents.Len(); n++ { - tCtx := ottlspanevent.NewTransformContext(spanEvents.At(n), span, sspans.Scope(), rspans.Resource(), sspans, rspans) + tCtx := ottlspanevent.NewTransformContext(spanEvents.At(n), span, sspans.Scope(), rspans.Resource(), sspans, rspans, ottlspanevent.WithCache(cache)) condition, err := s.BoolExpr.Eval(ctx, tCtx) if err != nil { return err @@ -97,95 +92,82 @@ func (s spanEventStatements) ConsumeTraces(ctx context.Context, td ptrace.Traces return nil } -type TraceParserCollection struct { - parserCollection - spanParser ottl.Parser[ottlspan.TransformContext] - spanEventParser 
ottl.Parser[ottlspanevent.TransformContext] -} +type TraceParserCollection ottl.ParserCollection[TracesConsumer] -type TraceParserCollectionOption func(*TraceParserCollection) error +type TraceParserCollectionOption ottl.ParserCollectionOption[TracesConsumer] func WithSpanParser(functions map[string]ottl.Factory[ottlspan.TransformContext]) TraceParserCollectionOption { - return func(tp *TraceParserCollection) error { - spanParser, err := ottlspan.NewParser(functions, tp.settings) + return func(pc *ottl.ParserCollection[TracesConsumer]) error { + parser, err := ottlspan.NewParser(functions, pc.Settings, ottlspan.EnablePathContextNames()) if err != nil { return err } - tp.spanParser = spanParser - return nil + return ottl.WithParserCollectionContext(ottlspan.ContextName, &parser, convertSpanStatements)(pc) } } func WithSpanEventParser(functions map[string]ottl.Factory[ottlspanevent.TransformContext]) TraceParserCollectionOption { - return func(tp *TraceParserCollection) error { - spanEventParser, err := ottlspanevent.NewParser(functions, tp.settings) + return func(pc *ottl.ParserCollection[TracesConsumer]) error { + parser, err := ottlspanevent.NewParser(functions, pc.Settings, ottlspanevent.EnablePathContextNames()) if err != nil { return err } - tp.spanEventParser = spanEventParser - return nil + return ottl.WithParserCollectionContext(ottlspanevent.ContextName, &parser, convertSpanEventStatements)(pc) } } func WithTraceErrorMode(errorMode ottl.ErrorMode) TraceParserCollectionOption { - return func(tp *TraceParserCollection) error { - tp.errorMode = errorMode - return nil - } + return TraceParserCollectionOption(ottl.WithParserCollectionErrorMode[TracesConsumer](errorMode)) } func NewTraceParserCollection(settings component.TelemetrySettings, options ...TraceParserCollectionOption) (*TraceParserCollection, error) { - rp, err := ottlresource.NewParser(ResourceFunctions(), settings) + pcOptions := []ottl.ParserCollectionOption[TracesConsumer]{ + 
withCommonContextParsers[TracesConsumer](), + } + + for _, option := range options { + pcOptions = append(pcOptions, ottl.ParserCollectionOption[TracesConsumer](option)) + } + + pc, err := ottl.NewParserCollection(settings, pcOptions...) if err != nil { return nil, err } - sp, err := ottlscope.NewParser(ScopeFunctions(), settings) + + tpc := TraceParserCollection(*pc) + return &tpc, nil +} + +func convertSpanStatements(pc *ottl.ParserCollection[TracesConsumer], _ *ottl.Parser[ottlspan.TransformContext], _ string, statements ottl.StatementsGetter, parsedStatements []*ottl.Statement[ottlspan.TransformContext]) (TracesConsumer, error) { + contextStatements, err := toContextStatements(statements) if err != nil { return nil, err } - tpc := &TraceParserCollection{ - parserCollection: parserCollection{ - settings: settings, - resourceParser: rp, - scopeParser: sp, - }, + globalExpr, errGlobalBoolExpr := parseGlobalExpr(filterottl.NewBoolExprForSpan, contextStatements.Conditions, pc.ErrorMode, pc.Settings, filterottl.StandardSpanFuncs()) + if errGlobalBoolExpr != nil { + return nil, errGlobalBoolExpr } + sStatements := ottlspan.NewStatementSequence(parsedStatements, pc.Settings, ottlspan.WithStatementSequenceErrorMode(pc.ErrorMode)) + return traceStatements{sStatements, globalExpr}, nil +} - for _, op := range options { - err := op(tpc) - if err != nil { - return nil, err - } +func convertSpanEventStatements(pc *ottl.ParserCollection[TracesConsumer], _ *ottl.Parser[ottlspanevent.TransformContext], _ string, statements ottl.StatementsGetter, parsedStatements []*ottl.Statement[ottlspanevent.TransformContext]) (TracesConsumer, error) { + contextStatements, err := toContextStatements(statements) + if err != nil { + return nil, err } - - return tpc, nil + globalExpr, errGlobalBoolExpr := parseGlobalExpr(filterottl.NewBoolExprForSpanEvent, contextStatements.Conditions, pc.ErrorMode, pc.Settings, filterottl.StandardSpanEventFuncs()) + if errGlobalBoolExpr != nil { + return nil, 
errGlobalBoolExpr + } + seStatements := ottlspanevent.NewStatementSequence(parsedStatements, pc.Settings, ottlspanevent.WithStatementSequenceErrorMode(pc.ErrorMode)) + return spanEventStatements{seStatements, globalExpr}, nil } -func (pc TraceParserCollection) ParseContextStatements(contextStatements ContextStatements) (consumer.Traces, error) { - switch contextStatements.Context { - case Span: - parsedStatements, err := pc.spanParser.ParseStatements(contextStatements.Statements) - if err != nil { - return nil, err - } - globalExpr, errGlobalBoolExpr := parseGlobalExpr(filterottl.NewBoolExprForSpan, contextStatements.Conditions, pc.parserCollection, filterottl.StandardSpanFuncs()) - if errGlobalBoolExpr != nil { - return nil, errGlobalBoolExpr - } - sStatements := ottlspan.NewStatementSequence(parsedStatements, pc.settings, ottlspan.WithStatementSequenceErrorMode(pc.errorMode)) - return traceStatements{sStatements, globalExpr}, nil - case SpanEvent: - parsedStatements, err := pc.spanEventParser.ParseStatements(contextStatements.Statements) - if err != nil { - return nil, err - } - globalExpr, errGlobalBoolExpr := parseGlobalExpr(filterottl.NewBoolExprForSpanEvent, contextStatements.Conditions, pc.parserCollection, filterottl.StandardSpanEventFuncs()) - if errGlobalBoolExpr != nil { - return nil, errGlobalBoolExpr - } - seStatements := ottlspanevent.NewStatementSequence(parsedStatements, pc.settings, ottlspanevent.WithStatementSequenceErrorMode(pc.errorMode)) - return spanEventStatements{seStatements, globalExpr}, nil - default: - return pc.parseCommonContextStatements(contextStatements) +func (tpc *TraceParserCollection) ParseContextStatements(contextStatements ContextStatements) (TracesConsumer, error) { + pc := ottl.ParserCollection[TracesConsumer](*tpc) + if contextStatements.Context != "" { + return pc.ParseStatementsWithContext(string(contextStatements.Context), contextStatements, true) } + return pc.ParseStatements(contextStatements) } diff --git 
a/processor/transformprocessor/internal/logs/processor.go b/processor/transformprocessor/internal/logs/processor.go index e2b184f3c8d7..23037fe847ba 100644 --- a/processor/transformprocessor/internal/logs/processor.go +++ b/processor/transformprocessor/internal/logs/processor.go @@ -7,7 +7,6 @@ import ( "context" "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/pdata/plog" "go.uber.org/multierr" "go.uber.org/zap" @@ -18,7 +17,7 @@ import ( ) type Processor struct { - contexts []consumer.Logs + contexts []common.LogsConsumer logger *zap.Logger flatMode bool } @@ -29,7 +28,7 @@ func NewProcessor(contextStatements []common.ContextStatements, errorMode ottl.E return nil, err } - contexts := make([]consumer.Logs, len(contextStatements)) + contexts := make([]common.LogsConsumer, len(contextStatements)) var errors error for i, cs := range contextStatements { context, err := pc.ParseContextStatements(cs) @@ -56,7 +55,7 @@ func (p *Processor) ProcessLogs(ctx context.Context, ld plog.Logs) (plog.Logs, e defer pdatautil.GroupByResourceLogs(ld.ResourceLogs()) } for _, c := range p.contexts { - err := c.ConsumeLogs(ctx, ld) + err := c.ConsumeLogs(ctx, ld, nil) if err != nil { p.logger.Error("failed processing logs", zap.Error(err)) return ld, err diff --git a/processor/transformprocessor/internal/logs/processor_test.go b/processor/transformprocessor/internal/logs/processor_test.go index 448328138c21..f3aee564b923 100644 --- a/processor/transformprocessor/internal/logs/processor_test.go +++ b/processor/transformprocessor/internal/logs/processor_test.go @@ -69,6 +69,47 @@ func Test_ProcessLogs_ResourceContext(t *testing.T) { } } +func Test_ProcessLogs_InferredResourceContext(t *testing.T) { + tests := []struct { + statement string + want func(td plog.Logs) + }{ + { + statement: `set(resource.attributes["test"], "pass")`, + want: func(td plog.Logs) { + td.ResourceLogs().At(0).Resource().Attributes().PutStr("test", 
"pass") + }, + }, + { + statement: `set(resource.attributes["test"], "pass") where resource.attributes["host.name"] == "wrong"`, + want: func(_ plog.Logs) { + }, + }, + { + statement: `set(resource.schema_url, "test_schema_url")`, + want: func(td plog.Logs) { + td.ResourceLogs().At(0).SetSchemaUrl("test_schema_url") + }, + }, + } + + for _, tt := range tests { + t.Run(tt.statement, func(t *testing.T) { + td := constructLogs() + processor, err := NewProcessor([]common.ContextStatements{{Context: "", Statements: []string{tt.statement}}}, ottl.IgnoreError, false, componenttest.NewNopTelemetrySettings()) + assert.NoError(t, err) + + _, err = processor.ProcessLogs(context.Background(), td) + assert.NoError(t, err) + + exTd := constructLogs() + tt.want(exTd) + + assert.Equal(t, exTd, td) + }) + } +} + func Test_ProcessLogs_ScopeContext(t *testing.T) { tests := []struct { statement string @@ -110,6 +151,47 @@ func Test_ProcessLogs_ScopeContext(t *testing.T) { } } +func Test_ProcessLogs_InferredScopeContext(t *testing.T) { + tests := []struct { + statement string + want func(td plog.Logs) + }{ + { + statement: `set(scope.attributes["test"], "pass") where scope.name == "scope"`, + want: func(td plog.Logs) { + td.ResourceLogs().At(0).ScopeLogs().At(0).Scope().Attributes().PutStr("test", "pass") + }, + }, + { + statement: `set(scope.attributes["test"], "pass") where scope.version == 2`, + want: func(_ plog.Logs) { + }, + }, + { + statement: `set(scope.schema_url, "test_schema_url")`, + want: func(td plog.Logs) { + td.ResourceLogs().At(0).ScopeLogs().At(0).SetSchemaUrl("test_schema_url") + }, + }, + } + + for _, tt := range tests { + t.Run(tt.statement, func(t *testing.T) { + td := constructLogs() + processor, err := NewProcessor([]common.ContextStatements{{Context: "", Statements: []string{tt.statement}}}, ottl.IgnoreError, false, componenttest.NewNopTelemetrySettings()) + assert.NoError(t, err) + + _, err = processor.ProcessLogs(context.Background(), td) + assert.NoError(t, 
err) + + exTd := constructLogs() + tt.want(exTd) + + assert.Equal(t, exTd, td) + }) + } +} + func Test_ProcessLogs_LogContext(t *testing.T) { tests := []struct { statement string @@ -364,6 +446,260 @@ func Test_ProcessLogs_LogContext(t *testing.T) { } } +func Test_ProcessLogs_InferredLogContext(t *testing.T) { + tests := []struct { + statement string + want func(td plog.Logs) + }{ + { + statement: `set(log.attributes["test"], "pass") where log.body == "operationA"`, + want: func(td plog.Logs) { + td.ResourceLogs().At(0).ScopeLogs().At(0).LogRecords().At(0).Attributes().PutStr("test", "pass") + }, + }, + { + statement: `set(log.attributes["test"], "pass") where resource.attributes["host.name"] == "localhost"`, + want: func(td plog.Logs) { + td.ResourceLogs().At(0).ScopeLogs().At(0).LogRecords().At(0).Attributes().PutStr("test", "pass") + td.ResourceLogs().At(0).ScopeLogs().At(0).LogRecords().At(1).Attributes().PutStr("test", "pass") + }, + }, + { + statement: `keep_keys(log.attributes, ["http.method"]) where log.body == "operationA"`, + want: func(td plog.Logs) { + td.ResourceLogs().At(0).ScopeLogs().At(0).LogRecords().At(0).Attributes().Clear() + td.ResourceLogs().At(0).ScopeLogs().At(0).LogRecords().At(0).Attributes().PutStr("http.method", + "get") + }, + }, + { + statement: `set(log.severity_text, "ok") where log.attributes["http.path"] == "/health"`, + want: func(td plog.Logs) { + td.ResourceLogs().At(0).ScopeLogs().At(0).LogRecords().At(0).SetSeverityText("ok") + td.ResourceLogs().At(0).ScopeLogs().At(0).LogRecords().At(1).SetSeverityText("ok") + }, + }, + { + statement: `replace_pattern(log.attributes["http.method"], "get", "post")`, + want: func(td plog.Logs) { + td.ResourceLogs().At(0).ScopeLogs().At(0).LogRecords().At(0).Attributes().PutStr("http.method", "post") + td.ResourceLogs().At(0).ScopeLogs().At(0).LogRecords().At(1).Attributes().PutStr("http.method", "post") + }, + }, + { + statement: `replace_all_patterns(log.attributes, "value", "get", "post")`, 
+ want: func(td plog.Logs) { + td.ResourceLogs().At(0).ScopeLogs().At(0).LogRecords().At(0).Attributes().PutStr("http.method", "post") + td.ResourceLogs().At(0).ScopeLogs().At(0).LogRecords().At(1).Attributes().PutStr("http.method", "post") + }, + }, + { + statement: `replace_all_patterns(log.attributes, "key", "http.url", "url")`, + want: func(td plog.Logs) { + td.ResourceLogs().At(0).ScopeLogs().At(0).LogRecords().At(0).Attributes().Clear() + td.ResourceLogs().At(0).ScopeLogs().At(0).LogRecords().At(0).Attributes().PutStr("http.method", "get") + td.ResourceLogs().At(0).ScopeLogs().At(0).LogRecords().At(0).Attributes().PutStr("http.path", "/health") + td.ResourceLogs().At(0).ScopeLogs().At(0).LogRecords().At(0).Attributes().PutStr("url", "http://localhost/health") + td.ResourceLogs().At(0).ScopeLogs().At(0).LogRecords().At(0).Attributes().PutStr("flags", "A|B|C") + td.ResourceLogs().At(0).ScopeLogs().At(0).LogRecords().At(0).Attributes().PutStr("total.string", "123456789") + td.ResourceLogs().At(0).ScopeLogs().At(0).LogRecords().At(1).Attributes().Clear() + td.ResourceLogs().At(0).ScopeLogs().At(0).LogRecords().At(1).Attributes().PutStr("http.method", "get") + td.ResourceLogs().At(0).ScopeLogs().At(0).LogRecords().At(1).Attributes().PutStr("http.path", "/health") + td.ResourceLogs().At(0).ScopeLogs().At(0).LogRecords().At(1).Attributes().PutStr("url", "http://localhost/health") + td.ResourceLogs().At(0).ScopeLogs().At(0).LogRecords().At(1).Attributes().PutStr("flags", "C|D") + td.ResourceLogs().At(0).ScopeLogs().At(0).LogRecords().At(1).Attributes().PutStr("total.string", "345678") + }, + }, + { + statement: `set(log.attributes["test"], "pass") where log.dropped_attributes_count == 1`, + want: func(td plog.Logs) { + td.ResourceLogs().At(0).ScopeLogs().At(0).LogRecords().At(0).Attributes().PutStr("test", "pass") + }, + }, + { + statement: `set(log.attributes["test"], "pass") where log.flags == 1`, + want: func(td plog.Logs) { + 
td.ResourceLogs().At(0).ScopeLogs().At(0).LogRecords().At(0).Attributes().PutStr("test", "pass") + }, + }, + { + statement: `set(log.attributes["test"], "pass") where log.severity_number == SEVERITY_NUMBER_TRACE`, + want: func(td plog.Logs) { + td.ResourceLogs().At(0).ScopeLogs().At(0).LogRecords().At(0).Attributes().PutStr("test", "pass") + }, + }, + { + statement: `set(log.severity_number, SEVERITY_NUMBER_TRACE2) where log.severity_number == 1`, + want: func(td plog.Logs) { + td.ResourceLogs().At(0).ScopeLogs().At(0).LogRecords().At(0).SetSeverityNumber(2) + }, + }, + { + statement: `set(log.attributes["test"], "pass") where log.trace_id == TraceID(0x0102030405060708090a0b0c0d0e0f10)`, + want: func(td plog.Logs) { + td.ResourceLogs().At(0).ScopeLogs().At(0).LogRecords().At(0).Attributes().PutStr("test", "pass") + }, + }, + { + statement: `set(log.attributes["test"], "pass") where log.span_id == SpanID(0x0102030405060708)`, + want: func(td plog.Logs) { + td.ResourceLogs().At(0).ScopeLogs().At(0).LogRecords().At(0).Attributes().PutStr("test", "pass") + }, + }, + { + statement: `set(log.attributes["test"], "pass") where IsMatch(log.body, "operation[AC]")`, + want: func(td plog.Logs) { + td.ResourceLogs().At(0).ScopeLogs().At(0).LogRecords().At(0).Attributes().PutStr("test", "pass") + }, + }, + { + statement: `delete_key(log.attributes, "http.url") where log.body == "operationA"`, + want: func(td plog.Logs) { + td.ResourceLogs().At(0).ScopeLogs().At(0).LogRecords().At(0).Attributes().Clear() + td.ResourceLogs().At(0).ScopeLogs().At(0).LogRecords().At(0).Attributes().PutStr("http.method", + "get") + td.ResourceLogs().At(0).ScopeLogs().At(0).LogRecords().At(0).Attributes().PutStr("http.path", + "/health") + td.ResourceLogs().At(0).ScopeLogs().At(0).LogRecords().At(0).Attributes().PutStr("total.string", + "123456789") + td.ResourceLogs().At(0).ScopeLogs().At(0).LogRecords().At(0).Attributes().PutStr("flags", + "A|B|C") + }, + }, + { + statement: 
`delete_matching_keys(log.attributes, "http.*t.*") where log.body == "operationA"`, + want: func(td plog.Logs) { + td.ResourceLogs().At(0).ScopeLogs().At(0).LogRecords().At(0).Attributes().Clear() + td.ResourceLogs().At(0).ScopeLogs().At(0).LogRecords().At(0).Attributes().PutStr("http.url", + "http://localhost/health") + td.ResourceLogs().At(0).ScopeLogs().At(0).LogRecords().At(0).Attributes().PutStr("flags", + "A|B|C") + td.ResourceLogs().At(0).ScopeLogs().At(0).LogRecords().At(0).Attributes().PutStr("total.string", + "123456789") + }, + }, + { + statement: `set(log.attributes["test"], Concat([log.attributes["http.method"], log.attributes["http.url"]], ": ")) where log.body == Concat(["operation", "A"], "")`, + want: func(td plog.Logs) { + td.ResourceLogs().At(0).ScopeLogs().At(0).LogRecords().At(0).Attributes().PutStr("test", "get: http://localhost/health") + }, + }, + { + statement: `set(log.attributes["test"], Split(log.attributes["flags"], "|"))`, + want: func(td plog.Logs) { + v1 := td.ResourceLogs().At(0).ScopeLogs().At(0).LogRecords().At(0).Attributes().PutEmptySlice("test") + v1.AppendEmpty().SetStr("A") + v1.AppendEmpty().SetStr("B") + v1.AppendEmpty().SetStr("C") + v2 := td.ResourceLogs().At(0).ScopeLogs().At(0).LogRecords().At(1).Attributes().PutEmptySlice("test") + v2.AppendEmpty().SetStr("C") + v2.AppendEmpty().SetStr("D") + }, + }, + { + statement: `set(log.attributes["test"], Split(log.attributes["flags"], "|")) where log.body == "operationA"`, + want: func(td plog.Logs) { + newValue := td.ResourceLogs().At(0).ScopeLogs().At(0).LogRecords().At(0).Attributes().PutEmptySlice("test") + newValue.AppendEmpty().SetStr("A") + newValue.AppendEmpty().SetStr("B") + newValue.AppendEmpty().SetStr("C") + }, + }, + { + statement: `set(log.attributes["test"], Split(log.attributes["not_exist"], "|"))`, + want: func(_ plog.Logs) {}, + }, + { + statement: `set(log.attributes["test"], Substring(log.attributes["total.string"], 3, 3))`, + want: func(td plog.Logs) { + 
td.ResourceLogs().At(0).ScopeLogs().At(0).LogRecords().At(0).Attributes().PutStr("test", "456") + td.ResourceLogs().At(0).ScopeLogs().At(0).LogRecords().At(1).Attributes().PutStr("test", "678") + }, + }, + { + statement: `set(log.attributes["test"], Substring(log.attributes["total.string"], 3, 3)) where log.body == "operationA"`, + want: func(td plog.Logs) { + td.ResourceLogs().At(0).ScopeLogs().At(0).LogRecords().At(0).Attributes().PutStr("test", "456") + }, + }, + { + statement: `set(log.attributes["test"], Substring(log.attributes["not_exist"], 3, 3))`, + want: func(_ plog.Logs) {}, + }, + { + statement: `set(log.attributes["test"], ["A", "B", "C"]) where log.body == "operationA"`, + want: func(td plog.Logs) { + v1 := td.ResourceLogs().At(0).ScopeLogs().At(0).LogRecords().At(0).Attributes().PutEmptySlice("test") + v1.AppendEmpty().SetStr("A") + v1.AppendEmpty().SetStr("B") + v1.AppendEmpty().SetStr("C") + }, + }, + { + statement: `set(log.attributes["test"], ConvertCase(log.body, "lower")) where log.body == "operationA"`, + want: func(td plog.Logs) { + td.ResourceLogs().At(0).ScopeLogs().At(0).LogRecords().At(0).Attributes().PutStr("test", "operationa") + }, + }, + { + statement: `set(log.attributes["test"], ConvertCase(log.body, "upper")) where log.body == "operationA"`, + want: func(td plog.Logs) { + td.ResourceLogs().At(0).ScopeLogs().At(0).LogRecords().At(0).Attributes().PutStr("test", "OPERATIONA") + }, + }, + { + statement: `set(log.attributes["test"], ConvertCase(log.body, "snake")) where log.body == "operationA"`, + want: func(td plog.Logs) { + td.ResourceLogs().At(0).ScopeLogs().At(0).LogRecords().At(0).Attributes().PutStr("test", "operation_a") + }, + }, + { + statement: `set(log.attributes["test"], ConvertCase(log.body, "camel")) where log.body == "operationA"`, + want: func(td plog.Logs) { + td.ResourceLogs().At(0).ScopeLogs().At(0).LogRecords().At(0).Attributes().PutStr("test", "OperationA") + }, + }, + { + statement: `merge_maps(log.attributes, 
ParseJSON("{\"json_test\":\"pass\"}"), "insert") where log.body == "operationA"`, + want: func(td plog.Logs) { + td.ResourceLogs().At(0).ScopeLogs().At(0).LogRecords().At(0).Attributes().PutStr("json_test", "pass") + }, + }, + { + statement: `limit(log.attributes, 0, []) where log.body == "operationA"`, + want: func(td plog.Logs) { + td.ResourceLogs().At(0).ScopeLogs().At(0).LogRecords().At(0).Attributes().RemoveIf(func(_ string, _ pcommon.Value) bool { return true }) + }, + }, + { + statement: `set(log.attributes["test"], Log(1)) where log.body == "operationA"`, + want: func(td plog.Logs) { + td.ResourceLogs().At(0).ScopeLogs().At(0).LogRecords().At(0).Attributes().PutDouble("test", 0.0) + }, + }, + { + statement: `replace_match(log.body["metadata"]["uid"], "*", "12345")`, + want: func(_ plog.Logs) {}, + }, + } + + for _, tt := range tests { + t.Run(tt.statement, func(t *testing.T) { + td := constructLogs() + processor, err := NewProcessor([]common.ContextStatements{{Context: "", Statements: []string{tt.statement}}}, ottl.IgnoreError, false, componenttest.NewNopTelemetrySettings()) + assert.NoError(t, err) + + _, err = processor.ProcessLogs(context.Background(), td) + assert.NoError(t, err) + + exTd := constructLogs() + tt.want(exTd) + + assert.Equal(t, exTd, td) + }) + } +} + func Test_ProcessLogs_MixContext(t *testing.T) { tests := []struct { name string @@ -481,7 +817,97 @@ func Test_ProcessLogs_MixContext(t *testing.T) { } } -func Test_ProcessTraces_Error(t *testing.T) { +func Test_ProcessLogs_InferredMixContext(t *testing.T) { + tests := []struct { + name string + contextStatements []common.ContextStatements + want func(td plog.Logs) + }{ + { + name: "set resource and then use", + contextStatements: []common.ContextStatements{ + { + Statements: []string{`set(resource.attributes["test"], "pass")`}, + }, + { + Statements: []string{`set(log.attributes["test"], "pass") where resource.attributes["test"] == "pass"`}, + }, + }, + want: func(td plog.Logs) { + 
td.ResourceLogs().At(0).Resource().Attributes().PutStr("test", "pass") + td.ResourceLogs().At(0).ScopeLogs().At(0).LogRecords().At(0).Attributes().PutStr("test", "pass") + td.ResourceLogs().At(0).ScopeLogs().At(0).LogRecords().At(1).Attributes().PutStr("test", "pass") + }, + }, + { + name: "set scope and then use", + contextStatements: []common.ContextStatements{ + { + Statements: []string{`set(scope.attributes["test"], "pass")`}, + }, + { + Statements: []string{`set(log.attributes["test"], "pass") where instrumentation_scope.attributes["test"] == "pass"`}, + }, + }, + want: func(td plog.Logs) { + td.ResourceLogs().At(0).ScopeLogs().At(0).Scope().Attributes().PutStr("test", "pass") + td.ResourceLogs().At(0).ScopeLogs().At(0).LogRecords().At(0).Attributes().PutStr("test", "pass") + td.ResourceLogs().At(0).ScopeLogs().At(0).LogRecords().At(1).Attributes().PutStr("test", "pass") + }, + }, + { + name: "order matters", + contextStatements: []common.ContextStatements{ + { + Statements: []string{`set(log.attributes["test"], "pass") where instrumentation_scope.attributes["test"] == "pass"`}, + }, + { + Statements: []string{`set(scope.attributes["test"], "pass")`}, + }, + }, + want: func(td plog.Logs) { + td.ResourceLogs().At(0).ScopeLogs().At(0).Scope().Attributes().PutStr("test", "pass") + }, + }, + { + name: "reuse context", + contextStatements: []common.ContextStatements{ + { + Statements: []string{`set(scope.attributes["test"], "pass")`}, + }, + { + Statements: []string{`set(log.attributes["test"], "pass") where instrumentation_scope.attributes["test"] == "pass"`}, + }, + { + Statements: []string{`set(scope.attributes["test"], "fail")`}, + }, + }, + want: func(td plog.Logs) { + td.ResourceLogs().At(0).ScopeLogs().At(0).Scope().Attributes().PutStr("test", "fail") + td.ResourceLogs().At(0).ScopeLogs().At(0).LogRecords().At(0).Attributes().PutStr("test", "pass") + td.ResourceLogs().At(0).ScopeLogs().At(0).LogRecords().At(1).Attributes().PutStr("test", "pass") + }, + }, + 
} + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + td := constructLogs() + processor, err := NewProcessor(tt.contextStatements, ottl.IgnoreError, false, componenttest.NewNopTelemetrySettings()) + assert.NoError(t, err) + + _, err = processor.ProcessLogs(context.Background(), td) + assert.NoError(t, err) + + exTd := constructLogs() + tt.want(exTd) + + assert.Equal(t, exTd, td) + }) + } +} + +func Test_ProcessLogs_ErrorMode(t *testing.T) { tests := []struct { statement string context common.ContextID diff --git a/processor/transformprocessor/internal/metrics/processor.go b/processor/transformprocessor/internal/metrics/processor.go index 135b1bcbad59..cc134d3d138f 100644 --- a/processor/transformprocessor/internal/metrics/processor.go +++ b/processor/transformprocessor/internal/metrics/processor.go @@ -7,7 +7,6 @@ import ( "context" "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/pdata/pmetric" "go.uber.org/multierr" "go.uber.org/zap" @@ -17,7 +16,7 @@ import ( ) type Processor struct { - contexts []consumer.Metrics + contexts []common.MetricsConsumer logger *zap.Logger } @@ -27,7 +26,7 @@ func NewProcessor(contextStatements []common.ContextStatements, errorMode ottl.E return nil, err } - contexts := make([]consumer.Metrics, len(contextStatements)) + contexts := make([]common.MetricsConsumer, len(contextStatements)) var errors error for i, cs := range contextStatements { context, err := pc.ParseContextStatements(cs) @@ -49,7 +48,7 @@ func NewProcessor(contextStatements []common.ContextStatements, errorMode ottl.E func (p *Processor) ProcessMetrics(ctx context.Context, md pmetric.Metrics) (pmetric.Metrics, error) { for _, c := range p.contexts { - err := c.ConsumeMetrics(ctx, md) + err := c.ConsumeMetrics(ctx, md, nil) if err != nil { p.logger.Error("failed processing metrics", zap.Error(err)) return md, err diff --git 
a/processor/transformprocessor/internal/metrics/processor_test.go b/processor/transformprocessor/internal/metrics/processor_test.go index 128a9d00ced0..10132713998b 100644 --- a/processor/transformprocessor/internal/metrics/processor_test.go +++ b/processor/transformprocessor/internal/metrics/processor_test.go @@ -65,6 +65,47 @@ func Test_ProcessMetrics_ResourceContext(t *testing.T) { } } +func Test_ProcessMetrics_InferredResourceContext(t *testing.T) { + tests := []struct { + statement string + want func(td pmetric.Metrics) + }{ + { + statement: `set(resource.attributes["test"], "pass")`, + want: func(td pmetric.Metrics) { + td.ResourceMetrics().At(0).Resource().Attributes().PutStr("test", "pass") + }, + }, + { + statement: `set(resource.attributes["test"], "pass") where resource.attributes["host.name"] == "wrong"`, + want: func(_ pmetric.Metrics) { + }, + }, + { + statement: `set(resource.schema_url, "test_schema_url")`, + want: func(td pmetric.Metrics) { + td.ResourceMetrics().At(0).SetSchemaUrl("test_schema_url") + }, + }, + } + + for _, tt := range tests { + t.Run(tt.statement, func(t *testing.T) { + td := constructMetrics() + processor, err := NewProcessor([]common.ContextStatements{{Context: "", Statements: []string{tt.statement}}}, ottl.IgnoreError, componenttest.NewNopTelemetrySettings()) + assert.NoError(t, err) + + _, err = processor.ProcessMetrics(context.Background(), td) + assert.NoError(t, err) + + exTd := constructMetrics() + tt.want(exTd) + + assert.Equal(t, exTd, td) + }) + } +} + func Test_ProcessMetrics_ScopeContext(t *testing.T) { tests := []struct { statement string @@ -106,6 +147,47 @@ func Test_ProcessMetrics_ScopeContext(t *testing.T) { } } +func Test_ProcessMetrics_InferredScopeContext(t *testing.T) { + tests := []struct { + statement string + want func(td pmetric.Metrics) + }{ + { + statement: `set(scope.attributes["test"], "pass") where scope.name == "scope"`, + want: func(td pmetric.Metrics) { + 
td.ResourceMetrics().At(0).ScopeMetrics().At(0).Scope().Attributes().PutStr("test", "pass") + }, + }, + { + statement: `set(scope.attributes["test"], "pass") where scope.version == 2`, + want: func(_ pmetric.Metrics) { + }, + }, + { + statement: `set(scope.schema_url, "test_schema_url")`, + want: func(td pmetric.Metrics) { + td.ResourceMetrics().At(0).ScopeMetrics().At(0).SetSchemaUrl("test_schema_url") + }, + }, + } + + for _, tt := range tests { + t.Run(tt.statement, func(t *testing.T) { + td := constructMetrics() + processor, err := NewProcessor([]common.ContextStatements{{Context: "", Statements: []string{tt.statement}}}, ottl.IgnoreError, componenttest.NewNopTelemetrySettings()) + assert.NoError(t, err) + + _, err = processor.ProcessMetrics(context.Background(), td) + assert.NoError(t, err) + + exTd := constructMetrics() + tt.want(exTd) + + assert.Equal(t, exTd, td) + }) + } +} + func Test_ProcessMetrics_MetricContext(t *testing.T) { tests := []struct { statements []string @@ -281,6 +363,186 @@ func Test_ProcessMetrics_MetricContext(t *testing.T) { } } +func Test_ProcessMetrics_InferredMetricContext(t *testing.T) { + tests := []struct { + statements []string + want func(pmetric.Metrics) + }{ + { + statements: []string{`extract_sum_metric(true) where metric.name == "operationB"`}, + want: func(td pmetric.Metrics) { + sumMetric := td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().AppendEmpty() + sumDp := sumMetric.SetEmptySum().DataPoints().AppendEmpty() + + histogramMetric := pmetric.NewMetric() + fillMetricTwo(histogramMetric) + histogramDp := histogramMetric.Histogram().DataPoints().At(0) + + sumMetric.SetDescription(histogramMetric.Description()) + sumMetric.SetName(histogramMetric.Name() + "_sum") + sumMetric.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) + sumMetric.Sum().SetIsMonotonic(true) + sumMetric.SetUnit(histogramMetric.Unit()) + + histogramDp.Attributes().CopyTo(sumDp.Attributes()) + 
sumDp.SetDoubleValue(histogramDp.Sum()) + sumDp.SetStartTimestamp(StartTimestamp) + + // we have two histogram datapoints, but only one of them has the Sum set + // so we should only have one Sum datapoint + }, + }, + { // this checks if subsequent statements apply to the newly created metric + statements: []string{ + `extract_sum_metric(true) where metric.name == "operationB"`, + `set(metric.name, "new_name") where metric.name == "operationB_sum"`, + }, + want: func(td pmetric.Metrics) { + sumMetric := td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().AppendEmpty() + sumDp := sumMetric.SetEmptySum().DataPoints().AppendEmpty() + + histogramMetric := pmetric.NewMetric() + fillMetricTwo(histogramMetric) + histogramDp := histogramMetric.Histogram().DataPoints().At(0) + + sumMetric.SetDescription(histogramMetric.Description()) + sumMetric.SetName("new_name") + sumMetric.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) + sumMetric.Sum().SetIsMonotonic(true) + sumMetric.SetUnit(histogramMetric.Unit()) + + histogramDp.Attributes().CopyTo(sumDp.Attributes()) + sumDp.SetDoubleValue(histogramDp.Sum()) + sumDp.SetStartTimestamp(StartTimestamp) + + // we have two histogram datapoints, but only one of them has the Sum set + // so we should only have one Sum datapoint + }, + }, + { + statements: []string{`extract_count_metric(true) where metric.name == "operationB"`}, + want: func(td pmetric.Metrics) { + countMetric := td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().AppendEmpty() + countMetric.SetEmptySum() + + histogramMetric := pmetric.NewMetric() + fillMetricTwo(histogramMetric) + + countMetric.SetDescription(histogramMetric.Description()) + countMetric.SetName(histogramMetric.Name() + "_count") + countMetric.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) + countMetric.Sum().SetIsMonotonic(true) + countMetric.SetUnit("1") + + histogramDp0 := histogramMetric.Histogram().DataPoints().At(0) + countDp0 := 
countMetric.Sum().DataPoints().AppendEmpty() + histogramDp0.Attributes().CopyTo(countDp0.Attributes()) + countDp0.SetIntValue(int64(histogramDp0.Count())) + countDp0.SetStartTimestamp(StartTimestamp) + + // we have two histogram datapoints + histogramDp1 := histogramMetric.Histogram().DataPoints().At(1) + countDp1 := countMetric.Sum().DataPoints().AppendEmpty() + histogramDp1.Attributes().CopyTo(countDp1.Attributes()) + countDp1.SetIntValue(int64(histogramDp1.Count())) + countDp1.SetStartTimestamp(StartTimestamp) + }, + }, + { + statements: []string{`copy_metric(name="http.request.status_code", unit="s") where metric.name == "operationA"`}, + want: func(td pmetric.Metrics) { + newMetric := td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().AppendEmpty() + td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).CopyTo(newMetric) + newMetric.SetName("http.request.status_code") + newMetric.SetUnit("s") + }, + }, + { + statements: []string{`scale_metric(10.0,"s") where metric.name == "operationA"`}, + want: func(td pmetric.Metrics) { + td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).Sum().DataPoints().At(0).SetDoubleValue(10.0) + td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).Sum().DataPoints().At(1).SetDoubleValue(37.0) + td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).SetUnit("s") + }, + }, + { + statements: []string{`scale_metric(10.0) where metric.name == "operationA"`}, + want: func(td pmetric.Metrics) { + td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).Sum().DataPoints().At(0).SetDoubleValue(10.0) + td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).Sum().DataPoints().At(1).SetDoubleValue(37.0) + }, + }, + { + statements: []string{`aggregate_on_attributes("sum", ["attr1", "attr2"]) where metric.name == "operationA"`}, + want: func(td pmetric.Metrics) { + m := td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0) + + dataPoints := 
pmetric.NewNumberDataPointSlice() + dataPoint1 := dataPoints.AppendEmpty() + dataPoint1.SetStartTimestamp(StartTimestamp) + dataPoint1.SetDoubleValue(4.7) + dataPoint1.Attributes().PutStr("attr1", "test1") + dataPoint1.Attributes().PutStr("attr2", "test2") + + dataPoints.CopyTo(m.Sum().DataPoints()) + }, + }, + { + statements: []string{`aggregate_on_attributes("min") where metric.name == "operationA"`}, + want: func(td pmetric.Metrics) { + m := td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0) + + dataPoints := pmetric.NewNumberDataPointSlice() + dataPoint1 := dataPoints.AppendEmpty() + dataPoint1.SetStartTimestamp(StartTimestamp) + dataPoint1.SetDoubleValue(1.0) + dataPoint1.Attributes().PutStr("attr1", "test1") + dataPoint1.Attributes().PutStr("attr2", "test2") + dataPoint1.Attributes().PutStr("attr3", "test3") + dataPoint1.Attributes().PutStr("flags", "A|B|C") + dataPoint1.Attributes().PutStr("total.string", "123456789") + + dataPoints.CopyTo(m.Sum().DataPoints()) + }, + }, + { + statements: []string{`aggregate_on_attribute_value("sum", "attr1", ["test1", "test2"], "test") where metric.name == "operationE"`}, + want: func(td pmetric.Metrics) { + m := td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(4) + + dataPoints := pmetric.NewNumberDataPointSlice() + dataPoint1 := dataPoints.AppendEmpty() + dataPoint1.SetStartTimestamp(StartTimestamp) + dataPoint1.SetDoubleValue(4.7) + dataPoint1.Attributes().PutStr("attr1", "test") + + dataPoints.CopyTo(m.Sum().DataPoints()) + }, + }, + } + + for _, tt := range tests { + t.Run(tt.statements[0], func(t *testing.T) { + var contextStatements []common.ContextStatements + for _, statement := range tt.statements { + contextStatements = append(contextStatements, common.ContextStatements{Context: "", Statements: []string{statement}}) + } + + td := constructMetrics() + processor, err := NewProcessor(contextStatements, ottl.IgnoreError, componenttest.NewNopTelemetrySettings()) + assert.NoError(t, err) + + _, 
err = processor.ProcessMetrics(context.Background(), td) + assert.NoError(t, err) + + exTd := constructMetrics() + tt.want(exTd) + + assert.Equal(t, exTd, td) + }) + } +} + func Test_ProcessMetrics_DataPointContext(t *testing.T) { tests := []struct { statements []string @@ -724,6 +986,454 @@ func Test_ProcessMetrics_DataPointContext(t *testing.T) { } } +func Test_ProcessMetrics_InferredDataPointContext(t *testing.T) { + tests := []struct { + statements []string + want func(pmetric.Metrics) + }{ + { + statements: []string{`set(datapoint.attributes["test"], "pass") where metric.name == "operationA"`}, + want: func(td pmetric.Metrics) { + td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).Sum().DataPoints().At(0).Attributes().PutStr("test", "pass") + td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).Sum().DataPoints().At(1).Attributes().PutStr("test", "pass") + }, + }, + { + statements: []string{`set(datapoint.attributes["test"], "pass") where resource.attributes["host.name"] == "myhost"`}, + want: func(td pmetric.Metrics) { + td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).Sum().DataPoints().At(0).Attributes().PutStr("test", "pass") + td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).Sum().DataPoints().At(1).Attributes().PutStr("test", "pass") + td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(1).Histogram().DataPoints().At(0).Attributes().PutStr("test", "pass") + td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(1).Histogram().DataPoints().At(1).Attributes().PutStr("test", "pass") + td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(2).ExponentialHistogram().DataPoints().At(0).Attributes().PutStr("test", "pass") + td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(2).ExponentialHistogram().DataPoints().At(1).Attributes().PutStr("test", "pass") + 
td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(2).ExponentialHistogram().DataPoints().At(1).Attributes().PutStr("test", "pass") + td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(3).Summary().DataPoints().At(0).Attributes().PutStr("test", "pass") + td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(4).Sum().DataPoints().At(0).Attributes().PutStr("test", "pass") + td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(4).Sum().DataPoints().At(1).Attributes().PutStr("test", "pass") + }, + }, + { + statements: []string{`set(datapoint.attributes["int_value"], Int("2")) where metric.name == "operationA"`}, + want: func(td pmetric.Metrics) { + td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).Sum().DataPoints().At(0).Attributes().PutInt("int_value", 2) + td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).Sum().DataPoints().At(1).Attributes().PutInt("int_value", 2) + }, + }, + { + statements: []string{`set(datapoint.attributes["int_value"], Int(datapoint.value_double)) where metric.name == "operationA"`}, + want: func(td pmetric.Metrics) { + td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).Sum().DataPoints().At(0).Attributes().PutInt("int_value", 1) + td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).Sum().DataPoints().At(1).Attributes().PutInt("int_value", 3) + }, + }, + { + statements: []string{`keep_keys(datapoint.attributes, ["attr2"]) where metric.name == "operationA"`}, + want: func(td pmetric.Metrics) { + td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).Sum().DataPoints().At(0).Attributes().Clear() + td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).Sum().DataPoints().At(0).Attributes().PutStr("attr2", "test2") + td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).Sum().DataPoints().At(1).Attributes().Clear() + td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).Sum().DataPoints().At(1).Attributes().PutStr("attr2", 
"test2") + }, + }, + { + statements: []string{`set(metric.description, "test") where datapoint.attributes["attr1"] == "test1"`}, + want: func(td pmetric.Metrics) { + td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).SetDescription("test") + td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(1).SetDescription("test") + td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(2).SetDescription("test") + td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(3).SetDescription("test") + td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(4).SetDescription("test") + }, + }, + { + statements: []string{`set(metric.unit, "new unit")`}, + want: func(td pmetric.Metrics) { + td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).SetUnit("new unit") + td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(1).SetUnit("new unit") + td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(2).SetUnit("new unit") + td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(3).SetUnit("new unit") + td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(4).SetUnit("new unit") + }, + }, + { + statements: []string{`set(metric.description, "Sum") where metric.type == METRIC_DATA_TYPE_SUM`}, + want: func(td pmetric.Metrics) { + td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).SetDescription("Sum") + td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(4).SetDescription("Sum") + }, + }, + { + statements: []string{`set(metric.aggregation_temporality, AGGREGATION_TEMPORALITY_DELTA) where metric.aggregation_temporality == 0`}, + want: func(td pmetric.Metrics) { + td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) + td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(1).Histogram().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) + 
td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(2).ExponentialHistogram().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) + td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(4).Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) + }, + }, + { + statements: []string{`set(metric.is_monotonic, true) where metric.is_monotonic == false`}, + want: func(td pmetric.Metrics) { + td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).Sum().SetIsMonotonic(true) + td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(4).Sum().SetIsMonotonic(true) + }, + }, + { + statements: []string{`set(datapoint.attributes["test"], "pass") where datapoint.count == 1`}, + want: func(td pmetric.Metrics) { + td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(1).Histogram().DataPoints().At(0).Attributes().PutStr("test", "pass") + td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(2).ExponentialHistogram().DataPoints().At(0).Attributes().PutStr("test", "pass") + }, + }, + { + statements: []string{`set(datapoint.attributes["test"], "pass") where datapoint.scale == 1`}, + want: func(td pmetric.Metrics) { + td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(2).ExponentialHistogram().DataPoints().At(0).Attributes().PutStr("test", "pass") + }, + }, + { + statements: []string{`set(datapoint.attributes["test"], "pass") where datapoint.zero_count == 1`}, + want: func(td pmetric.Metrics) { + td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(2).ExponentialHistogram().DataPoints().At(0).Attributes().PutStr("test", "pass") + }, + }, + { + statements: []string{`set(datapoint.attributes["test"], "pass") where datapoint.positive.offset == 1`}, + want: func(td pmetric.Metrics) { + td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(2).ExponentialHistogram().DataPoints().At(0).Attributes().PutStr("test", "pass") + }, + }, + { + statements: []string{`set(datapoint.attributes["test"], "pass") 
where datapoint.negative.offset == 1`}, + want: func(td pmetric.Metrics) { + td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(2).ExponentialHistogram().DataPoints().At(0).Attributes().PutStr("test", "pass") + }, + }, + { + statements: []string{`replace_pattern(datapoint.attributes["attr1"], "test1", "pass")`}, + want: func(td pmetric.Metrics) { + td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).Sum().DataPoints().At(0).Attributes().PutStr("attr1", "pass") + td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).Sum().DataPoints().At(1).Attributes().PutStr("attr1", "pass") + td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(1).Histogram().DataPoints().At(0).Attributes().PutStr("attr1", "pass") + td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(1).Histogram().DataPoints().At(1).Attributes().PutStr("attr1", "pass") + td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(2).ExponentialHistogram().DataPoints().At(0).Attributes().PutStr("attr1", "pass") + td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(2).ExponentialHistogram().DataPoints().At(1).Attributes().PutStr("attr1", "pass") + td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(3).Summary().DataPoints().At(0).Attributes().PutStr("attr1", "pass") + td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(4).Sum().DataPoints().At(0).Attributes().PutStr("attr1", "pass") + }, + }, + { + statements: []string{`replace_all_patterns(datapoint.attributes, "value", "test1", "pass")`}, + want: func(td pmetric.Metrics) { + td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).Sum().DataPoints().At(0).Attributes().PutStr("attr1", "pass") + td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).Sum().DataPoints().At(1).Attributes().PutStr("attr1", "pass") + td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(1).Histogram().DataPoints().At(0).Attributes().PutStr("attr1", "pass") + 
td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(1).Histogram().DataPoints().At(1).Attributes().PutStr("attr1", "pass") + td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(2).ExponentialHistogram().DataPoints().At(0).Attributes().PutStr("attr1", "pass") + td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(2).ExponentialHistogram().DataPoints().At(1).Attributes().PutStr("attr1", "pass") + td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(3).Summary().DataPoints().At(0).Attributes().PutStr("attr1", "pass") + td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(4).Sum().DataPoints().At(0).Attributes().PutStr("attr1", "pass") + }, + }, + { + statements: []string{`replace_all_patterns(datapoint.attributes, "key", "attr3", "attr4")`}, + want: func(td pmetric.Metrics) { + td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).Sum().DataPoints().At(0).Attributes().Clear() + td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).Sum().DataPoints().At(0).Attributes().PutStr("attr1", "test1") + td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).Sum().DataPoints().At(0).Attributes().PutStr("attr2", "test2") + td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).Sum().DataPoints().At(0).Attributes().PutStr("attr4", "test3") + td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).Sum().DataPoints().At(0).Attributes().PutStr("flags", "A|B|C") + td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).Sum().DataPoints().At(0).Attributes().PutStr("total.string", "123456789") + + td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).Sum().DataPoints().At(1).Attributes().Clear() + td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).Sum().DataPoints().At(1).Attributes().PutStr("attr1", "test1") + td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).Sum().DataPoints().At(1).Attributes().PutStr("attr2", "test2") + 
td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).Sum().DataPoints().At(1).Attributes().PutStr("attr4", "test3") + td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).Sum().DataPoints().At(1).Attributes().PutStr("flags", "A|B|C") + td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).Sum().DataPoints().At(1).Attributes().PutStr("total.string", "123456789") + + td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(1).Histogram().DataPoints().At(0).Attributes().Clear() + td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(1).Histogram().DataPoints().At(0).Attributes().PutStr("attr1", "test1") + td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(1).Histogram().DataPoints().At(0).Attributes().PutStr("attr2", "test2") + td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(1).Histogram().DataPoints().At(0).Attributes().PutStr("attr4", "test3") + td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(1).Histogram().DataPoints().At(0).Attributes().PutStr("flags", "C|D") + td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(1).Histogram().DataPoints().At(0).Attributes().PutStr("total.string", "345678") + + td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(1).Histogram().DataPoints().At(1).Attributes().Clear() + td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(1).Histogram().DataPoints().At(1).Attributes().PutStr("attr1", "test1") + td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(1).Histogram().DataPoints().At(1).Attributes().PutStr("attr2", "test2") + td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(1).Histogram().DataPoints().At(1).Attributes().PutStr("attr4", "test3") + td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(1).Histogram().DataPoints().At(1).Attributes().PutStr("flags", "C|D") + td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(1).Histogram().DataPoints().At(1).Attributes().PutStr("total.string", "345678") + 
+ td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(2).ExponentialHistogram().DataPoints().At(0).Attributes().Clear() + td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(2).ExponentialHistogram().DataPoints().At(0).Attributes().PutStr("attr1", "test1") + td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(2).ExponentialHistogram().DataPoints().At(0).Attributes().PutStr("attr2", "test2") + td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(2).ExponentialHistogram().DataPoints().At(0).Attributes().PutStr("attr4", "test3") + + td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(2).ExponentialHistogram().DataPoints().At(1).Attributes().Clear() + td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(2).ExponentialHistogram().DataPoints().At(1).Attributes().PutStr("attr1", "test1") + td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(2).ExponentialHistogram().DataPoints().At(1).Attributes().PutStr("attr2", "test2") + td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(2).ExponentialHistogram().DataPoints().At(1).Attributes().PutStr("attr4", "test3") + + td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(3).Summary().DataPoints().At(0).Attributes().Clear() + td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(3).Summary().DataPoints().At(0).Attributes().PutStr("attr1", "test1") + td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(3).Summary().DataPoints().At(0).Attributes().PutStr("attr2", "test2") + td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(3).Summary().DataPoints().At(0).Attributes().PutStr("attr4", "test3") + }, + }, + { + statements: []string{`convert_summary_count_val_to_sum("delta", true) where metric.name == "operationD"`}, + want: func(td pmetric.Metrics) { + sumMetric := td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().AppendEmpty() + sumDp := sumMetric.SetEmptySum().DataPoints().AppendEmpty() + + summaryMetric := 
pmetric.NewMetric() + fillMetricFour(summaryMetric) + summaryDp := summaryMetric.Summary().DataPoints().At(0) + + sumMetric.SetDescription(summaryMetric.Description()) + sumMetric.SetName(summaryMetric.Name() + "_count") + sumMetric.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) + sumMetric.Sum().SetIsMonotonic(true) + sumMetric.SetUnit(summaryMetric.Unit()) + + summaryDp.Attributes().CopyTo(sumDp.Attributes()) + sumDp.SetIntValue(int64(summaryDp.Count())) + sumDp.SetStartTimestamp(StartTimestamp) + sumDp.SetTimestamp(TestTimeStamp) + }, + }, + { + statements: []string{`convert_summary_sum_val_to_sum("delta", true) where metric.name == "operationD"`}, + want: func(td pmetric.Metrics) { + sumMetric := td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().AppendEmpty() + sumDp := sumMetric.SetEmptySum().DataPoints().AppendEmpty() + + summaryMetric := pmetric.NewMetric() + fillMetricFour(summaryMetric) + summaryDp := summaryMetric.Summary().DataPoints().At(0) + + sumMetric.SetDescription(summaryMetric.Description()) + sumMetric.SetName(summaryMetric.Name() + "_sum") + sumMetric.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) + sumMetric.Sum().SetIsMonotonic(true) + sumMetric.SetUnit(summaryMetric.Unit()) + + summaryDp.Attributes().CopyTo(sumDp.Attributes()) + sumDp.SetDoubleValue(summaryDp.Sum()) + sumDp.SetStartTimestamp(StartTimestamp) + sumDp.SetTimestamp(TestTimeStamp) + }, + }, + { + statements: []string{ + `convert_summary_sum_val_to_sum("delta", true) where metric.name == "operationD"`, + `set(metric.unit, "new unit")`, + }, + want: func(td pmetric.Metrics) { + sumMetric := td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().AppendEmpty() + sumDp := sumMetric.SetEmptySum().DataPoints().AppendEmpty() + + summaryMetric := pmetric.NewMetric() + fillMetricFour(summaryMetric) + summaryDp := summaryMetric.Summary().DataPoints().At(0) + + sumMetric.SetDescription(summaryMetric.Description()) + 
sumMetric.SetName(summaryMetric.Name() + "_sum") + sumMetric.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) + sumMetric.Sum().SetIsMonotonic(true) + sumMetric.SetUnit("new unit") + + summaryDp.Attributes().CopyTo(sumDp.Attributes()) + sumDp.SetDoubleValue(summaryDp.Sum()) + sumDp.SetStartTimestamp(StartTimestamp) + sumDp.SetTimestamp(TestTimeStamp) + + td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).SetUnit("new unit") + td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(1).SetUnit("new unit") + td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(2).SetUnit("new unit") + td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(3).SetUnit("new unit") + td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(4).SetUnit("new unit") + }, + }, + { + statements: []string{`set(datapoint.attributes["test"], "pass") where IsMatch(metric.name, "operation[AC]")`}, + want: func(td pmetric.Metrics) { + td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).Sum().DataPoints().At(0).Attributes().PutStr("test", "pass") + td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).Sum().DataPoints().At(1).Attributes().PutStr("test", "pass") + td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(2).ExponentialHistogram().DataPoints().At(0).Attributes().PutStr("test", "pass") + td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(2).ExponentialHistogram().DataPoints().At(1).Attributes().PutStr("test", "pass") + }, + }, + { + statements: []string{`delete_key(datapoint.attributes, "attr3") where metric.name == "operationA"`}, + want: func(td pmetric.Metrics) { + td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).Sum().DataPoints().At(0).Attributes().Clear() + td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).Sum().DataPoints().At(0).Attributes().PutStr("attr1", "test1") + 
td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).Sum().DataPoints().At(0).Attributes().PutStr("attr2", "test2") + td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).Sum().DataPoints().At(0).Attributes().PutStr("total.string", "123456789") + td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).Sum().DataPoints().At(0).Attributes().PutStr("flags", "A|B|C") + td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).Sum().DataPoints().At(1).Attributes().Clear() + td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).Sum().DataPoints().At(1).Attributes().PutStr("attr1", "test1") + td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).Sum().DataPoints().At(1).Attributes().PutStr("attr2", "test2") + td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).Sum().DataPoints().At(1).Attributes().PutStr("total.string", "123456789") + td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).Sum().DataPoints().At(1).Attributes().PutStr("flags", "A|B|C") + }, + }, + { + statements: []string{`delete_matching_keys(datapoint.attributes, "[23]") where metric.name == "operationA"`}, + want: func(td pmetric.Metrics) { + td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).Sum().DataPoints().At(0).Attributes().Clear() + td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).Sum().DataPoints().At(0).Attributes().PutStr("attr1", "test1") + td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).Sum().DataPoints().At(0).Attributes().PutStr("flags", "A|B|C") + td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).Sum().DataPoints().At(0).Attributes().PutStr("total.string", "123456789") + td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).Sum().DataPoints().At(1).Attributes().Clear() + td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).Sum().DataPoints().At(1).Attributes().PutStr("attr1", "test1") + 
td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).Sum().DataPoints().At(1).Attributes().PutStr("flags", "A|B|C") + td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).Sum().DataPoints().At(1).Attributes().PutStr("total.string", "123456789") + }, + }, + { + statements: []string{`set(datapoint.attributes["test"], Concat([datapoint.attributes["attr1"], datapoint.attributes["attr2"]], "-")) where metric.name == Concat(["operation", "A"], "")`}, + want: func(td pmetric.Metrics) { + td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).Sum().DataPoints().At(0).Attributes().PutStr("test", "test1-test2") + td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).Sum().DataPoints().At(1).Attributes().PutStr("test", "test1-test2") + }, + }, + { + statements: []string{`set(datapoint.attributes["test"], Split(datapoint.attributes["flags"], "|"))`}, + want: func(td pmetric.Metrics) { + v00 := td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).Sum().DataPoints().At(0).Attributes().PutEmptySlice("test") + v00.AppendEmpty().SetStr("A") + v00.AppendEmpty().SetStr("B") + v00.AppendEmpty().SetStr("C") + v01 := td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).Sum().DataPoints().At(1).Attributes().PutEmptySlice("test") + v01.AppendEmpty().SetStr("A") + v01.AppendEmpty().SetStr("B") + v01.AppendEmpty().SetStr("C") + v10 := td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(1).Histogram().DataPoints().At(0).Attributes().PutEmptySlice("test") + v10.AppendEmpty().SetStr("C") + v10.AppendEmpty().SetStr("D") + v11 := td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(1).Histogram().DataPoints().At(1).Attributes().PutEmptySlice("test") + v11.AppendEmpty().SetStr("C") + v11.AppendEmpty().SetStr("D") + }, + }, + { + statements: []string{`set(datapoint.attributes["test"], Split(datapoint.attributes["flags"], "|")) where metric.name == "operationA"`}, + want: func(td pmetric.Metrics) { + v00 := 
td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).Sum().DataPoints().At(0).Attributes().PutEmptySlice("test") + v00.AppendEmpty().SetStr("A") + v00.AppendEmpty().SetStr("B") + v00.AppendEmpty().SetStr("C") + v01 := td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).Sum().DataPoints().At(1).Attributes().PutEmptySlice("test") + v01.AppendEmpty().SetStr("A") + v01.AppendEmpty().SetStr("B") + v01.AppendEmpty().SetStr("C") + }, + }, + { + statements: []string{`set(datapoint.attributes["test"], Split(datapoint.attributes["not_exist"], "|"))`}, + want: func(_ pmetric.Metrics) {}, + }, + { + statements: []string{`set(datapoint.attributes["test"], Substring(datapoint.attributes["total.string"], 3, 3))`}, + want: func(td pmetric.Metrics) { + td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).Sum().DataPoints().At(0).Attributes().PutStr("test", "456") + td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).Sum().DataPoints().At(1).Attributes().PutStr("test", "456") + td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(1).Histogram().DataPoints().At(0).Attributes().PutStr("test", "678") + td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(1).Histogram().DataPoints().At(1).Attributes().PutStr("test", "678") + }, + }, + { + statements: []string{`set(datapoint.attributes["test"], Substring(datapoint.attributes["total.string"], 3, 3)) where metric.name == "operationA"`}, + want: func(td pmetric.Metrics) { + td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).Sum().DataPoints().At(0).Attributes().PutStr("test", "456") + td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).Sum().DataPoints().At(1).Attributes().PutStr("test", "456") + }, + }, + { + statements: []string{`set(datapoint.attributes["test"], Substring(datapoint.attributes["not_exist"], 3, 3))`}, + want: func(_ pmetric.Metrics) {}, + }, + { + statements: []string{ + `set(datapoint.attributes["test_lower"], ConvertCase(metric.name, 
"lower")) where metric.name == "operationA"`, + `set(datapoint.attributes["test_upper"], ConvertCase(metric.name, "upper")) where metric.name == "operationA"`, + `set(datapoint.attributes["test_snake"], ConvertCase(metric.name, "snake")) where metric.name == "operationA"`, + `set(datapoint.attributes["test_camel"], ConvertCase(metric.name, "camel")) where metric.name == "operationA"`, + }, + want: func(td pmetric.Metrics) { + td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).Sum().DataPoints().At(0).Attributes().PutStr("test_lower", "operationa") + td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).Sum().DataPoints().At(1).Attributes().PutStr("test_lower", "operationa") + td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).Sum().DataPoints().At(0).Attributes().PutStr("test_upper", "OPERATIONA") + td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).Sum().DataPoints().At(1).Attributes().PutStr("test_upper", "OPERATIONA") + td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).Sum().DataPoints().At(0).Attributes().PutStr("test_snake", "operation_a") + td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).Sum().DataPoints().At(1).Attributes().PutStr("test_snake", "operation_a") + td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).Sum().DataPoints().At(0).Attributes().PutStr("test_camel", "OperationA") + td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).Sum().DataPoints().At(1).Attributes().PutStr("test_camel", "OperationA") + }, + }, + { + statements: []string{`set(datapoint.attributes["test"], ["A", "B", "C"]) where metric.name == "operationA"`}, + want: func(td pmetric.Metrics) { + v00 := td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).Sum().DataPoints().At(0).Attributes().PutEmptySlice("test") + v00.AppendEmpty().SetStr("A") + v00.AppendEmpty().SetStr("B") + v00.AppendEmpty().SetStr("C") + v01 := 
td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).Sum().DataPoints().At(1).Attributes().PutEmptySlice("test") + v01.AppendEmpty().SetStr("A") + v01.AppendEmpty().SetStr("B") + v01.AppendEmpty().SetStr("C") + }, + }, + { + statements: []string{`merge_maps(datapoint.attributes, ParseJSON("{\"json_test\":\"pass\"}"), "insert") where metric.name == "operationA"`}, + want: func(td pmetric.Metrics) { + td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).Sum().DataPoints().At(0).Attributes().PutStr("json_test", "pass") + td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).Sum().DataPoints().At(1).Attributes().PutStr("json_test", "pass") + }, + }, + { + statements: []string{`limit(datapoint.attributes, 0, []) where metric.name == "operationA"`}, + want: func(td pmetric.Metrics) { + td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).Sum().DataPoints().At(0).Attributes().RemoveIf(func(_ string, _ pcommon.Value) bool { return true }) + td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).Sum().DataPoints().At(1).Attributes().RemoveIf(func(_ string, _ pcommon.Value) bool { return true }) + }, + }, + { + statements: []string{`set(datapoint.attributes["test"], Log(1)) where metric.name == "operationA"`}, + want: func(td pmetric.Metrics) { + td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).Sum().DataPoints().At(0).Attributes().PutDouble("test", 0.0) + td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).Sum().DataPoints().At(1).Attributes().PutDouble("test", 0.0) + }, + }, + } + + for _, tt := range tests { + t.Run(tt.statements[0], func(t *testing.T) { + td := constructMetrics() + var contextStatements []common.ContextStatements + for _, statement := range tt.statements { + contextStatements = append(contextStatements, common.ContextStatements{Context: "", Statements: []string{statement}}) + } + + processor, err := NewProcessor(contextStatements, ottl.IgnoreError, 
componenttest.NewNopTelemetrySettings()) + assert.NoError(t, err) + + _, err = processor.ProcessMetrics(context.Background(), td) + assert.NoError(t, err) + + exTd := constructMetrics() + tt.want(exTd) + + assert.Equal(t, exTd, td) + }) + } +} + func Test_ProcessMetrics_MixContext(t *testing.T) { tests := []struct { name string @@ -862,7 +1572,7 @@ func Test_ProcessMetrics_MixContext(t *testing.T) { } } -func Test_ProcessMetrics_Error(t *testing.T) { +func Test_ProcessMetrics_ErrorMode(t *testing.T) { tests := []struct { statement string context common.ContextID diff --git a/processor/transformprocessor/internal/traces/processor.go b/processor/transformprocessor/internal/traces/processor.go index e20c87880ce3..6af07a4a942e 100644 --- a/processor/transformprocessor/internal/traces/processor.go +++ b/processor/transformprocessor/internal/traces/processor.go @@ -7,7 +7,6 @@ import ( "context" "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/pdata/ptrace" "go.uber.org/multierr" "go.uber.org/zap" @@ -17,7 +16,7 @@ import ( ) type Processor struct { - contexts []consumer.Traces + contexts []common.TracesConsumer logger *zap.Logger } @@ -27,7 +26,7 @@ func NewProcessor(contextStatements []common.ContextStatements, errorMode ottl.E return nil, err } - contexts := make([]consumer.Traces, len(contextStatements)) + contexts := make([]common.TracesConsumer, len(contextStatements)) var errors error for i, cs := range contextStatements { context, err := pc.ParseContextStatements(cs) @@ -49,7 +48,7 @@ func NewProcessor(contextStatements []common.ContextStatements, errorMode ottl.E func (p *Processor) ProcessTraces(ctx context.Context, td ptrace.Traces) (ptrace.Traces, error) { for _, c := range p.contexts { - err := c.ConsumeTraces(ctx, td) + err := c.ConsumeTraces(ctx, td, nil) if err != nil { p.logger.Error("failed processing traces", zap.Error(err)) return td, err diff --git 
a/processor/transformprocessor/internal/traces/processor_test.go b/processor/transformprocessor/internal/traces/processor_test.go index 0da86dfeb262..9ac79c8a2e4b 100644 --- a/processor/transformprocessor/internal/traces/processor_test.go +++ b/processor/transformprocessor/internal/traces/processor_test.go @@ -70,6 +70,47 @@ func Test_ProcessTraces_ResourceContext(t *testing.T) { } } +func Test_ProcessTraces_InferredResourceContext(t *testing.T) { + tests := []struct { + statement string + want func(td ptrace.Traces) + }{ + { + statement: `set(resource.attributes["test"], "pass")`, + want: func(td ptrace.Traces) { + td.ResourceSpans().At(0).Resource().Attributes().PutStr("test", "pass") + }, + }, + { + statement: `set(resource.attributes["test"], "pass") where resource.attributes["host.name"] == "wrong"`, + want: func(_ ptrace.Traces) { + }, + }, + { + statement: `set(resource.schema_url, "test_schema_url")`, + want: func(td ptrace.Traces) { + td.ResourceSpans().At(0).SetSchemaUrl("test_schema_url") + }, + }, + } + + for _, tt := range tests { + t.Run(tt.statement, func(t *testing.T) { + td := constructTraces() + processor, err := NewProcessor([]common.ContextStatements{{Context: "", Statements: []string{tt.statement}}}, ottl.IgnoreError, componenttest.NewNopTelemetrySettings()) + assert.NoError(t, err) + + _, err = processor.ProcessTraces(context.Background(), td) + assert.NoError(t, err) + + exTd := constructTraces() + tt.want(exTd) + + assert.Equal(t, exTd, td) + }) + } +} + func Test_ProcessTraces_ScopeContext(t *testing.T) { tests := []struct { statement string @@ -111,6 +152,47 @@ func Test_ProcessTraces_ScopeContext(t *testing.T) { } } +func Test_ProcessTraces_InferredScopeContext(t *testing.T) { + tests := []struct { + statement string + want func(td ptrace.Traces) + }{ + { + statement: `set(scope.attributes["test"], "pass") where scope.name == "scope"`, + want: func(td ptrace.Traces) { + 
td.ResourceSpans().At(0).ScopeSpans().At(0).Scope().Attributes().PutStr("test", "pass") + }, + }, + { + statement: `set(scope.attributes["test"], "pass") where scope.version == 2`, + want: func(_ ptrace.Traces) { + }, + }, + { + statement: `set(scope.schema_url, "test_schema_url")`, + want: func(td ptrace.Traces) { + td.ResourceSpans().At(0).ScopeSpans().At(0).SetSchemaUrl("test_schema_url") + }, + }, + } + + for _, tt := range tests { + t.Run(tt.statement, func(t *testing.T) { + td := constructTraces() + processor, err := NewProcessor([]common.ContextStatements{{Context: "", Statements: []string{tt.statement}}}, ottl.IgnoreError, componenttest.NewNopTelemetrySettings()) + assert.NoError(t, err) + + _, err = processor.ProcessTraces(context.Background(), td) + assert.NoError(t, err) + + exTd := constructTraces() + tt.want(exTd) + + assert.Equal(t, exTd, td) + }) + } +} + func Test_ProcessTraces_TraceContext(t *testing.T) { tests := []struct { statement string @@ -411,6 +493,306 @@ func Test_ProcessTraces_TraceContext(t *testing.T) { } } +func Test_ProcessTraces_InferredTraceContext(t *testing.T) { + tests := []struct { + statement string + want func(td ptrace.Traces) + }{ + { + statement: `set(span.attributes["test"], "pass") where span.name == "operationA"`, + want: func(td ptrace.Traces) { + td.ResourceSpans().At(0).ScopeSpans().At(0).Spans().At(0).Attributes().PutStr("test", "pass") + }, + }, + { + statement: `set(span.attributes["test"], "pass") where resource.attributes["host.name"] == "localhost"`, + want: func(td ptrace.Traces) { + td.ResourceSpans().At(0).ScopeSpans().At(0).Spans().At(0).Attributes().PutStr("test", "pass") + td.ResourceSpans().At(0).ScopeSpans().At(0).Spans().At(1).Attributes().PutStr("test", "pass") + }, + }, + { + statement: `keep_keys(span.attributes, ["http.method"]) where span.name == "operationA"`, + want: func(td ptrace.Traces) { + td.ResourceSpans().At(0).ScopeSpans().At(0).Spans().At(0).Attributes().Clear() + 
td.ResourceSpans().At(0).ScopeSpans().At(0).Spans().At(0).Attributes().PutStr("http.method", "get") + }, + }, + { + statement: `set(span.status.code, 1) where span.attributes["http.path"] == "/health"`, + want: func(td ptrace.Traces) { + td.ResourceSpans().At(0).ScopeSpans().At(0).Spans().At(0).Status().SetCode(ptrace.StatusCodeOk) + td.ResourceSpans().At(0).ScopeSpans().At(0).Spans().At(1).Status().SetCode(ptrace.StatusCodeOk) + }, + }, + { + statement: `set(span.attributes["test"], "pass") where span.dropped_attributes_count == 1`, + want: func(td ptrace.Traces) { + td.ResourceSpans().At(0).ScopeSpans().At(0).Spans().At(0).Attributes().PutStr("test", "pass") + }, + }, + { + statement: `set(span.attributes["test"], "pass") where span.dropped_events_count == 1`, + want: func(td ptrace.Traces) { + td.ResourceSpans().At(0).ScopeSpans().At(0).Spans().At(0).Attributes().PutStr("test", "pass") + }, + }, + { + statement: `set(span.attributes["test"], "pass") where span.dropped_links_count == 1`, + want: func(td ptrace.Traces) { + td.ResourceSpans().At(0).ScopeSpans().At(0).Spans().At(0).Attributes().PutStr("test", "pass") + }, + }, + { + statement: `set(span.attributes["test"], "pass") where span.span_id == SpanID(0x0102030405060708)`, + want: func(td ptrace.Traces) { + td.ResourceSpans().At(0).ScopeSpans().At(0).Spans().At(0).Attributes().PutStr("test", "pass") + }, + }, + { + statement: `set(span.attributes["test"], "pass") where span.parent_span_id == SpanID(0x0807060504030201)`, + want: func(td ptrace.Traces) { + td.ResourceSpans().At(0).ScopeSpans().At(0).Spans().At(0).Attributes().PutStr("test", "pass") + }, + }, + { + statement: `set(span.attributes["test"], "pass") where span.trace_id == TraceID(0x0102030405060708090a0b0c0d0e0f10)`, + want: func(td ptrace.Traces) { + td.ResourceSpans().At(0).ScopeSpans().At(0).Spans().At(0).Attributes().PutStr("test", "pass") + }, + }, + { + statement: `set(span.attributes["test"], "pass") where span.trace_state == "new"`, + 
want: func(td ptrace.Traces) { + td.ResourceSpans().At(0).ScopeSpans().At(0).Spans().At(0).Attributes().PutStr("test", "pass") + }, + }, + { + statement: `replace_pattern(span.attributes["http.method"], "get", "post")`, + want: func(td ptrace.Traces) { + td.ResourceSpans().At(0).ScopeSpans().At(0).Spans().At(0).Attributes().PutStr("http.method", "post") + td.ResourceSpans().At(0).ScopeSpans().At(0).Spans().At(1).Attributes().PutStr("http.method", "post") + }, + }, + { + statement: `replace_all_patterns(span.attributes, "value", "get", "post")`, + want: func(td ptrace.Traces) { + td.ResourceSpans().At(0).ScopeSpans().At(0).Spans().At(0).Attributes().PutStr("http.method", "post") + td.ResourceSpans().At(0).ScopeSpans().At(0).Spans().At(1).Attributes().PutStr("http.method", "post") + }, + }, + { + statement: `replace_all_patterns(span.attributes, "key", "http.url", "url")`, + want: func(td ptrace.Traces) { + td.ResourceSpans().At(0).ScopeSpans().At(0).Spans().At(0).Attributes().Clear() + td.ResourceSpans().At(0).ScopeSpans().At(0).Spans().At(0).Attributes().PutStr("http.method", "get") + td.ResourceSpans().At(0).ScopeSpans().At(0).Spans().At(0).Attributes().PutStr("http.path", "/health") + td.ResourceSpans().At(0).ScopeSpans().At(0).Spans().At(0).Attributes().PutStr("url", "http://localhost/health") + td.ResourceSpans().At(0).ScopeSpans().At(0).Spans().At(0).Attributes().PutStr("flags", "A|B|C") + td.ResourceSpans().At(0).ScopeSpans().At(0).Spans().At(0).Attributes().PutStr("total.string", "123456789") + + td.ResourceSpans().At(0).ScopeSpans().At(0).Spans().At(1).Attributes().Clear() + td.ResourceSpans().At(0).ScopeSpans().At(0).Spans().At(1).Attributes().PutStr("http.method", "get") + td.ResourceSpans().At(0).ScopeSpans().At(0).Spans().At(1).Attributes().PutStr("http.path", "/health") + td.ResourceSpans().At(0).ScopeSpans().At(0).Spans().At(1).Attributes().PutStr("url", "http://localhost/health") + 
td.ResourceSpans().At(0).ScopeSpans().At(0).Spans().At(1).Attributes().PutStr("flags", "C|D") + td.ResourceSpans().At(0).ScopeSpans().At(0).Spans().At(1).Attributes().PutStr("total.string", "345678") + }, + }, + { + statement: `set(span.attributes["test"], "pass") where IsMatch(span.name, "operation[AC]")`, + want: func(td ptrace.Traces) { + td.ResourceSpans().At(0).ScopeSpans().At(0).Spans().At(0).Attributes().PutStr("test", "pass") + }, + }, + { + statement: `set(span.attributes["test"], "pass") where span.attributes["doesnt exist"] == nil`, + want: func(td ptrace.Traces) { + td.ResourceSpans().At(0).ScopeSpans().At(0).Spans().At(0).Attributes().PutStr("test", "pass") + td.ResourceSpans().At(0).ScopeSpans().At(0).Spans().At(1).Attributes().PutStr("test", "pass") + }, + }, + { + statement: `delete_key(span.attributes, "http.url") where span.name == "operationA"`, + want: func(td ptrace.Traces) { + td.ResourceSpans().At(0).ScopeSpans().At(0).Spans().At(0).Attributes().Clear() + td.ResourceSpans().At(0).ScopeSpans().At(0).Spans().At(0).Attributes().PutStr("http.method", "get") + td.ResourceSpans().At(0).ScopeSpans().At(0).Spans().At(0).Attributes().PutStr("http.path", "/health") + td.ResourceSpans().At(0).ScopeSpans().At(0).Spans().At(0).Attributes().PutStr("total.string", "123456789") + td.ResourceSpans().At(0).ScopeSpans().At(0).Spans().At(0).Attributes().PutStr("flags", "A|B|C") + }, + }, + { + statement: `delete_matching_keys(span.attributes, "http.*t.*") where span.name == "operationA"`, + want: func(td ptrace.Traces) { + td.ResourceSpans().At(0).ScopeSpans().At(0).Spans().At(0).Attributes().Clear() + td.ResourceSpans().At(0).ScopeSpans().At(0).Spans().At(0).Attributes().PutStr("http.url", "http://localhost/health") + td.ResourceSpans().At(0).ScopeSpans().At(0).Spans().At(0).Attributes().PutStr("flags", "A|B|C") + td.ResourceSpans().At(0).ScopeSpans().At(0).Spans().At(0).Attributes().PutStr("total.string", "123456789") + }, + }, + { + statement: 
`set(span.attributes["test"], "pass") where span.kind == SPAN_KIND_INTERNAL`, + want: func(td ptrace.Traces) { + td.ResourceSpans().At(0).ScopeSpans().At(0).Spans().At(0).Attributes().PutStr("test", "pass") + }, + }, + { + statement: `set(span.kind, SPAN_KIND_SERVER) where span.kind == 1`, + want: func(td ptrace.Traces) { + td.ResourceSpans().At(0).ScopeSpans().At(0).Spans().At(0).SetKind(2) + }, + }, + { + statement: `set(span.attributes["test"], Concat([span.attributes["http.method"], span.attributes["http.url"]], ": "))`, + want: func(td ptrace.Traces) { + td.ResourceSpans().At(0).ScopeSpans().At(0).Spans().At(0).Attributes().PutStr("test", "get: http://localhost/health") + td.ResourceSpans().At(0).ScopeSpans().At(0).Spans().At(1).Attributes().PutStr("test", "get: http://localhost/health") + }, + }, + { + statement: `set(span.attributes["test"], Concat([span.attributes["http.method"], ": ", span.attributes["http.url"]], ""))`, + want: func(td ptrace.Traces) { + td.ResourceSpans().At(0).ScopeSpans().At(0).Spans().At(0).Attributes().PutStr("test", "get: http://localhost/health") + td.ResourceSpans().At(0).ScopeSpans().At(0).Spans().At(1).Attributes().PutStr("test", "get: http://localhost/health") + }, + }, + { + statement: `set(span.attributes["test"], Concat([span.attributes["http.method"], span.attributes["http.url"]], ": ")) where span.name == Concat(["operation", "A"], "")`, + want: func(td ptrace.Traces) { + td.ResourceSpans().At(0).ScopeSpans().At(0).Spans().At(0).Attributes().PutStr("test", "get: http://localhost/health") + }, + }, + { + statement: `set(span.attributes["kind"], Concat(["kind", ": ", span.kind], "")) where span.kind == 1`, + want: func(td ptrace.Traces) { + td.ResourceSpans().At(0).ScopeSpans().At(0).Spans().At(0).Attributes().PutStr("kind", "kind: 1") + }, + }, + { + statement: `set(span.attributes["test"], Split(span.attributes["flags"], "|"))`, + want: func(td ptrace.Traces) { + v1 := 
td.ResourceSpans().At(0).ScopeSpans().At(0).Spans().At(0).Attributes().PutEmptySlice("test") + v1.AppendEmpty().SetStr("A") + v1.AppendEmpty().SetStr("B") + v1.AppendEmpty().SetStr("C") + v2 := td.ResourceSpans().At(0).ScopeSpans().At(0).Spans().At(1).Attributes().PutEmptySlice("test") + v2.AppendEmpty().SetStr("C") + v2.AppendEmpty().SetStr("D") + }, + }, + { + statement: `set(span.attributes["test"], Split(span.attributes["flags"], "|")) where span.name == "operationA"`, + want: func(td ptrace.Traces) { + v1 := td.ResourceSpans().At(0).ScopeSpans().At(0).Spans().At(0).Attributes().PutEmptySlice("test") + v1.AppendEmpty().SetStr("A") + v1.AppendEmpty().SetStr("B") + v1.AppendEmpty().SetStr("C") + }, + }, + { + statement: `set(span.attributes["test"], Split(span.attributes["not_exist"], "|"))`, + want: func(_ ptrace.Traces) {}, + }, + { + statement: `set(span.attributes["test"], Substring(span.attributes["total.string"], 3, 3))`, + want: func(td ptrace.Traces) { + td.ResourceSpans().At(0).ScopeSpans().At(0).Spans().At(0).Attributes().PutStr("test", "456") + td.ResourceSpans().At(0).ScopeSpans().At(0).Spans().At(1).Attributes().PutStr("test", "678") + }, + }, + { + statement: `set(span.attributes["test"], Substring(span.attributes["total.string"], 3, 3)) where span.name == "operationA"`, + want: func(td ptrace.Traces) { + td.ResourceSpans().At(0).ScopeSpans().At(0).Spans().At(0).Attributes().PutStr("test", "456") + }, + }, + { + statement: `set(span.attributes["test"], Substring(span.attributes["not_exist"], 3, 3))`, + want: func(_ ptrace.Traces) {}, + }, + { + statement: `set(span.attributes["test"], ["A", "B", "C"]) where span.name == "operationA"`, + want: func(td ptrace.Traces) { + v1 := td.ResourceSpans().At(0).ScopeSpans().At(0).Spans().At(0).Attributes().PutEmptySlice("test") + v1.AppendEmpty().SetStr("A") + v1.AppendEmpty().SetStr("B") + v1.AppendEmpty().SetStr("C") + }, + }, + { + statement: `set(span.attributes["entrypoint"], span.name) where 
span.parent_span_id == SpanID(0x0000000000000000)`, + want: func(td ptrace.Traces) { + td.ResourceSpans().At(0).ScopeSpans().At(0).Spans().At(1).Attributes().PutStr("entrypoint", "operationB") + }, + }, + { + statement: `set(span.attributes["entrypoint-root"], span.name) where IsRootSpan()`, + want: func(td ptrace.Traces) { + td.ResourceSpans().At(0).ScopeSpans().At(0).Spans().At(1).Attributes().PutStr("entrypoint-root", "operationB") + }, + }, + { + statement: `set(span.attributes["test"], ConvertCase(span.name, "lower")) where span.name == "operationA"`, + want: func(td ptrace.Traces) { + td.ResourceSpans().At(0).ScopeSpans().At(0).Spans().At(0).Attributes().PutStr("test", "operationa") + }, + }, + { + statement: `set(span.attributes["test"], ConvertCase(span.name, "upper")) where span.name == "operationA"`, + want: func(td ptrace.Traces) { + td.ResourceSpans().At(0).ScopeSpans().At(0).Spans().At(0).Attributes().PutStr("test", "OPERATIONA") + }, + }, + { + statement: `set(span.attributes["test"], ConvertCase(span.name, "snake")) where span.name == "operationA"`, + want: func(td ptrace.Traces) { + td.ResourceSpans().At(0).ScopeSpans().At(0).Spans().At(0).Attributes().PutStr("test", "operation_a") + }, + }, + { + statement: `set(span.attributes["test"], ConvertCase(span.name, "camel")) where span.name == "operationA"`, + want: func(td ptrace.Traces) { + td.ResourceSpans().At(0).ScopeSpans().At(0).Spans().At(0).Attributes().PutStr("test", "OperationA") + }, + }, + { + statement: `merge_maps(span.attributes, ParseJSON("{\"json_test\":\"pass\"}"), "insert") where span.name == "operationA"`, + want: func(td ptrace.Traces) { + td.ResourceSpans().At(0).ScopeSpans().At(0).Spans().At(0).Attributes().PutStr("json_test", "pass") + }, + }, + { + statement: `limit(span.attributes, 0, []) where span.name == "operationA"`, + want: func(td ptrace.Traces) { + td.ResourceSpans().At(0).ScopeSpans().At(0).Spans().At(0).Attributes().RemoveIf(func(_ string, _ pcommon.Value) bool { 
return true }) + }, + }, + { + statement: `set(span.attributes["test"], Log(1)) where span.name == "operationA"`, + want: func(td ptrace.Traces) { + td.ResourceSpans().At(0).ScopeSpans().At(0).Spans().At(0).Attributes().PutDouble("test", 0.0) + }, + }, + } + + for _, tt := range tests { + t.Run(tt.statement, func(t *testing.T) { + td := constructTraces() + processor, err := NewProcessor([]common.ContextStatements{{Context: "", Statements: []string{tt.statement}}}, ottl.IgnoreError, componenttest.NewNopTelemetrySettings()) + assert.NoError(t, err) + + _, err = processor.ProcessTraces(context.Background(), td) + assert.NoError(t, err) + + exTd := constructTraces() + tt.want(exTd) + + assert.Equal(t, exTd, td) + }) + } +} + func Test_ProcessTraces_SpanEventContext(t *testing.T) { tests := []struct { statement string @@ -441,6 +823,36 @@ func Test_ProcessTraces_SpanEventContext(t *testing.T) { } } +func Test_ProcessTraces_InferredSpanEventContext(t *testing.T) { + tests := []struct { + statement string + want func(td ptrace.Traces) + }{ + { + statement: `set(spanevent.attributes["test"], "pass") where spanevent.name == "eventA"`, + want: func(td ptrace.Traces) { + td.ResourceSpans().At(0).ScopeSpans().At(0).Spans().At(0).Events().At(0).Attributes().PutStr("test", "pass") + }, + }, + } + + for _, tt := range tests { + t.Run(tt.statement, func(t *testing.T) { + td := constructTraces() + processor, err := NewProcessor([]common.ContextStatements{{Context: "", Statements: []string{tt.statement}}}, ottl.IgnoreError, componenttest.NewNopTelemetrySettings()) + assert.NoError(t, err) + + _, err = processor.ProcessTraces(context.Background(), td) + assert.NoError(t, err) + + exTd := constructTraces() + tt.want(exTd) + + assert.Equal(t, exTd, td) + }) + } +} + func Test_ProcessTraces_MixContext(t *testing.T) { tests := []struct { name string @@ -558,7 +970,7 @@ func Test_ProcessTraces_MixContext(t *testing.T) { } } -func Test_ProcessTraces_Error(t *testing.T) { +func 
Test_ProcessTraces_ErrorMode(t *testing.T) { tests := []struct { statement string context common.ContextID diff --git a/processor/transformprocessor/testdata/config.yaml b/processor/transformprocessor/testdata/config.yaml index 8cf295298e54..c327eeb3a2ef 100644 --- a/processor/transformprocessor/testdata/config.yaml +++ b/processor/transformprocessor/testdata/config.yaml @@ -118,3 +118,31 @@ transform/unknown_context: transform/unknown_error_mode: error_mode: test + +transform/structured_configuration_with_path_context: + trace_statements: + - context: span + statements: + - set(span.name, "bear") where span.attributes["http.path"] == "/animal" + metric_statements: + - context: metric + statements: + - set(metric.name, "bear") where resource.attributes["http.path"] == "/animal" + log_statements: + - context: log + statements: + - set(log.body, "bear") where log.attributes["http.path"] == "/animal" + +transform/structured_configuration_with_inferred_context: + trace_statements: + - statements: + - set(span.name, "bear") where span.attributes["http.path"] == "/animal" + - set(resource.attributes["name"], "bear") + metric_statements: + - statements: + - set(metric.name, "bear") where resource.attributes["http.path"] == "/animal" + - set(resource.attributes["name"], "bear") + log_statements: + - statements: + - set(log.body, "bear") where log.attributes["http.path"] == "/animal" + - set(resource.attributes["name"], "bear") From 231aa7f8cf7c3449b8cd353a3564a761a09d64bd Mon Sep 17 00:00:00 2001 From: Alex Boten <223565+codeboten@users.noreply.github.com> Date: Wed, 22 Jan 2025 12:18:45 -0800 Subject: [PATCH 03/12] [cmd/opampsupervisor] report own metrics via OTLP (#37346) --- .../codeboten_opampsupervisor-emit-otlp.yaml | 27 ++++++++++ .../supervisor/supervisor_test.go | 53 +++++-------------- .../supervisor/templates/owntelemetry.yaml | 23 +++----- 3 files changed, 47 insertions(+), 56 deletions(-) create mode 100644 .chloggen/codeboten_opampsupervisor-emit-otlp.yaml 
diff --git a/.chloggen/codeboten_opampsupervisor-emit-otlp.yaml b/.chloggen/codeboten_opampsupervisor-emit-otlp.yaml new file mode 100644 index 000000000000..8b1d4227e16a --- /dev/null +++ b/.chloggen/codeboten_opampsupervisor-emit-otlp.yaml @@ -0,0 +1,27 @@ +# Use this changelog template to create an entry for release notes. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: enhancement + +# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver) +component: opampsupervisor + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: report own metrics via OTLP instead of prometheus receiver + +# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. +issues: [37346] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: + +# If your change doesn't affect end users or the exported elements of any package, +# you should instead start your pull request title with [chore] or use the "Skip Changelog" label. +# Optional: The change log or logs in which this entry should be included. +# e.g. '[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. 
+# Default: '[user]' +change_logs: [] diff --git a/cmd/opampsupervisor/supervisor/supervisor_test.go b/cmd/opampsupervisor/supervisor/supervisor_test.go index fcd132a16da6..0ea373a32778 100644 --- a/cmd/opampsupervisor/supervisor/supervisor_test.go +++ b/cmd/opampsupervisor/supervisor/supervisor_test.go @@ -364,7 +364,6 @@ func Test_onMessage(t *testing.T) { require.Equal(t, newID, s.persistentState.InstanceID) t.Log(s.cfgState.Load()) mergedCfg := s.cfgState.Load().(*configState).mergedConfig - require.Contains(t, mergedCfg, "prometheus/own_metrics") require.Contains(t, mergedCfg, newID.String()) require.Contains(t, mergedCfg, "runtime.type: test") }) @@ -1129,27 +1128,16 @@ func TestSupervisor_setupOwnMetrics(t *testing.T) { DestinationEndpoint: "localhost", }) - expectedOwnMetricsSection := `receivers: - # Collect own metrics - prometheus/own_metrics: - config: - scrape_configs: - - job_name: 'otel-collector' - scrape_interval: 10s - static_configs: - - targets: ['0.0.0.0:55555'] -exporters: - otlphttp/own_metrics: - metrics_endpoint: "localhost" - + expectedOwnMetricsSection := ` service: telemetry: metrics: - address: ":55555" - pipelines: - metrics/own_metrics: - receivers: [prometheus/own_metrics] - exporters: [otlphttp/own_metrics] + readers: + - periodic: + exporter: + otlp: + protocol: http/protobuf + endpoint: localhost ` assert.True(t, configChanged) @@ -1209,10 +1197,7 @@ func TestSupervisor_loadAndWriteInitialMergedConfig(t *testing.T) { debug/remote: ` - const expectedMergedConfig = `exporters: - otlphttp/own_metrics: - metrics_endpoint: localhost -extensions: + const expectedMergedConfig = `extensions: health_check: endpoint: "" opamp: @@ -1226,30 +1211,20 @@ extensions: insecure: true receiver: debug/remote: null -receivers: - prometheus/own_metrics: - config: - scrape_configs: - - job_name: otel-collector - scrape_interval: 10s - static_configs: - - targets: - - 0.0.0.0:55555 service: extensions: - health_check - opamp - pipelines: - 
metrics/own_metrics: - exporters: - - otlphttp/own_metrics - receivers: - - prometheus/own_metrics telemetry: logs: encoding: json metrics: - address: :55555 + readers: + - periodic: + exporter: + otlp: + endpoint: localhost + protocol: http/protobuf resource: service.name: otelcol ` diff --git a/cmd/opampsupervisor/supervisor/templates/owntelemetry.yaml b/cmd/opampsupervisor/supervisor/templates/owntelemetry.yaml index f46851298780..976e7d3f4037 100644 --- a/cmd/opampsupervisor/supervisor/templates/owntelemetry.yaml +++ b/cmd/opampsupervisor/supervisor/templates/owntelemetry.yaml @@ -1,21 +1,10 @@ -receivers: - # Collect own metrics - prometheus/own_metrics: - config: - scrape_configs: - - job_name: 'otel-collector' - scrape_interval: 10s - static_configs: - - targets: ['0.0.0.0:{{.PrometheusPort}}'] -exporters: - otlphttp/own_metrics: - metrics_endpoint: "{{.MetricsEndpoint}}" service: telemetry: metrics: - address: ":{{.PrometheusPort}}" - pipelines: - metrics/own_metrics: - receivers: [prometheus/own_metrics] - exporters: [otlphttp/own_metrics] + readers: + - periodic: + exporter: + otlp: + protocol: http/protobuf + endpoint: {{.MetricsEndpoint}} From 71e5ad51c8e4ffc432bd3caaedcbfb72bc9c5648 Mon Sep 17 00:00:00 2001 From: Antoine Toulme Date: Wed, 22 Jan 2025 14:14:54 -0800 Subject: [PATCH 04/12] [chore] fix allowlist (#37423) #### Description @jerrytfleung is now a member of the OpenTelemetry organization, and the allowlist check fails as he should not be present in the list anymore. Congrats to @jerrytfleung for his new membership! 
--- cmd/githubgen/allowlist.txt | 1 - 1 file changed, 1 deletion(-) diff --git a/cmd/githubgen/allowlist.txt b/cmd/githubgen/allowlist.txt index 282f0f412d77..b5d97e94fa7c 100644 --- a/cmd/githubgen/allowlist.txt +++ b/cmd/githubgen/allowlist.txt @@ -10,7 +10,6 @@ harishbohara11 heitorganzeli Hemansh31 jcreixell -jerrytfleung jriguera KiranmayiB m1rp From 23117abe094a1ac61515ad153163fb156e19995f Mon Sep 17 00:00:00 2001 From: Tigran Najaryan <4194920+tigrannajaryan@users.noreply.github.com> Date: Wed, 22 Jan 2025 18:42:28 -0500 Subject: [PATCH 05/12] Add STEF exporter (#37400) Resolves https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/37352 This is the *First PR* done according to the requirements listed at https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/CONTRIBUTING.md#adding-new-components I intentionally left the Config barebones. It is possible we will add more config options in the future but I don't want to do it prematurely. Subsequent PRs will add the implementation and will clarify how it works in the README.md. 
--------- Co-authored-by: Antoine Toulme Co-authored-by: Dmitry Anoshin --- .github/CODEOWNERS | 1 + .github/ISSUE_TEMPLATE/bug_report.yaml | 1 + .github/ISSUE_TEMPLATE/feature_request.yaml | 1 + .github/ISSUE_TEMPLATE/other.yaml | 1 + .github/ISSUE_TEMPLATE/unmaintained.yaml | 1 + exporter/stefexporter/Makefile | 1 + exporter/stefexporter/README.md | 15 ++ exporter/stefexporter/config.go | 57 ++++++ exporter/stefexporter/config_test.go | 18 ++ exporter/stefexporter/doc.go | 7 + exporter/stefexporter/exporter.go | 30 +++ exporter/stefexporter/exporter_test.go | 4 + exporter/stefexporter/factory.go | 46 +++++ exporter/stefexporter/factory_test.go | 29 +++ .../stefexporter/generated_component_test.go | 134 ++++++++++++ .../stefexporter/generated_package_test.go | 13 ++ exporter/stefexporter/go.mod | 77 +++++++ exporter/stefexporter/go.sum | 190 ++++++++++++++++++ .../internal/metadata/generated_status.go | 16 ++ exporter/stefexporter/metadata.yaml | 13 ++ versions.yaml | 1 + 21 files changed, 656 insertions(+) create mode 100644 exporter/stefexporter/Makefile create mode 100644 exporter/stefexporter/README.md create mode 100644 exporter/stefexporter/config.go create mode 100644 exporter/stefexporter/config_test.go create mode 100644 exporter/stefexporter/doc.go create mode 100644 exporter/stefexporter/exporter.go create mode 100644 exporter/stefexporter/exporter_test.go create mode 100644 exporter/stefexporter/factory.go create mode 100644 exporter/stefexporter/factory_test.go create mode 100644 exporter/stefexporter/generated_component_test.go create mode 100644 exporter/stefexporter/generated_package_test.go create mode 100644 exporter/stefexporter/go.mod create mode 100644 exporter/stefexporter/go.sum create mode 100644 exporter/stefexporter/internal/metadata/generated_status.go create mode 100644 exporter/stefexporter/metadata.yaml diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 6ea4bb33db3b..ca402aeb52ce 100644 --- a/.github/CODEOWNERS +++ 
b/.github/CODEOWNERS @@ -80,6 +80,7 @@ exporter/sapmexporter/ @open-telemetry exporter/sentryexporter/ @open-telemetry/collector-contrib-approvers @AbhiPrasad exporter/signalfxexporter/ @open-telemetry/collector-contrib-approvers @dmitryax @crobert-1 exporter/splunkhecexporter/ @open-telemetry/collector-contrib-approvers @atoulme @dmitryax +exporter/stefexporter/ @open-telemetry/collector-contrib-approvers @tigrannajaryan @dmitryax exporter/sumologicexporter/ @open-telemetry/collector-contrib-approvers @rnishtala-sumo @chan-tim-sumo exporter/syslogexporter/ @open-telemetry/collector-contrib-approvers @kasia-kujawa @rnishtala-sumo @andrzej-stencel exporter/tencentcloudlogserviceexporter/ @open-telemetry/collector-contrib-approvers @wgliang diff --git a/.github/ISSUE_TEMPLATE/bug_report.yaml b/.github/ISSUE_TEMPLATE/bug_report.yaml index 920ad09c50af..580e28f0cd1b 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.yaml +++ b/.github/ISSUE_TEMPLATE/bug_report.yaml @@ -81,6 +81,7 @@ body: - exporter/sentry - exporter/signalfx - exporter/splunkhec + - exporter/stef - exporter/sumologic - exporter/syslog - exporter/tencentcloudlogservice diff --git a/.github/ISSUE_TEMPLATE/feature_request.yaml b/.github/ISSUE_TEMPLATE/feature_request.yaml index 0bcdcf6429d3..64ba734ab373 100644 --- a/.github/ISSUE_TEMPLATE/feature_request.yaml +++ b/.github/ISSUE_TEMPLATE/feature_request.yaml @@ -75,6 +75,7 @@ body: - exporter/sentry - exporter/signalfx - exporter/splunkhec + - exporter/stef - exporter/sumologic - exporter/syslog - exporter/tencentcloudlogservice diff --git a/.github/ISSUE_TEMPLATE/other.yaml b/.github/ISSUE_TEMPLATE/other.yaml index 945d7cf6a8de..a7ab6232949e 100644 --- a/.github/ISSUE_TEMPLATE/other.yaml +++ b/.github/ISSUE_TEMPLATE/other.yaml @@ -75,6 +75,7 @@ body: - exporter/sentry - exporter/signalfx - exporter/splunkhec + - exporter/stef - exporter/sumologic - exporter/syslog - exporter/tencentcloudlogservice diff --git a/.github/ISSUE_TEMPLATE/unmaintained.yaml 
b/.github/ISSUE_TEMPLATE/unmaintained.yaml index c816002a0cc6..120bf2c250d6 100644 --- a/.github/ISSUE_TEMPLATE/unmaintained.yaml +++ b/.github/ISSUE_TEMPLATE/unmaintained.yaml @@ -80,6 +80,7 @@ body: - exporter/sentry - exporter/signalfx - exporter/splunkhec + - exporter/stef - exporter/sumologic - exporter/syslog - exporter/tencentcloudlogservice diff --git a/exporter/stefexporter/Makefile b/exporter/stefexporter/Makefile new file mode 100644 index 000000000000..c1496226e590 --- /dev/null +++ b/exporter/stefexporter/Makefile @@ -0,0 +1 @@ +include ../../Makefile.Common \ No newline at end of file diff --git a/exporter/stefexporter/README.md b/exporter/stefexporter/README.md new file mode 100644 index 000000000000..fbbd24cd03bb --- /dev/null +++ b/exporter/stefexporter/README.md @@ -0,0 +1,15 @@ +# STEF Exporter + + +| Status | | +| ------------- |-----------| +| Stability | [development]: metrics | +| Distributions | [] | +| Issues | [![Open issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aopen%20label%3Aexporter%2Fstef%20&label=open&color=orange&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aopen+is%3Aissue+label%3Aexporter%2Fstef) [![Closed issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aclosed%20label%3Aexporter%2Fstef%20&label=closed&color=blue&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aclosed+is%3Aissue+label%3Aexporter%2Fstef) | +| [Code Owners](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/CONTRIBUTING.md#becoming-a-code-owner) | [@tigrannajaryan](https://www.github.com/tigrannajaryan), [@dmitryax](https://www.github.com/dmitryax) | + +[development]: https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/component-stability.md#development + + +Export 
data via gRPC using +[Otel/STEF format](https://github.com/splunk/stef/tree/main/go/otel) format. diff --git a/exporter/stefexporter/config.go b/exporter/stefexporter/config.go new file mode 100644 index 000000000000..76d273150f43 --- /dev/null +++ b/exporter/stefexporter/config.go @@ -0,0 +1,57 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package stefexporter // import "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/stefexporter" + +import ( + "errors" + "fmt" + "net" + "regexp" + "strconv" + "strings" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/config/configgrpc" +) + +// Config defines configuration for logging exporter. +type Config struct { + configgrpc.ClientConfig `mapstructure:",squash"` // squash ensures fields are correctly decoded in embedded struct. +} + +var _ component.Config = (*Config)(nil) + +// Validate checks if the exporter configuration is valid +func (c *Config) Validate() error { + endpoint := c.sanitizedEndpoint() + if endpoint == "" { + return errors.New(`requires a non-empty "endpoint"`) + } + + // Validate that the port is in the address + _, port, err := net.SplitHostPort(endpoint) + if err != nil { + return err + } + if _, err := strconv.Atoi(port); err != nil { + return fmt.Errorf(`invalid port "%s"`, port) + } + + return nil +} + +// TODO: move this to configgrpc.ClientConfig to avoid this code duplication (copied from OTLP exporter). 
+func (c *Config) sanitizedEndpoint() string { + switch { + case strings.HasPrefix(c.Endpoint, "http://"): + return strings.TrimPrefix(c.Endpoint, "http://") + case strings.HasPrefix(c.Endpoint, "https://"): + return strings.TrimPrefix(c.Endpoint, "https://") + case strings.HasPrefix(c.Endpoint, "dns://"): + r := regexp.MustCompile("^dns://[/]?") + return r.ReplaceAllString(c.Endpoint, "") + default: + return c.Endpoint + } +} diff --git a/exporter/stefexporter/config_test.go b/exporter/stefexporter/config_test.go new file mode 100644 index 000000000000..308fad292ae2 --- /dev/null +++ b/exporter/stefexporter/config_test.go @@ -0,0 +1,18 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package stefexporter + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "go.opentelemetry.io/collector/confmap" +) + +func TestUnmarshalDefaultConfig(t *testing.T) { + factory := NewFactory() + cfg := factory.CreateDefaultConfig() + assert.NoError(t, confmap.New().Unmarshal(&cfg)) + assert.Equal(t, factory.CreateDefaultConfig(), cfg) +} diff --git a/exporter/stefexporter/doc.go b/exporter/stefexporter/doc.go new file mode 100644 index 000000000000..79582d0c006c --- /dev/null +++ b/exporter/stefexporter/doc.go @@ -0,0 +1,7 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +//go:generate mdatagen metadata.yaml + +// Package stefexporter implements an exporter that sends data Otel/STEF format. 
+package stefexporter // import "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/stefexporter" diff --git a/exporter/stefexporter/exporter.go b/exporter/stefexporter/exporter.go new file mode 100644 index 000000000000..8c293ae06671 --- /dev/null +++ b/exporter/stefexporter/exporter.go @@ -0,0 +1,30 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package stefexporter // import "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/stefexporter" + +import ( + "context" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/pdata/pmetric" + "go.uber.org/zap" +) + +type stefExporter struct{} + +func newStefExporter(_ *zap.Logger, _ *Config) *stefExporter { + return &stefExporter{} +} + +func (s *stefExporter) Start(_ context.Context, _ component.Host) error { + return nil +} + +func (s *stefExporter) Shutdown(_ context.Context) error { + return nil +} + +func (s *stefExporter) pushMetrics(_ context.Context, _ pmetric.Metrics) error { + return nil +} diff --git a/exporter/stefexporter/exporter_test.go b/exporter/stefexporter/exporter_test.go new file mode 100644 index 000000000000..536671c0a9ba --- /dev/null +++ b/exporter/stefexporter/exporter_test.go @@ -0,0 +1,4 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package stefexporter // import "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/stefexporter" diff --git a/exporter/stefexporter/factory.go b/exporter/stefexporter/factory.go new file mode 100644 index 000000000000..c92174c94ccb --- /dev/null +++ b/exporter/stefexporter/factory.go @@ -0,0 +1,46 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package stefexporter // import "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/stefexporter" + +import ( + "context" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/consumer" + 
"go.opentelemetry.io/collector/exporter" + "go.opentelemetry.io/collector/exporter/exporterhelper" + + "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/stefexporter/internal/metadata" +) + +// The value of "type" key in configuration. +var componentType = component.MustNewType("stef") + +// NewFactory creates a factory for Debug exporter +func NewFactory() exporter.Factory { + return exporter.NewFactory( + componentType, + createDefaultConfig, + exporter.WithMetrics(createMetricsExporter, metadata.MetricsStability), + ) +} + +func createDefaultConfig() component.Config { + return &Config{} +} + +func createMetricsExporter(ctx context.Context, set exporter.Settings, config component.Config) ( + exporter.Metrics, error, +) { + cfg := config.(*Config) + stefexporter := newStefExporter(set.TelemetrySettings.Logger, cfg) + return exporterhelper.NewMetrics( + ctx, set, config, + stefexporter.pushMetrics, + exporterhelper.WithStart(stefexporter.Start), + exporterhelper.WithShutdown(stefexporter.Shutdown), + exporterhelper.WithCapabilities(consumer.Capabilities{MutatesData: false}), + exporterhelper.WithTimeout(exporterhelper.TimeoutConfig{Timeout: 0}), + ) +} diff --git a/exporter/stefexporter/factory_test.go b/exporter/stefexporter/factory_test.go new file mode 100644 index 000000000000..fa2e10685565 --- /dev/null +++ b/exporter/stefexporter/factory_test.go @@ -0,0 +1,29 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package stefexporter + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + "go.opentelemetry.io/collector/component/componenttest" + "go.opentelemetry.io/collector/exporter/exportertest" +) + +func TestCreateDefaultConfig(t *testing.T) { + factory := NewFactory() + cfg := factory.CreateDefaultConfig() + assert.NotNil(t, cfg, "failed to create default config") + assert.NoError(t, componenttest.CheckConfigStruct(cfg)) +} + +func TestCreateMetricsExporter(t *testing.T) { + 
factory := NewFactory() + cfg := factory.CreateDefaultConfig() + + me, err := factory.CreateMetrics(context.Background(), exportertest.NewNopSettings(), cfg) + assert.NoError(t, err) + assert.NotNil(t, me) +} diff --git a/exporter/stefexporter/generated_component_test.go b/exporter/stefexporter/generated_component_test.go new file mode 100644 index 000000000000..64da76f8b782 --- /dev/null +++ b/exporter/stefexporter/generated_component_test.go @@ -0,0 +1,134 @@ +// Code generated by mdatagen. DO NOT EDIT. + +package stefexporter + +import ( + "context" + "testing" + "time" + + "github.com/stretchr/testify/require" + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/component/componenttest" + "go.opentelemetry.io/collector/confmap/confmaptest" + "go.opentelemetry.io/collector/exporter" + "go.opentelemetry.io/collector/exporter/exportertest" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/plog" + "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/pdata/ptrace" +) + +func TestComponentFactoryType(t *testing.T) { + require.Equal(t, "stef", NewFactory().Type().String()) +} + +func TestComponentConfigStruct(t *testing.T) { + require.NoError(t, componenttest.CheckConfigStruct(NewFactory().CreateDefaultConfig())) +} + +func TestComponentLifecycle(t *testing.T) { + factory := NewFactory() + + tests := []struct { + createFn func(ctx context.Context, set exporter.Settings, cfg component.Config) (component.Component, error) + name string + }{ + + { + name: "metrics", + createFn: func(ctx context.Context, set exporter.Settings, cfg component.Config) (component.Component, error) { + return factory.CreateMetrics(ctx, set, cfg) + }, + }, + } + + cm, err := confmaptest.LoadConf("metadata.yaml") + require.NoError(t, err) + cfg := factory.CreateDefaultConfig() + sub, err := cm.Sub("tests::config") + require.NoError(t, err) + require.NoError(t, sub.Unmarshal(&cfg)) + + for _, tt := range tests 
{ + t.Run(tt.name+"-shutdown", func(t *testing.T) { + c, err := tt.createFn(context.Background(), exportertest.NewNopSettings(), cfg) + require.NoError(t, err) + err = c.Shutdown(context.Background()) + require.NoError(t, err) + }) + t.Run(tt.name+"-lifecycle", func(t *testing.T) { + c, err := tt.createFn(context.Background(), exportertest.NewNopSettings(), cfg) + require.NoError(t, err) + host := componenttest.NewNopHost() + err = c.Start(context.Background(), host) + require.NoError(t, err) + require.NotPanics(t, func() { + switch tt.name { + case "logs": + e, ok := c.(exporter.Logs) + require.True(t, ok) + logs := generateLifecycleTestLogs() + if !e.Capabilities().MutatesData { + logs.MarkReadOnly() + } + err = e.ConsumeLogs(context.Background(), logs) + case "metrics": + e, ok := c.(exporter.Metrics) + require.True(t, ok) + metrics := generateLifecycleTestMetrics() + if !e.Capabilities().MutatesData { + metrics.MarkReadOnly() + } + err = e.ConsumeMetrics(context.Background(), metrics) + case "traces": + e, ok := c.(exporter.Traces) + require.True(t, ok) + traces := generateLifecycleTestTraces() + if !e.Capabilities().MutatesData { + traces.MarkReadOnly() + } + err = e.ConsumeTraces(context.Background(), traces) + } + }) + + err = c.Shutdown(context.Background()) + require.NoError(t, err) + }) + } +} + +func generateLifecycleTestLogs() plog.Logs { + logs := plog.NewLogs() + rl := logs.ResourceLogs().AppendEmpty() + rl.Resource().Attributes().PutStr("resource", "R1") + l := rl.ScopeLogs().AppendEmpty().LogRecords().AppendEmpty() + l.Body().SetStr("test log message") + l.SetTimestamp(pcommon.NewTimestampFromTime(time.Now())) + return logs +} + +func generateLifecycleTestMetrics() pmetric.Metrics { + metrics := pmetric.NewMetrics() + rm := metrics.ResourceMetrics().AppendEmpty() + rm.Resource().Attributes().PutStr("resource", "R1") + m := rm.ScopeMetrics().AppendEmpty().Metrics().AppendEmpty() + m.SetName("test_metric") + dp := 
m.SetEmptyGauge().DataPoints().AppendEmpty() + dp.Attributes().PutStr("test_attr", "value_1") + dp.SetIntValue(123) + dp.SetTimestamp(pcommon.NewTimestampFromTime(time.Now())) + return metrics +} + +func generateLifecycleTestTraces() ptrace.Traces { + traces := ptrace.NewTraces() + rs := traces.ResourceSpans().AppendEmpty() + rs.Resource().Attributes().PutStr("resource", "R1") + span := rs.ScopeSpans().AppendEmpty().Spans().AppendEmpty() + span.Attributes().PutStr("test_attr", "value_1") + span.SetName("test_span") + span.SetStartTimestamp(pcommon.NewTimestampFromTime(time.Now().Add(-1 * time.Second))) + span.SetEndTimestamp(pcommon.NewTimestampFromTime(time.Now())) + return traces +} diff --git a/exporter/stefexporter/generated_package_test.go b/exporter/stefexporter/generated_package_test.go new file mode 100644 index 000000000000..d6c7b549b6d3 --- /dev/null +++ b/exporter/stefexporter/generated_package_test.go @@ -0,0 +1,13 @@ +// Code generated by mdatagen. DO NOT EDIT. + +package stefexporter + +import ( + "testing" + + "go.uber.org/goleak" +) + +func TestMain(m *testing.M) { + goleak.VerifyTestMain(m) +} diff --git a/exporter/stefexporter/go.mod b/exporter/stefexporter/go.mod new file mode 100644 index 000000000000..88e2ed268a9a --- /dev/null +++ b/exporter/stefexporter/go.mod @@ -0,0 +1,77 @@ +module github.com/open-telemetry/opentelemetry-collector-contrib/exporter/stefexporter + +go 1.22.0 + +require ( + github.com/stretchr/testify v1.10.0 + go.opentelemetry.io/collector/component v0.118.1-0.20250121185328-fbefb22cc2b3 + go.opentelemetry.io/collector/component/componenttest v0.118.1-0.20250121185328-fbefb22cc2b3 + go.opentelemetry.io/collector/config/configgrpc v0.118.1-0.20250121185328-fbefb22cc2b3 + go.opentelemetry.io/collector/confmap v1.24.1-0.20250121185328-fbefb22cc2b3 + go.opentelemetry.io/collector/consumer v1.24.1-0.20250121185328-fbefb22cc2b3 + go.opentelemetry.io/collector/exporter v0.118.1-0.20250121185328-fbefb22cc2b3 + 
go.opentelemetry.io/collector/exporter/exportertest v0.118.1-0.20250121185328-fbefb22cc2b3 + go.opentelemetry.io/collector/pdata v1.24.1-0.20250121185328-fbefb22cc2b3 + go.uber.org/goleak v1.3.0 + go.uber.org/zap v1.27.0 +) + +require ( + github.com/cenkalti/backoff/v4 v4.3.0 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/fsnotify/fsnotify v1.8.0 // indirect + github.com/go-logr/logr v1.4.2 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/go-viper/mapstructure/v2 v2.2.1 // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/golang/snappy v0.0.4 // indirect + github.com/google/uuid v1.6.0 // indirect + github.com/hashicorp/go-version v1.7.0 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/klauspost/compress v1.17.9 // indirect + github.com/knadh/koanf/maps v0.1.1 // indirect + github.com/knadh/koanf/providers/confmap v0.1.0 // indirect + github.com/knadh/koanf/v2 v2.1.2 // indirect + github.com/mitchellh/copystructure v1.2.0 // indirect + github.com/mitchellh/reflectwalk v1.0.2 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/mostynb/go-grpc-compression v1.2.3 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + go.opentelemetry.io/auto/sdk v1.1.0 // indirect + go.opentelemetry.io/collector/client v1.24.1-0.20250121185328-fbefb22cc2b3 // indirect + go.opentelemetry.io/collector/config/configauth v0.118.1-0.20250121185328-fbefb22cc2b3 // indirect + go.opentelemetry.io/collector/config/configcompression v1.24.1-0.20250121185328-fbefb22cc2b3 // indirect + go.opentelemetry.io/collector/config/confignet v1.24.1-0.20250121185328-fbefb22cc2b3 // indirect + go.opentelemetry.io/collector/config/configopaque v1.24.1-0.20250121185328-fbefb22cc2b3 // indirect + go.opentelemetry.io/collector/config/configretry v1.24.1-0.20250121185328-fbefb22cc2b3 // indirect + 
go.opentelemetry.io/collector/config/configtelemetry v0.118.1-0.20250121185328-fbefb22cc2b3 // indirect + go.opentelemetry.io/collector/config/configtls v1.24.1-0.20250121185328-fbefb22cc2b3 // indirect + go.opentelemetry.io/collector/consumer/consumererror v0.118.1-0.20250121185328-fbefb22cc2b3 // indirect + go.opentelemetry.io/collector/consumer/consumertest v0.118.1-0.20250121185328-fbefb22cc2b3 // indirect + go.opentelemetry.io/collector/consumer/xconsumer v0.118.1-0.20250121185328-fbefb22cc2b3 // indirect + go.opentelemetry.io/collector/exporter/xexporter v0.118.1-0.20250121185328-fbefb22cc2b3 // indirect + go.opentelemetry.io/collector/extension v0.118.1-0.20250121185328-fbefb22cc2b3 // indirect + go.opentelemetry.io/collector/extension/auth v0.118.1-0.20250121185328-fbefb22cc2b3 // indirect + go.opentelemetry.io/collector/extension/xextension v0.118.1-0.20250121185328-fbefb22cc2b3 // indirect + go.opentelemetry.io/collector/featuregate v1.24.1-0.20250121185328-fbefb22cc2b3 // indirect + go.opentelemetry.io/collector/pdata/pprofile v0.118.1-0.20250121185328-fbefb22cc2b3 // indirect + go.opentelemetry.io/collector/pipeline v0.118.1-0.20250121185328-fbefb22cc2b3 // indirect + go.opentelemetry.io/collector/receiver v0.118.1-0.20250121185328-fbefb22cc2b3 // indirect + go.opentelemetry.io/collector/receiver/receivertest v0.118.1-0.20250121185328-fbefb22cc2b3 // indirect + go.opentelemetry.io/collector/receiver/xreceiver v0.118.1-0.20250121185328-fbefb22cc2b3 // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.56.0 // indirect + go.opentelemetry.io/otel v1.34.0 // indirect + go.opentelemetry.io/otel/metric v1.34.0 // indirect + go.opentelemetry.io/otel/sdk v1.34.0 // indirect + go.opentelemetry.io/otel/sdk/metric v1.34.0 // indirect + go.opentelemetry.io/otel/trace v1.34.0 // indirect + go.uber.org/multierr v1.11.0 // indirect + golang.org/x/net v0.33.0 // indirect + golang.org/x/sys v0.29.0 // indirect + golang.org/x/text 
v0.21.0 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28 // indirect + google.golang.org/grpc v1.69.4 // indirect + google.golang.org/protobuf v1.36.3 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect +) diff --git a/exporter/stefexporter/go.sum b/exporter/stefexporter/go.sum new file mode 100644 index 000000000000..e9e754e962be --- /dev/null +++ b/exporter/stefexporter/go.sum @@ -0,0 +1,190 @@ +github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= +github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/fsnotify/fsnotify v1.8.0 h1:dAwr6QBTBZIkG8roQaJjGof0pp0EeF+tNV7YBP3F/8M= +github.com/fsnotify/fsnotify v1.8.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-viper/mapstructure/v2 v2.2.1 h1:ZAaOCxANMuZx5RCeg0mBdEZk7DZasvvZIxtHqx8aGss= +github.com/go-viper/mapstructure/v2 v2.2.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod 
h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= +github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY= +github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA= +github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= +github.com/knadh/koanf/maps v0.1.1 h1:G5TjmUh2D7G2YWf5SQQqSiHRJEjaicvU0KpypqB3NIs= +github.com/knadh/koanf/maps v0.1.1/go.mod h1:npD/QZY3V6ghQDdcQzl1W4ICNVTkohC8E73eI2xW4yI= +github.com/knadh/koanf/providers/confmap v0.1.0 h1:gOkxhHkemwG4LezxxN8DMOFopOPghxRVp7JbIvdvqzU= +github.com/knadh/koanf/providers/confmap v0.1.0/go.mod h1:2uLhxQzJnyHKfxG927awZC7+fyHFdQkd697K4MdLnIU= +github.com/knadh/koanf/v2 v2.1.2 h1:I2rtLRqXRy1p01m/utEtpZSSA6dcJbgGVuE27kW2PzQ= +github.com/knadh/koanf/v2 v2.1.2/go.mod h1:Gphfaen0q1Fc1HTgJgSTC4oRX9R2R5ErYMZJy8fLJBo= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= 
+github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= +github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= +github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= +github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/mostynb/go-grpc-compression v1.2.3 h1:42/BKWMy0KEJGSdWvzqIyOZ95YcR9mLPqKctH7Uo//I= +github.com/mostynb/go-grpc-compression v1.2.3/go.mod h1:AghIxF3P57umzqM9yz795+y1Vjs47Km/Y2FE6ouQ7Lg= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= +github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= 
+github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= +go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= +go.opentelemetry.io/collector/client v1.24.1-0.20250121185328-fbefb22cc2b3 h1:MxzfNtItYodclGVQDLzdyBaKixbqEKC2sPGxTiY0uEE= +go.opentelemetry.io/collector/client v1.24.1-0.20250121185328-fbefb22cc2b3/go.mod h1:I5195HMWPseUSVEbNaEgMbz8rzx11T59I2YIkJQ2jrE= +go.opentelemetry.io/collector/component v0.118.1-0.20250121185328-fbefb22cc2b3 h1:ODfDW9siyGYEvEv1+oKf0abnpYbIsMwAlXuZMCUFPXw= +go.opentelemetry.io/collector/component v0.118.1-0.20250121185328-fbefb22cc2b3/go.mod h1:/fqrkzmOXsqm4boZaVtxi5YIz39/i8K8Wqd9oryz8Iw= +go.opentelemetry.io/collector/component/componenttest v0.118.1-0.20250121185328-fbefb22cc2b3 h1:ZnCUlmJ6ZqG+pL1fYrEXmg2FG+RxiSay5Fyxa0i79dY= +go.opentelemetry.io/collector/component/componenttest v0.118.1-0.20250121185328-fbefb22cc2b3/go.mod h1:eug78n4rxt5hdCSDWZ50wpYZXAl0ho/w6IsNtVZzQII= +go.opentelemetry.io/collector/config/configauth v0.118.1-0.20250121185328-fbefb22cc2b3 h1:FrH9pOMBYyhYnMCeINzeeWeT/RdcUHUnpGWooak4apM= +go.opentelemetry.io/collector/config/configauth v0.118.1-0.20250121185328-fbefb22cc2b3/go.mod h1:4w14UrrB+We1k+gt3/3+34SWKLKQdGDPQ/lpsL0tiHc= +go.opentelemetry.io/collector/config/configcompression v1.24.1-0.20250121185328-fbefb22cc2b3 h1:dJzzLwFqU/j3VHoaJetgUlPOzrZPtg9zUGhKVsM9WUo= +go.opentelemetry.io/collector/config/configcompression v1.24.1-0.20250121185328-fbefb22cc2b3/go.mod h1:LvYG00tbPTv0NOLoZN0wXq1F5thcxvukO8INq7xyfWU= +go.opentelemetry.io/collector/config/configgrpc v0.118.1-0.20250121185328-fbefb22cc2b3 h1:tptVdunGC+0y1KmEYvmgmLRR8Jam4y1KtfYRVoyLw5U= 
+go.opentelemetry.io/collector/config/configgrpc v0.118.1-0.20250121185328-fbefb22cc2b3/go.mod h1:R//tIJknJigDNZhuDmKiUpPrgCZ79HPKVdq0Jub3fkw= +go.opentelemetry.io/collector/config/confignet v1.24.1-0.20250121185328-fbefb22cc2b3 h1:z2wSQoQlbMfqEguwKl2NFqD3dhT9wIeRENZmadadvmg= +go.opentelemetry.io/collector/config/confignet v1.24.1-0.20250121185328-fbefb22cc2b3/go.mod h1:ZppUH1hgUJOubawEsxsQ9MzEYFytqo2GnVSS7d4CVxc= +go.opentelemetry.io/collector/config/configopaque v1.24.1-0.20250121185328-fbefb22cc2b3 h1:Oi9hXd7YIf3wa4F9SXeKwYyOkB+DRhfZgHjs44Z6jyQ= +go.opentelemetry.io/collector/config/configopaque v1.24.1-0.20250121185328-fbefb22cc2b3/go.mod h1:sW0t0iI/VfRL9VYX7Ik6XzVgPcR+Y5kejTLsYcMyDWs= +go.opentelemetry.io/collector/config/configretry v1.24.1-0.20250121185328-fbefb22cc2b3 h1:HpwrcWtvUjIIlWVseYvNemnmwtAGHLFTzfoTs2fQ2eY= +go.opentelemetry.io/collector/config/configretry v1.24.1-0.20250121185328-fbefb22cc2b3/go.mod h1:cleBc9I0DIWpTiiHfu9v83FUaCTqcPXmebpLxjEIqro= +go.opentelemetry.io/collector/config/configtelemetry v0.118.1-0.20250121185328-fbefb22cc2b3 h1:AOaJFxyz+7Zlh2AbZd7vu2gYA5a4rSItbwAS7GYAaO4= +go.opentelemetry.io/collector/config/configtelemetry v0.118.1-0.20250121185328-fbefb22cc2b3/go.mod h1:SlBEwQg0qly75rXZ6W1Ig8jN25KBVBkFIIAUI1GiAAE= +go.opentelemetry.io/collector/config/configtls v1.24.1-0.20250121185328-fbefb22cc2b3 h1:zeC8GoDbDxtUbEvp8sPCXONuMxqWQPowXEzUZySxSgA= +go.opentelemetry.io/collector/config/configtls v1.24.1-0.20250121185328-fbefb22cc2b3/go.mod h1:d0OdfkbuYEMYDBJLSbpH0wPI29lmSiFT3geqh/ygF2k= +go.opentelemetry.io/collector/confmap v1.24.1-0.20250121185328-fbefb22cc2b3 h1:bYJCjMGjEi0hFpVsdkg20ri5ZGhG7VfrlPjdW7FhclI= +go.opentelemetry.io/collector/confmap v1.24.1-0.20250121185328-fbefb22cc2b3/go.mod h1:Rrhs+MWoaP6AswZp+ReQ2VO9dfOfcUjdjiSHBsG+nec= +go.opentelemetry.io/collector/consumer v1.24.1-0.20250121185328-fbefb22cc2b3 h1:rMGS7YpPjLWbykAQNoBZhTZ8OONKSmnewCFggZXMPmg= +go.opentelemetry.io/collector/consumer 
v1.24.1-0.20250121185328-fbefb22cc2b3/go.mod h1:YyTWeyBUYlVi983ylJAY5qHnCajq67on3A59OpS6A/I= +go.opentelemetry.io/collector/consumer/consumererror v0.118.1-0.20250121185328-fbefb22cc2b3 h1:wVb72DufdN0fQoScGeK7ByM5GTf0BkdTA4ZtKOQg+RI= +go.opentelemetry.io/collector/consumer/consumererror v0.118.1-0.20250121185328-fbefb22cc2b3/go.mod h1:/fhqEIxH0hmnDa6zm38XzsdURr5GrlC9oKO70JVorHU= +go.opentelemetry.io/collector/consumer/consumertest v0.118.1-0.20250121185328-fbefb22cc2b3 h1:sQKFJz7EYn9e9KsgVNjnLsONuc4w3uUo2+YzM8C2jtE= +go.opentelemetry.io/collector/consumer/consumertest v0.118.1-0.20250121185328-fbefb22cc2b3/go.mod h1:fOVRcSFNghbaDpTJtTVHvFEQHeAAW8WEX0dYWbPpgBc= +go.opentelemetry.io/collector/consumer/xconsumer v0.118.1-0.20250121185328-fbefb22cc2b3 h1:HCyq06lz8dtWHhcKCd5BuhZBu6USgjBEuHyYhBuiw54= +go.opentelemetry.io/collector/consumer/xconsumer v0.118.1-0.20250121185328-fbefb22cc2b3/go.mod h1:Ij9o9d7hZb4be6ql6yqMR7xy5fcFR0SSD6RRIYWlu88= +go.opentelemetry.io/collector/exporter v0.118.1-0.20250121185328-fbefb22cc2b3 h1:E2mBzlnz7aRRCGCp5osAGDnGO+ZrCnHzkpuRvn5tSTk= +go.opentelemetry.io/collector/exporter v0.118.1-0.20250121185328-fbefb22cc2b3/go.mod h1:KlWSon7+gCvxsq4QWiguXq0FgUteHvH7rN4rHFQSJn4= +go.opentelemetry.io/collector/exporter/exportertest v0.118.1-0.20250121185328-fbefb22cc2b3 h1:/rnRrsOvxVsG79Oyk4dwfwTHmIs49Jnj2YvPmmbI57o= +go.opentelemetry.io/collector/exporter/exportertest v0.118.1-0.20250121185328-fbefb22cc2b3/go.mod h1:lNczSjG3ihPPHKTifLBoNy02XRAVKchRvGSS0QbNxZo= +go.opentelemetry.io/collector/exporter/xexporter v0.118.1-0.20250121185328-fbefb22cc2b3 h1:cHIgmy1TCOGh+Fv8C72kPYljTVC3tvUVi20PTla7yfo= +go.opentelemetry.io/collector/exporter/xexporter v0.118.1-0.20250121185328-fbefb22cc2b3/go.mod h1:ltZ3Qosr6DVRxe9j7KnlHQ9WTEJNnvoVhldUFVJiOWA= +go.opentelemetry.io/collector/extension v0.118.1-0.20250121185328-fbefb22cc2b3 h1:pigm8Nxub1OMInnkdu9U/Gqm0GuWmYgVUiRa0WuJmo0= +go.opentelemetry.io/collector/extension v0.118.1-0.20250121185328-fbefb22cc2b3/go.mod 
h1:7yUjnhGc/ota8nhFdLdP3trrYFx3jqtq7NAV+i04eJw= +go.opentelemetry.io/collector/extension/auth v0.118.1-0.20250121185328-fbefb22cc2b3 h1:ENw3837wlS/3iSu0BIyUNjDIQAstkdBiTaCixj6yzrA= +go.opentelemetry.io/collector/extension/auth v0.118.1-0.20250121185328-fbefb22cc2b3/go.mod h1:cs4Er00Asntjw7aPHRVQDvvtMzppKjRgMECa89b86AE= +go.opentelemetry.io/collector/extension/auth/authtest v0.118.0 h1:KIORXNc71vfpQrrZOntiZesRCZtQ8alrASWVT/zZkyo= +go.opentelemetry.io/collector/extension/auth/authtest v0.118.0/go.mod h1:0ZlSP9NPAfTRQd6Tx4mOH0IWrp6ufHaVN//L9Mb87gM= +go.opentelemetry.io/collector/extension/extensiontest v0.118.0 h1:rKBUaFS9elGfENG45wANmrwx7mHsmt1+YWCzxjftElg= +go.opentelemetry.io/collector/extension/extensiontest v0.118.0/go.mod h1:CqNXzkIOR32D8EUpptpOXhpFkibs3kFlRyNMEgIW8l4= +go.opentelemetry.io/collector/extension/xextension v0.118.1-0.20250121185328-fbefb22cc2b3 h1:Wd6nfGwTu6HMm+/4T37H9zkCG5XHMGIif2z/6qAamqQ= +go.opentelemetry.io/collector/extension/xextension v0.118.1-0.20250121185328-fbefb22cc2b3/go.mod h1:oNYxFX5QI2BUb01qZz23x4yxCletpiGwoZLB+JcHd2M= +go.opentelemetry.io/collector/featuregate v1.24.1-0.20250121185328-fbefb22cc2b3 h1:bTrqWcaRulXfpSQwnWrGlCsN4ZO5wzD931vH2E28Vc4= +go.opentelemetry.io/collector/featuregate v1.24.1-0.20250121185328-fbefb22cc2b3/go.mod h1:3GaXqflNDVwWndNGBJ1+XJFy3Fv/XrFgjMN60N3z7yg= +go.opentelemetry.io/collector/pdata v1.24.1-0.20250121185328-fbefb22cc2b3 h1:GXjNTD7hyz2Qwuu5uwLYeJTkWECWL6eL41w/JrQIJrU= +go.opentelemetry.io/collector/pdata v1.24.1-0.20250121185328-fbefb22cc2b3/go.mod h1:6lE9r5x41Z9GyvTSBetXSHRikhiZZK5ApmFtX35ZbXc= +go.opentelemetry.io/collector/pdata/pprofile v0.118.1-0.20250121185328-fbefb22cc2b3 h1:INViss+PcyyzYe/ZFHHFr/h+Mmo7n94nSzdmp68gBqI= +go.opentelemetry.io/collector/pdata/pprofile v0.118.1-0.20250121185328-fbefb22cc2b3/go.mod h1:iD66/nCk+xHh4q/1FBcYBQTEZKZuejggZBkm14/cobA= +go.opentelemetry.io/collector/pdata/testdata v0.118.0 h1:5N0w1SX9KIRkwvtkrpzQgXy9eGk3vfNG0ds6mhEPMIM= 
+go.opentelemetry.io/collector/pdata/testdata v0.118.0/go.mod h1:UY+GHV5bOC1BnFburOZ0wiHReJj1XbW12mi2Ogbc5Lw= +go.opentelemetry.io/collector/pipeline v0.118.1-0.20250121185328-fbefb22cc2b3 h1:uXvVXIkbxeEJa9L+xM7b5+2Y/LjfGKX65fQdRfW5+PQ= +go.opentelemetry.io/collector/pipeline v0.118.1-0.20250121185328-fbefb22cc2b3/go.mod h1:qE3DmoB05AW0C3lmPvdxZqd/H4po84NPzd5MrqgtL74= +go.opentelemetry.io/collector/receiver v0.118.1-0.20250121185328-fbefb22cc2b3 h1:uP/22oV69zYMWFdeCQHlSpVC22UZWmZsHgcdFDW89eo= +go.opentelemetry.io/collector/receiver v0.118.1-0.20250121185328-fbefb22cc2b3/go.mod h1:ycM9v5g4GvMspTtQbwLvmLOv4djo/bVw4RefJreGGaY= +go.opentelemetry.io/collector/receiver/receivertest v0.118.1-0.20250121185328-fbefb22cc2b3 h1:i9gXuyWdAXD+NVaGJbPnY4q+u5RwkOb/NSBnv1+IAMw= +go.opentelemetry.io/collector/receiver/receivertest v0.118.1-0.20250121185328-fbefb22cc2b3/go.mod h1:x9N91YI3onF0+enjYegcHYOb50Of2xO05c8EyE/baJ0= +go.opentelemetry.io/collector/receiver/xreceiver v0.118.1-0.20250121185328-fbefb22cc2b3 h1:lSOxA/PFNKwCCf0bYwOkTtvYn4Ch4QADFVJU/kuye08= +go.opentelemetry.io/collector/receiver/xreceiver v0.118.1-0.20250121185328-fbefb22cc2b3/go.mod h1:WLPXXIuodY7quBgqCz3OIsPNdBMLDej5nUIbiyyfoUc= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.56.0 h1:yMkBS9yViCc7U7yeLzJPM2XizlfdVvBRSmsQDWu6qc0= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.56.0/go.mod h1:n8MR6/liuGB5EmTETUBeU5ZgqMOlqKRxUaqPQBOANZ8= +go.opentelemetry.io/otel v1.34.0 h1:zRLXxLCgL1WyKsPVrgbSdMN4c0FMkDAskSTQP+0hdUY= +go.opentelemetry.io/otel v1.34.0/go.mod h1:OWFPOQ+h4G8xpyjgqo4SxJYdDQ/qmRH+wivy7zzx9oI= +go.opentelemetry.io/otel/metric v1.34.0 h1:+eTR3U0MyfWjRDhmFMxe2SsW64QrZ84AOhvqS7Y+PoQ= +go.opentelemetry.io/otel/metric v1.34.0/go.mod h1:CEDrp0fy2D0MvkXE+dPV7cMi8tWZwX3dmaIhwPOaqHE= +go.opentelemetry.io/otel/sdk v1.34.0 h1:95zS4k/2GOy069d321O8jWgYsW3MzVV+KuSPKp7Wr1A= +go.opentelemetry.io/otel/sdk v1.34.0/go.mod 
h1:0e/pNiaMAqaykJGKbi+tSjWfNNHMTxoC9qANsCzbyxU= +go.opentelemetry.io/otel/sdk/metric v1.34.0 h1:5CeK9ujjbFVL5c1PhLuStg1wxA7vQv7ce1EK0Gyvahk= +go.opentelemetry.io/otel/sdk/metric v1.34.0/go.mod h1:jQ/r8Ze28zRKoNRdkjCZxfs6YvBTG1+YIqyFVFYec5w= +go.opentelemetry.io/otel/trace v1.34.0 h1:+ouXS2V8Rd4hp4580a8q23bg0azF2nI8cqLYnC8mh/k= +go.opentelemetry.io/otel/trace v1.34.0/go.mod h1:Svm7lSjQD7kG7KJ/MUHPVXSDGz2OX4h0M2jHBhmSfRE= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= +go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I= +golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4= +golang.org/x/sync 
v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU= +golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= +golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 
+google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28 h1:XVhgTWWV3kGQlwJHR3upFWZeTsei6Oks1apkZSeonIE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI= +google.golang.org/grpc v1.69.4 h1:MF5TftSMkd8GLw/m0KM6V8CMOCY6NZ1NQDPGFgbTt4A= +google.golang.org/grpc v1.69.4/go.mod h1:vyjdE6jLBI76dgpDojsFGNaHlxdjXN9ghpnd2o7JGZ4= +google.golang.org/protobuf v1.36.3 h1:82DV7MYdb8anAVi3qge1wSnMDrnKK7ebr+I0hHRN1BU= +google.golang.org/protobuf v1.36.3/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/exporter/stefexporter/internal/metadata/generated_status.go b/exporter/stefexporter/internal/metadata/generated_status.go new file mode 100644 index 000000000000..831f424d2aa7 --- /dev/null +++ b/exporter/stefexporter/internal/metadata/generated_status.go @@ -0,0 +1,16 @@ +// Code generated by mdatagen. DO NOT EDIT. 
+ +package metadata + +import ( + "go.opentelemetry.io/collector/component" +) + +var ( + Type = component.MustNewType("stef") + ScopeName = "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/stefexporter" +) + +const ( + MetricsStability = component.StabilityLevelDevelopment +) diff --git a/exporter/stefexporter/metadata.yaml b/exporter/stefexporter/metadata.yaml new file mode 100644 index 000000000000..525e593c9262 --- /dev/null +++ b/exporter/stefexporter/metadata.yaml @@ -0,0 +1,13 @@ +type: stef + +status: + class: exporter + stability: + development: [metrics] + distributions: [] + codeowners: + active: [tigrannajaryan, dmitryax] +tests: + config: + endpoint: "http://localhost:0" + expect_consumer_error: true diff --git a/versions.yaml b/versions.yaml index 5f1dc302091d..802e23d45cfe 100644 --- a/versions.yaml +++ b/versions.yaml @@ -70,6 +70,7 @@ module-sets: - github.com/open-telemetry/opentelemetry-collector-contrib/exporter/sentryexporter - github.com/open-telemetry/opentelemetry-collector-contrib/exporter/signalfxexporter - github.com/open-telemetry/opentelemetry-collector-contrib/exporter/splunkhecexporter + - github.com/open-telemetry/opentelemetry-collector-contrib/exporter/stefexporter - github.com/open-telemetry/opentelemetry-collector-contrib/exporter/sumologicexporter - github.com/open-telemetry/opentelemetry-collector-contrib/exporter/syslogexporter - github.com/open-telemetry/opentelemetry-collector-contrib/exporter/tencentcloudlogserviceexporter From 44002d46fabd9cf7c50ca158a6be627572fcb40e Mon Sep 17 00:00:00 2001 From: Alex Boten <223565+codeboten@users.noreply.github.com> Date: Wed, 22 Jan 2025 20:36:52 -0800 Subject: [PATCH 06/12] [chore] update test to use a different prometheus endpoint (#37425) Instead of relying on the same port for each test, use `GetAvailableLocalAddress` to get a new port for each test. 
--------- Signed-off-by: Alex Boten <223565+codeboten@users.noreply.github.com> --- exporter/datadogexporter/integrationtest/go.mod | 2 +- exporter/datadogexporter/integrationtest/integration_test.go | 5 +++++ .../integration_test_internal_metrics_config.yaml | 4 ++-- .../integrationtest/integration_test_logs_config.yaml | 4 ++-- .../integrationtest/no_race_integration_test.go | 4 +++- 5 files changed, 13 insertions(+), 6 deletions(-) diff --git a/exporter/datadogexporter/integrationtest/go.mod b/exporter/datadogexporter/integrationtest/go.mod index 363575858803..65b7307f844d 100644 --- a/exporter/datadogexporter/integrationtest/go.mod +++ b/exporter/datadogexporter/integrationtest/go.mod @@ -7,6 +7,7 @@ require ( github.com/DataDog/datadog-agent/pkg/proto v0.63.0-devel github.com/open-telemetry/opentelemetry-collector-contrib/connector/datadogconnector v0.118.0 github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter v0.118.0 + github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.118.0 github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor v0.118.0 github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.118.0 github.com/stretchr/testify v1.10.0 @@ -254,7 +255,6 @@ require ( github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/ecsutil v0.118.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.118.0 // indirect github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.118.0 // indirect github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter v0.118.0 // indirect github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8sconfig v0.118.0 // indirect diff --git 
a/exporter/datadogexporter/integrationtest/integration_test.go b/exporter/datadogexporter/integrationtest/integration_test.go index ef586f5423b6..7d04569e859e 100644 --- a/exporter/datadogexporter/integrationtest/integration_test.go +++ b/exporter/datadogexporter/integrationtest/integration_test.go @@ -47,6 +47,7 @@ import ( "github.com/open-telemetry/opentelemetry-collector-contrib/connector/datadogconnector" "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter" + commonTestutil "github.com/open-telemetry/opentelemetry-collector-contrib/internal/common/testutil" "github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver" ) @@ -92,6 +93,7 @@ func testIntegration(t *testing.T) { server := testutil.DatadogServerMock(apmstatsRec.HandlerFunc, tracesRec.HandlerFunc) defer server.Close() t.Setenv("SERVER_URL", server.URL) + t.Setenv("PROM_SERVER", commonTestutil.GetAvailableLocalAddress(t)) // 2. Start in-process collector factories := getIntegrationTestComponents(t) @@ -284,6 +286,7 @@ func TestIntegrationComputeTopLevelBySpanKind(t *testing.T) { server := testutil.DatadogServerMock(apmstatsRec.HandlerFunc, tracesRec.HandlerFunc) defer server.Close() t.Setenv("SERVER_URL", server.URL) + t.Setenv("PROM_SERVER", commonTestutil.GetAvailableLocalAddress(t)) // 2. Start in-process collector factories := getIntegrationTestComponents(t) @@ -463,7 +466,9 @@ func TestIntegrationLogs(t *testing.T) { } }) defer server.Close() + thing := commonTestutil.GetAvailableLocalAddress(t) t.Setenv("SERVER_URL", server.URL) + t.Setenv("PROM_SERVER", thing) // 2. 
Start in-process collector factories := getIntegrationTestComponents(t) diff --git a/exporter/datadogexporter/integrationtest/integration_test_internal_metrics_config.yaml b/exporter/datadogexporter/integrationtest/integration_test_internal_metrics_config.yaml index 9100aecf8cad..47a7115436dd 100644 --- a/exporter/datadogexporter/integrationtest/integration_test_internal_metrics_config.yaml +++ b/exporter/datadogexporter/integrationtest/integration_test_internal_metrics_config.yaml @@ -12,7 +12,7 @@ receivers: - job_name: 'otelcol' scrape_interval: 1s static_configs: - - targets: [ 'localhost:8888' ] + - targets: [ '${env:PROM_SERVER}' ] exporters: datadog: @@ -33,7 +33,7 @@ service: telemetry: metrics: level: basic - address: "localhost:8888" + address: ${env:PROM_SERVER} pipelines: traces: receivers: [otlp] diff --git a/exporter/datadogexporter/integrationtest/integration_test_logs_config.yaml b/exporter/datadogexporter/integrationtest/integration_test_logs_config.yaml index f02d45dafc16..28ac1ff0789b 100644 --- a/exporter/datadogexporter/integrationtest/integration_test_logs_config.yaml +++ b/exporter/datadogexporter/integrationtest/integration_test_logs_config.yaml @@ -12,7 +12,7 @@ receivers: - job_name: 'otelcol' scrape_interval: 1s static_configs: - - targets: [ 'localhost:8888' ] + - targets: [ '${env:PROM_SERVER}' ] metric_relabel_configs: - source_labels: [ __name__ ] regex: "(otelcol_receiver_accepted_log_records|otelcol_exporter_sent_log_records)" @@ -37,7 +37,7 @@ service: telemetry: metrics: level: basic - address: "localhost:8888" + address: ${env:PROM_SERVER} pipelines: logs: receivers: [otlp] diff --git a/exporter/datadogexporter/integrationtest/no_race_integration_test.go b/exporter/datadogexporter/integrationtest/no_race_integration_test.go index a42262337550..7c7bb38651b2 100644 --- a/exporter/datadogexporter/integrationtest/no_race_integration_test.go +++ b/exporter/datadogexporter/integrationtest/no_race_integration_test.go @@ -14,19 +14,21 @@ 
import ( "github.com/DataDog/datadog-agent/comp/otelcol/otlp/testutil" "github.com/stretchr/testify/assert" + + commonTestutil "github.com/open-telemetry/opentelemetry-collector-contrib/internal/common/testutil" ) func TestIntegrationInternalMetrics(t *testing.T) { if runtime.GOOS == "windows" { t.Skip("flaky test on windows https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/34836") } - // 1. Set up mock Datadog server seriesRec := &testutil.HTTPRequestRecorderWithChan{Pattern: testutil.MetricV2Endpoint, ReqChan: make(chan []byte, 100)} tracesRec := &testutil.HTTPRequestRecorderWithChan{Pattern: testutil.TraceEndpoint, ReqChan: make(chan []byte, 100)} server := testutil.DatadogServerMock(seriesRec.HandlerFunc, tracesRec.HandlerFunc) defer server.Close() t.Setenv("SERVER_URL", server.URL) + t.Setenv("PROM_SERVER", commonTestutil.GetAvailableLocalAddress(t)) // 2. Start in-process collector factories := getIntegrationTestComponents(t) From 9982d2c79574062354a3c05581876c2e80b1f47c Mon Sep 17 00:00:00 2001 From: Alex Boten <223565+codeboten@users.noreply.github.com> Date: Thu, 23 Jan 2025 03:00:27 -0800 Subject: [PATCH 07/12] [chore] use AvailablePort for grpc/http receivers (#37431) Prevent tests from using the same port, possibly address flaky test. 
Signed-off-by: Alex Boten <223565+codeboten@users.noreply.github.com> --- .../integrationtest/integration_test.go | 27 ++++++++++++------- .../integration_test_config.yaml | 4 +-- ...egration_test_internal_metrics_config.yaml | 4 +-- .../integration_test_logs_config.yaml | 4 +-- .../integration_test_toplevel_config.yaml | 4 +-- .../no_race_integration_test.go | 5 +++- 6 files changed, 30 insertions(+), 18 deletions(-) diff --git a/exporter/datadogexporter/integrationtest/integration_test.go b/exporter/datadogexporter/integrationtest/integration_test.go index 7d04569e859e..0e4e4d203b33 100644 --- a/exporter/datadogexporter/integrationtest/integration_test.go +++ b/exporter/datadogexporter/integrationtest/integration_test.go @@ -94,6 +94,9 @@ func testIntegration(t *testing.T) { defer server.Close() t.Setenv("SERVER_URL", server.URL) t.Setenv("PROM_SERVER", commonTestutil.GetAvailableLocalAddress(t)) + t.Setenv("OTLP_HTTP_SERVER", commonTestutil.GetAvailableLocalAddress(t)) + otlpGRPCEndpoint := commonTestutil.GetAvailableLocalAddress(t) + t.Setenv("OTLP_GRPC_SERVER", otlpGRPCEndpoint) // 2. Start in-process collector factories := getIntegrationTestComponents(t) @@ -106,7 +109,7 @@ func testIntegration(t *testing.T) { waitForReadiness(app) // 3. Generate and send traces - sendTraces(t) + sendTraces(t, otlpGRPCEndpoint) // 4. 
Validate traces and APM stats from the mock server var spans []*pb.Span @@ -225,11 +228,11 @@ func waitForReadiness(app *otelcol.Collector) { } } -func sendTraces(t *testing.T) { +func sendTraces(t *testing.T, endpoint string) { ctx := context.Background() // Set up OTel-Go SDK and exporter - traceExporter, err := otlptracegrpc.New(ctx, otlptracegrpc.WithInsecure()) + traceExporter, err := otlptracegrpc.New(ctx, otlptracegrpc.WithInsecure(), otlptracegrpc.WithEndpoint(endpoint)) require.NoError(t, err) bsp := sdktrace.NewBatchSpanProcessor(traceExporter) r1, _ := resource.New(ctx, resource.WithAttributes(attribute.String("k8s.node.name", "aaaa"))) @@ -287,6 +290,9 @@ func TestIntegrationComputeTopLevelBySpanKind(t *testing.T) { defer server.Close() t.Setenv("SERVER_URL", server.URL) t.Setenv("PROM_SERVER", commonTestutil.GetAvailableLocalAddress(t)) + t.Setenv("OTLP_HTTP_SERVER", commonTestutil.GetAvailableLocalAddress(t)) + otlpGRPCEndpoint := commonTestutil.GetAvailableLocalAddress(t) + t.Setenv("OTLP_GRPC_SERVER", otlpGRPCEndpoint) // 2. Start in-process collector factories := getIntegrationTestComponents(t) @@ -299,7 +305,7 @@ func TestIntegrationComputeTopLevelBySpanKind(t *testing.T) { waitForReadiness(app) // 3. Generate and send traces - sendTracesComputeTopLevelBySpanKind(t) + sendTracesComputeTopLevelBySpanKind(t, otlpGRPCEndpoint) // 4. 
Validate traces and APM stats from the mock server var spans []*pb.Span @@ -387,11 +393,11 @@ func TestIntegrationComputeTopLevelBySpanKind(t *testing.T) { } } -func sendTracesComputeTopLevelBySpanKind(t *testing.T) { +func sendTracesComputeTopLevelBySpanKind(t *testing.T, endpoint string) { ctx := context.Background() // Set up OTel-Go SDK and exporter - traceExporter, err := otlptracegrpc.New(ctx, otlptracegrpc.WithInsecure()) + traceExporter, err := otlptracegrpc.New(ctx, otlptracegrpc.WithInsecure(), otlptracegrpc.WithEndpoint(endpoint)) require.NoError(t, err) bsp := sdktrace.NewBatchSpanProcessor(traceExporter) r1, _ := resource.New(ctx, resource.WithAttributes(attribute.String("k8s.node.name", "aaaa"))) @@ -469,6 +475,9 @@ func TestIntegrationLogs(t *testing.T) { thing := commonTestutil.GetAvailableLocalAddress(t) t.Setenv("SERVER_URL", server.URL) t.Setenv("PROM_SERVER", thing) + t.Setenv("OTLP_HTTP_SERVER", commonTestutil.GetAvailableLocalAddress(t)) + otlpGRPCEndpoint := commonTestutil.GetAvailableLocalAddress(t) + t.Setenv("OTLP_GRPC_SERVER", otlpGRPCEndpoint) // 2. Start in-process collector factories := getIntegrationTestComponents(t) @@ -481,7 +490,7 @@ func TestIntegrationLogs(t *testing.T) { waitForReadiness(app) // 3. Generate and send logs - sendLogs(t, 5) + sendLogs(t, 5, otlpGRPCEndpoint) // 4. Validate logs and metrics from the mock server // Wait until `doneChannel` is closed and prometheus metrics are received. 
@@ -525,9 +534,9 @@ func TestIntegrationLogs(t *testing.T) { assert.Equal(t, 2, numSentLogRecords) } -func sendLogs(t *testing.T, numLogs int) { +func sendLogs(t *testing.T, numLogs int, endpoint string) { ctx := context.Background() - logExporter, err := otlploggrpc.New(ctx, otlploggrpc.WithInsecure()) + logExporter, err := otlploggrpc.New(ctx, otlploggrpc.WithInsecure(), otlploggrpc.WithEndpoint(endpoint)) assert.NoError(t, err) lr := make([]log.Record, numLogs) assert.NoError(t, logExporter.Export(ctx, lr)) diff --git a/exporter/datadogexporter/integrationtest/integration_test_config.yaml b/exporter/datadogexporter/integrationtest/integration_test_config.yaml index c32bf27da49e..b29a7ebf4f83 100644 --- a/exporter/datadogexporter/integrationtest/integration_test_config.yaml +++ b/exporter/datadogexporter/integrationtest/integration_test_config.yaml @@ -3,9 +3,9 @@ receivers: otlp: protocols: http: - endpoint: "localhost:4318" + endpoint: ${env:OTLP_HTTP_SERVER} grpc: - endpoint: "localhost:4317" + endpoint: ${env:OTLP_GRPC_SERVER} processors: tail_sampling: diff --git a/exporter/datadogexporter/integrationtest/integration_test_internal_metrics_config.yaml b/exporter/datadogexporter/integrationtest/integration_test_internal_metrics_config.yaml index 47a7115436dd..1d1e6762b998 100644 --- a/exporter/datadogexporter/integrationtest/integration_test_internal_metrics_config.yaml +++ b/exporter/datadogexporter/integrationtest/integration_test_internal_metrics_config.yaml @@ -3,9 +3,9 @@ receivers: otlp: protocols: http: - endpoint: "localhost:4318" + endpoint: ${env:OTLP_HTTP_SERVER} grpc: - endpoint: "localhost:4317" + endpoint: ${env:OTLP_GRPC_SERVER} prometheus: config: scrape_configs: diff --git a/exporter/datadogexporter/integrationtest/integration_test_logs_config.yaml b/exporter/datadogexporter/integrationtest/integration_test_logs_config.yaml index 28ac1ff0789b..ae2af2358967 100644 --- a/exporter/datadogexporter/integrationtest/integration_test_logs_config.yaml 
+++ b/exporter/datadogexporter/integrationtest/integration_test_logs_config.yaml @@ -3,9 +3,9 @@ receivers: otlp: protocols: http: - endpoint: "localhost:4318" + endpoint: ${env:OTLP_HTTP_SERVER} grpc: - endpoint: "localhost:4317" + endpoint: ${env:OTLP_GRPC_SERVER} prometheus: config: scrape_configs: diff --git a/exporter/datadogexporter/integrationtest/integration_test_toplevel_config.yaml b/exporter/datadogexporter/integrationtest/integration_test_toplevel_config.yaml index e542b95d3626..2a997226be99 100644 --- a/exporter/datadogexporter/integrationtest/integration_test_toplevel_config.yaml +++ b/exporter/datadogexporter/integrationtest/integration_test_toplevel_config.yaml @@ -3,9 +3,9 @@ receivers: otlp: protocols: http: - endpoint: "localhost:4318" + endpoint: ${env:OTLP_HTTP_SERVER} grpc: - endpoint: "localhost:4317" + endpoint: ${env:OTLP_GRPC_SERVER} connectors: datadog/connector: diff --git a/exporter/datadogexporter/integrationtest/no_race_integration_test.go b/exporter/datadogexporter/integrationtest/no_race_integration_test.go index 7c7bb38651b2..82ec0724ff9e 100644 --- a/exporter/datadogexporter/integrationtest/no_race_integration_test.go +++ b/exporter/datadogexporter/integrationtest/no_race_integration_test.go @@ -29,6 +29,9 @@ func TestIntegrationInternalMetrics(t *testing.T) { defer server.Close() t.Setenv("SERVER_URL", server.URL) t.Setenv("PROM_SERVER", commonTestutil.GetAvailableLocalAddress(t)) + t.Setenv("OTLP_HTTP_SERVER", commonTestutil.GetAvailableLocalAddress(t)) + otlpGRPCEndpoint := commonTestutil.GetAvailableLocalAddress(t) + t.Setenv("OTLP_GRPC_SERVER", otlpGRPCEndpoint) // 2. Start in-process collector factories := getIntegrationTestComponents(t) @@ -41,7 +44,7 @@ func TestIntegrationInternalMetrics(t *testing.T) { waitForReadiness(app) // 3. Generate and send traces - sendTraces(t) + sendTraces(t, otlpGRPCEndpoint) // 4. 
Validate Datadog trace agent & OTel internal metrics are sent to the mock server expectedMetrics := map[string]struct{}{ From 5d0a771b8b929edab4dfe6405084b758b8bfa821 Mon Sep 17 00:00:00 2001 From: Ruslan Nigmatullin Date: Thu, 23 Jan 2025 03:00:42 -0800 Subject: [PATCH 08/12] deltatocumulative: Fix selection of the target scale for exponential histograms (#37432) #### Description While addressing comments a bug was added to the logic of calculating of the desired scale and it slipped through tests. Fix the bug (use `min` instead of `max`) and update tests to avoid regressions in the future. #### Link to tracking issue Fixes #37416 #### Testing Update tests to separately cover positive and negative buckets. #### Documentation n/a --- .chloggen/expo-histogram-fix-downscaling.yaml | 27 ++++++++++++ .../internal/data/add.go | 2 +- .../internal/data/expo_test.go | 42 +++++++++++++++---- 3 files changed, 61 insertions(+), 10 deletions(-) create mode 100644 .chloggen/expo-histogram-fix-downscaling.yaml diff --git a/.chloggen/expo-histogram-fix-downscaling.yaml b/.chloggen/expo-histogram-fix-downscaling.yaml new file mode 100644 index 000000000000..0e4e89e0ea36 --- /dev/null +++ b/.chloggen/expo-histogram-fix-downscaling.yaml @@ -0,0 +1,27 @@ +# Use this changelog template to create an entry for release notes. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: bug_fix + +# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver) +component: deltatocumulativeprocessor + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: In order to cap number of histogram buckets take the min of desired scale across negative and positive buckets instead of the max + +# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. 
+issues: [37416] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: + +# If your change doesn't affect end users or the exported elements of any package, +# you should instead start your pull request title with [chore] or use the "Skip Changelog" label. +# Optional: The change log or logs in which this entry should be included. +# e.g. '[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. +# Default: '[user]' +change_logs: [] diff --git a/processor/deltatocumulativeprocessor/internal/data/add.go b/processor/deltatocumulativeprocessor/internal/data/add.go index 13b9a8151106..c1a1ee6ad8f7 100644 --- a/processor/deltatocumulativeprocessor/internal/data/add.go +++ b/processor/deltatocumulativeprocessor/internal/data/add.go @@ -80,7 +80,7 @@ func (dp ExpHistogram) Add(in ExpHistogram) ExpHistogram { // Downscale if an expected number of buckets after the merge is too large. 
from := expo.Scale(dp.Scale()) - to := max( + to := min( expo.Limit(maxBuckets, from, dp.Positive(), in.Positive()), expo.Limit(maxBuckets, from, dp.Negative(), in.Negative()), ) diff --git a/processor/deltatocumulativeprocessor/internal/data/expo_test.go b/processor/deltatocumulativeprocessor/internal/data/expo_test.go index 970eda2b67c7..768f1f39b1cf 100644 --- a/processor/deltatocumulativeprocessor/internal/data/expo_test.go +++ b/processor/deltatocumulativeprocessor/internal/data/expo_test.go @@ -27,10 +27,11 @@ func TestExpoAdd(t *testing.T) { defer func() { maxBuckets = prevMaxBuckets }() cases := []struct { - name string - dp, in expdp - want expdp - flip bool + name string + dp, in expdp + want expdp + flip bool + alsoTryEachSign bool }{{ name: "noop", dp: expdp{PosNeg: bins{0, 0, 0, 0, 0, 0, 0, 0}.Into(), Count: 0}, @@ -108,6 +109,7 @@ func TestExpoAdd(t *testing.T) { PosNeg: bins{3, 3, 3, 3, 3, 3, 3, 3}.Into(), Count: 24, }, + alsoTryEachSign: true, }, { name: "scale/downscale_once_exceeds_limit", dp: expdp{ @@ -125,6 +127,7 @@ func TestExpoAdd(t *testing.T) { PosNeg: rawbs([]uint64{2, 2, 2, 6, 4, 4, 4}, 0), Count: 24, }, + alsoTryEachSign: true, }, { name: "scale/downscale_multiple_times_until_within_limit", dp: expdp{ @@ -142,6 +145,7 @@ func TestExpoAdd(t *testing.T) { PosNeg: rawbs([]uint64{2, 4, 2, 4, 8, 4}, -2), Count: 24, }, + alsoTryEachSign: true, }, { name: "scale/ignore_leading_trailing_zeros_in_bucket_count", dp: expdp{ @@ -159,6 +163,7 @@ func TestExpoAdd(t *testing.T) { PosNeg: rawbs([]uint64{1, 7, 7, 4, 3, 2, 2}, 0), Count: 26, }, + alsoTryEachSign: true, }, { name: "scale/downscale_with_leading_trailing_zeros", dp: expdp{ @@ -176,17 +181,18 @@ func TestExpoAdd(t *testing.T) { PosNeg: rawbs([]uint64{11, 11, 0, 0, 12, 12}, -1), Count: 46, }, + alsoTryEachSign: true, }} for _, cs := range cases { - run := func(dp, in expdp) func(t *testing.T) { + run := func(dp, in, want expdp) func(t *testing.T) { return func(t *testing.T) { is := 
datatest.New(t) var ( dp = ExpHistogram{dp.Into()} in = ExpHistogram{in.Into()} - want = ExpHistogram{cs.want.Into()} + want = ExpHistogram{want.Into()} ) dp.SetTimestamp(0) @@ -199,14 +205,32 @@ func TestExpoAdd(t *testing.T) { } if cs.flip { - t.Run(cs.name+"-dp", run(cs.dp, cs.in)) - t.Run(cs.name+"-in", run(cs.in, cs.dp)) + t.Run(cs.name+"-dp", run(cs.dp, cs.in, cs.want)) + t.Run(cs.name+"-in", run(cs.in, cs.dp, cs.want)) continue } - t.Run(cs.name, run(cs.dp, cs.in)) + if cs.alsoTryEachSign { + t.Run(cs.name+"-pos", run(clonePosExpdp(cs.dp), clonePosExpdp(cs.in), clonePosExpdp(cs.want))) + t.Run(cs.name+"-neg", run(cloneNegExpdp(cs.dp), cloneNegExpdp(cs.in), cloneNegExpdp(cs.want))) + } + t.Run(cs.name, run(cs.dp, cs.in, cs.want)) } } +func cloneNegExpdp(dp expotest.Histogram) expotest.Histogram { + dp.Neg = pmetric.NewExponentialHistogramDataPointBuckets() + dp.PosNeg.CopyTo(dp.Neg) + dp.PosNeg = expo.Buckets{} + return dp +} + +func clonePosExpdp(dp expotest.Histogram) expotest.Histogram { + dp.Pos = pmetric.NewExponentialHistogramDataPointBuckets() + dp.PosNeg.CopyTo(dp.Pos) + dp.PosNeg = expo.Buckets{} + return dp +} + func rawbs(data []uint64, offset int32) expo.Buckets { bs := pmetric.NewExponentialHistogramDataPointBuckets() bs.BucketCounts().FromRaw(data) From 68fa024f4d0297c754f173894c5fda9d22b3b1d1 Mon Sep 17 00:00:00 2001 From: Pablo Baeyens Date: Thu, 23 Jan 2025 14:33:09 +0000 Subject: [PATCH 09/12] [chore][update-otel] Tidy after generating the code (#37440) #### Description Adds an extra `make gotidy` after `make generate` and `make crosslink` See #37437 for example where it failed because of this. --- Makefile | 2 ++ 1 file changed, 2 insertions(+) diff --git a/Makefile b/Makefile index 341c909dbfc3..768375b48ec7 100644 --- a/Makefile +++ b/Makefile @@ -423,6 +423,8 @@ update-otel:$(MULTIMOD) $(MAKE) genoteltestbedcol $(MAKE) generate $(MAKE) crosslink + # Tidy again after generating code + $(MAKE) gotidy $(MAKE) remove-toolchain git add . 
&& git commit -s -m "[chore] mod and toolchain tidy" ; \ From 11e18af085a606e5b591f350f5747a073ab088d6 Mon Sep 17 00:00:00 2001 From: zirain Date: Thu, 23 Jan 2025 22:49:19 +0800 Subject: [PATCH 10/12] [receiver/envoyals] init new receiver (#37193) --- .chloggen/anvoy-als-receiver.yaml | 27 +++ .github/CODEOWNERS | 1 + .github/ISSUE_TEMPLATE/bug_report.yaml | 1 + .github/ISSUE_TEMPLATE/feature_request.yaml | 1 + .github/ISSUE_TEMPLATE/other.yaml | 1 + .github/ISSUE_TEMPLATE/unmaintained.yaml | 1 + cmd/otelcontribcol/builder-config.yaml | 1 + receiver/envoyalsreceiver/Makefile | 1 + receiver/envoyalsreceiver/README.md | 44 ++++ receiver/envoyalsreceiver/als.go | 90 +++++++++ receiver/envoyalsreceiver/als_test.go | 188 ++++++++++++++++++ receiver/envoyalsreceiver/config.go | 15 ++ receiver/envoyalsreceiver/config_test.go | 67 +++++++ receiver/envoyalsreceiver/doc.go | 6 + receiver/envoyalsreceiver/factory.go | 49 +++++ receiver/envoyalsreceiver/factory_test.go | 38 ++++ .../generated_component_test.go | 69 +++++++ .../generated_package_test.go | 13 ++ receiver/envoyalsreceiver/go.mod | 88 ++++++++ receiver/envoyalsreceiver/go.sum | 188 ++++++++++++++++++ .../envoyalsreceiver/internal/als/server.go | 89 +++++++++ .../internal/metadata/generated_status.go | 16 ++ receiver/envoyalsreceiver/metadata.yaml | 9 + .../envoyalsreceiver/testdata/config.yaml | 5 + versions.yaml | 1 + 25 files changed, 1009 insertions(+) create mode 100644 .chloggen/anvoy-als-receiver.yaml create mode 100644 receiver/envoyalsreceiver/Makefile create mode 100644 receiver/envoyalsreceiver/README.md create mode 100644 receiver/envoyalsreceiver/als.go create mode 100644 receiver/envoyalsreceiver/als_test.go create mode 100644 receiver/envoyalsreceiver/config.go create mode 100644 receiver/envoyalsreceiver/config_test.go create mode 100644 receiver/envoyalsreceiver/doc.go create mode 100644 receiver/envoyalsreceiver/factory.go create mode 100644 receiver/envoyalsreceiver/factory_test.go create mode 
100644 receiver/envoyalsreceiver/generated_component_test.go create mode 100644 receiver/envoyalsreceiver/generated_package_test.go create mode 100644 receiver/envoyalsreceiver/go.mod create mode 100644 receiver/envoyalsreceiver/go.sum create mode 100644 receiver/envoyalsreceiver/internal/als/server.go create mode 100644 receiver/envoyalsreceiver/internal/metadata/generated_status.go create mode 100644 receiver/envoyalsreceiver/metadata.yaml create mode 100644 receiver/envoyalsreceiver/testdata/config.yaml diff --git a/.chloggen/anvoy-als-receiver.yaml b/.chloggen/anvoy-als-receiver.yaml new file mode 100644 index 000000000000..5b1686c06c48 --- /dev/null +++ b/.chloggen/anvoy-als-receiver.yaml @@ -0,0 +1,27 @@ +# Use this changelog template to create an entry for release notes. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: new_component + +# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver) +component: envoyalsreceiver + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: Add a new receiver for the Envoy ALS (Access Log Service). + +# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. +issues: [36464] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: + +# If your change doesn't affect end users or the exported elements of any package, +# you should instead start your pull request title with [chore] or use the "Skip Changelog" label. +# Optional: The change log or logs in which this entry should be included. +# e.g. '[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. 
+# Default: '[user]' +change_logs: [] diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index ca402aeb52ce..b07cf95004b7 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -222,6 +222,7 @@ receiver/couchdbreceiver/ @open-telemetry receiver/datadogreceiver/ @open-telemetry/collector-contrib-approvers @boostchicken @gouthamve @MovieStoreGuy receiver/dockerstatsreceiver/ @open-telemetry/collector-contrib-approvers @jamesmoessis receiver/elasticsearchreceiver/ @open-telemetry/collector-contrib-approvers @djaglowski +receiver/envoyalsreceiver/ @open-telemetry/collector-contrib-approvers @evan-bradley receiver/expvarreceiver/ @open-telemetry/collector-contrib-approvers @jamesmoessis @MovieStoreGuy receiver/filelogreceiver/ @open-telemetry/collector-contrib-approvers @djaglowski receiver/filestatsreceiver/ @open-telemetry/collector-contrib-approvers @atoulme diff --git a/.github/ISSUE_TEMPLATE/bug_report.yaml b/.github/ISSUE_TEMPLATE/bug_report.yaml index 580e28f0cd1b..95b022b70897 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.yaml +++ b/.github/ISSUE_TEMPLATE/bug_report.yaml @@ -220,6 +220,7 @@ body: - receiver/datadog - receiver/dockerstats - receiver/elasticsearch + - receiver/envoyals - receiver/expvar - receiver/filelog - receiver/filestats diff --git a/.github/ISSUE_TEMPLATE/feature_request.yaml b/.github/ISSUE_TEMPLATE/feature_request.yaml index 64ba734ab373..acf39e99429e 100644 --- a/.github/ISSUE_TEMPLATE/feature_request.yaml +++ b/.github/ISSUE_TEMPLATE/feature_request.yaml @@ -214,6 +214,7 @@ body: - receiver/datadog - receiver/dockerstats - receiver/elasticsearch + - receiver/envoyals - receiver/expvar - receiver/filelog - receiver/filestats diff --git a/.github/ISSUE_TEMPLATE/other.yaml b/.github/ISSUE_TEMPLATE/other.yaml index a7ab6232949e..a99efb30d46b 100644 --- a/.github/ISSUE_TEMPLATE/other.yaml +++ b/.github/ISSUE_TEMPLATE/other.yaml @@ -214,6 +214,7 @@ body: - receiver/datadog - receiver/dockerstats - receiver/elasticsearch + - 
receiver/envoyals - receiver/expvar - receiver/filelog - receiver/filestats diff --git a/.github/ISSUE_TEMPLATE/unmaintained.yaml b/.github/ISSUE_TEMPLATE/unmaintained.yaml index 120bf2c250d6..c7148dc20c9a 100644 --- a/.github/ISSUE_TEMPLATE/unmaintained.yaml +++ b/.github/ISSUE_TEMPLATE/unmaintained.yaml @@ -219,6 +219,7 @@ body: - receiver/datadog - receiver/dockerstats - receiver/elasticsearch + - receiver/envoyals - receiver/expvar - receiver/filelog - receiver/filestats diff --git a/cmd/otelcontribcol/builder-config.yaml b/cmd/otelcontribcol/builder-config.yaml index 40551f955ffb..56f3f52ef02f 100644 --- a/cmd/otelcontribcol/builder-config.yaml +++ b/cmd/otelcontribcol/builder-config.yaml @@ -355,6 +355,7 @@ replaces: - github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/ecsutil => ../../internal/aws/ecsutil - github.com/open-telemetry/opentelemetry-collector-contrib/receiver/rabbitmqreceiver => ../../receiver/rabbitmqreceiver - github.com/open-telemetry/opentelemetry-collector-contrib/receiver/elasticsearchreceiver => ../../receiver/elasticsearchreceiver + - github.com/open-telemetry/opentelemetry-collector-contrib/receiver/envoyalsreceiver => ../../receiver/envoyalsreceiver - github.com/open-telemetry/opentelemetry-collector-contrib/processor/metricsgenerationprocessor => ../../processor/metricsgenerationprocessor - github.com/open-telemetry/opentelemetry-collector-contrib/processor/attributesprocessor => ../../processor/attributesprocessor - github.com/open-telemetry/opentelemetry-collector-contrib/receiver/sqlqueryreceiver => ../../receiver/sqlqueryreceiver diff --git a/receiver/envoyalsreceiver/Makefile b/receiver/envoyalsreceiver/Makefile new file mode 100644 index 000000000000..ded7a36092dc --- /dev/null +++ b/receiver/envoyalsreceiver/Makefile @@ -0,0 +1 @@ +include ../../Makefile.Common diff --git a/receiver/envoyalsreceiver/README.md b/receiver/envoyalsreceiver/README.md new file mode 100644 index 000000000000..1ef1fb384c17 --- 
/dev/null +++ b/receiver/envoyalsreceiver/README.md @@ -0,0 +1,44 @@ +# Envoy ALS (Access Log Service) receiver + + +| Status | | +| ------------- |-----------| +| Stability | [development]: logs | +| Distributions | [] | +| Issues | [![Open issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aopen%20label%3Areceiver%2Fenvoyals%20&label=open&color=orange&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aopen+is%3Aissue+label%3Areceiver%2Fenvoyals) [![Closed issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aclosed%20label%3Areceiver%2Fenvoyals%20&label=closed&color=blue&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aclosed+is%3Aissue+label%3Areceiver%2Fenvoyals) | +| [Code Owners](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/CONTRIBUTING.md#becoming-a-code-owner) | [@evan-bradley](https://www.github.com/evan-bradley) | + +[development]: https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/component-stability.md#development + + +This is a receiver for the [Envoy gRPC ALS](https://www.envoyproxy.io/docs/envoy/latest/api-v3/extensions/access_loggers/grpc/v3/als.proto#envoy-v3-api-msg-extensions-access-loggers-grpc-v3-httpgrpcaccesslogconfig) sink. + +Envoy ALS (Access Log Service) is a feature of Envoy Proxy that allows for the +centralized collection and management of access logs. + +Instead of writing access logs to local files, Envoy can be configured to send these logs to a remote gRPC service. + +This is particularly useful in distributed systems where centralized logging is required for monitoring, auditing, and debugging purposes. + +[Istio](https://istio.io) and [Envoy Gateway](https://gateway.envoyproxy.io) support OTLP and gRPC ALS with first-class APIs.
+ +## Getting Started + +The settings are: + +- `endpoint` (required, default = localhost:19001 gRPC protocol): host:port to which the receiver is going to receive data. See our [security best practices doc](https://opentelemetry.io/docs/security/config-best-practices/#protect-against-denial-of-service-attacks) to understand how to set the endpoint in different environments. + +Example: +```yaml +receivers: + envoyals: + endpoint: 0.0.0.0:3500 +``` + +## Advanced Configuration + +Other options can be configured to support more advanced use cases: + +- [gRPC settings](https://github.com/open-telemetry/opentelemetry-collector/blob/main/config/configgrpc/README.md) including CORS +- [TLS and mTLS settings](https://github.com/open-telemetry/opentelemetry-collector/blob/main/config/configtls/README.md) + diff --git a/receiver/envoyalsreceiver/als.go b/receiver/envoyalsreceiver/als.go new file mode 100644 index 000000000000..f9a32e87a3ca --- /dev/null +++ b/receiver/envoyalsreceiver/als.go @@ -0,0 +1,90 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package envoyalsreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/envoyalsreceiver" + +import ( + "context" + "errors" + "fmt" + + alsv3 "github.com/envoyproxy/go-control-plane/envoy/service/accesslog/v3" + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/component/componentstatus" + "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/receiver" + "go.opentelemetry.io/collector/receiver/receiverhelper" + "go.uber.org/zap" + "google.golang.org/grpc" + + "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/envoyalsreceiver/internal/als" +) + +type alsReceiver struct { + conf *Config + nextConsumer consumer.Logs + settings receiver.Settings + serverGRPC *grpc.Server + + obsrepGRPC *receiverhelper.ObsReport +} + +func (r *alsReceiver) Start(ctx context.Context, host component.Host) error { + 
var err error + r.serverGRPC, err = r.conf.ToServer(ctx, host, r.settings.TelemetrySettings) + if err != nil { + return fmt.Errorf("failed create grpc server error: %w", err) + } + + alsv3.RegisterAccessLogServiceServer(r.serverGRPC, als.New(r.nextConsumer, r.obsrepGRPC)) + + err = r.startGRPCServer(ctx, host) + if err != nil { + return fmt.Errorf("failed to start grpc server error: %w", err) + } + + return err +} + +func (r *alsReceiver) startGRPCServer(ctx context.Context, host component.Host) error { + r.settings.Logger.Info("Starting GRPC server", zap.String("endpoint", r.conf.NetAddr.Endpoint)) + listener, err := r.conf.NetAddr.Listen(ctx) + if err != nil { + return err + } + + go func() { + if errGRPC := r.serverGRPC.Serve(listener); !errors.Is(errGRPC, grpc.ErrServerStopped) && errGRPC != nil { + componentstatus.ReportStatus(host, componentstatus.NewFatalErrorEvent(errGRPC)) + } + }() + return nil +} + +func (r *alsReceiver) Shutdown(_ context.Context) error { + if r.serverGRPC != nil { + r.serverGRPC.GracefulStop() + } + + return nil +} + +func newALSReceiver(cfg *Config, nextConsumer consumer.Logs, settings receiver.Settings) (*alsReceiver, error) { + r := &alsReceiver{ + conf: cfg, + nextConsumer: nextConsumer, + settings: settings, + } + + var err error + r.obsrepGRPC, err = receiverhelper.NewObsReport(receiverhelper.ObsReportSettings{ + ReceiverID: settings.ID, + Transport: "grpc", + ReceiverCreateSettings: settings, + }) + if err != nil { + return nil, err + } + + return r, nil +} diff --git a/receiver/envoyalsreceiver/als_test.go b/receiver/envoyalsreceiver/als_test.go new file mode 100644 index 000000000000..ef11a6e2233c --- /dev/null +++ b/receiver/envoyalsreceiver/als_test.go @@ -0,0 +1,188 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package envoyalsreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/envoyalsreceiver" + +import ( + "context" + "testing" + "time" + + corev3 
"github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + alsdata "github.com/envoyproxy/go-control-plane/envoy/data/accesslog/v3" + alsv3 "github.com/envoyproxy/go-control-plane/envoy/service/accesslog/v3" + "github.com/stretchr/testify/require" + "go.opentelemetry.io/collector/component/componenttest" + "go.opentelemetry.io/collector/config/configgrpc" + "go.opentelemetry.io/collector/config/confignet" + "go.opentelemetry.io/collector/consumer/consumertest" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/plog" + "go.opentelemetry.io/collector/receiver/receivertest" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/protobuf/types/known/timestamppb" + "google.golang.org/protobuf/types/known/wrapperspb" + + "github.com/open-telemetry/opentelemetry-collector-contrib/internal/common/testutil" + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest/plogtest" +) + +func startGRPCServer(t *testing.T) (*grpc.ClientConn, *consumertest.LogsSink) { + config := &Config{ + ServerConfig: configgrpc.ServerConfig{ + NetAddr: confignet.AddrConfig{ + Endpoint: testutil.GetAvailableLocalAddress(t), + Transport: confignet.TransportTypeTCP, + }, + }, + } + sink := new(consumertest.LogsSink) + + set := receivertest.NewNopSettings() + lr, err := newALSReceiver(config, sink, set) + require.NoError(t, err) + + require.NoError(t, lr.Start(context.Background(), componenttest.NewNopHost())) + t.Cleanup(func() { require.NoError(t, lr.Shutdown(context.Background())) }) + + conn, err := grpc.NewClient(config.NetAddr.Endpoint, grpc.WithTransportCredentials(insecure.NewCredentials())) + require.NoError(t, err) + return conn, sink +} + +func TestLogs(t *testing.T) { + // Start grpc server + conn, sink := startGRPCServer(t) + defer func() { + _ = conn.Close() + }() + + client, err := alsv3.NewAccessLogServiceClient(conn).StreamAccessLogs(context.Background()) + require.NoError(t, err) + + 
tm, err := time.Parse(time.RFC3339Nano, "2020-07-30T01:01:01.123456789Z") + require.NoError(t, err) + ts := int64(pcommon.NewTimestampFromTime(tm)) + + identifier := &alsv3.StreamAccessLogsMessage_Identifier{ + Node: &corev3.Node{ + Id: "test-id", + Cluster: "test-cluster", + }, + LogName: "test-log-name", + } + + httpLog := &alsdata.HTTPAccessLogEntry{ + CommonProperties: &alsdata.AccessLogCommon{ + StartTime: timestamppb.New(tm), + }, + Request: &alsdata.HTTPRequestProperties{ + Path: "/test", + Authority: "example.com", + }, + Response: &alsdata.HTTPResponseProperties{ + ResponseCode: wrapperspb.UInt32(200), + }, + } + + tcpLog := &alsdata.TCPAccessLogEntry{ + CommonProperties: &alsdata.AccessLogCommon{ + StartTime: timestamppb.New(tm), + }, + ConnectionProperties: &alsdata.ConnectionProperties{ + ReceivedBytes: 10, + SentBytes: 20, + }, + } + + tests := []struct { + name string + message *alsv3.StreamAccessLogsMessage + expected plog.Logs + }{ + { + name: "http", + message: &alsv3.StreamAccessLogsMessage{ + Identifier: identifier, + LogEntries: &alsv3.StreamAccessLogsMessage_HttpLogs{ + HttpLogs: &alsv3.StreamAccessLogsMessage_HTTPAccessLogEntries{ + LogEntry: []*alsdata.HTTPAccessLogEntry{ + httpLog, + }, + }, + }, + }, + expected: generateLogs([]Log{ + { + Timestamp: ts, + Attributes: map[string]any{ + "api_version": "v3", + "log_type": "http", + }, + Body: pcommon.NewValueStr(httpLog.String()), + }, + }), + }, + { + name: "tcp", + message: &alsv3.StreamAccessLogsMessage{ + Identifier: identifier, + LogEntries: &alsv3.StreamAccessLogsMessage_TcpLogs{ + TcpLogs: &alsv3.StreamAccessLogsMessage_TCPAccessLogEntries{ + LogEntry: []*alsdata.TCPAccessLogEntry{ + tcpLog, + }, + }, + }, + }, + expected: generateLogs([]Log{ + { + Timestamp: ts, + Attributes: map[string]any{ + "api_version": "v3", + "log_type": "tcp", + }, + Body: pcommon.NewValueStr(tcpLog.String()), + }, + }), + }, + } + + for i, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err = 
client.Send(tt.message) + require.NoError(t, err, "should not have failed to post logs") + + require.Eventually(t, func() bool { + gotLogs := sink.AllLogs() + + err := plogtest.CompareLogs(tt.expected, gotLogs[i], plogtest.IgnoreObservedTimestamp()) + if err == nil { + return true + } + t.Logf("Logs not received yet: %v", err) + return false + }, 5*time.Second, 100*time.Millisecond) + }) + } +} + +type Log struct { + Timestamp int64 + Body pcommon.Value + Attributes map[string]any +} + +func generateLogs(logs []Log) plog.Logs { + ld := plog.NewLogs() + logSlice := ld.ResourceLogs().AppendEmpty().ScopeLogs().AppendEmpty().LogRecords() + + for _, log := range logs { + lr := logSlice.AppendEmpty() + _ = lr.Attributes().FromRaw(log.Attributes) + lr.SetTimestamp(pcommon.Timestamp(log.Timestamp)) + lr.Body().SetStr(log.Body.AsString()) + } + return ld +} diff --git a/receiver/envoyalsreceiver/config.go b/receiver/envoyalsreceiver/config.go new file mode 100644 index 000000000000..57a183022723 --- /dev/null +++ b/receiver/envoyalsreceiver/config.go @@ -0,0 +1,15 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package envoyalsreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/envoyalsreceiver" + +import ( + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/config/configgrpc" +) + +type Config struct { + configgrpc.ServerConfig `mapstructure:",squash"` +} + +var _ component.Config = (*Config)(nil) diff --git a/receiver/envoyalsreceiver/config_test.go b/receiver/envoyalsreceiver/config_test.go new file mode 100644 index 000000000000..c14d9d145d7e --- /dev/null +++ b/receiver/envoyalsreceiver/config_test.go @@ -0,0 +1,67 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package envoyalsreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/envoyalsreceiver" + +import ( + "path/filepath" + "testing" + + 
"github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/config/configgrpc" + "go.opentelemetry.io/collector/config/confignet" + "go.opentelemetry.io/collector/confmap/confmaptest" + + "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/envoyalsreceiver/internal/metadata" +) + +func TestLoadConfig(t *testing.T) { + t.Parallel() + + cm, err := confmaptest.LoadConf(filepath.Join("testdata", "config.yaml")) + require.NoError(t, err) + + tests := []struct { + id component.ID + expected component.Config + }{ + { + id: component.NewIDWithName(metadata.Type, "defaults"), + expected: &Config{ + ServerConfig: configgrpc.ServerConfig{ + NetAddr: confignet.AddrConfig{ + Endpoint: "localhost:19001", + Transport: confignet.TransportTypeTCP, + }, + }, + }, + }, + { + id: component.NewIDWithName(metadata.Type, "custom"), + expected: &Config{ + ServerConfig: configgrpc.ServerConfig{ + NetAddr: confignet.AddrConfig{ + Endpoint: "localhost:4600", + Transport: confignet.TransportTypeTCP, + }, + }, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.id.String(), func(t *testing.T) { + factory := NewFactory() + cfg := factory.CreateDefaultConfig() + + sub, err := cm.Sub(tt.id.String()) + require.NoError(t, err) + require.NoError(t, sub.Unmarshal(cfg)) + + assert.NoError(t, component.ValidateConfig(cfg)) + assert.Equal(t, tt.expected, cfg) + }) + } +} diff --git a/receiver/envoyalsreceiver/doc.go b/receiver/envoyalsreceiver/doc.go new file mode 100644 index 000000000000..0b647b53c155 --- /dev/null +++ b/receiver/envoyalsreceiver/doc.go @@ -0,0 +1,6 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +//go:generate mdatagen metadata.yaml + +package envoyalsreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/envoyalsreceiver" diff --git a/receiver/envoyalsreceiver/factory.go 
b/receiver/envoyalsreceiver/factory.go new file mode 100644 index 000000000000..bfbb2cee4a1f --- /dev/null +++ b/receiver/envoyalsreceiver/factory.go @@ -0,0 +1,49 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package envoyalsreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/envoyalsreceiver" + +import ( + "context" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/config/configgrpc" + "go.opentelemetry.io/collector/config/confignet" + "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/receiver" + + "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/envoyalsreceiver/internal/metadata" +) + +const ( + defaultGRPCEndpoint = "localhost:19001" +) + +// NewFactory creates a new ALS receiver factory. +func NewFactory() receiver.Factory { + return receiver.NewFactory( + metadata.Type, + createDefaultConfig, + receiver.WithLogs(newReceiver, metadata.LogsStability)) +} + +func createDefaultConfig() component.Config { + return &Config{ + ServerConfig: configgrpc.ServerConfig{ + NetAddr: confignet.AddrConfig{ + Endpoint: defaultGRPCEndpoint, + Transport: confignet.TransportTypeTCP, + }, + }, + } +} + +func newReceiver( + _ context.Context, + st receiver.Settings, + cfg component.Config, + consumer consumer.Logs, +) (receiver.Logs, error) { + alsCfg := cfg.(*Config) + return newALSReceiver(alsCfg, consumer, st) +} diff --git a/receiver/envoyalsreceiver/factory_test.go b/receiver/envoyalsreceiver/factory_test.go new file mode 100644 index 000000000000..7ba5941f65b8 --- /dev/null +++ b/receiver/envoyalsreceiver/factory_test.go @@ -0,0 +1,38 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package envoyalsreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/envoyalsreceiver" + +import ( + "context" + "testing" + + "github.com/stretchr/testify/require" + 
"go.opentelemetry.io/collector/component/componenttest" + "go.opentelemetry.io/collector/config/configgrpc" + "go.opentelemetry.io/collector/config/confignet" + "go.opentelemetry.io/collector/consumer/consumertest" + "go.opentelemetry.io/collector/receiver/receivertest" +) + +func TestCreateDefaultConfig(t *testing.T) { + factory := NewFactory() + cfg := factory.CreateDefaultConfig() + require.NotNil(t, cfg, "failed to create default config") + require.NoError(t, componenttest.CheckConfigStruct(cfg)) +} + +func TestCreateReceiver(t *testing.T) { + factory := NewFactory() + cfg := factory.CreateDefaultConfig() + cfg.(*Config).ServerConfig = configgrpc.ServerConfig{ + NetAddr: confignet.AddrConfig{ + Endpoint: defaultGRPCEndpoint, + Transport: confignet.TransportTypeTCP, + }, + } + set := receivertest.NewNopSettings() + receiver, err := factory.CreateLogs(context.Background(), set, cfg, consumertest.NewNop()) + require.NoError(t, err, "receiver creation failed") + require.NotNil(t, receiver, "receiver creation failed") +} diff --git a/receiver/envoyalsreceiver/generated_component_test.go b/receiver/envoyalsreceiver/generated_component_test.go new file mode 100644 index 000000000000..35150d9f1ce0 --- /dev/null +++ b/receiver/envoyalsreceiver/generated_component_test.go @@ -0,0 +1,69 @@ +// Code generated by mdatagen. DO NOT EDIT. 
+ +package envoyalsreceiver + +import ( + "context" + "testing" + + "github.com/stretchr/testify/require" + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/component/componenttest" + "go.opentelemetry.io/collector/confmap/confmaptest" + "go.opentelemetry.io/collector/consumer/consumertest" + "go.opentelemetry.io/collector/receiver" + "go.opentelemetry.io/collector/receiver/receivertest" +) + +func TestComponentFactoryType(t *testing.T) { + require.Equal(t, "envoyals", NewFactory().Type().String()) +} + +func TestComponentConfigStruct(t *testing.T) { + require.NoError(t, componenttest.CheckConfigStruct(NewFactory().CreateDefaultConfig())) +} + +func TestComponentLifecycle(t *testing.T) { + factory := NewFactory() + + tests := []struct { + name string + createFn func(ctx context.Context, set receiver.Settings, cfg component.Config) (component.Component, error) + }{ + + { + name: "logs", + createFn: func(ctx context.Context, set receiver.Settings, cfg component.Config) (component.Component, error) { + return factory.CreateLogs(ctx, set, cfg, consumertest.NewNop()) + }, + }, + } + + cm, err := confmaptest.LoadConf("metadata.yaml") + require.NoError(t, err) + cfg := factory.CreateDefaultConfig() + sub, err := cm.Sub("tests::config") + require.NoError(t, err) + require.NoError(t, sub.Unmarshal(&cfg)) + + for _, tt := range tests { + t.Run(tt.name+"-shutdown", func(t *testing.T) { + c, err := tt.createFn(context.Background(), receivertest.NewNopSettings(), cfg) + require.NoError(t, err) + err = c.Shutdown(context.Background()) + require.NoError(t, err) + }) + t.Run(tt.name+"-lifecycle", func(t *testing.T) { + firstRcvr, err := tt.createFn(context.Background(), receivertest.NewNopSettings(), cfg) + require.NoError(t, err) + host := componenttest.NewNopHost() + require.NoError(t, err) + require.NoError(t, firstRcvr.Start(context.Background(), host)) + require.NoError(t, firstRcvr.Shutdown(context.Background())) + secondRcvr, err := 
tt.createFn(context.Background(), receivertest.NewNopSettings(), cfg) + require.NoError(t, err) + require.NoError(t, secondRcvr.Start(context.Background(), host)) + require.NoError(t, secondRcvr.Shutdown(context.Background())) + }) + } +} diff --git a/receiver/envoyalsreceiver/generated_package_test.go b/receiver/envoyalsreceiver/generated_package_test.go new file mode 100644 index 000000000000..2e965ab81961 --- /dev/null +++ b/receiver/envoyalsreceiver/generated_package_test.go @@ -0,0 +1,13 @@ +// Code generated by mdatagen. DO NOT EDIT. + +package envoyalsreceiver + +import ( + "testing" + + "go.uber.org/goleak" +) + +func TestMain(m *testing.M) { + goleak.VerifyTestMain(m) +} diff --git a/receiver/envoyalsreceiver/go.mod b/receiver/envoyalsreceiver/go.mod new file mode 100644 index 000000000000..6030ed07c479 --- /dev/null +++ b/receiver/envoyalsreceiver/go.mod @@ -0,0 +1,88 @@ +module github.com/open-telemetry/opentelemetry-collector-contrib/receiver/envoyalsreceiver + +go 1.22.0 + +require ( + github.com/envoyproxy/go-control-plane v0.13.1 + github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.117.0 + github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.117.0 + github.com/stretchr/testify v1.10.0 + go.opentelemetry.io/collector/component v0.118.1-0.20250121185328-fbefb22cc2b3 + go.opentelemetry.io/collector/component/componentstatus v0.118.1-0.20250121185328-fbefb22cc2b3 + go.opentelemetry.io/collector/component/componenttest v0.118.1-0.20250121185328-fbefb22cc2b3 + go.opentelemetry.io/collector/config/configgrpc v0.118.1-0.20250121185328-fbefb22cc2b3 + go.opentelemetry.io/collector/config/confignet v1.24.1-0.20250121185328-fbefb22cc2b3 + go.opentelemetry.io/collector/confmap v1.24.1-0.20250121185328-fbefb22cc2b3 + go.opentelemetry.io/collector/consumer v1.24.1-0.20250121185328-fbefb22cc2b3 + go.opentelemetry.io/collector/consumer/consumertest v0.118.1-0.20250121185328-fbefb22cc2b3 + 
go.opentelemetry.io/collector/pdata v1.24.1-0.20250121185328-fbefb22cc2b3 + go.opentelemetry.io/collector/receiver v0.118.1-0.20250121185328-fbefb22cc2b3 + go.opentelemetry.io/collector/receiver/receivertest v0.118.1-0.20250121185328-fbefb22cc2b3 + go.uber.org/goleak v1.3.0 + go.uber.org/zap v1.27.0 + google.golang.org/grpc v1.69.4 + google.golang.org/protobuf v1.36.3 +) + +require ( + github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/cncf/xds/go v0.0.0-20240905190251-b4127c9b8d78 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/envoyproxy/protoc-gen-validate v1.1.0 // indirect + github.com/fsnotify/fsnotify v1.8.0 // indirect + github.com/go-logr/logr v1.4.2 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/go-viper/mapstructure/v2 v2.2.1 // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/golang/snappy v0.0.4 // indirect + github.com/google/uuid v1.6.0 // indirect + github.com/hashicorp/go-version v1.7.0 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/klauspost/compress v1.17.11 // indirect + github.com/knadh/koanf/maps v0.1.1 // indirect + github.com/knadh/koanf/providers/confmap v0.1.0 // indirect + github.com/knadh/koanf/v2 v2.1.2 // indirect + github.com/mitchellh/copystructure v1.2.0 // indirect + github.com/mitchellh/reflectwalk v1.0.2 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/mostynb/go-grpc-compression v1.2.3 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.118.0 // indirect + github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + go.opentelemetry.io/auto/sdk v1.1.0 // indirect + go.opentelemetry.io/collector/client v1.24.1-0.20250121185328-fbefb22cc2b3 // indirect + go.opentelemetry.io/collector/config/configauth 
v0.118.1-0.20250121185328-fbefb22cc2b3 // indirect + go.opentelemetry.io/collector/config/configcompression v1.24.1-0.20250121185328-fbefb22cc2b3 // indirect + go.opentelemetry.io/collector/config/configopaque v1.24.1-0.20250121185328-fbefb22cc2b3 // indirect + go.opentelemetry.io/collector/config/configtelemetry v0.118.1-0.20250121185328-fbefb22cc2b3 // indirect + go.opentelemetry.io/collector/config/configtls v1.24.1-0.20250121185328-fbefb22cc2b3 // indirect + go.opentelemetry.io/collector/consumer/consumererror v0.118.1-0.20250121185328-fbefb22cc2b3 // indirect + go.opentelemetry.io/collector/consumer/xconsumer v0.118.1-0.20250121185328-fbefb22cc2b3 // indirect + go.opentelemetry.io/collector/extension v0.118.1-0.20250121185328-fbefb22cc2b3 // indirect + go.opentelemetry.io/collector/extension/auth v0.118.1-0.20250121185328-fbefb22cc2b3 // indirect + go.opentelemetry.io/collector/featuregate v1.24.1-0.20250121185328-fbefb22cc2b3 // indirect + go.opentelemetry.io/collector/pdata/pprofile v0.118.1-0.20250121185328-fbefb22cc2b3 // indirect + go.opentelemetry.io/collector/pipeline v0.118.1-0.20250121185328-fbefb22cc2b3 // indirect + go.opentelemetry.io/collector/receiver/xreceiver v0.118.1-0.20250121185328-fbefb22cc2b3 // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.56.0 // indirect + go.opentelemetry.io/otel v1.34.0 // indirect + go.opentelemetry.io/otel/metric v1.34.0 // indirect + go.opentelemetry.io/otel/sdk v1.34.0 // indirect + go.opentelemetry.io/otel/sdk/metric v1.34.0 // indirect + go.opentelemetry.io/otel/trace v1.34.0 // indirect + go.uber.org/multierr v1.11.0 // indirect + golang.org/x/net v0.34.0 // indirect + golang.org/x/sys v0.29.0 // indirect + golang.org/x/text v0.21.0 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect +) + +replace github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest => 
../../pkg/pdatatest + +replace github.com/open-telemetry/opentelemetry-collector-contrib/pkg/golden => ../../pkg/golden + +replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/common => ../../internal/common + +replace github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil => ../../pkg/pdatautil diff --git a/receiver/envoyalsreceiver/go.sum b/receiver/envoyalsreceiver/go.sum new file mode 100644 index 000000000000..b889a68270a8 --- /dev/null +++ b/receiver/envoyalsreceiver/go.sum @@ -0,0 +1,188 @@ +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cncf/xds/go v0.0.0-20240905190251-b4127c9b8d78 h1:QVw89YDxXxEe+l8gU8ETbOasdwEV+avkR75ZzsVV9WI= +github.com/cncf/xds/go v0.0.0-20240905190251-b4127c9b8d78/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/envoyproxy/go-control-plane v0.13.1 h1:vPfJZCkob6yTMEgS+0TwfTUfbHjfy/6vOJ8hUWX/uXE= +github.com/envoyproxy/go-control-plane v0.13.1/go.mod h1:X45hY0mufo6Fd0KW3rqsGvQMw58jvjymeCzBU3mWyHw= +github.com/envoyproxy/protoc-gen-validate v1.1.0 h1:tntQDh69XqOCOZsDz0lVJQez/2L6Uu2PdjCQwWCJ3bM= +github.com/envoyproxy/protoc-gen-validate v1.1.0/go.mod h1:sXRDRVmzEbkM7CVcM06s9shE/m23dg3wzjl0UWqJ2q4= +github.com/fsnotify/fsnotify v1.8.0 h1:dAwr6QBTBZIkG8roQaJjGof0pp0EeF+tNV7YBP3F/8M= +github.com/fsnotify/fsnotify v1.8.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod 
h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-viper/mapstructure/v2 v2.2.1 h1:ZAaOCxANMuZx5RCeg0mBdEZk7DZasvvZIxtHqx8aGss= +github.com/go-viper/mapstructure/v2 v2.2.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= +github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY= +github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc= +github.com/klauspost/compress 
v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0= +github.com/knadh/koanf/maps v0.1.1 h1:G5TjmUh2D7G2YWf5SQQqSiHRJEjaicvU0KpypqB3NIs= +github.com/knadh/koanf/maps v0.1.1/go.mod h1:npD/QZY3V6ghQDdcQzl1W4ICNVTkohC8E73eI2xW4yI= +github.com/knadh/koanf/providers/confmap v0.1.0 h1:gOkxhHkemwG4LezxxN8DMOFopOPghxRVp7JbIvdvqzU= +github.com/knadh/koanf/providers/confmap v0.1.0/go.mod h1:2uLhxQzJnyHKfxG927awZC7+fyHFdQkd697K4MdLnIU= +github.com/knadh/koanf/v2 v2.1.2 h1:I2rtLRqXRy1p01m/utEtpZSSA6dcJbgGVuE27kW2PzQ= +github.com/knadh/koanf/v2 v2.1.2/go.mod h1:Gphfaen0q1Fc1HTgJgSTC4oRX9R2R5ErYMZJy8fLJBo= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= +github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= +github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= +github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/mostynb/go-grpc-compression v1.2.3 h1:42/BKWMy0KEJGSdWvzqIyOZ95YcR9mLPqKctH7Uo//I= +github.com/mostynb/go-grpc-compression v1.2.3/go.mod 
h1:AghIxF3P57umzqM9yz795+y1Vjs47Km/Y2FE6ouQ7Lg= +github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 h1:GFCKgmp0tecUJ0sJuv4pzYCqS9+RGSn52M3FUwPs+uo= +github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= +github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= +github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= +go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= +go.opentelemetry.io/collector/client v1.24.1-0.20250121185328-fbefb22cc2b3 h1:MxzfNtItYodclGVQDLzdyBaKixbqEKC2sPGxTiY0uEE= +go.opentelemetry.io/collector/client v1.24.1-0.20250121185328-fbefb22cc2b3/go.mod h1:I5195HMWPseUSVEbNaEgMbz8rzx11T59I2YIkJQ2jrE= +go.opentelemetry.io/collector/component v0.118.1-0.20250121185328-fbefb22cc2b3 h1:ODfDW9siyGYEvEv1+oKf0abnpYbIsMwAlXuZMCUFPXw= +go.opentelemetry.io/collector/component v0.118.1-0.20250121185328-fbefb22cc2b3/go.mod h1:/fqrkzmOXsqm4boZaVtxi5YIz39/i8K8Wqd9oryz8Iw= +go.opentelemetry.io/collector/component/componentstatus v0.118.1-0.20250121185328-fbefb22cc2b3 
h1:anyK0wvAeTO3QpO2AvVGXaN7t9K/CWQXSHii+0Ygr8o= +go.opentelemetry.io/collector/component/componentstatus v0.118.1-0.20250121185328-fbefb22cc2b3/go.mod h1:V84x0j/NyHPJciFJ5R8DrJrTOgkYFkyXTh7TXQYvol4= +go.opentelemetry.io/collector/component/componenttest v0.118.1-0.20250121185328-fbefb22cc2b3 h1:ZnCUlmJ6ZqG+pL1fYrEXmg2FG+RxiSay5Fyxa0i79dY= +go.opentelemetry.io/collector/component/componenttest v0.118.1-0.20250121185328-fbefb22cc2b3/go.mod h1:eug78n4rxt5hdCSDWZ50wpYZXAl0ho/w6IsNtVZzQII= +go.opentelemetry.io/collector/config/configauth v0.118.1-0.20250121185328-fbefb22cc2b3 h1:FrH9pOMBYyhYnMCeINzeeWeT/RdcUHUnpGWooak4apM= +go.opentelemetry.io/collector/config/configauth v0.118.1-0.20250121185328-fbefb22cc2b3/go.mod h1:4w14UrrB+We1k+gt3/3+34SWKLKQdGDPQ/lpsL0tiHc= +go.opentelemetry.io/collector/config/configcompression v1.24.1-0.20250121185328-fbefb22cc2b3 h1:dJzzLwFqU/j3VHoaJetgUlPOzrZPtg9zUGhKVsM9WUo= +go.opentelemetry.io/collector/config/configcompression v1.24.1-0.20250121185328-fbefb22cc2b3/go.mod h1:LvYG00tbPTv0NOLoZN0wXq1F5thcxvukO8INq7xyfWU= +go.opentelemetry.io/collector/config/configgrpc v0.118.1-0.20250121185328-fbefb22cc2b3 h1:tptVdunGC+0y1KmEYvmgmLRR8Jam4y1KtfYRVoyLw5U= +go.opentelemetry.io/collector/config/configgrpc v0.118.1-0.20250121185328-fbefb22cc2b3/go.mod h1:R//tIJknJigDNZhuDmKiUpPrgCZ79HPKVdq0Jub3fkw= +go.opentelemetry.io/collector/config/confignet v1.24.1-0.20250121185328-fbefb22cc2b3 h1:z2wSQoQlbMfqEguwKl2NFqD3dhT9wIeRENZmadadvmg= +go.opentelemetry.io/collector/config/confignet v1.24.1-0.20250121185328-fbefb22cc2b3/go.mod h1:ZppUH1hgUJOubawEsxsQ9MzEYFytqo2GnVSS7d4CVxc= +go.opentelemetry.io/collector/config/configopaque v1.24.1-0.20250121185328-fbefb22cc2b3 h1:Oi9hXd7YIf3wa4F9SXeKwYyOkB+DRhfZgHjs44Z6jyQ= +go.opentelemetry.io/collector/config/configopaque v1.24.1-0.20250121185328-fbefb22cc2b3/go.mod h1:sW0t0iI/VfRL9VYX7Ik6XzVgPcR+Y5kejTLsYcMyDWs= +go.opentelemetry.io/collector/config/configtelemetry v0.118.1-0.20250121185328-fbefb22cc2b3 
h1:AOaJFxyz+7Zlh2AbZd7vu2gYA5a4rSItbwAS7GYAaO4= +go.opentelemetry.io/collector/config/configtelemetry v0.118.1-0.20250121185328-fbefb22cc2b3/go.mod h1:SlBEwQg0qly75rXZ6W1Ig8jN25KBVBkFIIAUI1GiAAE= +go.opentelemetry.io/collector/config/configtls v1.24.1-0.20250121185328-fbefb22cc2b3 h1:zeC8GoDbDxtUbEvp8sPCXONuMxqWQPowXEzUZySxSgA= +go.opentelemetry.io/collector/config/configtls v1.24.1-0.20250121185328-fbefb22cc2b3/go.mod h1:d0OdfkbuYEMYDBJLSbpH0wPI29lmSiFT3geqh/ygF2k= +go.opentelemetry.io/collector/confmap v1.24.1-0.20250121185328-fbefb22cc2b3 h1:bYJCjMGjEi0hFpVsdkg20ri5ZGhG7VfrlPjdW7FhclI= +go.opentelemetry.io/collector/confmap v1.24.1-0.20250121185328-fbefb22cc2b3/go.mod h1:Rrhs+MWoaP6AswZp+ReQ2VO9dfOfcUjdjiSHBsG+nec= +go.opentelemetry.io/collector/consumer v1.24.1-0.20250121185328-fbefb22cc2b3 h1:rMGS7YpPjLWbykAQNoBZhTZ8OONKSmnewCFggZXMPmg= +go.opentelemetry.io/collector/consumer v1.24.1-0.20250121185328-fbefb22cc2b3/go.mod h1:YyTWeyBUYlVi983ylJAY5qHnCajq67on3A59OpS6A/I= +go.opentelemetry.io/collector/consumer/consumererror v0.118.1-0.20250121185328-fbefb22cc2b3 h1:wVb72DufdN0fQoScGeK7ByM5GTf0BkdTA4ZtKOQg+RI= +go.opentelemetry.io/collector/consumer/consumererror v0.118.1-0.20250121185328-fbefb22cc2b3/go.mod h1:/fhqEIxH0hmnDa6zm38XzsdURr5GrlC9oKO70JVorHU= +go.opentelemetry.io/collector/consumer/consumertest v0.118.1-0.20250121185328-fbefb22cc2b3 h1:sQKFJz7EYn9e9KsgVNjnLsONuc4w3uUo2+YzM8C2jtE= +go.opentelemetry.io/collector/consumer/consumertest v0.118.1-0.20250121185328-fbefb22cc2b3/go.mod h1:fOVRcSFNghbaDpTJtTVHvFEQHeAAW8WEX0dYWbPpgBc= +go.opentelemetry.io/collector/consumer/xconsumer v0.118.1-0.20250121185328-fbefb22cc2b3 h1:HCyq06lz8dtWHhcKCd5BuhZBu6USgjBEuHyYhBuiw54= +go.opentelemetry.io/collector/consumer/xconsumer v0.118.1-0.20250121185328-fbefb22cc2b3/go.mod h1:Ij9o9d7hZb4be6ql6yqMR7xy5fcFR0SSD6RRIYWlu88= +go.opentelemetry.io/collector/extension v0.118.1-0.20250121185328-fbefb22cc2b3 h1:pigm8Nxub1OMInnkdu9U/Gqm0GuWmYgVUiRa0WuJmo0= 
+go.opentelemetry.io/collector/extension v0.118.1-0.20250121185328-fbefb22cc2b3/go.mod h1:7yUjnhGc/ota8nhFdLdP3trrYFx3jqtq7NAV+i04eJw= +go.opentelemetry.io/collector/extension/auth v0.118.1-0.20250121185328-fbefb22cc2b3 h1:ENw3837wlS/3iSu0BIyUNjDIQAstkdBiTaCixj6yzrA= +go.opentelemetry.io/collector/extension/auth v0.118.1-0.20250121185328-fbefb22cc2b3/go.mod h1:cs4Er00Asntjw7aPHRVQDvvtMzppKjRgMECa89b86AE= +go.opentelemetry.io/collector/extension/auth/authtest v0.118.0 h1:KIORXNc71vfpQrrZOntiZesRCZtQ8alrASWVT/zZkyo= +go.opentelemetry.io/collector/extension/auth/authtest v0.118.0/go.mod h1:0ZlSP9NPAfTRQd6Tx4mOH0IWrp6ufHaVN//L9Mb87gM= +go.opentelemetry.io/collector/featuregate v1.24.1-0.20250121185328-fbefb22cc2b3 h1:bTrqWcaRulXfpSQwnWrGlCsN4ZO5wzD931vH2E28Vc4= +go.opentelemetry.io/collector/featuregate v1.24.1-0.20250121185328-fbefb22cc2b3/go.mod h1:3GaXqflNDVwWndNGBJ1+XJFy3Fv/XrFgjMN60N3z7yg= +go.opentelemetry.io/collector/pdata v1.24.1-0.20250121185328-fbefb22cc2b3 h1:GXjNTD7hyz2Qwuu5uwLYeJTkWECWL6eL41w/JrQIJrU= +go.opentelemetry.io/collector/pdata v1.24.1-0.20250121185328-fbefb22cc2b3/go.mod h1:6lE9r5x41Z9GyvTSBetXSHRikhiZZK5ApmFtX35ZbXc= +go.opentelemetry.io/collector/pdata/pprofile v0.118.1-0.20250121185328-fbefb22cc2b3 h1:INViss+PcyyzYe/ZFHHFr/h+Mmo7n94nSzdmp68gBqI= +go.opentelemetry.io/collector/pdata/pprofile v0.118.1-0.20250121185328-fbefb22cc2b3/go.mod h1:iD66/nCk+xHh4q/1FBcYBQTEZKZuejggZBkm14/cobA= +go.opentelemetry.io/collector/pdata/testdata v0.118.0 h1:5N0w1SX9KIRkwvtkrpzQgXy9eGk3vfNG0ds6mhEPMIM= +go.opentelemetry.io/collector/pdata/testdata v0.118.0/go.mod h1:UY+GHV5bOC1BnFburOZ0wiHReJj1XbW12mi2Ogbc5Lw= +go.opentelemetry.io/collector/pipeline v0.118.1-0.20250121185328-fbefb22cc2b3 h1:uXvVXIkbxeEJa9L+xM7b5+2Y/LjfGKX65fQdRfW5+PQ= +go.opentelemetry.io/collector/pipeline v0.118.1-0.20250121185328-fbefb22cc2b3/go.mod h1:qE3DmoB05AW0C3lmPvdxZqd/H4po84NPzd5MrqgtL74= +go.opentelemetry.io/collector/receiver v0.118.1-0.20250121185328-fbefb22cc2b3 
h1:uP/22oV69zYMWFdeCQHlSpVC22UZWmZsHgcdFDW89eo= +go.opentelemetry.io/collector/receiver v0.118.1-0.20250121185328-fbefb22cc2b3/go.mod h1:ycM9v5g4GvMspTtQbwLvmLOv4djo/bVw4RefJreGGaY= +go.opentelemetry.io/collector/receiver/receivertest v0.118.1-0.20250121185328-fbefb22cc2b3 h1:i9gXuyWdAXD+NVaGJbPnY4q+u5RwkOb/NSBnv1+IAMw= +go.opentelemetry.io/collector/receiver/receivertest v0.118.1-0.20250121185328-fbefb22cc2b3/go.mod h1:x9N91YI3onF0+enjYegcHYOb50Of2xO05c8EyE/baJ0= +go.opentelemetry.io/collector/receiver/xreceiver v0.118.1-0.20250121185328-fbefb22cc2b3 h1:lSOxA/PFNKwCCf0bYwOkTtvYn4Ch4QADFVJU/kuye08= +go.opentelemetry.io/collector/receiver/xreceiver v0.118.1-0.20250121185328-fbefb22cc2b3/go.mod h1:WLPXXIuodY7quBgqCz3OIsPNdBMLDej5nUIbiyyfoUc= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.56.0 h1:yMkBS9yViCc7U7yeLzJPM2XizlfdVvBRSmsQDWu6qc0= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.56.0/go.mod h1:n8MR6/liuGB5EmTETUBeU5ZgqMOlqKRxUaqPQBOANZ8= +go.opentelemetry.io/otel v1.34.0 h1:zRLXxLCgL1WyKsPVrgbSdMN4c0FMkDAskSTQP+0hdUY= +go.opentelemetry.io/otel v1.34.0/go.mod h1:OWFPOQ+h4G8xpyjgqo4SxJYdDQ/qmRH+wivy7zzx9oI= +go.opentelemetry.io/otel/metric v1.34.0 h1:+eTR3U0MyfWjRDhmFMxe2SsW64QrZ84AOhvqS7Y+PoQ= +go.opentelemetry.io/otel/metric v1.34.0/go.mod h1:CEDrp0fy2D0MvkXE+dPV7cMi8tWZwX3dmaIhwPOaqHE= +go.opentelemetry.io/otel/sdk v1.34.0 h1:95zS4k/2GOy069d321O8jWgYsW3MzVV+KuSPKp7Wr1A= +go.opentelemetry.io/otel/sdk v1.34.0/go.mod h1:0e/pNiaMAqaykJGKbi+tSjWfNNHMTxoC9qANsCzbyxU= +go.opentelemetry.io/otel/sdk/metric v1.34.0 h1:5CeK9ujjbFVL5c1PhLuStg1wxA7vQv7ce1EK0Gyvahk= +go.opentelemetry.io/otel/sdk/metric v1.34.0/go.mod h1:jQ/r8Ze28zRKoNRdkjCZxfs6YvBTG1+YIqyFVFYec5w= +go.opentelemetry.io/otel/trace v1.34.0 h1:+ouXS2V8Rd4hp4580a8q23bg0azF2nI8cqLYnC8mh/k= +go.opentelemetry.io/otel/trace v1.34.0/go.mod h1:Svm7lSjQD7kG7KJ/MUHPVXSDGz2OX4h0M2jHBhmSfRE= +go.uber.org/goleak v1.3.0 
h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= +go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.34.0 h1:Mb7Mrk043xzHgnRM88suvJFwzVrRfHEHJEl5/71CKw0= +golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys 
v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU= +golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= +golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28 h1:XVhgTWWV3kGQlwJHR3upFWZeTsei6Oks1apkZSeonIE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI= +google.golang.org/grpc v1.69.4 h1:MF5TftSMkd8GLw/m0KM6V8CMOCY6NZ1NQDPGFgbTt4A= +google.golang.org/grpc v1.69.4/go.mod h1:vyjdE6jLBI76dgpDojsFGNaHlxdjXN9ghpnd2o7JGZ4= 
+google.golang.org/protobuf v1.36.3 h1:82DV7MYdb8anAVi3qge1wSnMDrnKK7ebr+I0hHRN1BU= +google.golang.org/protobuf v1.36.3/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/receiver/envoyalsreceiver/internal/als/server.go b/receiver/envoyalsreceiver/internal/als/server.go new file mode 100644 index 000000000000..7928bd6bf5c7 --- /dev/null +++ b/receiver/envoyalsreceiver/internal/als/server.go @@ -0,0 +1,89 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package als // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/envoyalsreceiver/internal/als" + +import ( + "context" + "errors" + "io" + + alsv3 "github.com/envoyproxy/go-control-plane/envoy/service/accesslog/v3" + "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/plog" + "go.opentelemetry.io/collector/receiver/receiverhelper" +) + +const ( + apiVersionAttr = "api_version" + apiVersionVal = "v3" + logTypeAttr = "log_type" + httpTypeVal = "http" + tcpTypeVal = "tcp" +) + +type Server struct { + nextConsumer consumer.Logs + obsrep *receiverhelper.ObsReport +} + +func New(nextConsumer consumer.Logs, obsrep *receiverhelper.ObsReport) *Server { + return &Server{ + nextConsumer: nextConsumer, + obsrep: obsrep, + } +} + +func (s *Server) StreamAccessLogs(logStream alsv3.AccessLogService_StreamAccessLogsServer) error { + for { + data, err := logStream.Recv() + if errors.Is(err, io.EOF) { + break + } + if 
err != nil { + return err + } + + ctx := s.obsrep.StartLogsOp(context.Background()) + logs := toLogs(data) + logRecordCount := logs.LogRecordCount() + err = s.nextConsumer.ConsumeLogs(ctx, logs) + s.obsrep.EndLogsOp(ctx, "protobuf", logRecordCount, err) + if err != nil { + return err + } + } + + return nil +} + +func toLogs(data *alsv3.StreamAccessLogsMessage) plog.Logs { + logs := plog.NewLogs() + + rls := logs.ResourceLogs().AppendEmpty() + logSlice := rls.ScopeLogs().AppendEmpty().LogRecords() + + httpLogs := data.GetHttpLogs() + if httpLogs != nil { + for _, httpLog := range httpLogs.LogEntry { + lr := logSlice.AppendEmpty() + lr.SetTimestamp(pcommon.NewTimestampFromTime(httpLog.CommonProperties.StartTime.AsTime())) + lr.Attributes().PutStr(apiVersionAttr, apiVersionVal) + lr.Attributes().PutStr(logTypeAttr, httpTypeVal) + lr.Body().SetStr(httpLog.String()) + } + } + + tcpLogs := data.GetTcpLogs() + if tcpLogs != nil { + for _, tcpLog := range tcpLogs.LogEntry { + lr := logSlice.AppendEmpty() + lr.SetTimestamp(pcommon.NewTimestampFromTime(tcpLog.CommonProperties.StartTime.AsTime())) + lr.Attributes().PutStr(apiVersionAttr, apiVersionVal) + lr.Attributes().PutStr(logTypeAttr, tcpTypeVal) + lr.Body().SetStr(tcpLog.String()) + } + } + return logs +} diff --git a/receiver/envoyalsreceiver/internal/metadata/generated_status.go b/receiver/envoyalsreceiver/internal/metadata/generated_status.go new file mode 100644 index 000000000000..68a29876bc76 --- /dev/null +++ b/receiver/envoyalsreceiver/internal/metadata/generated_status.go @@ -0,0 +1,16 @@ +// Code generated by mdatagen. DO NOT EDIT. 
+ +package metadata + +import ( + "go.opentelemetry.io/collector/component" +) + +var ( + Type = component.MustNewType("envoyals") + ScopeName = "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/envoyalsreceiver" +) + +const ( + LogsStability = component.StabilityLevelDevelopment +) diff --git a/receiver/envoyalsreceiver/metadata.yaml b/receiver/envoyalsreceiver/metadata.yaml new file mode 100644 index 000000000000..cb9d5cc1a140 --- /dev/null +++ b/receiver/envoyalsreceiver/metadata.yaml @@ -0,0 +1,9 @@ +type: envoyals + +status: + class: receiver + stability: + development: [logs] + distributions: [] + codeowners: + active: [evan-bradley] diff --git a/receiver/envoyalsreceiver/testdata/config.yaml b/receiver/envoyalsreceiver/testdata/config.yaml new file mode 100644 index 000000000000..4883c58986ae --- /dev/null +++ b/receiver/envoyalsreceiver/testdata/config.yaml @@ -0,0 +1,5 @@ +# The following demonstrates how to enable protocols with defaults. +envoyals/defaults: + +envoyals/custom: + endpoint: localhost:4600 diff --git a/versions.yaml b/versions.yaml index 802e23d45cfe..2f15b1d7eb99 100644 --- a/versions.yaml +++ b/versions.yaml @@ -217,6 +217,7 @@ module-sets: - github.com/open-telemetry/opentelemetry-collector-contrib/receiver/datadogreceiver - github.com/open-telemetry/opentelemetry-collector-contrib/receiver/dockerstatsreceiver - github.com/open-telemetry/opentelemetry-collector-contrib/receiver/elasticsearchreceiver + - github.com/open-telemetry/opentelemetry-collector-contrib/receiver/envoyalsreceiver - github.com/open-telemetry/opentelemetry-collector-contrib/receiver/expvarreceiver - github.com/open-telemetry/opentelemetry-collector-contrib/receiver/filelogreceiver - github.com/open-telemetry/opentelemetry-collector-contrib/receiver/filestatsreceiver From 39816b7f76956c8f6502ddfb5d54d50520325a54 Mon Sep 17 00:00:00 2001 From: Paulo Janotti Date: Thu, 23 Jan 2025 07:03:27 -0800 Subject: [PATCH 11/12] Fix flaky prom remote write 
exporter concurrency test (#37430) Fix #37104 This is more an artifact of the test firing an unbounded number of go routines each one making its own HTTP request. Although keepalive is enabled by default the code ends up not re-using many of the connections causing the many connections to end up in a TIME_WAIT state. In order to avoid this the test now limits the number of concurrent requests and has a small change to the actual code to facilitate re-use of existing TCP connections used by the HTTP client. Although there is a change to non-test code I don't consider this a bug worth changelog because no user of the component should reach such high burst of "push metrics" in any reasonable production scenario. --- .../prometheusremotewriteexporter/exporter.go | 5 ++++- .../exporter_concurrency_test.go | 16 ++++++++++------ 2 files changed, 14 insertions(+), 7 deletions(-) diff --git a/exporter/prometheusremotewriteexporter/exporter.go b/exporter/prometheusremotewriteexporter/exporter.go index 6ad7d31981c0..6829bb57201c 100644 --- a/exporter/prometheusremotewriteexporter/exporter.go +++ b/exporter/prometheusremotewriteexporter/exporter.go @@ -353,7 +353,10 @@ func (prwe *prwExporter) execute(ctx context.Context, writeReq *prompb.WriteRequ if err != nil { return err } - defer resp.Body.Close() + defer func() { + _, _ = io.Copy(io.Discard, resp.Body) + resp.Body.Close() + }() // 2xx status code is considered a success // 5xx errors are recoverable and the exporter should retry diff --git a/exporter/prometheusremotewriteexporter/exporter_concurrency_test.go b/exporter/prometheusremotewriteexporter/exporter_concurrency_test.go index bf9fcbd968cc..5369b201e526 100644 --- a/exporter/prometheusremotewriteexporter/exporter_concurrency_test.go +++ b/exporter/prometheusremotewriteexporter/exporter_concurrency_test.go @@ -8,7 +8,7 @@ import ( "io" "net/http" "net/http/httptest" - "os" + "runtime" "strconv" "sync" "testing" @@ -32,9 +32,6 @@ import ( // Test everything works 
when there is more than one goroutine calling PushMetrics. // Today we only use 1 worker per exporter, but the intention of this test is to future-proof in case it changes. func Test_PushMetricsConcurrent(t *testing.T) { - if os.Getenv("ImageOs") == "win25" && os.Getenv("GITHUB_ACTIONS") == "true" { - t.Skip("Skipping test on Windows 2025 GH runners, see https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/37104") - } n := 1000 ms := make([]pmetric.Metrics, n) testIDKey := "test_id" @@ -137,15 +134,22 @@ func Test_PushMetricsConcurrent(t *testing.T) { resp, checkRequestErr := http.Get(server.URL) require.NoError(c, checkRequestErr) assert.NoError(c, resp.Body.Close()) - }, 5*time.Second, 100*time.Millisecond) + }, 15*time.Second, 100*time.Millisecond) var wg sync.WaitGroup wg.Add(n) + maxConcurrentGoroutines := runtime.NumCPU() * 4 + semaphore := make(chan struct{}, maxConcurrentGoroutines) for _, m := range ms { + semaphore <- struct{}{} go func() { + defer func() { + <-semaphore + wg.Done() + }() + err := prwe.PushMetrics(ctx, m) assert.NoError(t, err) - wg.Done() }() } wg.Wait() From 4542bbf97e7b0c10949852b50c96004d2b60846d Mon Sep 17 00:00:00 2001 From: Mauri de Souza Meneguzzo Date: Thu, 23 Jan 2025 12:14:52 -0300 Subject: [PATCH 12/12] [elasticsearchexporter]: Add dynamic document id support for logs (#37065) #### Description This PR adds a new config option `logs_dynamic_id` that when set to true reads the `elasticsearch.document_id` attribute from each log record and uses it as the final document id in Elasticsearch. This is only implemented for logs but I can open subsequent PRs supporting metrics and traces akin to the `*_dynamic_index` options. Fixes #36882 #### Testing Added tests to verify that the document ID attribute can be read from the log record and that the _id is properly forwarded to Elasticsearch. Also asserted that when there is no doc id attribute the current behavior is retained. 
#### Documentation Updated the readme to mention the new `logs_dynamic_id` config option. --------- Co-authored-by: Carson Ip Co-authored-by: Christos Markou --- ...elasticsearchexporter_logs_dynamic_id.yaml | 27 ++++++ exporter/elasticsearchexporter/README.md | 3 + exporter/elasticsearchexporter/bulkindexer.go | 10 ++- .../elasticsearchexporter/bulkindexer_test.go | 8 +- exporter/elasticsearchexporter/config.go | 7 ++ exporter/elasticsearchexporter/config_test.go | 9 ++ exporter/elasticsearchexporter/exporter.go | 27 +++++- .../elasticsearchexporter/exporter_test.go | 87 +++++++++++++++++++ exporter/elasticsearchexporter/factory.go | 3 + .../elasticsearchexporter/pdata_serializer.go | 2 +- .../pdata_serializer_test.go | 1 + 11 files changed, 171 insertions(+), 13 deletions(-) create mode 100644 .chloggen/elasticsearchexporter_logs_dynamic_id.yaml diff --git a/.chloggen/elasticsearchexporter_logs_dynamic_id.yaml b/.chloggen/elasticsearchexporter_logs_dynamic_id.yaml new file mode 100644 index 000000000000..84867eac2a07 --- /dev/null +++ b/.chloggen/elasticsearchexporter_logs_dynamic_id.yaml @@ -0,0 +1,27 @@ +# Use this changelog template to create an entry for release notes. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: enhancement + +# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver) +component: elasticsearchexporter + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: Add config `logs_dynamic_id` to dynamically set the document ID of log records using log record attribute `elasticsearch.document_id` + +# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. +issues: [36882] + +# (Optional) One or more lines of additional information to render under the primary note. 
+# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: + +# If your change doesn't affect end users or the exported elements of any package, +# you should instead start your pull request title with [chore] or use the "Skip Changelog" label. +# Optional: The change log or logs in which this entry should be included. +# e.g. '[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. +# Default: '[user]' +change_logs: [user] diff --git a/exporter/elasticsearchexporter/README.md b/exporter/elasticsearchexporter/README.md index aa8bb6518ccd..d78d5cba8421 100644 --- a/exporter/elasticsearchexporter/README.md +++ b/exporter/elasticsearchexporter/README.md @@ -142,6 +142,9 @@ This can be customised through the following settings: - `prefix_separator`(default=`-`): Set a separator between logstash_prefix and date. - `date_format`(default=`%Y.%m.%d`): Time format (based on strftime) to generate the second part of the Index name. +- `logs_dynamic_id` (optional): Dynamically determines the document ID to be used in Elasticsearch based on a log record attribute. + - `enabled`(default=false): Enable/Disable dynamic ID for log records. If `elasticsearch.document_id` exists and is not an empty string in the log record attributes, it will be used as the document ID. Otherwise, the document ID will be generated by Elasticsearch. The attribute `elasticsearch.document_id` is removed from the final document. 
+ ### Elasticsearch document mapping The Elasticsearch exporter supports several document schemas and preprocessing diff --git a/exporter/elasticsearchexporter/bulkindexer.go b/exporter/elasticsearchexporter/bulkindexer.go index 2200216be4ef..ded879d3a036 100644 --- a/exporter/elasticsearchexporter/bulkindexer.go +++ b/exporter/elasticsearchexporter/bulkindexer.go @@ -31,7 +31,7 @@ type bulkIndexer interface { type bulkIndexerSession interface { // Add adds a document to the bulk indexing session. - Add(ctx context.Context, index string, document io.WriterTo, dynamicTemplates map[string]string) error + Add(ctx context.Context, index string, docID string, document io.WriterTo, dynamicTemplates map[string]string) error // End must be called on the session object once it is no longer // needed, in order to release any associated resources. @@ -126,8 +126,9 @@ type syncBulkIndexerSession struct { } // Add adds an item to the sync bulk indexer session. -func (s *syncBulkIndexerSession) Add(ctx context.Context, index string, document io.WriterTo, dynamicTemplates map[string]string) error { - err := s.bi.Add(docappender.BulkIndexerItem{Index: index, Body: document, DynamicTemplates: dynamicTemplates}) +func (s *syncBulkIndexerSession) Add(ctx context.Context, index string, docID string, document io.WriterTo, dynamicTemplates map[string]string) error { + doc := docappender.BulkIndexerItem{Index: index, Body: document, DocumentID: docID, DynamicTemplates: dynamicTemplates} + err := s.bi.Add(doc) if err != nil { return err } @@ -248,10 +249,11 @@ func (a *asyncBulkIndexer) Close(ctx context.Context) error { // Add adds an item to the async bulk indexer session. // // Adding an item after a call to Close() will panic. 
-func (s asyncBulkIndexerSession) Add(ctx context.Context, index string, document io.WriterTo, dynamicTemplates map[string]string) error { +func (s asyncBulkIndexerSession) Add(ctx context.Context, index string, docID string, document io.WriterTo, dynamicTemplates map[string]string) error { item := docappender.BulkIndexerItem{ Index: index, Body: document, + DocumentID: docID, DynamicTemplates: dynamicTemplates, } select { diff --git a/exporter/elasticsearchexporter/bulkindexer_test.go b/exporter/elasticsearchexporter/bulkindexer_test.go index 2b3d86a30128..9f2139e83710 100644 --- a/exporter/elasticsearchexporter/bulkindexer_test.go +++ b/exporter/elasticsearchexporter/bulkindexer_test.go @@ -102,7 +102,7 @@ func TestAsyncBulkIndexer_flush(t *testing.T) { session, err := bulkIndexer.StartSession(context.Background()) require.NoError(t, err) - assert.NoError(t, session.Add(context.Background(), "foo", strings.NewReader(`{"foo": "bar"}`), nil)) + assert.NoError(t, session.Add(context.Background(), "foo", "", strings.NewReader(`{"foo": "bar"}`), nil)) // should flush time.Sleep(100 * time.Millisecond) assert.Equal(t, int64(1), bulkIndexer.stats.docsIndexed.Load()) @@ -229,7 +229,7 @@ func TestAsyncBulkIndexer_flush_error(t *testing.T) { session, err := bulkIndexer.StartSession(context.Background()) require.NoError(t, err) - assert.NoError(t, session.Add(context.Background(), "foo", strings.NewReader(`{"foo": "bar"}`), nil)) + assert.NoError(t, session.Add(context.Background(), "foo", "", strings.NewReader(`{"foo": "bar"}`), nil)) // should flush time.Sleep(100 * time.Millisecond) assert.Equal(t, int64(0), bulkIndexer.stats.docsIndexed.Load()) @@ -312,7 +312,7 @@ func runBulkIndexerOnce(t *testing.T, config *Config, client *elasticsearch.Clie session, err := bulkIndexer.StartSession(context.Background()) require.NoError(t, err) - assert.NoError(t, session.Add(context.Background(), "foo", strings.NewReader(`{"foo": "bar"}`), nil)) + assert.NoError(t, 
session.Add(context.Background(), "foo", "", strings.NewReader(`{"foo": "bar"}`), nil)) assert.NoError(t, bulkIndexer.Close(context.Background())) return bulkIndexer @@ -338,7 +338,7 @@ func TestSyncBulkIndexer_flushBytes(t *testing.T) { session, err := bi.StartSession(context.Background()) require.NoError(t, err) - assert.NoError(t, session.Add(context.Background(), "foo", strings.NewReader(`{"foo": "bar"}`), nil)) + assert.NoError(t, session.Add(context.Background(), "foo", "", strings.NewReader(`{"foo": "bar"}`), nil)) assert.Equal(t, int64(1), reqCnt.Load()) // flush due to flush::bytes assert.NoError(t, bi.Close(context.Background())) } diff --git a/exporter/elasticsearchexporter/config.go b/exporter/elasticsearchexporter/config.go index 40acdaf99497..bd246a398b8b 100644 --- a/exporter/elasticsearchexporter/config.go +++ b/exporter/elasticsearchexporter/config.go @@ -53,6 +53,9 @@ type Config struct { // fall back to pure TracesIndex, if 'elasticsearch.index.prefix' or 'elasticsearch.index.suffix' are not found in resource or attribute (prio: resource > attribute) TracesDynamicIndex DynamicIndexSetting `mapstructure:"traces_dynamic_index"` + // LogsDynamicID configures whether log record attribute `elasticsearch.document_id` is set as the document ID in ES. + LogsDynamicID DynamicIDSettings `mapstructure:"logs_dynamic_id"` + // Pipeline configures the ingest node pipeline name that should be used to process the // events. // @@ -112,6 +115,10 @@ type DynamicIndexSetting struct { Enabled bool `mapstructure:"enabled"` } +type DynamicIDSettings struct { + Enabled bool `mapstructure:"enabled"` +} + // AuthenticationSettings defines user authentication related settings. type AuthenticationSettings struct { // User is used to configure HTTP Basic Authentication. 
diff --git a/exporter/elasticsearchexporter/config_test.go b/exporter/elasticsearchexporter/config_test.go index 153001b149e2..51d3955ebbd7 100644 --- a/exporter/elasticsearchexporter/config_test.go +++ b/exporter/elasticsearchexporter/config_test.go @@ -73,6 +73,9 @@ func TestConfig(t *testing.T) { TracesDynamicIndex: DynamicIndexSetting{ Enabled: false, }, + LogsDynamicID: DynamicIDSettings{ + Enabled: false, + }, Pipeline: "mypipeline", ClientConfig: withDefaultHTTPClientConfig(func(cfg *confighttp.ClientConfig) { cfg.Timeout = 2 * time.Minute @@ -144,6 +147,9 @@ func TestConfig(t *testing.T) { TracesDynamicIndex: DynamicIndexSetting{ Enabled: false, }, + LogsDynamicID: DynamicIDSettings{ + Enabled: false, + }, Pipeline: "mypipeline", ClientConfig: withDefaultHTTPClientConfig(func(cfg *confighttp.ClientConfig) { cfg.Timeout = 2 * time.Minute @@ -215,6 +221,9 @@ func TestConfig(t *testing.T) { TracesDynamicIndex: DynamicIndexSetting{ Enabled: false, }, + LogsDynamicID: DynamicIDSettings{ + Enabled: false, + }, Pipeline: "mypipeline", ClientConfig: withDefaultHTTPClientConfig(func(cfg *confighttp.ClientConfig) { cfg.Timeout = 2 * time.Minute diff --git a/exporter/elasticsearchexporter/exporter.go b/exporter/elasticsearchexporter/exporter.go index b13d1336b94d..52bb13a599e3 100644 --- a/exporter/elasticsearchexporter/exporter.go +++ b/exporter/elasticsearchexporter/exporter.go @@ -22,6 +22,11 @@ import ( "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/elasticsearchexporter/internal/pool" ) +const ( + // documentIDAttributeName is the attribute name used to specify the document ID. 
+ documentIDAttributeName = "elasticsearch.document_id" +) + type elasticsearchExporter struct { component.TelemetrySettings userAgent string @@ -176,13 +181,15 @@ func (e *elasticsearchExporter) pushLogRecord( } buf := e.bufferPool.NewPooledBuffer() + docID := e.extractDocumentIDAttribute(record.Attributes()) err := e.model.encodeLog(resource, resourceSchemaURL, record, scope, scopeSchemaURL, fIndex, buf.Buffer) if err != nil { buf.Recycle() return fmt.Errorf("failed to encode log event: %w", err) } + // not recycling after Add returns an error as we don't know if it's already recycled - return bulkIndexerSession.Add(ctx, fIndex.Index, buf, nil) + return bulkIndexerSession.Add(ctx, fIndex.Index, docID, buf, nil) } func (e *elasticsearchExporter) pushMetricsData( @@ -299,7 +306,7 @@ func (e *elasticsearchExporter) pushMetricsData( errs = append(errs, err) continue } - if err := session.Add(ctx, fIndex.Index, buf, dynamicTemplates); err != nil { + if err := session.Add(ctx, fIndex.Index, "", buf, dynamicTemplates); err != nil { // not recycling after Add returns an error as we don't know if it's already recycled if cerr := ctx.Err(); cerr != nil { return cerr @@ -422,7 +429,7 @@ func (e *elasticsearchExporter) pushTraceRecord( return fmt.Errorf("failed to encode trace record: %w", err) } // not recycling after Add returns an error as we don't know if it's already recycled - return bulkIndexerSession.Add(ctx, fIndex.Index, buf, nil) + return bulkIndexerSession.Add(ctx, fIndex.Index, "", buf, nil) } func (e *elasticsearchExporter) pushSpanEvent( @@ -454,5 +461,17 @@ func (e *elasticsearchExporter) pushSpanEvent( return nil } // not recycling after Add returns an error as we don't know if it's already recycled - return bulkIndexerSession.Add(ctx, fIndex.Index, buf, nil) + return bulkIndexerSession.Add(ctx, fIndex.Index, "", buf, nil) +} + +func (e *elasticsearchExporter) extractDocumentIDAttribute(m pcommon.Map) string { + if !e.config.LogsDynamicID.Enabled { + return 
"" + } + + v, ok := m.Get(documentIDAttributeName) + if !ok { + return "" + } + return v.AsString() } diff --git a/exporter/elasticsearchexporter/exporter_test.go b/exporter/elasticsearchexporter/exporter_test.go index b045ccb325d1..74a6ec5dfcfb 100644 --- a/exporter/elasticsearchexporter/exporter_test.go +++ b/exporter/elasticsearchexporter/exporter_test.go @@ -736,6 +736,82 @@ func TestExporterLogs(t *testing.T) { assert.JSONEq(t, `{"a":"a","a.b":"a.b"}`, gjson.GetBytes(doc, `resource.attributes`).Raw) }) + t.Run("publish logs with dynamic id", func(t *testing.T) { + t.Parallel() + exampleDocID := "abc123" + tableTests := []struct { + name string + expectedDocID string // "" means the _id will not be set + recordAttrs map[string]any + }{ + { + name: "missing document id attribute should not set _id", + expectedDocID: "", + }, + { + name: "empty document id attribute should not set _id", + expectedDocID: "", + recordAttrs: map[string]any{ + documentIDAttributeName: "", + }, + }, + { + name: "record attributes", + expectedDocID: exampleDocID, + recordAttrs: map[string]any{ + documentIDAttributeName: exampleDocID, + }, + }, + } + + cfgs := map[string]func(*Config){ + "async": func(cfg *Config) { + batcherEnabled := false + cfg.Batcher.Enabled = &batcherEnabled + }, + "sync": func(cfg *Config) { + batcherEnabled := true + cfg.Batcher.Enabled = &batcherEnabled + cfg.Batcher.FlushTimeout = 10 * time.Millisecond + }, + } + for _, tt := range tableTests { + for cfgName, cfgFn := range cfgs { + t.Run(tt.name+"/"+cfgName, func(t *testing.T) { + t.Parallel() + rec := newBulkRecorder() + server := newESTestServer(t, func(docs []itemRequest) ([]itemResponse, error) { + rec.Record(docs) + + if tt.expectedDocID == "" { + assert.NotContains(t, string(docs[0].Action), "_id", "expected _id to not be set") + } else { + assert.Equal(t, tt.expectedDocID, actionJSONToID(t, docs[0].Action), "expected _id to be set") + } + + // Ensure the document id attribute is removed from the final 
document. + assert.NotContains(t, string(docs[0].Document), documentIDAttributeName, "expected document id attribute to be removed") + return itemsAllOK(docs) + }) + + exporter := newTestLogsExporter(t, server.URL, func(cfg *Config) { + cfg.Mapping.Mode = "otel" + cfg.LogsDynamicID.Enabled = true + cfgFn(cfg) + }) + logs := newLogsWithAttributes( + tt.recordAttrs, + map[string]any{}, + map[string]any{}, + ) + logs.ResourceLogs().At(0).ScopeLogs().At(0).LogRecords().At(0).Body().SetStr("hello world") + mustSendLogs(t, exporter, logs) + + rec.WaitItems(1) + }) + } + } + }) t.Run("otel mode attribute complex value", func(t *testing.T) { rec := newBulkRecorder() server := newESTestServer(t, func(docs []itemRequest) ([]itemResponse, error) { @@ -1943,3 +2019,14 @@ func actionJSONToIndex(t *testing.T, actionJSON json.RawMessage) string { require.NoError(t, err) return action.Create.Index } + +func actionJSONToID(t *testing.T, actionJSON json.RawMessage) string { + action := struct { + Create struct { + ID string `json:"_id"` + } `json:"create"` + }{} + err := json.Unmarshal(actionJSON, &action) + require.NoError(t, err) + return action.Create.ID +} diff --git a/exporter/elasticsearchexporter/factory.go b/exporter/elasticsearchexporter/factory.go index 755a4e3d241b..c72ecbfc0fd1 100644 --- a/exporter/elasticsearchexporter/factory.go +++ b/exporter/elasticsearchexporter/factory.go @@ -62,6 +62,9 @@ func createDefaultConfig() component.Config { TracesDynamicIndex: DynamicIndexSetting{ Enabled: false, }, + LogsDynamicID: DynamicIDSettings{ + Enabled: false, + }, Retry: RetrySettings{ Enabled: true, MaxRetries: 0, // default is set in exporter code diff --git a/exporter/elasticsearchexporter/pdata_serializer.go b/exporter/elasticsearchexporter/pdata_serializer.go index 9d2fbf82f63c..76eb2a988372 100644 --- a/exporter/elasticsearchexporter/pdata_serializer.go +++ b/exporter/elasticsearchexporter/pdata_serializer.go @@ -298,7 +298,7 @@ func writeAttributes(v *json.Visitor, 
attributes pcommon.Map, stringifyMapValues _ = v.OnObjectStart(-1, structform.AnyType) attributes.Range(func(k string, val pcommon.Value) bool { switch k { - case dataStreamType, dataStreamDataset, dataStreamNamespace, elasticsearch.MappingHintsAttrKey: + case dataStreamType, dataStreamDataset, dataStreamNamespace, elasticsearch.MappingHintsAttrKey, documentIDAttributeName: return true } if isGeoAttribute(k, val) { diff --git a/exporter/elasticsearchexporter/pdata_serializer_test.go b/exporter/elasticsearchexporter/pdata_serializer_test.go index 6131ebbc6ee1..26d514757fd4 100644 --- a/exporter/elasticsearchexporter/pdata_serializer_test.go +++ b/exporter/elasticsearchexporter/pdata_serializer_test.go @@ -31,6 +31,7 @@ func TestSerializeLog(t *testing.T) { record.Attributes().PutDouble("double", 42.0) record.Attributes().PutInt("int", 42) record.Attributes().PutEmptyBytes("bytes").Append(42) + record.Attributes().PutStr(documentIDAttributeName, "my_id") _ = record.Attributes().PutEmptySlice("slice").FromRaw([]any{42, "foo"}) record.Attributes().PutEmptySlice("map_slice").AppendEmpty().SetEmptyMap().PutStr("foo.bar", "baz") mapAttr := record.Attributes().PutEmptyMap("map")