From 801cd88139bf420c54c3f3d3d0dede2a8abec12d Mon Sep 17 00:00:00 2001 From: musa-asad Date: Thu, 6 Mar 2025 00:20:52 -0500 Subject: [PATCH 01/17] Implement Kubernetes Metadata Extension Co-authored-by: Ping Xiang <> --- .github/workflows/build-test-artifacts.yml | 4 +- .github/workflows/clean-aws-resources.yml | 18 + .github/workflows/ec2-integration-test.yml | 36 +- .github/workflows/integration-test.yml | 1223 +--------------- .github/workflows/release-candidate-test.yml | 62 + .../workflows/repackage-release-artifacts.yml | 168 +++ .github/workflows/start-localstack.yml | 2 +- .github/workflows/stop-localstack.yml | 3 + .github/workflows/test-artifacts.yml | 1286 +++++++++++++++++ .github/workflows/upload-dependencies.yml | 57 + Makefile | 2 +- RELEASE_NOTES | 15 + cfg/aws/credentials.go | 36 +- cfg/aws/credentials_test.go | 89 ++ cfg/envconfig/envconfig.go | 4 + extension/agenthealth/factory.go | 2 +- extension/agenthealth/factory_test.go | 4 +- .../handler/useragent/useragent_test.go | 23 +- extension/entitystore/eksInfo_test.go | 4 +- extension/entitystore/extension_test.go | 8 +- extension/entitystore/factory.go | 2 +- extension/entitystore/factory_test.go | 4 +- extension/k8smetadata/README.md | 13 + extension/k8smetadata/config.go | 12 + extension/k8smetadata/config_test.go | 18 + extension/k8smetadata/extension.go | 104 ++ extension/k8smetadata/extension_test.go | 39 + extension/k8smetadata/factory.go | 50 + extension/k8smetadata/factory_test.go | 26 + extension/server/extension.go | 4 +- extension/server/factory.go | 2 +- extension/server/factory_test.go | 6 +- go.mod | 540 +++---- go.sum | 1056 ++++++++------ .../k8sclient/endpointslicewatcher.go | 307 ++++ .../k8sclient/endpointslicewatcher_test.go | 292 ++++ .../k8sCommon/k8sclient/kubernetes_utils.go | 120 ++ .../k8sclient/kubernetes_utils_test.go | 82 ++ internal/tls/testdata/.gitignore | 4 + internal/tls/testdata/server.crt | 24 - internal/tls/testdata/server.key | 28 - 
internal/tls/testdata/tls-ca.crt | 29 - plugins/inputs/logfile/README.md | 2 + plugins/inputs/logfile/fileconfig.go | 17 +- plugins/inputs/logfile/fileconfig_test.go | 124 +- plugins/inputs/logfile/logfile.go | 1 + plugins/inputs/logfile/tailersrc.go | 19 +- plugins/inputs/logfile/tailersrc_test.go | 4 +- plugins/inputs/prometheus/target_allocator.go | 1 - plugins/outputs/cloudwatch/factory.go | 4 +- plugins/outputs/cloudwatch/factory_test.go | 14 +- .../internal/pusher/convert_test.go | 24 +- .../cloudwatchlogs/internal/pusher/pool.go | 5 + .../internal/pusher/pool_test.go | 4 +- .../internal/pusher/pusher_test.go | 10 +- .../cloudwatchlogs/internal/pusher/queue.go | 15 +- .../internal/pusher/queue_test.go | 312 ++-- .../cloudwatchlogs/internal/pusher/retry.go | 10 +- .../cloudwatchlogs/internal/pusher/sender.go | 1 + .../internal/pusher/sender_test.go | 9 +- .../cloudwatchlogs/internal/pusher/target.go | 194 ++- .../internal/pusher/target_test.go | 202 ++- .../awsapplicationsignals/factory.go | 10 +- .../internal/resolver/endpointslicewatcher.go | 293 ++++ .../resolver/endpointslicewatcher_test.go | 296 ++++ .../internal/resolver/kubernetes.go | 551 ++----- .../internal/resolver/kubernetes_test.go | 972 +------------ .../internal/resolver/kubernetes_utils.go | 97 +- .../resolver/kubernetes_utils_test.go | 258 ++++ .../internal/resolver/podwatcher.go | 205 +++ .../internal/resolver/podwatcher_test.go | 517 +++++++ .../internal/resolver/servicetoworkload.go | 81 ++ .../resolver/servicetoworkload_test.go | 101 ++ .../internal/resolver/servicewatcher.go | 121 ++ .../internal/resolver/servicewatcher_test.go | 106 ++ plugins/processors/awsentity/factory.go | 4 +- plugins/processors/awsentity/factory_test.go | 14 +- .../k8sattributescraper.go | 31 +- .../k8sattributescraper_test.go | 128 +- plugins/processors/awsentity/processor.go | 23 +- plugins/processors/ec2tagger/README.md | 3 +- plugins/processors/ec2tagger/config.go | 3 +- 
plugins/processors/ec2tagger/ec2tagger.go | 93 +- .../processors/ec2tagger/ec2tagger_test.go | 37 +- plugins/processors/ec2tagger/factory.go | 4 +- plugins/processors/ec2tagger/factory_test.go | 14 +- plugins/processors/gpuattributes/factory.go | 4 +- .../processors/gpuattributes/factory_test.go | 14 +- plugins/processors/kueueattributes/factory.go | 4 +- .../kueueattributes/factory_test.go | 14 +- processor/rollupprocessor/factory.go | 4 +- processor/rollupprocessor/factory_test.go | 4 +- receiver/adapter/factory.go | 12 +- receiver/adapter/factory_test.go | 8 +- service/defaultcomponents/components.go | 4 +- service/defaultcomponents/components_test.go | 4 +- tool/clean/clean_ami/clean_ami.go | 3 - tool/clean/clean_log_group/clean_log_group.go | 284 ++++ .../clean_log_group/clean_log_group_test.go | 286 ++++ tool/clean/go.mod | 14 +- tool/clean/go.sum | 21 +- tool/testutil/testutil.go | 110 ++ translator/config/schema.json | 4 + .../sampleConfig/advanced_config_darwin.yaml | 10 +- .../sampleConfig/advanced_config_linux.yaml | 12 +- .../sampleConfig/advanced_config_windows.yaml | 10 +- .../sampleConfig/amp_config_linux.yaml | 12 +- .../appsignals_and_ecs_config.yaml | 9 +- .../appsignals_and_eks_config.yaml | 24 +- .../appsignals_and_k8s_config.yaml | 24 +- .../appsignals_fallback_and_eks_config.yaml | 24 +- .../appsignals_over_fallback_config.yaml | 24 +- .../sampleConfig/base_appsignals_config.yaml | 10 +- .../base_appsignals_fallback_config.yaml | 9 +- .../base_container_insights_config.yaml | 18 +- .../sampleConfig/basic_config_linux.yaml | 8 +- .../sampleConfig/basic_config_windows.yaml | 6 +- .../sampleConfig/collectd_config_linux.yaml | 3 +- .../sampleConfig/compass_linux_config.yaml | 12 +- .../sampleConfig/complete_darwin_config.yaml | 22 +- .../sampleConfig/complete_linux_config.conf | 1 + .../sampleConfig/complete_linux_config.json | 1 + .../sampleConfig/complete_linux_config.yaml | 30 +- .../sampleConfig/complete_windows_config.yaml | 10 +- 
.../sampleConfig/config_with_env.yaml | 3 +- .../sampleConfig/container_insights_jmx.yaml | 22 +- .../sampleConfig/delta_config_linux.yaml | 6 +- .../sampleConfig/delta_net_config_linux.yaml | 8 +- .../sampleConfig/drop_origin_linux.yaml | 6 +- .../emf_and_kubernetes_config.yaml | 18 +- .../emf_and_kubernetes_with_gpu_config.yaml | 278 ++-- .../emf_and_kubernetes_with_kueue_config.yaml | 33 +- .../ignore_append_dimensions.yaml | 6 +- .../sampleConfig/invalid_input_linux.yaml | 8 +- .../sampleConfig/jmx_config_linux.yaml | 11 +- .../sampleConfig/jmx_eks_config_linux.yaml | 11 +- .../kubernetes_on_prem_config.yaml | 18 +- .../kueue_container_insights_config.yaml | 33 +- .../sampleConfig/log_ecs_metric_only.yaml | 18 +- .../tocwconfig/sampleConfig/log_filter.yaml | 3 +- .../sampleConfig/log_only_config_windows.yaml | 3 +- .../logs_and_kubernetes_config.yaml | 18 +- .../sampleConfig/no_skip_log_timestamp.yaml | 3 +- .../no_skip_log_timestamp_windows.yaml | 3 +- .../otlp_metrics_cloudwatchlogs_config.yaml | 7 +- ...tlp_metrics_cloudwatchlogs_eks_config.yaml | 11 +- .../sampleConfig/otlp_metrics_config.yaml | 10 +- .../sampleConfig/otlp_metrics_eks_config.yaml | 14 +- .../procstat_memory_swap_config.yaml | 3 +- .../prometheus_combined_config_linux.yaml | 7 +- .../sampleConfig/prometheus_config_linux.yaml | 3 +- .../prometheus_config_windows.yaml | 3 +- .../prometheus_otel_config_linux.yaml | 7 +- .../sampleConfig/skip_log_timestamp.yaml | 3 +- .../skip_log_timestamp_default.yaml | 3 +- .../skip_log_timestamp_default_windows.yaml | 3 +- .../skip_log_timestamp_windows.yaml | 3 +- .../sampleConfig/standard_config_linux.yaml | 8 +- ...ndard_config_linux_with_common_config.yaml | 10 +- .../sampleConfig/standard_config_windows.yaml | 10 +- ...ard_config_windows_with_common_config.yaml | 10 +- .../sampleConfig/statsd_config_linux.yaml | 3 +- .../sampleConfig/statsd_config_windows.yaml | 3 +- .../sampleConfig/statsd_ecs_config.yaml | 3 +- .../sampleConfig/statsd_eks_config.yaml 
| 3 +- .../sampleConfig/trace_config_linux.yaml | 7 +- .../sampleConfig/trace_config_windows.yaml | 7 +- .../windows_eventlog_only_config.yaml | 3 +- translator/tocwconfig/tocwconfig_test.go | 93 +- .../totomlconfig/testdata/agentToml.conf | 1 + .../totomlconfig/testdata/agentToml.json | 3 +- .../tomlConfigTemplate/tomlConfig.go | 2 + .../files/collect_list/ruleTrimTimestamp.go | 33 + translator/translate/otel/common/common.go | 86 +- .../translate/otel/common/common_test.go | 2 +- .../otel/exporter/awscloudwatch/translator.go | 6 +- .../exporter/awscloudwatchlogs/translator.go | 4 +- .../awscloudwatchlogs/translator_test.go | 2 +- .../otel/exporter/awsemf/translator.go | 12 +- .../otel/exporter/awsemf/translator_test.go | 16 +- .../otel/exporter/awsxray/translator.go | 6 +- .../otel/exporter/awsxray/translator_test.go | 2 +- .../otel/exporter/debug/translator.go | 4 +- .../otel/exporter/debug/translator_test.go | 7 +- .../testdata/config.yaml | 10 +- .../prometheusremotewrite/translator.go | 6 +- .../translate/otel/exporter/translator.go | 4 +- .../otel/extension/agenthealth/translator.go | 26 +- .../extension/agenthealth/translator_test.go | 6 +- .../otel/extension/awsproxy/translator.go | 9 +- .../extension/awsproxy/translator_test.go | 2 + .../otel/extension/entitystore/translator.go | 4 +- .../otel/extension/k8smetadata/translator.go | 36 + .../otel/extension/server/translator.go | 4 +- .../otel/extension/sigv4auth/translator.go | 6 +- .../pipeline/applicationsignals/translator.go | 43 +- .../applicationsignals/translator_test.go | 9 +- .../pipeline/containerinsights/translator.go | 33 +- .../containerinsights/translator_test.go | 7 +- .../pipeline/containerinsights/translators.go | 9 +- .../containerinsights/translators_test.go | 2 +- .../containerinsightsjmx/translator.go | 14 +- .../otel/pipeline/emf_logs/translator.go | 19 +- .../otel/pipeline/host/translator.go | 33 +- .../otel/pipeline/host/translator_test.go | 6 +- .../otel/pipeline/host/translators.go 
| 22 +- .../otel/pipeline/host/translators_test.go | 3 +- .../translate/otel/pipeline/jmx/translator.go | 17 +- .../otel/pipeline/jmx/translator_test.go | 11 +- .../otel/pipeline/jmx/translators.go | 6 +- .../otel/pipeline/jmx/translators_test.go | 30 +- .../translate/otel/pipeline/nop/translator.go | 13 +- .../otel/pipeline/prometheus/translator.go | 14 +- .../pipeline/prometheus/translator_test.go | 5 +- .../otel/pipeline/prometheus/translators.go | 6 +- .../pipeline/prometheus/translators_test.go | 32 +- .../translate/otel/pipeline/translator.go | 23 +- .../otel/pipeline/translator_test.go | 18 +- .../otel/pipeline/xray/translator.go | 17 +- .../awsapplicationsignals/translator.go | 19 +- .../awsapplicationsignals/translator_test.go | 4 +- .../otel/processor/awsentity/translator.go | 4 +- .../processor/batchprocessor/translator.go | 4 +- .../batchprocessor/translator_test.go | 3 +- .../cumulativetodeltaprocessor/translator.go | 4 +- .../ec2taggerprocessor/translator.go | 10 +- .../ec2taggerprocessor/translator_test.go | 11 +- .../filter_containerinsights_config.yaml | 9 + .../processor/filterprocessor/translator.go | 15 +- .../filterprocessor/translator_test.go | 14 + .../otel/processor/gpu/translator.go | 4 +- .../otel/processor/kueue/translator.go | 4 +- .../processor/metricsdecorator/translator.go | 4 +- .../metricsdecorator/translator_unix_test.go | 6 +- .../translator_windows_test.go | 2 +- .../metricstransformprocessor/translator.go | 4 +- .../translator_test.go | 3 +- .../processor/resourcedetection/translator.go | 17 +- .../resourcedetection/translator_test.go | 4 +- .../processor/resourceprocessor/translator.go | 10 +- .../processor/rollupprocessor/translator.go | 6 +- .../processor/transformprocessor/translate.go | 4 +- .../translate/otel/processor/translator.go | 4 +- .../otel/receiver/adapter/translator.go | 6 +- .../otel/receiver/adapter/translators.go | 24 +- .../awscontainerinsight/translator.go | 6 +- .../awscontainerinsightskueue/translator.go | 
6 +- .../otel/receiver/awsxray/translator.go | 6 +- .../translate/otel/receiver/jmx/translator.go | 4 +- .../otel/receiver/otlp/translator.go | 21 +- .../otel/receiver/otlp/translator_test.go | 16 +- .../otel/receiver/prometheus/translator.go | 4 +- .../otel/receiver/tcplog/translator.go | 6 +- .../translate/otel/receiver/translator.go | 4 +- .../otel/receiver/udplog/translator.go | 6 +- translator/translate/otel/translate_otel.go | 31 +- .../translate/otel/translate_otel_test.go | 20 +- translator/util/eksdetector/eksdetector.go | 47 +- .../util/eksdetector/eksdetector_test.go | 26 + .../util/eksdetector/eksdetectortestutil.go | 7 +- 260 files changed, 9105 insertions(+), 4556 deletions(-) create mode 100644 .github/workflows/release-candidate-test.yml create mode 100644 .github/workflows/repackage-release-artifacts.yml create mode 100644 .github/workflows/test-artifacts.yml create mode 100644 .github/workflows/upload-dependencies.yml create mode 100644 cfg/aws/credentials_test.go create mode 100644 extension/k8smetadata/README.md create mode 100644 extension/k8smetadata/config.go create mode 100644 extension/k8smetadata/config_test.go create mode 100644 extension/k8smetadata/extension.go create mode 100644 extension/k8smetadata/extension_test.go create mode 100644 extension/k8smetadata/factory.go create mode 100644 extension/k8smetadata/factory_test.go create mode 100644 internal/k8sCommon/k8sclient/endpointslicewatcher.go create mode 100644 internal/k8sCommon/k8sclient/endpointslicewatcher_test.go create mode 100644 internal/k8sCommon/k8sclient/kubernetes_utils.go create mode 100644 internal/k8sCommon/k8sclient/kubernetes_utils_test.go create mode 100644 internal/tls/testdata/.gitignore delete mode 100644 internal/tls/testdata/server.crt delete mode 100644 internal/tls/testdata/server.key delete mode 100644 internal/tls/testdata/tls-ca.crt create mode 100644 plugins/processors/awsapplicationsignals/internal/resolver/endpointslicewatcher.go create mode 100644 
plugins/processors/awsapplicationsignals/internal/resolver/endpointslicewatcher_test.go create mode 100644 plugins/processors/awsapplicationsignals/internal/resolver/kubernetes_utils_test.go create mode 100644 plugins/processors/awsapplicationsignals/internal/resolver/podwatcher.go create mode 100644 plugins/processors/awsapplicationsignals/internal/resolver/podwatcher_test.go create mode 100644 plugins/processors/awsapplicationsignals/internal/resolver/servicetoworkload.go create mode 100644 plugins/processors/awsapplicationsignals/internal/resolver/servicetoworkload_test.go create mode 100644 plugins/processors/awsapplicationsignals/internal/resolver/servicewatcher.go create mode 100644 plugins/processors/awsapplicationsignals/internal/resolver/servicewatcher_test.go create mode 100644 tool/clean/clean_log_group/clean_log_group.go create mode 100644 tool/clean/clean_log_group/clean_log_group_test.go create mode 100644 translator/translate/logs/logs_collected/files/collect_list/ruleTrimTimestamp.go create mode 100644 translator/translate/otel/extension/k8smetadata/translator.go create mode 100644 translator/translate/otel/processor/filterprocessor/filter_containerinsights_config.yaml diff --git a/.github/workflows/build-test-artifacts.yml b/.github/workflows/build-test-artifacts.yml index 5509982f33..c9da753f01 100644 --- a/.github/workflows/build-test-artifacts.yml +++ b/.github/workflows/build-test-artifacts.yml @@ -98,7 +98,7 @@ jobs: StartIntegrationTests: needs: [ BuildAndUploadPackages, BuildAndUploadITAR, BuildAndUploadCN, BuildDocker ] - if: ${{ inputs.test-image-before-upload }} + if: ${{ github.event_name == 'push' || inputs.test-image-before-upload }} runs-on: ubuntu-latest steps: - run: gh workflow run integration-test.yml --ref ${{ github.ref_name }} --repo $GITHUB_REPOSITORY -f build_run_id=${{ github.run_id }} -f build_sha=${{ github.sha }} @@ -108,7 +108,7 @@ jobs: StartApplicationSignalsE2ETests: needs: [ BuildAndUploadPackages, 
BuildAndUploadITAR, BuildAndUploadCN, BuildDocker ] # Workflow only runs against main - if: ${{ contains(github.ref_name, 'main') && inputs.test-image-before-upload }} + if: ${{ contains(github.ref_name, 'main') && (github.event_name == 'push' || inputs.test-image-before-upload) }} runs-on: ubuntu-latest steps: - run: gh workflow run application-signals-e2e-test.yml --ref ${{ github.ref_name }} --repo $GITHUB_REPOSITORY -f build_run_id=${{ github.run_id }} -f build_sha=${{ github.sha }} diff --git a/.github/workflows/clean-aws-resources.yml b/.github/workflows/clean-aws-resources.yml index 6134c5727b..bc6d989284 100644 --- a/.github/workflows/clean-aws-resources.yml +++ b/.github/workflows/clean-aws-resources.yml @@ -270,3 +270,21 @@ jobs: - name: Clean old IAM roles working-directory: tool/clean run: go run ./clean_iam_roles/clean_iam_roles.go --tags=clean + clean-log-groups: + runs-on: ubuntu-latest + permissions: + id-token: write + contents: read + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-go@v4 + + - name: Configure AWS Credentials + uses: aws-actions/configure-aws-credentials@v2 + with: + role-to-assume: ${{ secrets.TERRAFORM_AWS_ASSUME_ROLE }} + aws-region: us-west-2 + + - name: Clean old Log Groups + working-directory: tool/clean + run: go run ./clean_log_group/clean_log_group.go diff --git a/.github/workflows/ec2-integration-test.yml b/.github/workflows/ec2-integration-test.yml index 2a31c4d468..98f48025dc 100644 --- a/.github/workflows/ec2-integration-test.yml +++ b/.github/workflows/ec2-integration-test.yml @@ -11,7 +11,7 @@ env: on: workflow_call: inputs: - github_sha: + build_id: required: true type: string test_repo_name: @@ -43,7 +43,7 @@ on: jobs: EC2IntegrationTest: - name: 'Test' + name: ${{matrix.arrays.testName}} runs-on: ubuntu-latest strategy: fail-fast: false @@ -92,24 +92,27 @@ jobs: terraform init if terraform apply --auto-approve \ - -var="ssh_key_value=${{env.PRIVATE_KEY}}" -var="github_test_repo=${{ 
inputs.test_repo_url }}" \ - -var="test_name=${{ matrix.arrays.os }}" \ - -var="cwa_github_sha=${{inputs.github_sha}}" -var="install_agent=${{ matrix.arrays.installAgentCommand }}" \ - -var="github_test_repo_branch=${{inputs.test_repo_branch}}" \ - -var="ec2_instance_type=${{ matrix.arrays.instanceType }}" \ - -var="user=${{ matrix.arrays.username }}" \ + -var="agent_start=${{ matrix.arrays.agentStartCommand }}" \ -var="ami=${{ matrix.arrays.ami }}" \ - -var="ca_cert_path=${{ matrix.arrays.caCertPath }}" \ -var="arc=${{ matrix.arrays.arc }}" \ -var="binary_name=${{ matrix.arrays.binaryName }}" \ + -var="ca_cert_path=${{ matrix.arrays.caCertPath }}" \ + -var="cwa_github_sha=${{inputs.build_id}}" \ + -var="ec2_instance_type=${{ matrix.arrays.instanceType }}" \ + -var="excluded_tests='${{ matrix.arrays.excludedTests }}'" \ + -var="github_test_repo=${{ inputs.test_repo_url }}" \ + -var="github_test_repo_branch=${{inputs.test_repo_branch}}" \ + -var="install_agent=${{ matrix.arrays.installAgentCommand }}" \ -var="local_stack_host_name=${{ inputs.localstack_host }}" \ + -var="plugin_tests='${{ github.event.inputs.plugins }}'" \ -var="region=${{ inputs.region }}" \ -var="s3_bucket=${{ inputs.s3_integration_bucket }}" \ - -var="plugin_tests='${{ github.event.inputs.plugins }}'" \ - -var="excluded_tests='${{ matrix.arrays.excludedTests }}'" \ -var="ssh_key_name=${{env.KEY_NAME}}" \ + -var="ssh_key_value=${{env.PRIVATE_KEY}}" \ -var="test_dir=${{ matrix.arrays.test_dir }}" \ - -var="agent_start=${{ matrix.arrays.agentStartCommand }}"; then terraform destroy -var="region=${{ inputs.region }}" -var="ami=${{ matrix.arrays.ami }}" -auto-approve + -var="test_name=${{ matrix.arrays.os }}" \ + -var="user=${{ matrix.arrays.username }}"; then + terraform destroy -var="region=${{ inputs.region }}" -var="ami=${{ matrix.arrays.ami }}" -auto-approve else terraform destroy -var="region=${{ inputs.region }}" -var="ami=${{ matrix.arrays.ami }}" -auto-approve && exit 1 fi @@ -121,4 +124,11 
@@ jobs: max_attempts: 2 timeout_minutes: 8 retry_wait_seconds: 5 - command: cd ${{ inputs.test_dir }} && terraform destroy -var="region=${{ inputs.region }}" -var="ami=${{ matrix.arrays.ami }}" --auto-approve \ No newline at end of file + command: | + if [ "${{ matrix.arrays.terraform_dir }}" != "" ]; then + cd "${{ matrix.arrays.terraform_dir }}" + else + cd ${{inputs.test_dir}} + fi + + terraform destroy -var="region=${{ inputs.region }}" -var="ami=${{ matrix.arrays.ami }}" --auto-approve diff --git a/.github/workflows/integration-test.yml b/.github/workflows/integration-test.yml index 32eac385d6..37823e93ea 100644 --- a/.github/workflows/integration-test.yml +++ b/.github/workflows/integration-test.yml @@ -30,10 +30,9 @@ on: description: 'The SHA of the build-test-artifacts workflow run' type: string required: true - -concurrency: - group: ${{ github.workflow }}-${{ github.ref_name }} - cancel-in-progress: true + test_repo_branch: + description: 'Override for the GitHub test repository branch to use (default is main)' + type: string jobs: CheckBuildTestArtifacts: @@ -57,1220 +56,14 @@ jobs: env: GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} - GenerateTestMatrix: - needs: [ CheckBuildTestArtifacts ] - name: 'GenerateTestMatrix' - runs-on: ubuntu-latest - outputs: - ec2_gpu_matrix: ${{ steps.set-matrix.outputs.ec2_gpu_matrix }} - ec2_linux_matrix: ${{ steps.set-matrix.outputs.ec2_linux_matrix }} - ec2_windows_matrix: ${{ steps.set-matrix.outputs.ec2_windows_matrix }} - ec2_mac_matrix: ${{ steps.set-matrix.outputs.ec2_mac_matrix }} - ec2_performance_matrix: ${{steps.set-matrix.outputs.ec2_performance_matrix}} - ec2_windows_performance_matrix: ${{steps.set-matrix.outputs.ec2_windows_performance_matrix}} - ec2_stress_matrix: ${{steps.set-matrix.outputs.ec2_stress_matrix}} - ec2_windows_stress_matrix: ${{steps.set-matrix.outputs.ec2_windows_stress_matrix}} - ecs_ec2_launch_daemon_matrix: ${{ steps.set-matrix.outputs.ecs_ec2_launch_daemon_matrix }} - ecs_fargate_matrix: ${{ 
steps.set-matrix.outputs.ecs_fargate_matrix }} - eks_daemon_matrix: ${{ steps.set-matrix.outputs.eks_daemon_matrix }} - eks_deployment_matrix: ${{ steps.set-matrix.outputs.eks_deployment_matrix }} - ec2_linux_itar_matrix: ${{ steps.set-matrix.outputs.ec2_linux_itar_matrix }} - ec2_linux_china_matrix: ${{ steps.set-matrix.outputs.ec2_linux_china_matrix }} - eks_addon_matrix: ${{ steps.set-matrix.outputs.eks_addon_matrix }} - steps: - - uses: actions/checkout@v3 - with: - repository: ${{env.CWA_GITHUB_TEST_REPO_NAME}} - ref: ${{env.CWA_GITHUB_TEST_REPO_BRANCH}} - - - name: Set up Go 1.x - uses: actions/setup-go@v4 - with: - go-version: ~1.22.2 - - - name: Generate matrix - id: set-matrix - run: | - go run --tags=generator generator/test_case_generator.go - echo "::set-output name=ec2_gpu_matrix::$(echo $(cat generator/resources/ec2_gpu_complete_test_matrix.json))" - echo "::set-output name=eks_addon_matrix::$(echo $(cat generator/resources/eks_addon_complete_test_matrix.json))" - echo "::set-output name=ec2_linux_matrix::$(echo $(cat generator/resources/ec2_linux_complete_test_matrix.json))" - echo "::set-output name=ec2_windows_matrix::$(echo $(cat generator/resources/ec2_windows_complete_test_matrix.json))" - echo "::set-output name=ec2_mac_matrix::$(echo $(cat generator/resources/ec2_mac_complete_test_matrix.json))" - echo "::set-output name=ec2_performance_matrix::$(echo $(cat generator/resources/ec2_performance_complete_test_matrix.json))" - echo "::set-output name=ec2_windows_performance_matrix::$(echo $(cat generator/resources/ec2_windows_performance_complete_test_matrix.json))" - echo "::set-output name=ec2_stress_matrix::$(echo $(cat generator/resources/ec2_stress_complete_test_matrix.json))" - echo "::set-output name=ec2_windows_stress_matrix::$(echo $(cat generator/resources/ec2_windows_stress_complete_test_matrix.json))" - echo "::set-output name=ecs_ec2_launch_daemon_matrix::$(echo $(cat generator/resources/ecs_ec2_daemon_complete_test_matrix.json))" - 
echo "::set-output name=ecs_fargate_matrix::$(echo $(cat generator/resources/ecs_fargate_complete_test_matrix.json))" - echo "::set-output name=eks_daemon_matrix::$(echo $(cat generator/resources/eks_daemon_complete_test_matrix.json))" - echo "::set-output name=eks_deployment_matrix::$(echo $(cat generator/resources/eks_deployment_complete_test_matrix.json))" - echo "::set-output name=ec2_linux_itar_matrix::$(echo $(cat generator/resources/ec2_linux_itar_complete_test_matrix.json))" - echo "::set-output name=ec2_linux_china_matrix::$(echo $(cat generator/resources/ec2_linux_china_complete_test_matrix.json))" - - - name: Echo test plan matrix - run: | - echo "ec2_gpu_matrix: ${{ steps.set-matrix.outputs.ec2_gpu_matrix }}" - echo "eks_addon_matrix: ${{ steps.set-matrix.outputs.eks_addon_matrix }}" - echo "ec2_linux_matrix: ${{ steps.set-matrix.outputs.ec2_linux_matrix }}" - echo "ec2_windows_matrix: ${{ steps.set-matrix.outputs.ec2_windows_matrix }}" - echo "ec2_mac_matrix: ${{ steps.set-matrix.outputs.ec2_mac_matrix }}" - echo "ec2_performance_matrix: ${{ steps.set-matrix.outputs.ec2_performance_matrix}}" - echo "ec2_windows_performance_matrix: ${{ steps.set-matrix.outputs.ec2_windows_performance_matrix}}" - echo "ec2_stress_matrix: ${{ steps.set-matrix.outputs.ec2_stress_matrix}}" - echo "ec2_windows_stress_matrix: ${{ steps.set-matrix.outputs.ec2_windows_stress_matrix}}" - echo "ecs_ec2_launch_daemon_matrix: ${{ steps.set-matrix.outputs.ecs_ec2_launch_daemon_matrix }}" - echo "ecs_fargate_matrix: ${{ steps.set-matrix.outputs.ecs_fargate_matrix }}" - echo "eks_daemon_matrix: ${{ steps.set-matrix.outputs.eks_daemon_matrix }}" - echo "eks_deployment_matrix: ${{ steps.set-matrix.outputs.eks_deployment_matrix }}" - echo "ec2_linux_itar_matrix: ${{ steps.set-matrix.outputs.ec2_linux_itar_matrix }}" - echo "ec2_linux_china_matrix: ${{ steps.set-matrix.outputs.ec2_linux_china_matrix }}" - - CloudformationTest: - needs: [GenerateTestMatrix] - name: 'CFTest' - runs-on: 
ubuntu-latest - strategy: - fail-fast: false - permissions: - id-token: write - contents: read - steps: - - uses: actions/checkout@v3 - with: - repository: ${{env.CWA_GITHUB_TEST_REPO_NAME}} - ref: ${{env.CWA_GITHUB_TEST_REPO_BRANCH}} - path: test - - - name: Set up Go 1.x - uses: actions/setup-go@v2 - with: - go-version: ~1.22.2 - - - name: Configure AWS Credentials - uses: aws-actions/configure-aws-credentials@v2 - with: - role-to-assume: ${{ env.TERRAFORM_AWS_ASSUME_ROLE }} - aws-region: us-west-2 - role-duration-seconds: ${{ env.TERRAFORM_AWS_ASSUME_ROLE_DURATION }} - - - name: Test cf - run: | - cd test/test/cloudformation - go test -timeout 1h -package_path=s3://${S3_INTEGRATION_BUCKET}/integration-test/binary/${{ github.sha }}/linux/amd64/amazon-cloudwatch-agent.rpm -iam_role=${CF_IAM_ROLE} -key_name=${CF_KEY_NAME} -metric_name=mem_used_percent - - StartLocalStack: - name: 'StartLocalStack' - needs: [OutputEnvVariables] - uses: ./.github/workflows/start-localstack.yml - secrets: inherit - permissions: - id-token: write - contents: read - with: - region: us-west-2 - test_repo_name: ${{ needs.OutputEnvVariables.outputs.CWA_GITHUB_TEST_REPO_NAME }} - test_repo_branch: ${{ needs.OutputEnvVariables.outputs.CWA_GITHUB_TEST_REPO_BRANCH }} - terraform_assume_role: ${{ vars.TERRAFORM_AWS_ASSUME_ROLE }} - test_repo_url: ${{ needs.OutputEnvVariables.outputs.CWA_GITHUB_TEST_REPO_URL }} - github_sha: ${{github.sha}} - s3_integration_bucket: ${{ vars.S3_INTEGRATION_BUCKET }} - - StartLocalStackITAR: - name: 'StartLocalStackITAR' - needs: [OutputEnvVariables] - uses: ./.github/workflows/start-localstack.yml - secrets: inherit - permissions: - id-token: write - contents: read - with: - region: us-gov-east-1 - test_repo_name: ${{ needs.OutputEnvVariables.outputs.CWA_GITHUB_TEST_REPO_NAME }} - test_repo_branch: ${{ needs.OutputEnvVariables.outputs.CWA_GITHUB_TEST_REPO_BRANCH }} - terraform_assume_role: ${{ vars.TERRAFORM_AWS_ASSUME_ROLE_ITAR }} - test_repo_url: ${{ 
needs.OutputEnvVariables.outputs.CWA_GITHUB_TEST_REPO_URL }} - github_sha: ${{github.sha}} - s3_integration_bucket: ${{ vars.S3_INTEGRATION_BUCKET_ITAR }} - - StartLocalStackCN: - name: 'StartLocalStackCN' - needs: [ OutputEnvVariables ] - uses: ./.github/workflows/start-localstack.yml - secrets: inherit - permissions: - id-token: write - contents: read - with: - region: cn-north-1 - test_repo_name: ${{ needs.OutputEnvVariables.outputs.CWA_GITHUB_TEST_REPO_NAME }} - test_repo_branch: ${{ needs.OutputEnvVariables.outputs.CWA_GITHUB_TEST_REPO_BRANCH }} - terraform_assume_role: ${{ vars.TERRAFORM_AWS_ASSUME_ROLE_CN }} - test_repo_url: ${{ needs.OutputEnvVariables.outputs.CWA_GITHUB_TEST_REPO_URL }} - github_sha: ${{github.sha}} - s3_integration_bucket: ${{ vars.S3_INTEGRATION_BUCKET_CN }} - - - - EC2NvidiaGPUIntegrationTest: - needs: [ StartLocalStack, GenerateTestMatrix ] - name: 'EC2NVIDIAGPUIntegrationTest' - runs-on: ubuntu-latest - strategy: - fail-fast: false - matrix: - arrays: ${{ fromJson(needs.GenerateTestMatrix.outputs.ec2_gpu_matrix) }} - permissions: - id-token: write - contents: read - steps: - - uses: actions/checkout@v3 - with: - repository: ${{env.CWA_GITHUB_TEST_REPO_NAME}} - ref: ${{env.CWA_GITHUB_TEST_REPO_BRANCH}} - - - name: Configure AWS Credentials - uses: aws-actions/configure-aws-credentials@v2 - with: - role-to-assume: ${{ env.TERRAFORM_AWS_ASSUME_ROLE }} - aws-region: us-west-2 - role-duration-seconds: ${{ env.TERRAFORM_AWS_ASSUME_ROLE_DURATION }} - - - name: Echo Test Info - run: echo run on ec2 instance os ${{ matrix.arrays.os }} arc ${{ matrix.arrays.arc }} test dir ${{ matrix.arrays.test_dir }} - - - name: Install Terraform - uses: hashicorp/setup-terraform@v3 - - - name: Verify Terraform version - run: terraform --version - - # nick-fields/retry@v2 starts at base dir - - name: Terraform apply - if: ${{ matrix.arrays.family == 'linux' }} - uses: nick-fields/retry@v2 - with: - max_attempts: 3 - timeout_minutes: 30 - retry_wait_seconds: 5 
- command: | - if [ "${{ matrix.arrays.terraform_dir }}" != "" ]; then - cd "${{ matrix.arrays.terraform_dir }}" - else - cd terraform/ec2/linux - fi - - terraform init - if terraform apply --auto-approve \ - -var="ssh_key_value=${PRIVATE_KEY}" -var="github_test_repo=${{env.CWA_GITHUB_TEST_REPO_URL}}" \ - -var="test_name=${{ matrix.arrays.os }}" \ - -var="cwa_github_sha=${GITHUB_SHA}" -var="install_agent=${{ matrix.arrays.installAgentCommand }}" \ - -var="github_test_repo_branch=${{env.CWA_GITHUB_TEST_REPO_BRANCH}}" \ - -var="ec2_instance_type=${{ matrix.arrays.instanceType }}" \ - -var="user=${{ matrix.arrays.username }}" \ - -var="ami=${{ matrix.arrays.ami }}" \ - -var="ca_cert_path=${{ matrix.arrays.caCertPath }}" \ - -var="arc=${{ matrix.arrays.arc }}" \ - -var="binary_name=${{ matrix.arrays.binaryName }}" \ - -var="local_stack_host_name=${{ needs.StartLocalStack.outputs.local_stack_host_name }}" \ - -var="s3_bucket=${S3_INTEGRATION_BUCKET}" \ - -var="ssh_key_name=${KEY_NAME}" \ - -var="test_dir=${{ matrix.arrays.test_dir }}" ; then terraform destroy -auto-approve - else - terraform destroy -auto-approve && exit 1 - fi - - name: Terraform apply - if: ${{ matrix.arrays.family == 'window' }} - uses: nick-fields/retry@v2 - with: - max_attempts: 3 - timeout_minutes: 30 - retry_wait_seconds: 5 - command: | - if [ "${{ matrix.arrays.terraform_dir }}" != "" ]; then - cd "${{ matrix.arrays.terraform_dir }}" - else - cd terraform/ec2/win - fi - terraform init - if terraform apply --auto-approve \ - -var="ssh_key_value=${PRIVATE_KEY}" -var="ssh_key_name=${KEY_NAME}" \ - -var="github_repo=${{env.CWA_GITHUB_TEST_REPO_URL}}" \ - -var="cwa_github_sha=${GITHUB_SHA}" -var="ami=${{ matrix.arrays.ami }}" \ - -var="test_dir=${{ matrix.arrays.test_dir }}" \ - -var="ec2_instance_type=${{ matrix.arrays.instanceType }}" \ - -var="github_test_repo=${{env.CWA_GITHUB_TEST_REPO_URL}}" \ - -var="github_test_repo_branch=${{env.CWA_GITHUB_TEST_REPO_BRANCH}}" \ - 
-var="s3_bucket=${S3_INTEGRATION_BUCKET}" ; then terraform destroy -auto-approve - else - terraform destroy -auto-approve && exit 1 - fi - #This is here just in case workflow cancel - - name: Terraform destroy - if: ${{ cancelled() || failure() }} - uses: nick-fields/retry@v2 - with: - max_attempts: 3 - timeout_minutes: 8 - retry_wait_seconds: 5 - command: | - if [ "${{ matrix.arrays.terraform_dir }}" != "" ]; then - cd "${{ matrix.arrays.terraform_dir }}" - elif if "${{ matrix.arrays.os }}" == window; then - cd terraform/ec2/win - else - cd terraform/ec2/linux - fi - terraform destroy --auto-approve - - OutputEnvVariables: + TestArtifacts: + name: 'TestArtifacts' needs: [CheckBuildTestArtifacts] - name: 'OutputEnvVariables' - runs-on: ubuntu-latest - outputs: - CWA_GITHUB_TEST_REPO_NAME: ${{ steps.set-outputs.outputs.CWA_GITHUB_TEST_REPO_NAME }} - CWA_GITHUB_TEST_REPO_URL: ${{ steps.set-outputs.outputs.CWA_GITHUB_TEST_REPO_URL }} - CWA_GITHUB_TEST_REPO_BRANCH: ${{ steps.set-outputs.outputs.CWA_GITHUB_TEST_REPO_BRANCH }} - steps: - - uses: actions/checkout@v3 - with: - repository: ${{env.CWA_GITHUB_TEST_REPO_NAME}} - ref: ${{env.CWA_GITHUB_TEST_REPO_BRANCH}} - - - name: Set up Go 1.x - uses: actions/setup-go@v4 - with: - go-version: ~1.22.2 - - - name: SetOutputs - id: set-outputs - run: | - echo "::set-output name=CWA_GITHUB_TEST_REPO_NAME::${{ env.CWA_GITHUB_TEST_REPO_NAME }}" - echo "::set-output name=CWA_GITHUB_TEST_REPO_URL::${{ env.CWA_GITHUB_TEST_REPO_URL }}" - echo "::set-output name=CWA_GITHUB_TEST_REPO_BRANCH::${{ env.CWA_GITHUB_TEST_REPO_BRANCH }}" - - - name: Echo test variables - run: | - echo "CWA_GITHUB_TEST_REPO_NAME: ${{ steps.set-outputs.outputs.CWA_GITHUB_TEST_REPO_NAME }}" - echo "CWA_GITHUB_TEST_REPO_URL: ${{ steps.set-outputs.outputs.CWA_GITHUB_TEST_REPO_URL }}" - echo "CWA_GITHUB_TEST_REPO_BRANCH: ${{ steps.set-outputs.outputs.CWA_GITHUB_TEST_REPO_BRANCH }}" - - EC2LinuxIntegrationTest: - needs: [ StartLocalStack, GenerateTestMatrix, 
OutputEnvVariables ] - name: 'EC2Linux' - uses: ./.github/workflows/ec2-integration-test.yml - with: - github_sha: ${{github.sha}} - test_dir: terraform/ec2/linux - job_id: ec2-linux-integration-test - test_props: ${{needs.GenerateTestMatrix.outputs.ec2_linux_matrix}} - test_repo_name: ${{ needs.OutputEnvVariables.outputs.CWA_GITHUB_TEST_REPO_NAME }} - test_repo_url: ${{ needs.OutputEnvVariables.outputs.CWA_GITHUB_TEST_REPO_URL }} - test_repo_branch: ${{ needs.OutputEnvVariables.outputs.CWA_GITHUB_TEST_REPO_BRANCH }} - localstack_host: ${{needs.StartLocalStack.outputs.local_stack_host_name}} - region: us-west-2 - terraform_assume_role: ${{ vars.TERRAFORM_AWS_ASSUME_ROLE }} - s3_integration_bucket: ${{ vars.S3_INTEGRATION_BUCKET }} - secrets: inherit - - EC2LinuxIntegrationTestITAR: - needs: [ StartLocalStackITAR, GenerateTestMatrix, OutputEnvVariables ] - name: 'EC2LinuxITAR' - uses: ./.github/workflows/ec2-integration-test.yml - with: - github_sha: ${{github.sha}} - test_dir: terraform/ec2/linux - job_id: ec2-linux-integration-test - test_props: ${{needs.GenerateTestMatrix.outputs.ec2_linux_itar_matrix}} - test_repo_name: ${{ needs.OutputEnvVariables.outputs.CWA_GITHUB_TEST_REPO_NAME }} - test_repo_url: ${{ needs.OutputEnvVariables.outputs.CWA_GITHUB_TEST_REPO_URL }} - test_repo_branch: ${{ needs.OutputEnvVariables.outputs.CWA_GITHUB_TEST_REPO_BRANCH }} - localstack_host: ${{needs.StartLocalStackITAR.outputs.local_stack_host_name}} - region: us-gov-east-1 - terraform_assume_role: ${{ vars.TERRAFORM_AWS_ASSUME_ROLE_ITAR }} - s3_integration_bucket: ${{ vars.S3_INTEGRATION_BUCKET_ITAR }} - secrets: inherit - - EC2LinuxIntegrationTestCN: - needs: [ StartLocalStackCN, GenerateTestMatrix, OutputEnvVariables ] - name: 'EC2LinuxCN' - uses: ./.github/workflows/ec2-integration-test.yml - with: - github_sha: ${{github.sha}} - test_dir: terraform/ec2/linux - job_id: ec2-linux-integration-test - test_props: ${{needs.GenerateTestMatrix.outputs.ec2_linux_china_matrix}} - 
test_repo_name: ${{ needs.OutputEnvVariables.outputs.CWA_GITHUB_TEST_REPO_NAME }} - test_repo_url: ${{ needs.OutputEnvVariables.outputs.CWA_GITHUB_TEST_REPO_URL }} - test_repo_branch: ${{ needs.OutputEnvVariables.outputs.CWA_GITHUB_TEST_REPO_BRANCH }} - localstack_host: ${{needs.StartLocalStackCN.outputs.local_stack_host_name}} - region: cn-north-1 - terraform_assume_role: ${{ vars.TERRAFORM_AWS_ASSUME_ROLE_CN }} - s3_integration_bucket: ${{ vars.S3_INTEGRATION_BUCKET_CN }} - secrets: inherit - - - LinuxOnPremIntegrationTest: - needs: [StartLocalStack, GenerateTestMatrix, OutputEnvVariables] - name: 'OnpremLinux' - uses: ./.github/workflows/ec2-integration-test.yml - with: - github_sha: ${{github.sha}} - test_dir: terraform/ec2/linux_onprem - job_id: linux-onprem-integration-test - test_props: ${{needs.GenerateTestMatrix.outputs.ec2_linux_onprem_matrix}} - test_repo_name: ${{ needs.OutputEnvVariables.outputs.CWA_GITHUB_TEST_REPO_NAME }} - test_repo_url: ${{ needs.OutputEnvVariables.outputs.CWA_GITHUB_TEST_REPO_URL }} - test_repo_branch: ${{ needs.OutputEnvVariables.outputs.CWA_GITHUB_TEST_REPO_BRANCH }} - localstack_host: ${{needs.StartLocalStack.outputs.local_stack_host_name}} - region: us-west-2 - secrets: inherit - - EC2WinIntegrationTest: - needs: [GenerateTestMatrix] - name: 'EC2WinIntegrationTest' - runs-on: ubuntu-latest - strategy: - fail-fast: false - matrix: - arrays: ${{ fromJson(needs.GenerateTestMatrix.outputs.ec2_windows_matrix) }} - permissions: - id-token: write - contents: read - steps: - - uses: actions/checkout@v3 - with: - repository: ${{env.CWA_GITHUB_TEST_REPO_NAME}} - ref: ${{env.CWA_GITHUB_TEST_REPO_BRANCH}} - - - name: Configure AWS Credentials - uses: aws-actions/configure-aws-credentials@v2 - with: - role-to-assume: ${{ env.TERRAFORM_AWS_ASSUME_ROLE }} - aws-region: us-west-2 - role-duration-seconds: ${{ env.TERRAFORM_AWS_ASSUME_ROLE_DURATION }} - - - name: Echo Test Info - run: echo run on ec2 instance os ${{ matrix.arrays.os }} use ssm 
${{ matrix.arrays.useSSM }} test ${{ matrix.arrays.test_dir }} - - - name: Install Terraform - uses: hashicorp/setup-terraform@v3 - - - name: Verify Terraform version - run: terraform --version - - # nick-fields/retry@v2 starts at base dir - - name: Terraform apply - uses: nick-fields/retry@v2 - with: - max_attempts: 3 - timeout_minutes: 30 - retry_wait_seconds: 5 - command: | - if [ "${{ matrix.arrays.terraform_dir }}" != "" ]; then - cd "${{ matrix.arrays.terraform_dir }}" - else - cd terraform/ec2/win - fi - terraform init - if terraform apply --auto-approve \ - -var="ec2_instance_type=${{ matrix.arrays.instanceType }}" \ - -var="ssh_key_value=${PRIVATE_KEY}" -var="ssh_key_name=${KEY_NAME}" \ - -var="test_name=${{ matrix.arrays.os }}" \ - -var="cwa_github_sha=${GITHUB_SHA}" \ - -var="test_dir=${{ matrix.arrays.test_dir }}" \ - -var="ami=${{ matrix.arrays.ami }}" \ - -var="use_ssm=${{ matrix.arrays.useSSM }}" \ - -var="s3_bucket=${S3_INTEGRATION_BUCKET}" ; then - terraform destroy -auto-approve - else - terraform destroy -auto-approve && exit 1 - fi - - #This is here just in case workflow cancel - - name: Terraform destroy - if: ${{ cancelled() || failure() }} - uses: nick-fields/retry@v2 - with: - max_attempts: 3 - timeout_minutes: 8 - retry_wait_seconds: 5 - command: | - if [ "${{ matrix.arrays.terraform_dir }}" != "" ]; then - cd "${{ matrix.arrays.terraform_dir }}" - else - cd terraform/ec2/win - fi - terraform destroy --auto-approve - - EC2DarwinIntegrationTest: - needs: [GenerateTestMatrix] - name: 'EC2DarwinIntegrationTest' - runs-on: ubuntu-latest - strategy: - fail-fast: false - matrix: - arrays: ${{ fromJson(needs.GenerateTestMatrix.outputs.ec2_mac_matrix) }} - permissions: - id-token: write - contents: read - steps: - - uses: actions/checkout@v3 - with: - repository: ${{env.CWA_GITHUB_TEST_REPO_NAME}} - ref: ${{env.CWA_GITHUB_TEST_REPO_BRANCH}} - - - name: Configure AWS Credentials - uses: aws-actions/configure-aws-credentials@v2 - with: - 
role-to-assume: ${{ env.TERRAFORM_AWS_ASSUME_ROLE }} - aws-region: us-west-2 - role-duration-seconds: ${{ env.TERRAFORM_AWS_ASSUME_ROLE_DURATION }} - - - name: Echo OS - run: echo run on ec2 instance os ${{ matrix.arrays.os }} - - - name: Install Terraform - uses: hashicorp/setup-terraform@v3 - - - name: Verify Terraform version - run: terraform --version - - # nick-fields/retry@v2 starts at base dir - - name: Terraform apply - uses: nick-fields/retry@v2 - with: - max_attempts: 3 - timeout_minutes: 60 - retry_wait_seconds: 5 - command: | - if [ "${{ matrix.arrays.terraform_dir }}" != "" ]; then - cd "${{ matrix.arrays.terraform_dir }}" - else - cd terraform/ec2/mac - fi - terraform init - if terraform apply --auto-approve \ - -var="ssh_key_value=${PRIVATE_KEY}" -var="ssh_key_name=${KEY_NAME}" \ - -var="arc=${{ matrix.arrays.arc }}" \ - -var="ec2_instance_type=${{ matrix.arrays.instanceType }}" \ - -var="cwa_github_sha=${GITHUB_SHA}" -var="ami=${{ matrix.arrays.ami }}" \ - -var="test_dir=${{ matrix.arrays.test_dir }}" \ - -var="license_manager_arn=${{ env.LICENSE_MANAGER_ARN }}" \ - -var="s3_bucket=${S3_INTEGRATION_BUCKET}" ; then - terraform destroy -auto-approve - else - terraform destroy -auto-approve && exit 1 - fi - - #This is here just in case workflow cancel - - name: Terraform destroy - if: ${{ cancelled() || failure() }} - uses: nick-fields/retry@v2 - with: - max_attempts: 3 - timeout_minutes: 8 - retry_wait_seconds: 5 - command: | - if [ "${{ matrix.arrays.terraform_dir }}" != "" ]; then - cd "${{ matrix.arrays.terraform_dir }}" - else - cd terraform/ec2/mac - fi - terraform destroy --auto-approve - - StopLocalStack: - name: 'StopLocalStack' - if: ${{ always() && needs.StartLocalStack.result == 'success' }} - needs: [ StartLocalStack, EC2LinuxIntegrationTest, LinuxOnPremIntegrationTest, OutputEnvVariables ] - uses: ./.github/workflows/stop-localstack.yml + uses: ./.github/workflows/test-artifacts.yml secrets: inherit permissions: id-token: write contents: 
read with: - region: us-west-2 - test_repo_name: ${{ needs.OutputEnvVariables.outputs.CWA_GITHUB_TEST_REPO_NAME }} - test_repo_branch: ${{ needs.OutputEnvVariables.outputs.CWA_GITHUB_TEST_REPO_BRANCH }} - terraform_assume_role: ${{ vars.TERRAFORM_AWS_ASSUME_ROLE }} - github_sha: ${{github.sha}} - s3_integration_bucket: ${{ vars.S3_INTEGRATION_BUCKET }} - - StopLocalStackITAR: - name: 'StopLocalStackITAR' - if: ${{ always() && needs.StartLocalStackITAR.result == 'success' }} - needs: [ StartLocalStackITAR, EC2LinuxIntegrationTestITAR, OutputEnvVariables ] - uses: ./.github/workflows/stop-localstack.yml - secrets: inherit - permissions: - id-token: write - contents: read - with: - region: us-gov-east-1 - test_repo_name: ${{ needs.OutputEnvVariables.outputs.CWA_GITHUB_TEST_REPO_NAME }} - test_repo_branch: ${{ needs.OutputEnvVariables.outputs.CWA_GITHUB_TEST_REPO_BRANCH }} - terraform_assume_role: ${{ vars.TERRAFORM_AWS_ASSUME_ROLE_ITAR }} - github_sha: ${{github.sha}} - s3_integration_bucket: ${{ vars.S3_INTEGRATION_BUCKET_ITAR }} - - StopLocalStackCN: - name: 'StopLocalStackCN' - if: ${{ always() && needs.StartLocalStackCN.result == 'success' }} - needs: [ StartLocalStackCN, EC2LinuxIntegrationTestCN ] - uses: ./.github/workflows/stop-localstack.yml - secrets: inherit - permissions: - id-token: write - contents: read - with: - region: cn-north-1 - test_repo_name: ${{ needs.OutputEnvVariables.outputs.CWA_GITHUB_TEST_REPO_NAME }} - test_repo_branch: ${{ needs.OutputEnvVariables.outputs.CWA_GITHUB_TEST_REPO_BRANCH }} - terraform_assume_role: ${{ vars.TERRAFORM_AWS_ASSUME_ROLE_CN }} - github_sha: ${{github.sha}} - s3_integration_bucket: ${{ vars.S3_INTEGRATION_BUCKET_CN }} - - ECSEC2IntegrationTest: - name: 'ECSEC2IntegrationTest' - runs-on: ubuntu-latest - needs: [ GenerateTestMatrix ] - strategy: - fail-fast: false - matrix: - arrays: ${{ fromJson(needs.GenerateTestMatrix.outputs.ecs_ec2_launch_daemon_matrix) }} - permissions: - id-token: write - contents: read - 
steps: - - uses: actions/checkout@v3 - with: - repository: ${{env.CWA_GITHUB_TEST_REPO_NAME}} - ref: ${{env.CWA_GITHUB_TEST_REPO_BRANCH}} - - - name: Configure AWS Credentials - uses: aws-actions/configure-aws-credentials@v2 - with: - role-to-assume: ${{ env.TERRAFORM_AWS_ASSUME_ROLE }} - aws-region: us-west-2 - role-duration-seconds: ${{ env.TERRAFORM_AWS_ASSUME_ROLE_DURATION }} - - - name: Login ECR - id: login-ecr - uses: aws-actions/amazon-ecr-login@v2 - - - name: Install Terraform - uses: hashicorp/setup-terraform@v3 - - - name: Verify Terraform version - run: terraform --version - - - name: Terraform apply - uses: nick-fields/retry@v2 - with: - max_attempts: 3 - timeout_minutes: 15 - retry_wait_seconds: 5 - command: | - if [ "${{ matrix.arrays.terraform_dir }}" != "" ]; then - cd "${{ matrix.arrays.terraform_dir }}" - else - cd terraform/ecs_ec2/daemon - fi - - terraform init - if terraform apply --auto-approve\ - -var="test_dir=${{ matrix.arrays.test_dir }}"\ - -var="ec2_instance_type=${{ matrix.arrays.instanceType }}" \ - -var="cwagent_image_repo=${{ steps.login-ecr.outputs.registry }}/${{ env.ECR_INTEGRATION_TEST_REPO }}"\ - -var="cwagent_image_tag=${{ github.sha }}"\ - -var="ec2_instance_type=${{ matrix.arrays.instanceType }}" \ - -var="metadataEnabled=${{ matrix.arrays.metadataEnabled }}" \ - -var="ami=${{ matrix.arrays.ami }}" ; then - terraform destroy -auto-approve - else - terraform destroy -auto-approve && exit 1 - fi - - - name: Terraform destroy - if: ${{ cancelled() || failure() }} - uses: nick-fields/retry@v2 - with: - max_attempts: 3 - timeout_minutes: 8 - retry_wait_seconds: 5 - command: | - if [ "${{ matrix.arrays.terraform_dir }}" != "" ]; then - cd "${{ matrix.arrays.terraform_dir }}" - else - cd terraform/ecs_ec2/daemon - fi - terraform destroy --auto-approve - - ECSFargateIntegrationTest: - name: 'ECSFargateIntegrationTest' - runs-on: ubuntu-latest - needs: [GenerateTestMatrix] - strategy: - fail-fast: false - matrix: - arrays: ${{ 
fromJson(needs.GenerateTestMatrix.outputs.ecs_fargate_matrix) }} - permissions: - id-token: write - contents: read - steps: - - uses: actions/checkout@v3 - with: - repository: ${{env.CWA_GITHUB_TEST_REPO_NAME}} - ref: ${{env.CWA_GITHUB_TEST_REPO_BRANCH}} - - - name: Configure AWS Credentials - uses: aws-actions/configure-aws-credentials@v2 - with: - role-to-assume: ${{ env.TERRAFORM_AWS_ASSUME_ROLE }} - aws-region: us-west-2 - role-duration-seconds: ${{ env.TERRAFORM_AWS_ASSUME_ROLE_DURATION }} - - - name: Login ECR - id: login-ecr - uses: aws-actions/amazon-ecr-login@v2 - - - name: Install Terraform - uses: hashicorp/setup-terraform@v3 - - - name: Verify Terraform version - run: terraform --version - - - name: Terraform apply - uses: nick-fields/retry@v2 - with: - max_attempts: 3 - timeout_minutes: 15 - retry_wait_seconds: 5 - command: | - if [ "${{ matrix.arrays.terraform_dir }}" != "" ]; then - cd "${{ matrix.arrays.terraform_dir }}" - else - cd terraform/ecs_fargate/linux - fi - - terraform init - if terraform apply --auto-approve\ - -var="test_dir=${{ matrix.arrays.test_dir }}"\ - -var="cwagent_image_repo=${{ steps.login-ecr.outputs.registry }}/${{ env.ECR_INTEGRATION_TEST_REPO }}"\ - -var="cwagent_image_tag=${{ github.sha }}"; then - terraform destroy -auto-approve - else - terraform destroy -auto-approve && exit 1 - fi - - name: Terraform destroy - if: ${{ cancelled() || failure() }} - uses: nick-fields/retry@v2 - with: - max_attempts: 3 - timeout_minutes: 8 - retry_wait_seconds: 5 - command: | - if [ "${{ matrix.arrays.terraform_dir }}" != "" ]; then - cd "${{ matrix.arrays.terraform_dir }}" - else - cd terraform/ecs_fargate/linux - fi - terraform destroy --auto-approve - - EKSIntegrationTest: - name: 'EKSIntegrationTest' - runs-on: ubuntu-latest - needs: [ GenerateTestMatrix ] - strategy: - fail-fast: false - matrix: - arrays: ${{ fromJson(needs.GenerateTestMatrix.outputs.eks_daemon_matrix) }} - permissions: - id-token: write - contents: read - steps: - - 
uses: actions/checkout@v3 - with: - repository: ${{env.CWA_GITHUB_TEST_REPO_NAME}} - ref: ${{env.CWA_GITHUB_TEST_REPO_BRANCH}} - - - name: Configure AWS Credentials - uses: aws-actions/configure-aws-credentials@v2 - with: - role-to-assume: ${{ env.TERRAFORM_AWS_ASSUME_ROLE }} - aws-region: us-west-2 - role-duration-seconds: ${{ env.TERRAFORM_AWS_ASSUME_ROLE_DURATION }} - - - name: Login ECR - id: login-ecr - uses: aws-actions/amazon-ecr-login@v2 - - - name: Install Terraform - uses: hashicorp/setup-terraform@v3 - - - name: Verify Terraform version - run: terraform --version - - - name: Terraform apply - uses: nick-fields/retry@v2 - with: - max_attempts: 2 - timeout_minutes: 90 # EKS takes about 20 minutes to spin up a cluster and service on the cluster - retry_wait_seconds: 5 - command: | - if [ "${{ matrix.arrays.terraform_dir }}" != "" ]; then - cd "${{ matrix.arrays.terraform_dir }}" - else - cd terraform/eks/daemon - fi - - terraform init - if terraform apply --auto-approve \ - -var="test_dir=${{ matrix.arrays.test_dir }}"\ - -var="cwagent_image_repo=${{ steps.login-ecr.outputs.registry }}/${{ env.ECR_INTEGRATION_TEST_REPO }}" \ - -var="cwagent_image_tag=${{ github.sha }}" \ - -var="ami_type=${{ matrix.arrays.ami }}" \ - -var="instance_type=${{ matrix.arrays.instanceType }}" \ - -var="k8s_version=${{ matrix.arrays.k8sVersion }}"; then - terraform destroy -auto-approve - else - terraform destroy -auto-approve && exit 1 - fi - - - name: Terraform destroy - if: ${{ cancelled() || failure() }} - uses: nick-fields/retry@v2 - with: - max_attempts: 3 - timeout_minutes: 8 - retry_wait_seconds: 5 - command: | - if [ "${{ matrix.arrays.terraform_dir }}" != "" ]; then - cd "${{ matrix.arrays.terraform_dir }}" - else - cd terraform/eks/daemon - fi - terraform destroy --auto-approve - - EKSPrometheusIntegrationTest: - name: 'EKSPrometheusIntegrationTest' - runs-on: ubuntu-latest - needs: [ GenerateTestMatrix ] - strategy: - fail-fast: false - matrix: - arrays: ${{ 
fromJson(needs.GenerateTestMatrix.outputs.eks_deployment_matrix) }} - permissions: - id-token: write - contents: read - steps: - - uses: actions/checkout@v3 - with: - repository: ${{env.CWA_GITHUB_TEST_REPO_NAME}} - ref: ${{env.CWA_GITHUB_TEST_REPO_BRANCH}} - - - name: Configure AWS Credentials - uses: aws-actions/configure-aws-credentials@v2 - with: - role-to-assume: ${{ env.TERRAFORM_AWS_ASSUME_ROLE }} - aws-region: us-west-2 - role-duration-seconds: ${{ env.TERRAFORM_AWS_ASSUME_ROLE_DURATION }} - - - name: Login ECR - id: login-ecr - uses: aws-actions/amazon-ecr-login@v2 - - - name: Install Terraform - uses: hashicorp/setup-terraform@v3 - - - name: Verify Terraform version - run: terraform --version - - - name: Terraform apply - uses: nick-fields/retry@v2 - with: - max_attempts: 3 - timeout_minutes: 60 # EKS takes about 20 minutes to spin up a cluster and service on the cluster - retry_wait_seconds: 5 - command: | - if [ "${{ matrix.arrays.terraform_dir }}" != "" ]; then - cd "${{ matrix.arrays.terraform_dir }}" - else - cd terraform/eks/deployment - fi - - terraform init - if terraform apply --auto-approve \ - -var="test_dir=${{ matrix.arrays.test_dir }}"\ - -var="cwagent_image_repo=${{ steps.login-ecr.outputs.registry }}/${{ env.ECR_INTEGRATION_TEST_REPO }}" \ - -var="cwagent_image_tag=${{ github.sha }}" \ - -var="k8s_version=${{ matrix.arrays.k8s_version }}"; then - terraform destroy -auto-approve - else - terraform destroy -auto-approve && exit 1 - fi - - - name: Terraform destroy - if: ${{ cancelled() || failure() }} - uses: nick-fields/retry@v2 - with: - max_attempts: 3 - timeout_minutes: 8 - retry_wait_seconds: 5 - command: | - if [ "${{ matrix.arrays.terraform_dir }}" != "" ]; then - cd "${{ matrix.arrays.terraform_dir }}" - else - cd terraform/eks/deployment - fi - terraform destroy --auto-approve - - PerformanceTrackingTest: - name: "PerformanceTrackingTest" - needs: [GenerateTestMatrix] - runs-on: ubuntu-latest - strategy: - fail-fast: false - matrix: 
- arrays: ${{ fromJson(needs.GenerateTestMatrix.outputs.ec2_performance_matrix) }} - permissions: - id-token: write - contents: read - steps: - - uses: actions/checkout@v3 - with: - repository: ${{env.CWA_GITHUB_TEST_REPO_NAME}} - ref: ${{env.CWA_GITHUB_TEST_REPO_BRANCH}} - - - name: Configure AWS Credentials - uses: aws-actions/configure-aws-credentials@v2 - with: - role-to-assume: ${{ env.TERRAFORM_AWS_ASSUME_ROLE }} - aws-region: us-west-2 - role-duration-seconds: ${{ env.TERRAFORM_AWS_ASSUME_ROLE_DURATION }} - - - name: Install Terraform - uses: hashicorp/setup-terraform@v3 - - - name: Verify Terraform version - run: terraform --version - - - name: Terraform apply - uses: nick-fields/retry@v2 - with: - max_attempts: 1 - timeout_minutes: 60 - retry_wait_seconds: 5 - command: | - cd terraform/performance - terraform init - if terraform apply --auto-approve \ - -var="ssh_key_value=${PRIVATE_KEY}" \ - -var="cwa_github_sha=${GITHUB_SHA}" \ - -var="ami=${{ matrix.arrays.ami }}" \ - -var="arc=${{ matrix.arrays.arc }}" \ - -var="s3_bucket=${S3_INTEGRATION_BUCKET}" \ - -var="ssh_key_name=${KEY_NAME}" \ - -var="values_per_minute=${{ matrix.arrays.values_per_minute}}"\ - -var="family=${{ matrix.arrays.family}}"\ - -var="test_dir=${{ matrix.arrays.test_dir }}" ; then terraform destroy -auto-approve - else - terraform destroy -auto-approve && exit 1 - fi - - - name: Terraform destroy - if: ${{ cancelled() || failure() }} - uses: nick-fields/retry@v2 - with: - max_attempts: 3 - timeout_minutes: 8 - retry_wait_seconds: 5 - command: cd terraform/performance && terraform destroy --auto-approve - - EC2WinPerformanceTest: - name: "EC2WinPerformanceTest" - needs: [ GenerateTestMatrix ] - runs-on: ubuntu-latest - strategy: - fail-fast: false - matrix: - arrays: ${{ fromJson(needs.GenerateTestMatrix.outputs.ec2_windows_performance_matrix) }} - permissions: - id-token: write - contents: read - steps: - - uses: actions/checkout@v3 - with: - repository: 
${{env.CWA_GITHUB_TEST_REPO_NAME}} - ref: ${{env.CWA_GITHUB_TEST_REPO_BRANCH}} - - - name: Configure AWS Credentials - uses: aws-actions/configure-aws-credentials@v2 - with: - role-to-assume: ${{ env.TERRAFORM_AWS_ASSUME_ROLE }} - aws-region: us-west-2 - role-duration-seconds: ${{ env.TERRAFORM_AWS_ASSUME_ROLE_DURATION }} - - - name: Install Terraform - uses: hashicorp/setup-terraform@v3 - - - name: Verify Terraform version - run: terraform --version - - - name: Terraform apply - uses: nick-fields/retry@v2 - with: - max_attempts: 1 - timeout_minutes: 60 - retry_wait_seconds: 5 - command: | - cd terraform/performance - terraform init - if terraform apply --auto-approve \ - -var="ssh_key_value=${PRIVATE_KEY}" \ - -var="cwa_github_sha=${GITHUB_SHA}" \ - -var="ami=${{ matrix.arrays.ami }}" \ - -var="arc=${{ matrix.arrays.arc }}" \ - -var="s3_bucket=${S3_INTEGRATION_BUCKET}" \ - -var="ssh_key_name=${KEY_NAME}" \ - -var="values_per_minute=${{ matrix.arrays.values_per_minute}}"\ - -var="family=${{ matrix.arrays.family}}"\ - -var="test_dir=${{ matrix.arrays.test_dir }}" ; then terraform destroy -auto-approve - else - terraform destroy -auto-approve && exit 1 - fi - - - name: Terraform destroy - if: ${{ cancelled() || failure() }} - uses: nick-fields/retry@v2 - with: - max_attempts: 3 - timeout_minutes: 8 - retry_wait_seconds: 5 - command: cd terraform/performance && terraform destroy --auto-approve - - StressTrackingTest: - name: "StressTrackingTest" - needs: [GenerateTestMatrix] - runs-on: ubuntu-latest - strategy: - fail-fast: false - matrix: - arrays: ${{ fromJson(needs.GenerateTestMatrix.outputs.ec2_stress_matrix) }} - permissions: - id-token: write - contents: read - steps: - - uses: actions/checkout@v3 - with: - repository: ${{env.CWA_GITHUB_TEST_REPO_NAME}} - ref: ${{env.CWA_GITHUB_TEST_REPO_BRANCH}} - - - name: Configure AWS Credentials - uses: aws-actions/configure-aws-credentials@v2 - with: - role-to-assume: ${{ env.TERRAFORM_AWS_ASSUME_ROLE }} - aws-region: 
us-west-2 - role-duration-seconds: ${{ env.TERRAFORM_AWS_ASSUME_ROLE_DURATION }} - - - name: Install Terraform - uses: hashicorp/setup-terraform@v3 - - - name: Verify Terraform version - run: terraform --version - - - name: Echo Test Info - run: echo run on ec2 instance os ${{ matrix.arrays.os }} arc ${{ matrix.arrays.arc }} test dir ${{ matrix.arrays.test_dir }} values per minute ${{ matrix.arrays.values_per_minute }} - - - name: Terraform apply - uses: nick-fields/retry@v2 - with: - max_attempts: 1 - timeout_minutes: 60 - retry_wait_seconds: 5 - command: | - cd terraform/stress - terraform init - if terraform apply --auto-approve \ - -var="ssh_key_value=${PRIVATE_KEY}" \ - -var="cwa_github_sha=${GITHUB_SHA}" \ - -var="ami=${{ matrix.arrays.ami }}" \ - -var="arc=${{ matrix.arrays.arc }}" \ - -var="s3_bucket=${S3_INTEGRATION_BUCKET}" \ - -var="ssh_key_name=${KEY_NAME}" \ - -var="values_per_minute=${{ matrix.arrays.values_per_minute}}"\ - -var="test_dir=${{ matrix.arrays.test_dir }}" ; then terraform destroy -auto-approve - else - terraform destroy -auto-approve && exit 1 - fi - - - name: Terraform destroy - if: ${{ cancelled() || failure() }} - uses: nick-fields/retry@v2 - with: - max_attempts: 3 - timeout_minutes: 8 - retry_wait_seconds: 5 - command: cd terraform/stress && terraform destroy --auto-approve - - EC2WinStressTrackingTest: - name: "EC2WinStressTrackingTest" - needs: [GenerateTestMatrix] - runs-on: ubuntu-latest - strategy: - fail-fast: false - matrix: - arrays: ${{ fromJson(needs.GenerateTestMatrix.outputs.ec2_windows_stress_matrix) }} - permissions: - id-token: write - contents: read - steps: - - uses: actions/checkout@v3 - with: - repository: ${{env.CWA_GITHUB_TEST_REPO_NAME}} - ref: ${{env.CWA_GITHUB_TEST_REPO_BRANCH}} - - - name: Configure AWS Credentials - uses: aws-actions/configure-aws-credentials@v2 - with: - role-to-assume: ${{ env.TERRAFORM_AWS_ASSUME_ROLE }} - aws-region: us-west-2 - role-duration-seconds: ${{ 
env.TERRAFORM_AWS_ASSUME_ROLE_DURATION }} - - - name: Install Terraform - uses: hashicorp/setup-terraform@v3 - - - name: Verify Terraform version - run: terraform --version - - - name: Echo Test Info - run: echo run on ec2 instance os ${{ matrix.arrays.os }} arc ${{ matrix.arrays.arc }} test dir ${{ matrix.arrays.test_dir }} values per minute ${{ matrix.arrays.values_per_minute }} - - - name: Terraform apply - uses: nick-fields/retry@v2 - with: - max_attempts: 1 - timeout_minutes: 60 - retry_wait_seconds: 5 - command: | - cd terraform/stress - terraform init - if terraform apply --auto-approve \ - -var="ssh_key_value=${PRIVATE_KEY}" \ - -var="cwa_github_sha=${GITHUB_SHA}" \ - -var="ami=${{ matrix.arrays.ami }}" \ - -var="arc=${{ matrix.arrays.arc }}" \ - -var="s3_bucket=${S3_INTEGRATION_BUCKET}" \ - -var="ssh_key_name=${KEY_NAME}" \ - -var="values_per_minute=${{ matrix.arrays.values_per_minute}}"\ - -var="family=${{ matrix.arrays.family}}"\ - -var="test_dir=${{ matrix.arrays.test_dir }}" ; then terraform destroy -auto-approve - else - terraform destroy -auto-approve && exit 1 - fi - - - name: Terraform destroy - if: ${{ cancelled() || failure() }} - uses: nick-fields/retry@v2 - with: - max_attempts: 3 - timeout_minutes: 8 - retry_wait_seconds: 5 - command: cd terraform/stress && terraform destroy --auto-approve - - GPUEndToEndTest: - name: "GPU E2E Test" - needs: [ GenerateTestMatrix, OutputEnvVariables ] - runs-on: ubuntu-latest - strategy: - fail-fast: false - matrix: - arrays: ${{ fromJson(needs.GenerateTestMatrix.outputs.eks_addon_matrix) }} - permissions: - id-token: write - contents: read - steps: - - uses: actions/checkout@v3 - with: - repository: ${{env.CWA_GITHUB_TEST_REPO_NAME}} - ref: ${{env.CWA_GITHUB_TEST_REPO_BRANCH}} - - - name: Configure AWS Credentials - uses: aws-actions/configure-aws-credentials@v2 - with: - role-to-assume: ${{ env.TERRAFORM_AWS_ASSUME_ROLE }} - aws-region: us-west-2 - role-duration-seconds: ${{ 
env.TERRAFORM_AWS_ASSUME_ROLE_DURATION }} - - - name: Install Terraform - uses: hashicorp/setup-terraform@v3 - - - name: Verify Terraform version - run: terraform --version - - - name: Terraform apply and setup - run: | - if [ "${{ matrix.arrays.terraform_dir }}" != "" ]; then - cd "${{ matrix.arrays.terraform_dir }}" - else - cd terraform/eks/addon/gpu - fi - - terraform init - if terraform apply --auto-approve \ - -var="beta=true" \ - -var="ami_type=${{ matrix.arrays.ami }}" \ - -var="instance_type=${{ matrix.arrays.instanceType }}" \ - -var="k8s_version=${{ matrix.arrays.k8sVersion }}"; then - echo "Terraform apply successful." - - # Capture the output - echo "Getting EKS cluster name" - EKS_CLUSTER_NAME=$(terraform output -raw eks_cluster_name) - echo "Cluster name is ${EKS_CLUSTER_NAME}" - kubectl create -f https://raw.githubusercontent.com/NVIDIA/k8s-device-plugin/v0.17.0/deployments/static/nvidia-device-plugin.yml - kubectl patch amazoncloudwatchagents -n amazon-cloudwatch cloudwatch-agent --type='json' -p='[{"op": "replace", "path": "/spec/image", "value": ${{ secrets.AWS_ECR_PRIVATE_REGISTRY }}/${{ env.ECR_INTEGRATION_TEST_REPO }}:${{ github.sha }}}]' - # wait nvidia device plugin to be ready - sleep 10 - kubectl apply -f ./gpuBurner.yaml - else - terraform destroy -auto-approve && exit 1 - fi - - - name: Run Go tests with retry - uses: nick-fields/retry@v2 - with: - max_attempts: 5 - timeout_minutes: 60 - retry_wait_seconds: 30 - command: | - if [ "${{ matrix.arrays.terraform_dir }}" != "" ]; then - cd "${{ matrix.arrays.terraform_dir }}" - else - cd terraform/eks/addon/gpu - fi - echo "Getting EKS cluster name" - EKS_CLUSTER_NAME=$(terraform output -raw eks_cluster_name) - echo "Cluster name is ${EKS_CLUSTER_NAME}" - - if go test ${{ matrix.arrays.test_dir }} -eksClusterName ${EKS_CLUSTER_NAME} -computeType=EKS -v -eksDeploymentStrategy=DAEMON -eksGpuType=nvidia -useE2EMetrics; then - echo "Tests passed" - else - echo "Tests failed" - exit 1 - fi - - - 
name: Terraform destroy - if: always() - uses: nick-fields/retry@v2 - with: - max_attempts: 3 - timeout_minutes: 8 - retry_wait_seconds: 5 - command: | - if [ "${{ matrix.arrays.terraform_dir }}" != "" ]; then - cd "${{ matrix.arrays.terraform_dir }}" - else - cd terraform/eks/addon/gpu - fi - terraform destroy -auto-approve + build_id: ${{ inputs.build_sha }} + test_repo_branch: ${{ inputs.test_repo_branch }} diff --git a/.github/workflows/release-candidate-test.yml b/.github/workflows/release-candidate-test.yml new file mode 100644 index 0000000000..b7c2b108e9 --- /dev/null +++ b/.github/workflows/release-candidate-test.yml @@ -0,0 +1,62 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: MIT + +name: Test Release Candidate +env: + TERRAFORM_AWS_ASSUME_ROLE: ${{ vars.TERRAFORM_AWS_ASSUME_ROLE }} + TERRAFORM_AWS_ASSUME_ROLE_DURATION: 14400 # 4 hours + S3_INTEGRATION_BUCKET: ${{ vars.S3_INTEGRATION_BUCKET }} + S3_RELEASE_BUCKET: amazon-cloud-watch-agent + S3_RELEASE_REPO: cloudwatch-agent + CWA_GITHUB_TEST_REPO_BRANCH: "main" + TERRAFORM_AWS_ASSUME_ROLE_ITAR: ${{ vars.TERRAFORM_AWS_ASSUME_ROLE_ITAR }} + S3_INTEGRATION_BUCKET_ITAR: ${{ vars.S3_INTEGRATION_BUCKET_ITAR }} + TERRAFORM_AWS_ASSUME_ROLE_CN: ${{ vars.TERRAFORM_AWS_ASSUME_ROLE_CN }} + S3_INTEGRATION_BUCKET_CN: ${{ vars.S3_INTEGRATION_BUCKET_CN }} + +on: + workflow_dispatch: + inputs: + build_id: + description: 'The build ID (release candidate build number or GitHub commit SHA)' + type: string + required: true + test_repo_branch: + description: 'Override for the test repo branch (default is main)' + type: string + +jobs: + OutputEnvVariables: + name: 'OutputEnvVariables' + runs-on: ubuntu-latest + outputs: + CWA_GITHUB_TEST_REPO_BRANCH: ${{ steps.set-outputs.outputs.CWA_GITHUB_TEST_REPO_BRANCH }} + steps: + - name: SetOutputs + id: set-outputs + run: | + CWA_GITHUB_TEST_REPO_BRANCH=${{ inputs.test_repo_branch }} + echo 
"CWA_GITHUB_TEST_REPO_BRANCH=${CWA_GITHUB_TEST_REPO_BRANCH:-${{ env.CWA_GITHUB_TEST_REPO_BRANCH }}}" >> "$GITHUB_OUTPUT" + + - name: Echo test variables + run: | + echo "CWA_GITHUB_TEST_REPO_BRANCH: ${{ steps.set-outputs.outputs.CWA_GITHUB_TEST_REPO_BRANCH }}" + + RepackageArtifacts: + name: 'RepackageArtifacts' + uses: ./.github/workflows/repackage-release-artifacts.yml + secrets: inherit + permissions: + id-token: write + contents: read + with: + build_id: ${{ inputs.build_id }} + + StartIntegrationTests: + needs: [ RepackageArtifacts, OutputEnvVariables ] + runs-on: ubuntu-latest + steps: + # Avoid the limit of 5 nested workflows by executing the workflow in this manner + - run: gh workflow run test-artifacts.yml --ref ${{ github.ref_name }} --repo $GITHUB_REPOSITORY -f build_id=${{ inputs.build_id }} -f test_repo_branch=${{ needs.OutputEnvVariables.outputs.CWA_GITHUB_TEST_REPO_BRANCH }} + env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/repackage-release-artifacts.yml b/.github/workflows/repackage-release-artifacts.yml new file mode 100644 index 0000000000..75b2acabcf --- /dev/null +++ b/.github/workflows/repackage-release-artifacts.yml @@ -0,0 +1,168 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+# SPDX-License-Identifier: MIT + +name: Repackage Release Artifacts +env: + TERRAFORM_AWS_ASSUME_ROLE: ${{ vars.TERRAFORM_AWS_ASSUME_ROLE }} + TERRAFORM_AWS_ASSUME_ROLE_DURATION: 14400 # 4 hours + S3_INTEGRATION_BUCKET: ${{ vars.S3_INTEGRATION_BUCKET }} + S3_RELEASE_BUCKET: amazon-cloud-watch-agent + S3_RELEASE_REPO: cloudwatch-agent + TERRAFORM_AWS_ASSUME_ROLE_ITAR: ${{ vars.TERRAFORM_AWS_ASSUME_ROLE_ITAR }} + S3_INTEGRATION_BUCKET_ITAR: ${{ vars.S3_INTEGRATION_BUCKET_ITAR }} + TERRAFORM_AWS_ASSUME_ROLE_CN: ${{ vars.TERRAFORM_AWS_ASSUME_ROLE_CN }} + S3_INTEGRATION_BUCKET_CN: ${{ vars.S3_INTEGRATION_BUCKET_CN }} + +on: + workflow_call: + inputs: + build_id: + description: 'The build ID (release candidate build number or GitHub commit SHA)' + type: string + required: true + +jobs: + RepackageS3Artifacts: + name: 'RepackageS3Artifacts' + runs-on: ubuntu-latest + strategy: + fail-fast: false + permissions: + id-token: write + contents: read + steps: + - name: Configure AWS Credentials + uses: aws-actions/configure-aws-credentials@v4 + with: + role-to-assume: ${{ env.TERRAFORM_AWS_ASSUME_ROLE }} + aws-region: us-west-2 + role-duration-seconds: ${{ env.TERRAFORM_AWS_ASSUME_ROLE_DURATION }} + + - name: Download Artifacts + id: download-artifacts + run: | + mkdir -p windows/amd64/ + aws s3 cp --no-progress s3://${{ env.S3_RELEASE_BUCKET }}/windows/amd64/${{ inputs.build_id }}/AmazonCloudWatchAgent.zip ./windows/amd64/ + + mkdir -p linux/amd64 + aws s3 cp --no-progress s3://${{ env.S3_RELEASE_BUCKET }}/linux/amd64/${{ inputs.build_id }}/AmazonCloudWatchAgent.zip ./linux/amd64 + + mkdir -p linux/arm64 + aws s3 cp --no-progress s3://${{ env.S3_RELEASE_BUCKET }}/linux/arm64/${{ inputs.build_id }}/AmazonCloudWatchAgent.zip ./linux/arm64 + + mkdir -p darwin/amd64 + aws s3 cp --no-progress s3://${{ env.S3_RELEASE_BUCKET }}/darwin/amd64/${{ inputs.build_id }}/AmazonCloudWatchAgent.zip ./darwin/amd64 + + mkdir -p darwin/arm64 + aws s3 cp --no-progress s3://${{ 
env.S3_RELEASE_BUCKET }}/darwin/arm64/${{ inputs.build_id }}/AmazonCloudWatchAgent.zip ./darwin/arm64 + + - name: Re-upload Artifacts + id: upload-artifacts + run: | + pushd windows/amd64/ + unzip AmazonCloudWatchAgent.zip + aws s3 cp --no-progress ./amazon-cloudwatch-agent.msi s3://${{ env.S3_INTEGRATION_BUCKET }}/integration-test/packaging/${{ inputs.build_id }}/amazon-cloudwatch-agent.msi + popd + + pushd linux/amd64 + unzip AmazonCloudWatchAgent.zip + aws s3 cp --no-progress ./amazon-cloudwatch-agent.rpm s3://${{ env.S3_INTEGRATION_BUCKET }}/integration-test/binary/${{ inputs.build_id }}/linux/amd64/amazon-cloudwatch-agent.rpm + aws s3 cp --no-progress ./amazon-cloudwatch-agent.deb s3://${{ env.S3_INTEGRATION_BUCKET }}/integration-test/binary/${{ inputs.build_id }}/linux/amd64/amazon-cloudwatch-agent.deb + popd + + pushd linux/arm64 + unzip AmazonCloudWatchAgent.zip + aws s3 cp --no-progress ./amazon-cloudwatch-agent.rpm s3://${{ env.S3_INTEGRATION_BUCKET }}/integration-test/binary/${{ inputs.build_id }}/linux/arm64/amazon-cloudwatch-agent.rpm + aws s3 cp --no-progress ./amazon-cloudwatch-agent.deb s3://${{ env.S3_INTEGRATION_BUCKET }}/integration-test/binary/${{ inputs.build_id }}/linux/arm64/amazon-cloudwatch-agent.deb + popd + + pushd darwin/amd64 + unzip AmazonCloudWatchAgent.zip + aws s3 cp --no-progress ./amazon-cloudwatch-agent.pkg s3://${{ env.S3_INTEGRATION_BUCKET }}/integration-test/packaging/${{ inputs.build_id }}/amd64/amazon-cloudwatch-agent.pkg + popd + + pushd darwin/arm64 + unzip AmazonCloudWatchAgent.zip + aws s3 cp --no-progress ./amazon-cloudwatch-agent.pkg s3://${{ env.S3_INTEGRATION_BUCKET }}/integration-test/packaging/${{ inputs.build_id }}/arm64/amazon-cloudwatch-agent.pkg + popd + + - name: Configure AWS Credentials (CN) + uses: aws-actions/configure-aws-credentials@v4 + with: + role-to-assume: ${{ env.TERRAFORM_AWS_ASSUME_ROLE_CN }} + aws-region: cn-north-1 + role-duration-seconds: ${{ env.TERRAFORM_AWS_ASSUME_ROLE_DURATION }} + + -
name: Re-upload Artifacts (CN) + id: upload-artifacts-cn + run: | + pushd linux/amd64 + aws --region cn-north-1 s3 cp --no-progress ./amazon-cloudwatch-agent.rpm s3://${{ env.S3_INTEGRATION_BUCKET_CN }}/integration-test/binary/${{ inputs.build_id }}/linux/amd64/amazon-cloudwatch-agent.rpm + aws --region cn-north-1 s3 cp --no-progress ./amazon-cloudwatch-agent.deb s3://${{ env.S3_INTEGRATION_BUCKET_CN }}/integration-test/binary/${{ inputs.build_id }}/linux/amd64/amazon-cloudwatch-agent.deb + popd + + pushd linux/arm64 + aws --region cn-north-1 s3 cp --no-progress ./amazon-cloudwatch-agent.rpm s3://${{ env.S3_INTEGRATION_BUCKET_CN }}/integration-test/binary/${{ inputs.build_id }}/linux/arm64/amazon-cloudwatch-agent.rpm + aws --region cn-north-1 s3 cp --no-progress ./amazon-cloudwatch-agent.deb s3://${{ env.S3_INTEGRATION_BUCKET_CN }}/integration-test/binary/${{ inputs.build_id }}/linux/arm64/amazon-cloudwatch-agent.deb + popd + + - name: Configure AWS Credentials (ITAR) + uses: aws-actions/configure-aws-credentials@v4 + with: + role-to-assume: ${{ env.TERRAFORM_AWS_ASSUME_ROLE_ITAR }} + aws-region: us-gov-east-1 + role-duration-seconds: ${{ env.TERRAFORM_AWS_ASSUME_ROLE_DURATION }} + + - name: Re-upload Artifacts (ITAR) + id: upload-artifacts-itar + run: | + pushd linux/amd64 + aws --region us-gov-east-1 s3 cp --no-progress ./amazon-cloudwatch-agent.rpm s3://${{ env.S3_INTEGRATION_BUCKET_ITAR }}/integration-test/binary/${{ inputs.build_id }}/linux/amd64/amazon-cloudwatch-agent.rpm + aws --region us-gov-east-1 s3 cp --no-progress ./amazon-cloudwatch-agent.deb s3://${{ env.S3_INTEGRATION_BUCKET_ITAR }}/integration-test/binary/${{ inputs.build_id }}/linux/amd64/amazon-cloudwatch-agent.deb + popd + + pushd linux/arm64 + aws --region us-gov-east-1 s3 cp --no-progress ./amazon-cloudwatch-agent.rpm s3://${{ env.S3_INTEGRATION_BUCKET_ITAR }}/integration-test/binary/${{ inputs.build_id }}/linux/arm64/amazon-cloudwatch-agent.rpm + aws --region us-gov-east-1 s3 cp --no-progress 
./amazon-cloudwatch-agent.deb s3://${{ env.S3_INTEGRATION_BUCKET_ITAR }}/integration-test/binary/${{ inputs.build_id }}/linux/arm64/amazon-cloudwatch-agent.deb + popd + + RepackageECRImage: + name: 'RepackageECRImage' + runs-on: ubuntu-latest + strategy: + fail-fast: false + permissions: + id-token: write + contents: read + steps: + - name: Configure AWS Credentials + uses: aws-actions/configure-aws-credentials@v4 + with: + role-to-assume: ${{ env.TERRAFORM_AWS_ASSUME_ROLE }} + aws-region: us-west-2 + role-duration-seconds: ${{ env.TERRAFORM_AWS_ASSUME_ROLE_DURATION }} + + - name: Login to Release Artifacts Amazon ECR + id: login-artifacts-ecr + uses: aws-actions/amazon-ecr-login@v2 + with: + registries: ${{ secrets.RELEASE_ARTIFACTS_ACCOUNT_ID }} + + - name: Pull Image + id: pull-image + env: + ARTIFACTS_REGISTRY: ${{ steps.login-artifacts-ecr.outputs.registry }} + run: | + docker pull ${{ env.ARTIFACTS_REGISTRY }}/cloudwatch-agent:${{ inputs.build_id }} + + - name: Login to Integ Test Amazon ECR + id: login-integ-test-ecr + uses: aws-actions/amazon-ecr-login@v2 + + - name: Push Image + id: push-image + env: + ARTIFACTS_REGISTRY: ${{ steps.login-artifacts-ecr.outputs.registry }} + INTEG_TEST_REGISTRY: ${{ steps.login-integ-test-ecr.outputs.registry }} + run: | + docker buildx imagetools create -t ${{ env.INTEG_TEST_REGISTRY }}/cwagent-integration-test:${{ inputs.build_id }} ${{ env.ARTIFACTS_REGISTRY }}/cloudwatch-agent:${{ inputs.build_id }} diff --git a/.github/workflows/start-localstack.yml b/.github/workflows/start-localstack.yml index 53123dac76..4a187cdcc4 100644 --- a/.github/workflows/start-localstack.yml +++ b/.github/workflows/start-localstack.yml @@ -83,5 +83,5 @@ jobs: -var="ssh_key_name=${{env.KEY_NAME}}" && LOCAL_STACK_HOST_NAME=$(terraform output -raw public_dns) && echo $LOCAL_STACK_HOST_NAME && - echo "::set-output name=local_stack_host_name::$LOCAL_STACK_HOST_NAME" && + echo "local_stack_host_name=$LOCAL_STACK_HOST_NAME" >> "$GITHUB_OUTPUT" && 
aws s3 cp terraform.tfstate s3://${{inputs.s3_integration_bucket}}/integration-test/local-stack-terraform-state/${{inputs.github_sha}}/terraform.tfstate diff --git a/.github/workflows/stop-localstack.yml b/.github/workflows/stop-localstack.yml index 248e3e85ca..8f0f100ca6 100644 --- a/.github/workflows/stop-localstack.yml +++ b/.github/workflows/stop-localstack.yml @@ -53,6 +53,9 @@ jobs: - name: Copy state run: aws s3 cp s3://${{inputs.s3_integration_bucket}}/integration-test/local-stack-terraform-state/${{inputs.github_sha}}/terraform.tfstate . + - name: Install Terraform + uses: hashicorp/setup-terraform@v3 + - name: Verify Terraform version run: terraform --version diff --git a/.github/workflows/test-artifacts.yml b/.github/workflows/test-artifacts.yml new file mode 100644 index 0000000000..b8434afcf6 --- /dev/null +++ b/.github/workflows/test-artifacts.yml @@ -0,0 +1,1286 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: MIT + +name: Test Artifacts +env: + PRIVATE_KEY: ${{ secrets.AWS_PRIVATE_KEY }} + TERRAFORM_AWS_ASSUME_ROLE: ${{ vars.TERRAFORM_AWS_ASSUME_ROLE }} + TERRAFORM_AWS_ASSUME_ROLE_DURATION: 14400 # 4 hours + S3_INTEGRATION_BUCKET: ${{ vars.S3_INTEGRATION_BUCKET }} + KEY_NAME: ${{ secrets.KEY_NAME }} + CF_IAM_ROLE: ${{ secrets.CF_IAM_ROLE }} + CF_KEY_NAME: ${{ secrets.CF_KEY_NAME }} + ECR_INTEGRATION_TEST_REPO: "cwagent-integration-test" + CWA_GITHUB_TEST_REPO_NAME: "aws/amazon-cloudwatch-agent-test" + CWA_GITHUB_TEST_REPO_URL: "https://github.com/aws/amazon-cloudwatch-agent-test.git" + CWA_GITHUB_TEST_REPO_BRANCH: "main" + TERRAFORM_AWS_ASSUME_ROLE_ITAR: ${{ vars.TERRAFORM_AWS_ASSUME_ROLE_ITAR }} + S3_INTEGRATION_BUCKET_ITAR: ${{ vars.S3_INTEGRATION_BUCKET_ITAR }} + TERRAFORM_AWS_ASSUME_ROLE_CN: ${{ vars.TERRAFORM_AWS_ASSUME_ROLE_CN }} + S3_INTEGRATION_BUCKET_CN: ${{ vars.S3_INTEGRATION_BUCKET_CN }} + +on: + workflow_dispatch: + inputs: + build_id: + description: 'The build ID (release candidate 
build number or GitHub commit SHA)' + type: string + required: true + test_repo_branch: + description: 'Override for the GitHub test repository branch to use (default is main)' + type: string + workflow_call: + inputs: + build_id: + description: 'The build ID (release candidate build number or GitHub commit SHA)' + type: string + required: true + test_repo_branch: + description: 'Override for the GitHub test repository branch to use (default is main)' + type: string + +concurrency: + group: ${{ github.workflow }}-${{ inputs.build_id }} + cancel-in-progress: true + +jobs: + OutputEnvVariables: + name: 'OutputEnvVariables' + runs-on: ubuntu-latest + outputs: + CWA_GITHUB_TEST_REPO_NAME: ${{ steps.set-outputs.outputs.CWA_GITHUB_TEST_REPO_NAME }} + CWA_GITHUB_TEST_REPO_URL: ${{ steps.set-outputs.outputs.CWA_GITHUB_TEST_REPO_URL }} + CWA_GITHUB_TEST_REPO_BRANCH: ${{ steps.set-outputs.outputs.CWA_GITHUB_TEST_REPO_BRANCH }} + steps: + - name: SetOutputs + id: set-outputs + run: | + CWA_GITHUB_TEST_REPO_BRANCH=${{ inputs.test_repo_branch }} + + echo "CWA_GITHUB_TEST_REPO_NAME=${{ env.CWA_GITHUB_TEST_REPO_NAME }}" >> "$GITHUB_OUTPUT" + echo "CWA_GITHUB_TEST_REPO_URL=${{ env.CWA_GITHUB_TEST_REPO_URL }}" >> "$GITHUB_OUTPUT" + echo "CWA_GITHUB_TEST_REPO_BRANCH=${CWA_GITHUB_TEST_REPO_BRANCH:-${{ env.CWA_GITHUB_TEST_REPO_BRANCH }}}" >> "$GITHUB_OUTPUT" + + - name: Echo test variables + run: | + echo "build_id: ${{ inputs.build_id }}" + echo "CWA_GITHUB_TEST_REPO_NAME: ${{ steps.set-outputs.outputs.CWA_GITHUB_TEST_REPO_NAME }}" + echo "CWA_GITHUB_TEST_REPO_URL: ${{ steps.set-outputs.outputs.CWA_GITHUB_TEST_REPO_URL }}" + echo "CWA_GITHUB_TEST_REPO_BRANCH: ${{ steps.set-outputs.outputs.CWA_GITHUB_TEST_REPO_BRANCH }}" + + - uses: actions/checkout@v3 + with: + repository: ${{ steps.set-outputs.outputs.CWA_GITHUB_TEST_REPO_NAME }} + ref: ${{ steps.set-outputs.outputs.CWA_GITHUB_TEST_REPO_BRANCH }} + + - name: Set up Go 1.x + uses: actions/setup-go@v4 + with: + go-version: ~1.22.2 + + 
GenerateTestMatrix: + name: 'GenerateTestMatrix' + needs: [OutputEnvVariables] + runs-on: ubuntu-latest + outputs: + ec2_gpu_matrix: ${{ steps.set-matrix.outputs.ec2_gpu_matrix }} + ec2_linux_matrix: ${{ steps.set-matrix.outputs.ec2_linux_matrix }} + ec2_windows_matrix: ${{ steps.set-matrix.outputs.ec2_windows_matrix }} + ec2_mac_matrix: ${{ steps.set-matrix.outputs.ec2_mac_matrix }} + ec2_performance_matrix: ${{steps.set-matrix.outputs.ec2_performance_matrix}} + ec2_windows_performance_matrix: ${{steps.set-matrix.outputs.ec2_windows_performance_matrix}} + ec2_stress_matrix: ${{steps.set-matrix.outputs.ec2_stress_matrix}} + ec2_windows_stress_matrix: ${{steps.set-matrix.outputs.ec2_windows_stress_matrix}} + ecs_ec2_launch_daemon_matrix: ${{ steps.set-matrix.outputs.ecs_ec2_launch_daemon_matrix }} + ecs_fargate_matrix: ${{ steps.set-matrix.outputs.ecs_fargate_matrix }} + eks_daemon_matrix: ${{ steps.set-matrix.outputs.eks_daemon_matrix }} + eks_deployment_matrix: ${{ steps.set-matrix.outputs.eks_deployment_matrix }} + ec2_linux_itar_matrix: ${{ steps.set-matrix.outputs.ec2_linux_itar_matrix }} + ec2_linux_china_matrix: ${{ steps.set-matrix.outputs.ec2_linux_china_matrix }} + eks_addon_matrix: ${{ steps.set-matrix.outputs.eks_addon_matrix }} + steps: + - uses: actions/checkout@v3 + with: + repository: ${{ needs.OutputEnvVariables.outputs.CWA_GITHUB_TEST_REPO_NAME }} + ref: ${{ needs.OutputEnvVariables.outputs.CWA_GITHUB_TEST_REPO_BRANCH }} + + - name: Set up Go 1.x + uses: actions/setup-go@v4 + with: + go-version: ~1.22.2 + + - name: Generate matrix + id: set-matrix + run: | + go run --tags=generator generator/test_case_generator.go + echo "ec2_gpu_matrix=$(echo $(cat generator/resources/ec2_gpu_complete_test_matrix.json))" >> "$GITHUB_OUTPUT" + echo "eks_addon_matrix=$(echo $(cat generator/resources/eks_addon_complete_test_matrix.json))" >> "$GITHUB_OUTPUT" + echo "ec2_linux_matrix=$(echo $(cat generator/resources/ec2_linux_complete_test_matrix.json))" >> 
"$GITHUB_OUTPUT" + echo "ec2_windows_matrix=$(echo $(cat generator/resources/ec2_windows_complete_test_matrix.json))" >> "$GITHUB_OUTPUT" + echo "ec2_mac_matrix=$(echo $(cat generator/resources/ec2_mac_complete_test_matrix.json))" >> "$GITHUB_OUTPUT" + echo "ec2_performance_matrix=$(echo $(cat generator/resources/ec2_performance_complete_test_matrix.json))" >> "$GITHUB_OUTPUT" + echo "ec2_windows_performance_matrix=$(echo $(cat generator/resources/ec2_windows_performance_complete_test_matrix.json))" >> "$GITHUB_OUTPUT" + echo "ec2_stress_matrix=$(echo $(cat generator/resources/ec2_stress_complete_test_matrix.json))" >> "$GITHUB_OUTPUT" + echo "ec2_windows_stress_matrix=$(echo $(cat generator/resources/ec2_windows_stress_complete_test_matrix.json))" >> "$GITHUB_OUTPUT" + echo "ecs_ec2_launch_daemon_matrix=$(echo $(cat generator/resources/ecs_ec2_daemon_complete_test_matrix.json))" >> "$GITHUB_OUTPUT" + echo "ecs_fargate_matrix=$(echo $(cat generator/resources/ecs_fargate_complete_test_matrix.json))" >> "$GITHUB_OUTPUT" + echo "eks_daemon_matrix=$(echo $(cat generator/resources/eks_daemon_complete_test_matrix.json))" >> "$GITHUB_OUTPUT" + echo "eks_deployment_matrix=$(echo $(cat generator/resources/eks_deployment_complete_test_matrix.json))" >> "$GITHUB_OUTPUT" + echo "ec2_linux_itar_matrix=$(echo $(cat generator/resources/ec2_linux_itar_complete_test_matrix.json))" >> "$GITHUB_OUTPUT" + echo "ec2_linux_china_matrix=$(echo $(cat generator/resources/ec2_linux_china_complete_test_matrix.json))" >> "$GITHUB_OUTPUT" + + - name: Echo test plan matrix + run: | + echo "ec2_gpu_matrix: ${{ steps.set-matrix.outputs.ec2_gpu_matrix }}" + echo "eks_addon_matrix: ${{ steps.set-matrix.outputs.eks_addon_matrix }}" + echo "ec2_linux_matrix: ${{ steps.set-matrix.outputs.ec2_linux_matrix }}" + echo "ec2_windows_matrix: ${{ steps.set-matrix.outputs.ec2_windows_matrix }}" + echo "ec2_mac_matrix: ${{ steps.set-matrix.outputs.ec2_mac_matrix }}" + echo "ec2_performance_matrix: ${{ 
steps.set-matrix.outputs.ec2_performance_matrix}}" + echo "ec2_windows_performance_matrix: ${{ steps.set-matrix.outputs.ec2_windows_performance_matrix}}" + echo "ec2_stress_matrix: ${{ steps.set-matrix.outputs.ec2_stress_matrix}}" + echo "ec2_windows_stress_matrix: ${{ steps.set-matrix.outputs.ec2_windows_stress_matrix}}" + echo "ecs_ec2_launch_daemon_matrix: ${{ steps.set-matrix.outputs.ecs_ec2_launch_daemon_matrix }}" + echo "ecs_fargate_matrix: ${{ steps.set-matrix.outputs.ecs_fargate_matrix }}" + echo "eks_daemon_matrix: ${{ steps.set-matrix.outputs.eks_daemon_matrix }}" + echo "eks_deployment_matrix: ${{ steps.set-matrix.outputs.eks_deployment_matrix }}" + echo "ec2_linux_itar_matrix: ${{ steps.set-matrix.outputs.ec2_linux_itar_matrix }}" + echo "ec2_linux_china_matrix: ${{ steps.set-matrix.outputs.ec2_linux_china_matrix }}" + + CloudformationTest: + needs: [OutputEnvVariables, GenerateTestMatrix] + name: 'CFTest' + runs-on: ubuntu-latest + strategy: + fail-fast: false + permissions: + id-token: write + contents: read + steps: + - uses: actions/checkout@v3 + with: + repository: ${{ needs.OutputEnvVariables.outputs.CWA_GITHUB_TEST_REPO_NAME }} + ref: ${{ needs.OutputEnvVariables.outputs.CWA_GITHUB_TEST_REPO_BRANCH }} + path: test + + - name: Set up Go 1.x + uses: actions/setup-go@v2 + with: + go-version: ~1.22.2 + + - name: Configure AWS Credentials + uses: aws-actions/configure-aws-credentials@v2 + with: + role-to-assume: ${{ env.TERRAFORM_AWS_ASSUME_ROLE }} + aws-region: us-west-2 + role-duration-seconds: ${{ env.TERRAFORM_AWS_ASSUME_ROLE_DURATION }} + + - name: Test cf + run: | + cd test/test/cloudformation + go test -timeout 1h -package_path=s3://${S3_INTEGRATION_BUCKET}/integration-test/binary/${{ inputs.build_id }}/linux/amd64/amazon-cloudwatch-agent.rpm -iam_role=${CF_IAM_ROLE} -key_name=${CF_KEY_NAME} -metric_name=mem_used_percent + + StartLocalStack: + name: 'StartLocalStack' + needs: [OutputEnvVariables] + uses: 
./.github/workflows/start-localstack.yml + secrets: inherit + permissions: + id-token: write + contents: read + with: + region: us-west-2 + test_repo_name: ${{ needs.OutputEnvVariables.outputs.CWA_GITHUB_TEST_REPO_NAME }} + test_repo_branch: ${{ needs.OutputEnvVariables.outputs.CWA_GITHUB_TEST_REPO_BRANCH }} + terraform_assume_role: ${{ vars.TERRAFORM_AWS_ASSUME_ROLE }} + test_repo_url: ${{ needs.OutputEnvVariables.outputs.CWA_GITHUB_TEST_REPO_URL }} + github_sha: ${{ inputs.build_id }} + s3_integration_bucket: ${{ vars.S3_INTEGRATION_BUCKET }} + + StartLocalStackITAR: + name: 'StartLocalStackITAR' + needs: [OutputEnvVariables] + uses: ./.github/workflows/start-localstack.yml + secrets: inherit + permissions: + id-token: write + contents: read + with: + region: us-gov-east-1 + test_repo_name: ${{ needs.OutputEnvVariables.outputs.CWA_GITHUB_TEST_REPO_NAME }} + test_repo_branch: ${{ needs.OutputEnvVariables.outputs.CWA_GITHUB_TEST_REPO_BRANCH }} + terraform_assume_role: ${{ vars.TERRAFORM_AWS_ASSUME_ROLE_ITAR }} + test_repo_url: ${{ needs.OutputEnvVariables.outputs.CWA_GITHUB_TEST_REPO_URL }} + github_sha: ${{ inputs.build_id }} + s3_integration_bucket: ${{ vars.S3_INTEGRATION_BUCKET_ITAR }} + + StartLocalStackCN: + name: 'StartLocalStackCN' + needs: [ OutputEnvVariables, UploadDependenciesCN ] + uses: ./.github/workflows/start-localstack.yml + secrets: inherit + permissions: + id-token: write + contents: read + with: + region: cn-north-1 + test_repo_name: ${{ needs.OutputEnvVariables.outputs.CWA_GITHUB_TEST_REPO_NAME }} + test_repo_branch: ${{ needs.OutputEnvVariables.outputs.CWA_GITHUB_TEST_REPO_BRANCH }} + terraform_assume_role: ${{ vars.TERRAFORM_AWS_ASSUME_ROLE_CN }} + test_repo_url: ${{ needs.OutputEnvVariables.outputs.CWA_GITHUB_TEST_REPO_URL }} + github_sha: ${{ inputs.build_id }} + s3_integration_bucket: ${{ vars.S3_INTEGRATION_BUCKET_CN }} + + UploadDependenciesCN: + name: 'UploadDependenciesCN' + needs: [ OutputEnvVariables ] + uses: 
./.github/workflows/upload-dependencies.yml + secrets: inherit + permissions: + id-token: write + contents: read + with: + region: cn-north-1 + test_repo_name: ${{ needs.OutputEnvVariables.outputs.CWA_GITHUB_TEST_REPO_NAME }} + test_repo_branch: ${{ needs.OutputEnvVariables.outputs.CWA_GITHUB_TEST_REPO_BRANCH }} + github_sha: ${{github.sha}} + s3_integration_bucket: ${{ vars.S3_INTEGRATION_BUCKET_CN }} + terraform_assume_role: ${{ vars.TERRAFORM_AWS_ASSUME_ROLE_CN }} + + EC2NvidiaGPUIntegrationTest: + needs: [ StartLocalStack, GenerateTestMatrix, OutputEnvVariables ] + name: ${{matrix.arrays.testName}} + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + arrays: ${{ fromJson(needs.GenerateTestMatrix.outputs.ec2_gpu_matrix) }} + permissions: + id-token: write + contents: read + steps: + - uses: actions/checkout@v3 + with: + repository: ${{ needs.OutputEnvVariables.outputs.CWA_GITHUB_TEST_REPO_NAME }} + ref: ${{ needs.OutputEnvVariables.outputs.CWA_GITHUB_TEST_REPO_BRANCH }} + + - name: Configure AWS Credentials + uses: aws-actions/configure-aws-credentials@v2 + with: + role-to-assume: ${{ env.TERRAFORM_AWS_ASSUME_ROLE }} + aws-region: us-west-2 + role-duration-seconds: ${{ env.TERRAFORM_AWS_ASSUME_ROLE_DURATION }} + + - name: Echo Test Info + run: echo run on ec2 instance os ${{ matrix.arrays.os }} arc ${{ matrix.arrays.arc }} test dir ${{ matrix.arrays.test_dir }} + + - name: Install Terraform + uses: hashicorp/setup-terraform@v3 + + - name: Verify Terraform version + run: terraform --version + + # nick-fields/retry@v2 starts at base dir + - name: Terraform apply + if: ${{ matrix.arrays.family == 'linux' }} + uses: nick-fields/retry@v2 + with: + max_attempts: 3 + timeout_minutes: 30 + retry_wait_seconds: 5 + command: | + if [ "${{ matrix.arrays.terraform_dir }}" != "" ]; then + cd "${{ matrix.arrays.terraform_dir }}" + else + cd terraform/ec2/linux + fi + + terraform init + if terraform apply --auto-approve \ + -var="ssh_key_value=${PRIVATE_KEY}" \ + 
-var="github_test_repo=${{env.CWA_GITHUB_TEST_REPO_URL}}" \ + -var="test_name=${{ matrix.arrays.os }}" \ + -var="cwa_github_sha=${{ inputs.build_id }}" \ + -var="install_agent=${{ matrix.arrays.installAgentCommand }}" \ + -var="github_test_repo_branch=${{ needs.OutputEnvVariables.outputs.CWA_GITHUB_TEST_REPO_BRANCH }}" \ + -var="ec2_instance_type=${{ matrix.arrays.instanceType }}" \ + -var="user=${{ matrix.arrays.username }}" \ + -var="ami=${{ matrix.arrays.ami }}" \ + -var="ca_cert_path=${{ matrix.arrays.caCertPath }}" \ + -var="arc=${{ matrix.arrays.arc }}" \ + -var="binary_name=${{ matrix.arrays.binaryName }}" \ + -var="local_stack_host_name=${{ needs.StartLocalStack.outputs.local_stack_host_name }}" \ + -var="s3_bucket=${S3_INTEGRATION_BUCKET}" \ + -var="ssh_key_name=${KEY_NAME}" \ + -var="test_dir=${{ matrix.arrays.test_dir }}" ; then terraform destroy -auto-approve + else + terraform destroy -auto-approve && exit 1 + fi + - name: Terraform apply + if: ${{ matrix.arrays.family == 'window' }} + uses: nick-fields/retry@v2 + with: + max_attempts: 3 + timeout_minutes: 30 + retry_wait_seconds: 5 + command: | + if [ "${{ matrix.arrays.terraform_dir }}" != "" ]; then + cd "${{ matrix.arrays.terraform_dir }}" + else + cd terraform/ec2/win + fi + terraform init + if terraform apply --auto-approve \ + -var="ssh_key_value=${PRIVATE_KEY}" \ + -var="ssh_key_name=${KEY_NAME}" \ + -var="github_repo=${{env.CWA_GITHUB_TEST_REPO_URL}}" \ + -var="cwa_github_sha=${{ inputs.build_id }}" \ + -var="ami=${{ matrix.arrays.ami }}" \ + -var="test_dir=${{ matrix.arrays.test_dir }}" \ + -var="ec2_instance_type=${{ matrix.arrays.instanceType }}" \ + -var="github_test_repo=${{env.CWA_GITHUB_TEST_REPO_URL}}" \ + -var="github_test_repo_branch=${{ needs.OutputEnvVariables.outputs.CWA_GITHUB_TEST_REPO_BRANCH }}" \ + -var="s3_bucket=${S3_INTEGRATION_BUCKET}" ; then terraform destroy -auto-approve + else + terraform destroy -auto-approve && exit 1 + fi + #This is here just in case workflow cancel 
+ - name: Terraform destroy + if: ${{ cancelled() || failure() }} + uses: nick-fields/retry@v2 + with: + max_attempts: 3 + timeout_minutes: 8 + retry_wait_seconds: 5 + command: | + if [ "${{ matrix.arrays.terraform_dir }}" != "" ]; then + cd "${{ matrix.arrays.terraform_dir }}" + elif [ "${{ matrix.arrays.os }}" == "window" ]; then + cd terraform/ec2/win + else + cd terraform/ec2/linux + fi + terraform destroy --auto-approve + + EC2LinuxIntegrationTest: + needs: [ StartLocalStack, GenerateTestMatrix, OutputEnvVariables ] + name: 'EC2Linux' + uses: ./.github/workflows/ec2-integration-test.yml + with: + build_id: ${{ inputs.build_id }} + test_dir: terraform/ec2/linux + job_id: ec2-linux-integration-test + test_props: ${{ needs.GenerateTestMatrix.outputs.ec2_linux_matrix }} + test_repo_name: ${{ needs.OutputEnvVariables.outputs.CWA_GITHUB_TEST_REPO_NAME }} + test_repo_url: ${{ needs.OutputEnvVariables.outputs.CWA_GITHUB_TEST_REPO_URL }} + test_repo_branch: ${{ needs.OutputEnvVariables.outputs.CWA_GITHUB_TEST_REPO_BRANCH }} + localstack_host: ${{ needs.StartLocalStack.outputs.local_stack_host_name }} + region: us-west-2 + terraform_assume_role: ${{ vars.TERRAFORM_AWS_ASSUME_ROLE }} + s3_integration_bucket: ${{ vars.S3_INTEGRATION_BUCKET }} + secrets: inherit + + EC2LinuxIntegrationTestITAR: + needs: [ StartLocalStackITAR, GenerateTestMatrix, OutputEnvVariables ] + name: 'EC2LinuxITAR' + uses: ./.github/workflows/ec2-integration-test.yml + with: + build_id: ${{ inputs.build_id }} + test_dir: terraform/ec2/linux + job_id: ec2-linux-integration-test + test_props: ${{needs.GenerateTestMatrix.outputs.ec2_linux_itar_matrix}} + test_repo_name: ${{ needs.OutputEnvVariables.outputs.CWA_GITHUB_TEST_REPO_NAME }} + test_repo_url: ${{ needs.OutputEnvVariables.outputs.CWA_GITHUB_TEST_REPO_URL }} + test_repo_branch: ${{ needs.OutputEnvVariables.outputs.CWA_GITHUB_TEST_REPO_BRANCH }} + localstack_host: ${{needs.StartLocalStackITAR.outputs.local_stack_host_name}} + region: us-gov-east-1 +
terraform_assume_role: ${{ vars.TERRAFORM_AWS_ASSUME_ROLE_ITAR }} + s3_integration_bucket: ${{ vars.S3_INTEGRATION_BUCKET_ITAR }} + secrets: inherit + + EC2LinuxIntegrationTestCN: + needs: [ StartLocalStackCN, GenerateTestMatrix, OutputEnvVariables ] + name: 'EC2LinuxCN' + uses: ./.github/workflows/ec2-integration-test.yml + with: + build_id: ${{ inputs.build_id }} + test_dir: terraform/ec2/linux + job_id: ec2-linux-integration-test + test_props: ${{needs.GenerateTestMatrix.outputs.ec2_linux_china_matrix}} + test_repo_name: ${{ needs.OutputEnvVariables.outputs.CWA_GITHUB_TEST_REPO_NAME }} + test_repo_url: ${{ needs.OutputEnvVariables.outputs.CWA_GITHUB_TEST_REPO_URL }} + test_repo_branch: ${{ needs.OutputEnvVariables.outputs.CWA_GITHUB_TEST_REPO_BRANCH }} + localstack_host: ${{needs.StartLocalStackCN.outputs.local_stack_host_name}} + region: cn-north-1 + terraform_assume_role: ${{ vars.TERRAFORM_AWS_ASSUME_ROLE_CN }} + s3_integration_bucket: ${{ vars.S3_INTEGRATION_BUCKET_CN }} + secrets: inherit + + + LinuxOnPremIntegrationTest: + needs: [StartLocalStack, GenerateTestMatrix, OutputEnvVariables] + name: 'OnpremLinux' + uses: ./.github/workflows/ec2-integration-test.yml + with: + build_id: ${{ inputs.build_id }} + test_dir: terraform/ec2/linux_onprem + job_id: linux-onprem-integration-test + test_props: ${{needs.GenerateTestMatrix.outputs.ec2_linux_onprem_matrix}} + test_repo_name: ${{ needs.OutputEnvVariables.outputs.CWA_GITHUB_TEST_REPO_NAME }} + test_repo_url: ${{ needs.OutputEnvVariables.outputs.CWA_GITHUB_TEST_REPO_URL }} + test_repo_branch: ${{ needs.OutputEnvVariables.outputs.CWA_GITHUB_TEST_REPO_BRANCH }} + localstack_host: ${{needs.StartLocalStack.outputs.local_stack_host_name}} + region: us-west-2 + secrets: inherit + + EC2WinIntegrationTest: + needs: [OutputEnvVariables, GenerateTestMatrix] + name: ${{matrix.arrays.testName}} + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + arrays: ${{ 
fromJson(needs.GenerateTestMatrix.outputs.ec2_windows_matrix) }} + permissions: + id-token: write + contents: read + steps: + - uses: actions/checkout@v3 + with: + repository: ${{ needs.OutputEnvVariables.outputs.CWA_GITHUB_TEST_REPO_NAME }} + ref: ${{ needs.OutputEnvVariables.outputs.CWA_GITHUB_TEST_REPO_BRANCH }} + + - name: Configure AWS Credentials + uses: aws-actions/configure-aws-credentials@v2 + with: + role-to-assume: ${{ env.TERRAFORM_AWS_ASSUME_ROLE }} + aws-region: us-west-2 + role-duration-seconds: ${{ env.TERRAFORM_AWS_ASSUME_ROLE_DURATION }} + + - name: Echo Test Info + run: echo run on ec2 instance os ${{ matrix.arrays.os }} use ssm ${{ matrix.arrays.useSSM }} test ${{ matrix.arrays.test_dir }} + + - name: Install Terraform + uses: hashicorp/setup-terraform@v3 + + - name: Verify Terraform version + run: terraform --version + + # nick-fields/retry@v2 starts at base dir + - name: Terraform apply + uses: nick-fields/retry@v2 + with: + max_attempts: 3 + timeout_minutes: 30 + retry_wait_seconds: 5 + command: | + if [ "${{ matrix.arrays.terraform_dir }}" != "" ]; then + cd "${{ matrix.arrays.terraform_dir }}" + else + cd terraform/ec2/win + fi + terraform init + if terraform apply --auto-approve \ + -var="ec2_instance_type=${{ matrix.arrays.instanceType }}" \ + -var="ssh_key_value=${PRIVATE_KEY}" \ + -var="ssh_key_name=${KEY_NAME}" \ + -var="test_name=${{ matrix.arrays.os }}" \ + -var="cwa_github_sha=${{ inputs.build_id }}" \ + -var="test_dir=${{ matrix.arrays.test_dir }}" \ + -var="ami=${{ matrix.arrays.ami }}" \ + -var="use_ssm=${{ matrix.arrays.useSSM }}" \ + -var="s3_bucket=${S3_INTEGRATION_BUCKET}" ; then + terraform destroy -auto-approve + else + terraform destroy -auto-approve && exit 1 + fi + + #This is here just in case workflow cancel + - name: Terraform destroy + if: ${{ cancelled() || failure() }} + uses: nick-fields/retry@v2 + with: + max_attempts: 3 + timeout_minutes: 8 + retry_wait_seconds: 5 + command: | + if [ "${{ 
matrix.arrays.terraform_dir }}" != "" ]; then + cd "${{ matrix.arrays.terraform_dir }}" + else + cd terraform/ec2/win + fi + terraform destroy --auto-approve + + EC2DarwinIntegrationTest: + needs: [GenerateTestMatrix, OutputEnvVariables] + name: ${{matrix.arrays.testName}} + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + arrays: ${{ fromJson(needs.GenerateTestMatrix.outputs.ec2_mac_matrix) }} + permissions: + id-token: write + contents: read + steps: + - uses: actions/checkout@v3 + with: + repository: ${{ needs.OutputEnvVariables.outputs.CWA_GITHUB_TEST_REPO_NAME }} + ref: ${{ needs.OutputEnvVariables.outputs.CWA_GITHUB_TEST_REPO_BRANCH }} + + - name: Configure AWS Credentials + uses: aws-actions/configure-aws-credentials@v2 + with: + role-to-assume: ${{ env.TERRAFORM_AWS_ASSUME_ROLE }} + aws-region: us-west-2 + role-duration-seconds: ${{ env.TERRAFORM_AWS_ASSUME_ROLE_DURATION }} + + - name: Echo OS + run: echo run on ec2 instance os ${{ matrix.arrays.os }} + + - name: Install Terraform + uses: hashicorp/setup-terraform@v3 + + - name: Verify Terraform version + run: terraform --version + + # nick-fields/retry@v2 starts at base dir + - name: Terraform apply + uses: nick-fields/retry@v2 + with: + max_attempts: 3 + timeout_minutes: 60 + retry_wait_seconds: 5 + command: | + if [ "${{ matrix.arrays.terraform_dir }}" != "" ]; then + cd "${{ matrix.arrays.terraform_dir }}" + else + cd terraform/ec2/mac + fi + terraform init + if terraform apply --auto-approve \ + -var="ssh_key_value=${PRIVATE_KEY}" \ + -var="ssh_key_name=${KEY_NAME}" \ + -var="arc=${{ matrix.arrays.arc }}" \ + -var="ec2_instance_type=${{ matrix.arrays.instanceType }}" \ + -var="cwa_github_sha=${{ inputs.build_id }}" \ + -var="ami=${{ matrix.arrays.ami }}" \ + -var="test_dir=${{ matrix.arrays.test_dir }}" \ + -var="license_manager_arn=${{ env.LICENSE_MANAGER_ARN }}" \ + -var="s3_bucket=${S3_INTEGRATION_BUCKET}"; then + terraform destroy -auto-approve + else + terraform destroy 
-auto-approve && exit 1 + fi + + #This is here just in case workflow cancel + - name: Terraform destroy + if: ${{ cancelled() || failure() }} + uses: nick-fields/retry@v2 + with: + max_attempts: 3 + timeout_minutes: 8 + retry_wait_seconds: 5 + command: | + if [ "${{ matrix.arrays.terraform_dir }}" != "" ]; then + cd "${{ matrix.arrays.terraform_dir }}" + else + cd terraform/ec2/mac + fi + terraform destroy --auto-approve + + StopLocalStack: + name: 'StopLocalStack' + if: ${{ always() && needs.StartLocalStack.result == 'success' }} + needs: [ StartLocalStack, EC2LinuxIntegrationTest, LinuxOnPremIntegrationTest, OutputEnvVariables ] + uses: ./.github/workflows/stop-localstack.yml + secrets: inherit + permissions: + id-token: write + contents: read + with: + region: us-west-2 + test_repo_name: ${{ needs.OutputEnvVariables.outputs.CWA_GITHUB_TEST_REPO_NAME }} + test_repo_branch: ${{ needs.OutputEnvVariables.outputs.CWA_GITHUB_TEST_REPO_BRANCH }} + terraform_assume_role: ${{ vars.TERRAFORM_AWS_ASSUME_ROLE }} + github_sha: ${{inputs.build_id}} + s3_integration_bucket: ${{ vars.S3_INTEGRATION_BUCKET }} + + StopLocalStackITAR: + name: 'StopLocalStackITAR' + if: ${{ always() && needs.StartLocalStackITAR.result == 'success' }} + needs: [ StartLocalStackITAR, EC2LinuxIntegrationTestITAR, OutputEnvVariables ] + uses: ./.github/workflows/stop-localstack.yml + secrets: inherit + permissions: + id-token: write + contents: read + with: + region: us-gov-east-1 + test_repo_name: ${{ needs.OutputEnvVariables.outputs.CWA_GITHUB_TEST_REPO_NAME }} + test_repo_branch: ${{ needs.OutputEnvVariables.outputs.CWA_GITHUB_TEST_REPO_BRANCH }} + terraform_assume_role: ${{ vars.TERRAFORM_AWS_ASSUME_ROLE_ITAR }} + github_sha: ${{inputs.build_id}} + s3_integration_bucket: ${{ vars.S3_INTEGRATION_BUCKET_ITAR }} + + StopLocalStackCN: + name: 'StopLocalStackCN' + if: ${{ always() && needs.StartLocalStackCN.result == 'success' }} + needs: [ StartLocalStackCN, EC2LinuxIntegrationTestCN, 
OutputEnvVariables] + uses: ./.github/workflows/stop-localstack.yml + secrets: inherit + permissions: + id-token: write + contents: read + with: + region: cn-north-1 + test_repo_name: ${{ needs.OutputEnvVariables.outputs.CWA_GITHUB_TEST_REPO_NAME }} + test_repo_branch: ${{ needs.OutputEnvVariables.outputs.CWA_GITHUB_TEST_REPO_BRANCH }} + terraform_assume_role: ${{ vars.TERRAFORM_AWS_ASSUME_ROLE_CN }} + github_sha: ${{inputs.build_id}} + s3_integration_bucket: ${{ vars.S3_INTEGRATION_BUCKET_CN }} + + ECSEC2IntegrationTest: + name: ${{matrix.arrays.testName}} + runs-on: ubuntu-latest + needs: [ GenerateTestMatrix, OutputEnvVariables ] + strategy: + fail-fast: false + matrix: + arrays: ${{ fromJson(needs.GenerateTestMatrix.outputs.ecs_ec2_launch_daemon_matrix) }} + permissions: + id-token: write + contents: read + steps: + - uses: actions/checkout@v3 + with: + repository: ${{ needs.OutputEnvVariables.outputs.CWA_GITHUB_TEST_REPO_NAME }} + ref: ${{ needs.OutputEnvVariables.outputs.CWA_GITHUB_TEST_REPO_BRANCH }} + + - name: Configure AWS Credentials + uses: aws-actions/configure-aws-credentials@v2 + with: + role-to-assume: ${{ env.TERRAFORM_AWS_ASSUME_ROLE }} + aws-region: us-west-2 + role-duration-seconds: ${{ env.TERRAFORM_AWS_ASSUME_ROLE_DURATION }} + + - name: Login ECR + id: login-ecr + uses: aws-actions/amazon-ecr-login@v2 + + - name: Install Terraform + uses: hashicorp/setup-terraform@v3 + + - name: Verify Terraform version + run: terraform --version + + - name: Terraform apply + uses: nick-fields/retry@v2 + with: + max_attempts: 3 + timeout_minutes: 15 + retry_wait_seconds: 5 + command: | + if [ "${{ matrix.arrays.terraform_dir }}" != "" ]; then + cd "${{ matrix.arrays.terraform_dir }}" + else + cd terraform/ecs_ec2/daemon + fi + + terraform init + if terraform apply --auto-approve\ + -var="test_dir=${{ matrix.arrays.test_dir }}"\ + -var="ec2_instance_type=${{ matrix.arrays.instanceType }}" \ + -var="cwagent_image_repo=${{ steps.login-ecr.outputs.registry }}/${{ 
env.ECR_INTEGRATION_TEST_REPO }}"\ + -var="cwagent_image_tag=${{ inputs.build_id }}"\ + -var="ec2_instance_type=${{ matrix.arrays.instanceType }}" \ + -var="metadataEnabled=${{ matrix.arrays.metadataEnabled }}" \ + -var="ami=${{ matrix.arrays.ami }}" ; then + terraform destroy -auto-approve + else + terraform destroy -auto-approve && exit 1 + fi + + - name: Terraform destroy + if: ${{ cancelled() || failure() }} + uses: nick-fields/retry@v2 + with: + max_attempts: 3 + timeout_minutes: 8 + retry_wait_seconds: 5 + command: | + if [ "${{ matrix.arrays.terraform_dir }}" != "" ]; then + cd "${{ matrix.arrays.terraform_dir }}" + else + cd terraform/ecs_ec2/daemon + fi + terraform destroy --auto-approve + + ECSFargateIntegrationTest: + name: ${{matrix.arrays.testName}} + runs-on: ubuntu-latest + needs: [GenerateTestMatrix, OutputEnvVariables] + strategy: + fail-fast: false + matrix: + arrays: ${{ fromJson(needs.GenerateTestMatrix.outputs.ecs_fargate_matrix) }} + permissions: + id-token: write + contents: read + steps: + - uses: actions/checkout@v3 + with: + repository: ${{ needs.OutputEnvVariables.outputs.CWA_GITHUB_TEST_REPO_NAME }} + ref: ${{ needs.OutputEnvVariables.outputs.CWA_GITHUB_TEST_REPO_BRANCH }} + + - name: Configure AWS Credentials + uses: aws-actions/configure-aws-credentials@v2 + with: + role-to-assume: ${{ env.TERRAFORM_AWS_ASSUME_ROLE }} + aws-region: us-west-2 + role-duration-seconds: ${{ env.TERRAFORM_AWS_ASSUME_ROLE_DURATION }} + + - name: Login ECR + id: login-ecr + uses: aws-actions/amazon-ecr-login@v2 + + - name: Install Terraform + uses: hashicorp/setup-terraform@v3 + + - name: Verify Terraform version + run: terraform --version + + - name: Terraform apply + uses: nick-fields/retry@v2 + with: + max_attempts: 3 + timeout_minutes: 15 + retry_wait_seconds: 5 + command: | + if [ "${{ matrix.arrays.terraform_dir }}" != "" ]; then + cd "${{ matrix.arrays.terraform_dir }}" + else + cd terraform/ecs_fargate/linux + fi + + terraform init + if terraform 
apply --auto-approve\ + -var="test_dir=${{ matrix.arrays.test_dir }}"\ + -var="cwagent_image_repo=${{ steps.login-ecr.outputs.registry }}/${{ env.ECR_INTEGRATION_TEST_REPO }}"\ + -var="cwagent_image_tag=${{ inputs.build_id }}"; then + terraform destroy -auto-approve + else + terraform destroy -auto-approve && exit 1 + fi + - name: Terraform destroy + if: ${{ cancelled() || failure() }} + uses: nick-fields/retry@v2 + with: + max_attempts: 3 + timeout_minutes: 8 + retry_wait_seconds: 5 + command: | + if [ "${{ matrix.arrays.terraform_dir }}" != "" ]; then + cd "${{ matrix.arrays.terraform_dir }}" + else + cd terraform/ecs_fargate/linux + fi + terraform destroy --auto-approve + + EKSIntegrationTest: + name: ${{matrix.arrays.testName}} + runs-on: ubuntu-latest + needs: [ GenerateTestMatrix, OutputEnvVariables ] + strategy: + fail-fast: false + matrix: + arrays: ${{ fromJson(needs.GenerateTestMatrix.outputs.eks_daemon_matrix) }} + permissions: + id-token: write + contents: read + steps: + - uses: actions/checkout@v3 + with: + repository: ${{ needs.OutputEnvVariables.outputs.CWA_GITHUB_TEST_REPO_NAME }} + ref: ${{ needs.OutputEnvVariables.outputs.CWA_GITHUB_TEST_REPO_BRANCH }} + + - name: Configure AWS Credentials + uses: aws-actions/configure-aws-credentials@v2 + with: + role-to-assume: ${{ env.TERRAFORM_AWS_ASSUME_ROLE }} + aws-region: us-west-2 + role-duration-seconds: ${{ env.TERRAFORM_AWS_ASSUME_ROLE_DURATION }} + + - name: Login ECR + id: login-ecr + uses: aws-actions/amazon-ecr-login@v2 + + - name: Install Terraform + uses: hashicorp/setup-terraform@v3 + + - name: Verify Terraform version + run: terraform --version + + - name: Terraform apply + uses: nick-fields/retry@v2 + with: + max_attempts: 2 + timeout_minutes: 90 # EKS takes about 20 minutes to spin up a cluster and service on the cluster + retry_wait_seconds: 5 + command: | + if [ "${{ matrix.arrays.terraform_dir }}" != "" ]; then + cd "${{ matrix.arrays.terraform_dir }}" + else + cd terraform/eks/daemon + 
fi + + terraform init + if terraform apply --auto-approve \ + -var="test_dir=${{ matrix.arrays.test_dir }}"\ + -var="cwagent_image_repo=${{ steps.login-ecr.outputs.registry }}/${{ env.ECR_INTEGRATION_TEST_REPO }}" \ + -var="cwagent_image_tag=${{ inputs.build_id }}" \ + -var="ami_type=${{ matrix.arrays.ami }}" \ + -var="instance_type=${{ matrix.arrays.instanceType }}" \ + -var="k8s_version=${{ matrix.arrays.k8sVersion }}"; then + terraform destroy -auto-approve + else + terraform destroy -auto-approve && exit 1 + fi + + - name: Terraform destroy + if: ${{ cancelled() || failure() }} + uses: nick-fields/retry@v2 + with: + max_attempts: 3 + timeout_minutes: 8 + retry_wait_seconds: 5 + command: | + if [ "${{ matrix.arrays.terraform_dir }}" != "" ]; then + cd "${{ matrix.arrays.terraform_dir }}" + else + cd terraform/eks/daemon + fi + terraform destroy --auto-approve + + EKSPrometheusIntegrationTest: + name: ${{matrix.arrays.testName}} + runs-on: ubuntu-latest + needs: [ GenerateTestMatrix, OutputEnvVariables ] + strategy: + fail-fast: false + matrix: + arrays: ${{ fromJson(needs.GenerateTestMatrix.outputs.eks_deployment_matrix) }} + permissions: + id-token: write + contents: read + steps: + - uses: actions/checkout@v3 + with: + repository: ${{ needs.OutputEnvVariables.outputs.CWA_GITHUB_TEST_REPO_NAME }} + ref: ${{ needs.OutputEnvVariables.outputs.CWA_GITHUB_TEST_REPO_BRANCH }} + + - name: Configure AWS Credentials + uses: aws-actions/configure-aws-credentials@v2 + with: + role-to-assume: ${{ env.TERRAFORM_AWS_ASSUME_ROLE }} + aws-region: us-west-2 + role-duration-seconds: ${{ env.TERRAFORM_AWS_ASSUME_ROLE_DURATION }} + + - name: Login ECR + id: login-ecr + uses: aws-actions/amazon-ecr-login@v2 + + - name: Install Terraform + uses: hashicorp/setup-terraform@v3 + + - name: Verify Terraform version + run: terraform --version + + - name: Terraform apply + uses: nick-fields/retry@v2 + with: + max_attempts: 3 + timeout_minutes: 60 # EKS takes about 20 minutes to spin up a 
cluster and service on the cluster + retry_wait_seconds: 5 + command: | + if [ "${{ matrix.arrays.terraform_dir }}" != "" ]; then + cd "${{ matrix.arrays.terraform_dir }}" + else + cd terraform/eks/deployment + fi + + terraform init + if terraform apply --auto-approve \ + -var="test_dir=${{ matrix.arrays.test_dir }}"\ + -var="cwagent_image_repo=${{ steps.login-ecr.outputs.registry }}/${{ env.ECR_INTEGRATION_TEST_REPO }}" \ + -var="cwagent_image_tag=${{ inputs.build_id }}" \ + -var="k8s_version=${{ matrix.arrays.k8s_version }}"; then + terraform destroy -auto-approve + else + terraform destroy -auto-approve && exit 1 + fi + + - name: Terraform destroy + if: ${{ cancelled() || failure() }} + uses: nick-fields/retry@v2 + with: + max_attempts: 3 + timeout_minutes: 8 + retry_wait_seconds: 5 + command: | + if [ "${{ matrix.arrays.terraform_dir }}" != "" ]; then + cd "${{ matrix.arrays.terraform_dir }}" + else + cd terraform/eks/deployment + fi + terraform destroy --auto-approve + + PerformanceTrackingTest: + name: ${{matrix.arrays.testName}} + needs: [GenerateTestMatrix, OutputEnvVariables] + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + arrays: ${{ fromJson(needs.GenerateTestMatrix.outputs.ec2_performance_matrix) }} + permissions: + id-token: write + contents: read + steps: + - uses: actions/checkout@v3 + with: + repository: ${{ needs.OutputEnvVariables.outputs.CWA_GITHUB_TEST_REPO_NAME }} + ref: ${{ needs.OutputEnvVariables.outputs.CWA_GITHUB_TEST_REPO_BRANCH }} + + - name: Configure AWS Credentials + uses: aws-actions/configure-aws-credentials@v2 + with: + role-to-assume: ${{ env.TERRAFORM_AWS_ASSUME_ROLE }} + aws-region: us-west-2 + role-duration-seconds: ${{ env.TERRAFORM_AWS_ASSUME_ROLE_DURATION }} + + - name: Install Terraform + uses: hashicorp/setup-terraform@v3 + + - name: Verify Terraform version + run: terraform --version + + - name: Terraform apply + uses: nick-fields/retry@v2 + with: + max_attempts: 1 + timeout_minutes: 60 + 
retry_wait_seconds: 5 + command: | + cd terraform/performance + terraform init + if terraform apply --auto-approve \ + -var="ssh_key_value=${PRIVATE_KEY}" \ + -var="cwa_github_sha=${{ inputs.build_id }}" \ + -var="ami=${{ matrix.arrays.ami }}" \ + -var="arc=${{ matrix.arrays.arc }}" \ + -var="s3_bucket=${S3_INTEGRATION_BUCKET}" \ + -var="ssh_key_name=${KEY_NAME}" \ + -var="values_per_minute=${{ matrix.arrays.values_per_minute}}"\ + -var="family=${{ matrix.arrays.family}}"\ + -var="test_dir=${{ matrix.arrays.test_dir }}" ; then terraform destroy -auto-approve + else + terraform destroy -auto-approve && exit 1 + fi + + - name: Terraform destroy + if: ${{ cancelled() || failure() }} + uses: nick-fields/retry@v2 + with: + max_attempts: 3 + timeout_minutes: 8 + retry_wait_seconds: 5 + command: cd terraform/performance && terraform destroy --auto-approve + + EC2WinPerformanceTest: + name: ${{matrix.arrays.testName}} + needs: [ GenerateTestMatrix, OutputEnvVariables ] + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + arrays: ${{ fromJson(needs.GenerateTestMatrix.outputs.ec2_windows_performance_matrix) }} + permissions: + id-token: write + contents: read + steps: + - uses: actions/checkout@v3 + with: + repository: ${{ needs.OutputEnvVariables.outputs.CWA_GITHUB_TEST_REPO_NAME }} + ref: ${{ needs.OutputEnvVariables.outputs.CWA_GITHUB_TEST_REPO_BRANCH }} + + - name: Configure AWS Credentials + uses: aws-actions/configure-aws-credentials@v2 + with: + role-to-assume: ${{ env.TERRAFORM_AWS_ASSUME_ROLE }} + aws-region: us-west-2 + role-duration-seconds: ${{ env.TERRAFORM_AWS_ASSUME_ROLE_DURATION }} + + - name: Install Terraform + uses: hashicorp/setup-terraform@v3 + + - name: Verify Terraform version + run: terraform --version + + - name: Terraform apply + uses: nick-fields/retry@v2 + with: + max_attempts: 1 + timeout_minutes: 60 + retry_wait_seconds: 5 + command: | + cd terraform/performance + terraform init + if terraform apply --auto-approve \ + 
-var="ssh_key_value=${PRIVATE_KEY}" \ + -var="cwa_github_sha=${{ inputs.build_id }}" \ + -var="ami=${{ matrix.arrays.ami }}" \ + -var="arc=${{ matrix.arrays.arc }}" \ + -var="s3_bucket=${S3_INTEGRATION_BUCKET}" \ + -var="ssh_key_name=${KEY_NAME}" \ + -var="values_per_minute=${{ matrix.arrays.values_per_minute}}"\ + -var="family=${{ matrix.arrays.family}}"\ + -var="test_dir=${{ matrix.arrays.test_dir }}" ; then terraform destroy -auto-approve + else + terraform destroy -auto-approve && exit 1 + fi + + - name: Terraform destroy + if: ${{ cancelled() || failure() }} + uses: nick-fields/retry@v2 + with: + max_attempts: 3 + timeout_minutes: 8 + retry_wait_seconds: 5 + command: cd terraform/performance && terraform destroy --auto-approve + + StressTrackingTest: + name: ${{matrix.arrays.testName}} + needs: [GenerateTestMatrix, OutputEnvVariables ] + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + arrays: ${{ fromJson(needs.GenerateTestMatrix.outputs.ec2_stress_matrix) }} + permissions: + id-token: write + contents: read + steps: + - uses: actions/checkout@v3 + with: + repository: ${{ needs.OutputEnvVariables.outputs.CWA_GITHUB_TEST_REPO_NAME }} + ref: ${{ needs.OutputEnvVariables.outputs.CWA_GITHUB_TEST_REPO_BRANCH }} + + - name: Configure AWS Credentials + uses: aws-actions/configure-aws-credentials@v2 + with: + role-to-assume: ${{ env.TERRAFORM_AWS_ASSUME_ROLE }} + aws-region: us-west-2 + role-duration-seconds: ${{ env.TERRAFORM_AWS_ASSUME_ROLE_DURATION }} + + - name: Install Terraform + uses: hashicorp/setup-terraform@v3 + + - name: Verify Terraform version + run: terraform --version + + - name: Echo Test Info + run: echo run on ec2 instance os ${{ matrix.arrays.os }} arc ${{ matrix.arrays.arc }} test dir ${{ matrix.arrays.test_dir }} values per minute ${{ matrix.arrays.values_per_minute }} + + - name: Terraform apply + uses: nick-fields/retry@v2 + with: + max_attempts: 1 + timeout_minutes: 60 + retry_wait_seconds: 5 + command: | + cd terraform/stress 
+ terraform init + if terraform apply --auto-approve \ + -var="ssh_key_value=${PRIVATE_KEY}" \ + -var="cwa_github_sha=${{ inputs.build_id }}" \ + -var="ami=${{ matrix.arrays.ami }}" \ + -var="arc=${{ matrix.arrays.arc }}" \ + -var="s3_bucket=${S3_INTEGRATION_BUCKET}" \ + -var="ssh_key_name=${KEY_NAME}" \ + -var="values_per_minute=${{ matrix.arrays.values_per_minute}}"\ + -var="test_dir=${{ matrix.arrays.test_dir }}" ; then terraform destroy -auto-approve + else + terraform destroy -auto-approve && exit 1 + fi + + - name: Terraform destroy + if: ${{ cancelled() || failure() }} + uses: nick-fields/retry@v2 + with: + max_attempts: 3 + timeout_minutes: 8 + retry_wait_seconds: 5 + command: cd terraform/stress && terraform destroy --auto-approve + + EC2WinStressTrackingTest: + name: ${{matrix.arrays.testName}} + needs: [GenerateTestMatrix, OutputEnvVariables] + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + arrays: ${{ fromJson(needs.GenerateTestMatrix.outputs.ec2_windows_stress_matrix) }} + permissions: + id-token: write + contents: read + steps: + - uses: actions/checkout@v3 + with: + repository: ${{ needs.OutputEnvVariables.outputs.CWA_GITHUB_TEST_REPO_NAME }} + ref: ${{ needs.OutputEnvVariables.outputs.CWA_GITHUB_TEST_REPO_BRANCH }} + + - name: Configure AWS Credentials + uses: aws-actions/configure-aws-credentials@v2 + with: + role-to-assume: ${{ env.TERRAFORM_AWS_ASSUME_ROLE }} + aws-region: us-west-2 + role-duration-seconds: ${{ env.TERRAFORM_AWS_ASSUME_ROLE_DURATION }} + + - name: Install Terraform + uses: hashicorp/setup-terraform@v3 + + - name: Verify Terraform version + run: terraform --version + + - name: Echo Test Info + run: echo run on ec2 instance os ${{ matrix.arrays.os }} arc ${{ matrix.arrays.arc }} test dir ${{ matrix.arrays.test_dir }} values per minute ${{ matrix.arrays.values_per_minute }} + + - name: Terraform apply + uses: nick-fields/retry@v2 + with: + max_attempts: 1 + timeout_minutes: 60 + retry_wait_seconds: 5 + command: | 
+ cd terraform/stress + terraform init + if terraform apply --auto-approve \ + -var="ssh_key_value=${PRIVATE_KEY}" \ + -var="cwa_github_sha=${{ inputs.build_id }}" \ + -var="ami=${{ matrix.arrays.ami }}" \ + -var="arc=${{ matrix.arrays.arc }}" \ + -var="s3_bucket=${S3_INTEGRATION_BUCKET}" \ + -var="ssh_key_name=${KEY_NAME}" \ + -var="values_per_minute=${{ matrix.arrays.values_per_minute}}"\ + -var="family=${{ matrix.arrays.family}}"\ + -var="test_dir=${{ matrix.arrays.test_dir }}" ; then terraform destroy -auto-approve + else + terraform destroy -auto-approve && exit 1 + fi + + - name: Terraform destroy + if: ${{ cancelled() || failure() }} + uses: nick-fields/retry@v2 + with: + max_attempts: 3 + timeout_minutes: 8 + retry_wait_seconds: 5 + command: cd terraform/stress && terraform destroy --auto-approve + + GPUEndToEndTest: + name: ${{matrix.arrays.testName}} + needs: [ GenerateTestMatrix, OutputEnvVariables ] + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + arrays: ${{ fromJson(needs.GenerateTestMatrix.outputs.eks_addon_matrix) }} + permissions: + id-token: write + contents: read + steps: + - uses: actions/checkout@v3 + with: + repository: ${{ needs.OutputEnvVariables.outputs.CWA_GITHUB_TEST_REPO_NAME }} + ref: ${{ needs.OutputEnvVariables.outputs.CWA_GITHUB_TEST_REPO_BRANCH }} + + - name: Configure AWS Credentials + uses: aws-actions/configure-aws-credentials@v2 + with: + role-to-assume: ${{ env.TERRAFORM_AWS_ASSUME_ROLE }} + aws-region: us-west-2 + role-duration-seconds: ${{ env.TERRAFORM_AWS_ASSUME_ROLE_DURATION }} + + - name: Install Terraform + uses: hashicorp/setup-terraform@v3 + + - name: Verify Terraform version + run: terraform --version + + - name: Terraform apply and setup + run: | + if [ "${{ matrix.arrays.terraform_dir }}" != "" ]; then + cd "${{ matrix.arrays.terraform_dir }}" + else + cd terraform/eks/addon/gpu + fi + + terraform init + if terraform apply --auto-approve \ + -var="beta=true" \ + -var="ami_type=${{ 
matrix.arrays.ami }}" \ + -var="instance_type=${{ matrix.arrays.instanceType }}" \ + -var="k8s_version=${{ matrix.arrays.k8sVersion }}"; then + echo "Terraform apply successful." + + # Capture the output + echo "Getting EKS cluster name" + EKS_CLUSTER_NAME=$(terraform output -raw eks_cluster_name) + echo "Cluster name is ${EKS_CLUSTER_NAME}" + kubectl create -f https://raw.githubusercontent.com/NVIDIA/k8s-device-plugin/v0.17.0/deployments/static/nvidia-device-plugin.yml + kubectl patch amazoncloudwatchagents -n amazon-cloudwatch cloudwatch-agent --type='json' -p='[{"op": "replace", "path": "/spec/image", "value": ${{ secrets.AWS_ECR_PRIVATE_REGISTRY }}/${{ env.ECR_INTEGRATION_TEST_REPO }}:${{ inputs.build_id }}}]' + # wait nvidia device plugin to be ready + sleep 10 + kubectl apply -f ./gpuBurner.yaml + else + terraform destroy -auto-approve && exit 1 + fi + + - name: Run Go tests with retry + uses: nick-fields/retry@v2 + with: + max_attempts: 5 + timeout_minutes: 60 + retry_wait_seconds: 30 + command: | + if [ "${{ matrix.arrays.terraform_dir }}" != "" ]; then + cd "${{ matrix.arrays.terraform_dir }}" + else + cd terraform/eks/addon/gpu + fi + echo "Getting EKS cluster name" + EKS_CLUSTER_NAME=$(terraform output -raw eks_cluster_name) + echo "Cluster name is ${EKS_CLUSTER_NAME}" + + if go test ${{ matrix.arrays.test_dir }} -eksClusterName ${EKS_CLUSTER_NAME} -computeType=EKS -v -eksDeploymentStrategy=DAEMON -eksGpuType=nvidia -useE2EMetrics; then + echo "Tests passed" + else + echo "Tests failed" + exit 1 + fi + + - name: Terraform destroy + if: always() + uses: nick-fields/retry@v2 + with: + max_attempts: 3 + timeout_minutes: 8 + retry_wait_seconds: 5 + command: | + if [ "${{ matrix.arrays.terraform_dir }}" != "" ]; then + cd "${{ matrix.arrays.terraform_dir }}" + else + cd terraform/eks/addon/gpu + fi + terraform destroy -auto-approve diff --git a/.github/workflows/upload-dependencies.yml b/.github/workflows/upload-dependencies.yml new file mode 100644 index 
0000000000..0e297fee2f --- /dev/null +++ b/.github/workflows/upload-dependencies.yml @@ -0,0 +1,57 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: MIT + +name: Upload Dependencies and Test Repo to S3 + +env: + PRIVATE_KEY: ${{ secrets.AWS_PRIVATE_KEY }} + KEY_NAME: ${{ secrets.KEY_NAME }} + +on: + workflow_call: + inputs: + region: + type: string + test_repo_name: + required: true + type: string + test_repo_branch: + required: true + type: string + github_sha: + required: true + type: string + s3_integration_bucket: + required: true + type: string + terraform_assume_role: + type: string + +jobs: + UploadDependenciesAndTestRepo: + runs-on: ubuntu-latest + steps: + - name: Checkout Code + uses: actions/checkout@v3 + with: + repository: ${{ inputs.test_repo_name }} + ref: ${{ inputs.test_repo_branch }} + + - name: Configure AWS Credentials + uses: aws-actions/configure-aws-credentials@v2 + with: + role-to-assume: ${{ inputs.terraform_assume_role }} + aws-region: ${{ inputs.region }} + + - name: Set up Go + uses: actions/setup-go@v3 + with: + go-version: ~1.22.2 + + - name: Upload Dependencies and Test Repo + run: | + go mod tidy + go mod vendor + mkdir test-repo + tar -czf ./test-repo/amazon-cloudwatch-agent-test.tar.gz --exclude='test-repo' . 
+ aws s3 cp ./test-repo/amazon-cloudwatch-agent-test.tar.gz s3://${{ inputs.s3_integration_bucket }}/integration-test/cloudwatch-agent-test-repo/${{ inputs.github_sha }}.tar.gz --quiet \ No newline at end of file diff --git a/Makefile b/Makefile index 9e5dd23d3f..9bfd55a958 100644 --- a/Makefile +++ b/Makefile @@ -210,7 +210,7 @@ PKG_WITH_DATA_RACE += internal/retryer PKG_WITH_DATA_RACE += internal/tls PKG_WITH_DATA_RACE += plugins/inputs/logfile PKG_WITH_DATA_RACE += plugins/inputs/logfile/tail -PKG_WITH_DATA_RACE += plugins/outputs/cloudwatch +PKG_WITH_DATA_RACE += plugins/outputs/cloudwatch$$ PKG_WITH_DATA_RACE += plugins/processors/awsapplicationsignals PKG_WITH_DATA_RACE += plugins/processors/ec2tagger PKG_WITH_DATA_RACE_PATTERN := $(shell echo '$(PKG_WITH_DATA_RACE)' | tr ' ' '|') diff --git a/RELEASE_NOTES b/RELEASE_NOTES index 2215b65de5..a761171316 100644 --- a/RELEASE_NOTES +++ b/RELEASE_NOTES @@ -1,3 +1,18 @@ +======================================================================== +Amazon CloudWatch Agent 1.300053.0 (2024-02-26) +======================================================================== +Bug Fixes: +* [Related Telemetry] Fix EKS cluster detection when aws-auth config map does not exist +* [Logs] Reduce PutRetentionPolicy calls by checking existing policy +* [Logs] Fix data races in cloudwatchlogs +* [Logs] Only add to cache if log stream is created + +Enhancements: +* [Application Signals] Use an endpoint slice watcher instead of a pod watcher +* [Related Telemetry] Scrape auto scaling group attributes from resource metrics +* [Security] Add Confused Deputy Prevention +* [OpenTelemetry] Bump otel dependencies to v0.115.0/v1.21.0 + ======================================================================== Amazon CloudWatch Agent 1.300052.0 (2024-01-21) ======================================================================== diff --git a/cfg/aws/credentials.go b/cfg/aws/credentials.go index c0867ba258..7701d6b7f3 100644 --- 
a/cfg/aws/credentials.go +++ b/cfg/aws/credentials.go @@ -16,9 +16,11 @@ import ( "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds" "github.com/aws/aws-sdk-go/aws/credentials/stscreds" "github.com/aws/aws-sdk-go/aws/endpoints" + "github.com/aws/aws-sdk-go/aws/request" "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/sts" + "github.com/aws/amazon-cloudwatch-agent/cfg/envconfig" "github.com/aws/amazon-cloudwatch-agent/extension/agenthealth/handler/stats/agent" ) @@ -174,7 +176,7 @@ func (s *stsCredentialProvider) Retrieve() (credentials.Value, error) { func newStsCredentials(c client.ConfigProvider, roleARN string, region string) *credentials.Credentials { regional := &stscreds.AssumeRoleProvider{ - Client: sts.New(c, &aws.Config{ + Client: newStsClient(c, &aws.Config{ Region: aws.String(region), STSRegionalEndpoint: endpoints.RegionalSTSEndpoint, HTTPClient: &http.Client{Timeout: 1 * time.Minute}, @@ -188,7 +190,7 @@ func newStsCredentials(c client.ConfigProvider, roleARN string, region string) * fallbackRegion := getFallbackRegion(region) partitional := &stscreds.AssumeRoleProvider{ - Client: sts.New(c, &aws.Config{ + Client: newStsClient(c, &aws.Config{ Region: aws.String(fallbackRegion), Endpoint: aws.String(getFallbackEndpoint(fallbackRegion)), STSRegionalEndpoint: endpoints.RegionalSTSEndpoint, @@ -203,6 +205,36 @@ func newStsCredentials(c client.ConfigProvider, roleARN string, region string) * return credentials.NewCredentials(&stsCredentialProvider{regional: regional, partitional: partitional}) } +const ( + SourceArnHeaderKey = "x-amz-source-arn" + SourceAccountHeaderKey = "x-amz-source-account" +) + +// newStsClient creates a new STS client with the provided config and options. +// Additionally, if specific environment variables are set, it also appends the confused deputy headers to requests +// made by the client. 
These headers allow resource-based policies to limit the permissions that a service has to +// a specific resource. Note that BOTH environment variables need to contain non-empty values in order for the headers +// to be set. +// +// See https://docs.aws.amazon.com/IAM/latest/UserGuide/confused-deputy.html#cross-service-confused-deputy-prevention +func newStsClient(p client.ConfigProvider, cfgs ...*aws.Config) *sts.STS { + + sourceAccount := os.Getenv(envconfig.AmzSourceAccount) + sourceArn := os.Getenv(envconfig.AmzSourceArn) + + client := sts.New(p, cfgs...) + if sourceAccount != "" && sourceArn != "" { + client.Handlers.Sign.PushFront(func(r *request.Request) { + r.HTTPRequest.Header.Set(SourceArnHeaderKey, sourceArn) + r.HTTPRequest.Header.Set(SourceAccountHeaderKey, sourceAccount) + }) + + log.Printf("I! Found confused deputy header environment variables: source account: %q, source arn: %q", sourceAccount, sourceArn) + } + + return client +} + // The partitional STS endpoint used to fallback when regional STS endpoint is not activated. func getFallbackEndpoint(region string) string { partition := getPartition(region) diff --git a/cfg/aws/credentials_test.go b/cfg/aws/credentials_test.go new file mode 100644 index 0000000000..722590db60 --- /dev/null +++ b/cfg/aws/credentials_test.go @@ -0,0 +1,89 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: MIT + +package aws + +import ( + "testing" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/awstesting/mock" + "github.com/aws/aws-sdk-go/service/sts" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/aws/amazon-cloudwatch-agent/cfg/envconfig" +) + +func TestConfusedDeputyHeaders(t *testing.T) { + tests := []struct { + name string + envSourceArn string + envSourceAccount string + expectedHeaderArn string + expectedHeaderAccount string + }{ + { + name: "unpopulated", + envSourceArn: "", + envSourceAccount: "", + expectedHeaderArn: "", + expectedHeaderAccount: "", + }, + { + name: "both populated", + envSourceArn: "arn:aws:ec2:us-east-1:474668408639:instance/i-08293cd9825754f7c", + envSourceAccount: "539247453986", + expectedHeaderArn: "arn:aws:ec2:us-east-1:474668408639:instance/i-08293cd9825754f7c", + expectedHeaderAccount: "539247453986", + }, + { + name: "only source arn populated", + envSourceArn: "arn:aws:ec2:us-east-1:474668408639:instance/i-08293cd9825754f7c", + envSourceAccount: "", + expectedHeaderArn: "", + expectedHeaderAccount: "", + }, + { + name: "only source account populated", + envSourceArn: "", + envSourceAccount: "539247453986", + expectedHeaderArn: "", + expectedHeaderAccount: "", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + + t.Setenv(envconfig.AmzSourceAccount, tt.envSourceAccount) + t.Setenv(envconfig.AmzSourceArn, tt.envSourceArn) + + client := newStsClient(mock.Session, &aws.Config{ + // These are examples credentials pulled from: + // https://docs.aws.amazon.com/STS/latest/APIReference/API_GetAccessKeyInfo.html + Credentials: credentials.NewStaticCredentials("AKIAIOSFODNN7EXAMPLE", "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY", ""), + Region: aws.String("us-east-1"), + }) + + request, _ := client.AssumeRoleRequest(&sts.AssumeRoleInput{ + // We aren't going to actually make the 
assume role call, we are just going + // to verify the headers are present once signed so the RoleArn and RoleSessionName + // arguments are irrelevant. Fill them out with something so the request is valid. + RoleArn: aws.String("arn:aws:iam::012345678912:role/XXXXXXXX"), + RoleSessionName: aws.String("MockSession"), + }) + + // Headers are generated after the request is signed (but before it's sent) + err := request.Sign() + require.NoError(t, err) + + headerSourceArn := request.HTTPRequest.Header.Get(SourceArnHeaderKey) + assert.Equal(t, tt.expectedHeaderArn, headerSourceArn) + + headerSourceAccount := request.HTTPRequest.Header.Get(SourceAccountHeaderKey) + assert.Equal(t, tt.expectedHeaderAccount, headerSourceAccount) + }) + } + +} diff --git a/cfg/envconfig/envconfig.go b/cfg/envconfig/envconfig.go index 3fd4b637d2..afbf4918de 100644 --- a/cfg/envconfig/envconfig.go +++ b/cfg/envconfig/envconfig.go @@ -32,6 +32,10 @@ const ( CWConfigContent = "CW_CONFIG_CONTENT" CWOtelConfigContent = "CW_OTEL_CONFIG_CONTENT" CWAgentMergedOtelConfig = "CWAGENT_MERGED_OTEL_CONFIG" + + // confused deputy prevention related headers + AmzSourceAccount = "AMZ_SOURCE_ACCOUNT" // populates the "x-amz-source-account" header + AmzSourceArn = "AMZ_SOURCE_ARN" // populates the "x-amz-source-arn" header ) const ( diff --git a/extension/agenthealth/factory.go b/extension/agenthealth/factory.go index e075846c08..cbb012496d 100644 --- a/extension/agenthealth/factory.go +++ b/extension/agenthealth/factory.go @@ -30,6 +30,6 @@ func createDefaultConfig() component.Config { } } -func createExtension(_ context.Context, settings extension.CreateSettings, cfg component.Config) (extension.Extension, error) { +func createExtension(_ context.Context, settings extension.Settings, cfg component.Config) (extension.Extension, error) { return NewAgentHealth(settings.Logger, cfg.(*Config)), nil } diff --git a/extension/agenthealth/factory_test.go b/extension/agenthealth/factory_test.go index 
a36a81f246..b1784492c6 100644 --- a/extension/agenthealth/factory_test.go +++ b/extension/agenthealth/factory_test.go @@ -18,9 +18,9 @@ func TestCreateDefaultConfig(t *testing.T) { assert.NoError(t, componenttest.CheckConfigStruct(cfg)) } -func TestCreateExtension(t *testing.T) { +func TestCreate(t *testing.T) { cfg := &Config{} - got, err := NewFactory().CreateExtension(context.Background(), extensiontest.NewNopCreateSettings(), cfg) + got, err := NewFactory().Create(context.Background(), extensiontest.NewNopSettings(), cfg) assert.NoError(t, err) assert.NotNil(t, got) } diff --git a/extension/agenthealth/handler/useragent/useragent_test.go b/extension/agenthealth/handler/useragent/useragent_test.go index c572718714..05de1dbbf1 100644 --- a/extension/agenthealth/handler/useragent/useragent_test.go +++ b/extension/agenthealth/handler/useragent/useragent_test.go @@ -14,6 +14,7 @@ import ( "github.com/stretchr/testify/assert" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/otelcol" + "go.opentelemetry.io/collector/pipeline" "go.opentelemetry.io/collector/service" "go.opentelemetry.io/collector/service/pipelines" @@ -23,7 +24,6 @@ import ( ) func TestSetComponents(t *testing.T) { - metricsType, _ := component.NewType("metrics") telegrafCPUType, _ := component.NewType(adapter.TelegrafPrefix + "cpu") prometheusType, _ := component.NewType("prometheus") batchType, _ := component.NewType("batch") @@ -31,8 +31,8 @@ func TestSetComponents(t *testing.T) { cloudwatchType, _ := component.NewType("cloudwatch") otelCfg := &otelcol.Config{ Service: service.Config{ - Pipelines: map[component.ID]*pipelines.PipelineConfig{ - component.NewID(metricsType): { + Pipelines: map[pipeline.ID]*pipelines.PipelineConfig{ + pipeline.NewID(pipeline.SignalMetrics): { Receivers: []component.ID{ component.NewID(telegrafCPUType), component.NewID(prometheusType), @@ -108,13 +108,12 @@ func TestAlternateUserAgent(t *testing.T) { } func TestEmf(t *testing.T) { - metricsType, _ 
:= component.NewType("metrics") nopType, _ := component.NewType("nop") awsEMFType, _ := component.NewType("awsemf") otelCfg := &otelcol.Config{ Service: service.Config{ - Pipelines: map[component.ID]*pipelines.PipelineConfig{ - component.NewID(metricsType): { + Pipelines: map[pipeline.ID]*pipelines.PipelineConfig{ + pipeline.NewID(pipeline.SignalMetrics): { Receivers: []component.ID{ component.NewID(nopType), }, @@ -142,8 +141,8 @@ func TestEmf(t *testing.T) { func TestMissingEmfExporterConfig(t *testing.T) { otelCfg := &otelcol.Config{ Service: service.Config{ - Pipelines: map[component.ID]*pipelines.PipelineConfig{ - component.NewID(component.MustNewType("metrics")): { + Pipelines: map[pipeline.ID]*pipelines.PipelineConfig{ + pipeline.NewID(pipeline.SignalMetrics): { Receivers: []component.ID{ component.NewID(component.MustNewType("nop")), }, @@ -170,10 +169,10 @@ func TestJmx(t *testing.T) { jmxOther := "jmxOther" nopType, _ := component.NewType("nop") jmxType, _ := component.NewType(jmx) - pipelineType, _ := component.NewType("pipeline") - pipelineTypeOther, _ := component.NewType("pipelineOther") + pipelineID := pipeline.NewIDWithName(pipeline.SignalMetrics, "pipeline") + pipelineIDOther := pipeline.NewIDWithName(pipeline.SignalMetrics, "pipelineOther") pls := make(pipelines.Config) - pls[component.NewID(pipelineType)] = &pipelines.PipelineConfig{ + pls[pipelineID] = &pipelines.PipelineConfig{ Receivers: []component.ID{ component.NewIDWithName(jmxType, jmx), }, @@ -181,7 +180,7 @@ func TestJmx(t *testing.T) { component.NewID(nopType), }, } - pls[component.NewID(pipelineTypeOther)] = &pipelines.PipelineConfig{ + pls[pipelineIDOther] = &pipelines.PipelineConfig{ Receivers: []component.ID{ component.NewIDWithName(jmxType, jmxOther), }, diff --git a/extension/entitystore/eksInfo_test.go b/extension/entitystore/eksInfo_test.go index c5644a77e3..8b3a8ec0ba 100644 --- a/extension/entitystore/eksInfo_test.go +++ b/extension/entitystore/eksInfo_test.go @@ -169,7 +169,9 
@@ func TestTTLServicePodEnvironmentMapping(t *testing.T) { ServiceName: "service", Environment: "environment", }, - }, time.Microsecond) + }, 500*time.Millisecond) + // this assertion relies on the speed of your computer to get this done before + // the cache evicts the item based on the TTL assert.Equal(t, 1, ei.podToServiceEnvMap.Len()) //starting the ttl cache like we do in code. This will automatically evict expired pods. diff --git a/extension/entitystore/extension_test.go b/extension/entitystore/extension_test.go index 9bb4da8365..6c9cd37708 100644 --- a/extension/entitystore/extension_test.go +++ b/extension/entitystore/extension_test.go @@ -519,7 +519,7 @@ func TestEntityStore_ClearTerminatedPodsFromServiceMap(t *testing.T) { func TestEntityStore_StartPodToServiceEnvironmentMappingTtlCache(t *testing.T) { e := EntityStore{eksInfo: newEKSInfo(zap.NewExample())} e.done = make(chan struct{}) - e.eksInfo.podToServiceEnvMap = setupTTLCacheForTesting(map[string]ServiceEnvironment{}, time.Microsecond) + e.eksInfo.podToServiceEnvMap = setupTTLCacheForTesting(map[string]ServiceEnvironment{}, 500*time.Millisecond) go e.StartPodToServiceEnvironmentMappingTtlCache() assert.Equal(t, 0, e.GetPodServiceEnvironmentMapping().Len()) @@ -546,9 +546,9 @@ func TestEntityStore_StopPodToServiceEnvironmentMappingTtlCache(t *testing.T) { assert.Equal(t, 1, e.GetPodServiceEnvironmentMapping().Len()) time.Sleep(time.Millisecond) - assert.NoError(t, e.Shutdown(nil)) + assert.NoError(t, e.Shutdown(context.TODO())) //cache should be cleared - time.Sleep(time.Second) + time.Sleep(500 * time.Millisecond) assert.Equal(t, 1, e.GetPodServiceEnvironmentMapping().Len()) } @@ -627,7 +627,7 @@ func TestEntityStore_LogMessageDoesNotIncludeResourceInfo(t *testing.T) { metadataprovider: tt.args.metadataProvider, config: config, } - go es.Start(nil, nil) + go es.Start(context.TODO(), nil) time.Sleep(2 * time.Second) logOutput := buf.String() diff --git a/extension/entitystore/factory.go 
b/extension/entitystore/factory.go index 95de9bdb80..6e67e7db1f 100644 --- a/extension/entitystore/factory.go +++ b/extension/entitystore/factory.go @@ -39,7 +39,7 @@ func createDefaultConfig() component.Config { return &Config{} } -func createExtension(_ context.Context, settings extension.CreateSettings, cfg component.Config) (extension.Extension, error) { +func createExtension(_ context.Context, settings extension.Settings, cfg component.Config) (extension.Extension, error) { mutex.Lock() defer mutex.Unlock() entityStore = &EntityStore{ diff --git a/extension/entitystore/factory_test.go b/extension/entitystore/factory_test.go index f0bd0305b5..fd02d8afaa 100644 --- a/extension/entitystore/factory_test.go +++ b/extension/entitystore/factory_test.go @@ -18,9 +18,9 @@ func TestCreateDefaultConfig(t *testing.T) { assert.NoError(t, componenttest.CheckConfigStruct(cfg)) } -func TestCreateExtension(t *testing.T) { +func TestCreate(t *testing.T) { cfg := &Config{} - got, err := NewFactory().CreateExtension(context.Background(), extensiontest.NewNopCreateSettings(), cfg) + got, err := NewFactory().Create(context.Background(), extensiontest.NewNopSettings(), cfg) assert.NoError(t, err) assert.NotNil(t, got) } diff --git a/extension/k8smetadata/README.md b/extension/k8smetadata/README.md new file mode 100644 index 0000000000..c1df67490f --- /dev/null +++ b/extension/k8smetadata/README.md @@ -0,0 +1,13 @@ +# Kubernetes Metadata + +The Kubernetes Metadata utilizes a Kubernetes client to start an informer, which queries the Kubernetes API for EndpointSlices. The EndpointSlices are transformed to reduce storage and periodically updated. + +> Kubernetes' EndpointSlice API provides a way to track network endpoints within a Kubernetes cluster. (https://kubernetes.io/docs/concepts/services-networking/endpoint-slices/) + +These network endpoints expose relevant Kubernetes metadata for service-exposed applications. + +Pod IP → {Workload, Namespace, Node} mappings are stored. 
+- Workload: This is the application's name. +- Namespace: This is the Kubernetes namespace the application is in. +- Node: This is the Kubernetes node the application is in. + diff --git a/extension/k8smetadata/config.go b/extension/k8smetadata/config.go new file mode 100644 index 0000000000..9760ca7b0e --- /dev/null +++ b/extension/k8smetadata/config.go @@ -0,0 +1,12 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: MIT + +package k8smetadata + +import ( + "go.opentelemetry.io/collector/component" +) + +type Config struct{} + +var _ component.Config = (*Config)(nil) diff --git a/extension/k8smetadata/config_test.go b/extension/k8smetadata/config_test.go new file mode 100644 index 0000000000..d0fa985dc8 --- /dev/null +++ b/extension/k8smetadata/config_test.go @@ -0,0 +1,18 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: MIT + +package k8smetadata + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "go.opentelemetry.io/collector/confmap" +) + +func TestUnmarshalDefaultConfig(t *testing.T) { + factory := NewFactory() + cfg := factory.CreateDefaultConfig() + assert.NoError(t, confmap.New().Unmarshal(cfg)) + assert.Equal(t, factory.CreateDefaultConfig(), cfg) +} diff --git a/extension/k8smetadata/extension.go b/extension/k8smetadata/extension.go new file mode 100644 index 0000000000..ebeecad117 --- /dev/null +++ b/extension/k8smetadata/extension.go @@ -0,0 +1,104 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: MIT + +package k8smetadata + +import ( + "context" + "math/rand" + "sync" + "time" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/extension" + "go.uber.org/atomic" + "go.uber.org/zap" + "k8s.io/client-go/informers" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/tools/clientcmd" + + "github.com/aws/amazon-cloudwatch-agent/internal/k8sCommon/k8sclient" +) + +const ( + deletionDelay = 2 * time.Minute + jitterKubernetesAPISeconds = 10 +) + +type KubernetesMetadata struct { + logger *zap.Logger + config *Config + ready atomic.Bool + safeStopCh *k8sclient.SafeChannel + mu sync.Mutex + clientset kubernetes.Interface + endpointSliceWatcher *k8sclient.EndpointSliceWatcher +} + +var _ extension.Extension = (*KubernetesMetadata)(nil) + +func jitterSleep(seconds int) { + jitter := time.Duration(rand.Intn(seconds)) * time.Second + time.Sleep(jitter) +} + +func (e *KubernetesMetadata) Start(_ context.Context, _ component.Host) error { + e.mu.Lock() + defer e.mu.Unlock() + + e.logger.Debug("Starting k8smetadata extension...") + + config, err := clientcmd.BuildConfigFromFlags("", "") + if err != nil { + e.logger.Error("Failed to create config", zap.Error(err)) + } + e.logger.Debug("Kubernetes config built successfully") + + clientset, err := kubernetes.NewForConfig(config) + if err != nil { + e.logger.Error("Failed to create kubernetes client", zap.Error(err)) + } + e.logger.Debug("Kubernetes clientset created successfully") + + jitterSleep(jitterKubernetesAPISeconds) + + timedDeleter := &k8sclient.TimedDeleter{Delay: deletionDelay} + sharedInformerFactory := informers.NewSharedInformerFactory(clientset, 0) + + e.endpointSliceWatcher = k8sclient.NewEndpointSliceWatcher(e.logger, sharedInformerFactory, timedDeleter) + e.safeStopCh = &k8sclient.SafeChannel{Ch: make(chan struct{}), Closed: false} + + e.logger.Debug("Starting EndpointSliceWatcher Run()") + e.endpointSliceWatcher.Run(e.safeStopCh.Ch) + + 
e.logger.Debug("Waiting for EndpointSlice cache to sync...") + e.endpointSliceWatcher.WaitForCacheSync(e.safeStopCh.Ch) + + e.logger.Debug("EndpointSlice cache synced, extension fully started") + e.ready.Store(true) + + return nil +} + +func (e *KubernetesMetadata) Shutdown(_ context.Context) error { + e.mu.Lock() + defer e.mu.Unlock() + e.safeStopCh.Close() + return nil +} + +func (e *KubernetesMetadata) GetPodMetadata(ip string) k8sclient.PodMetadata { + pm, ok := e.endpointSliceWatcher.IPToPodMetadata.Load(ip) + if !ok { + e.logger.Debug("GetPodMetadata: no mapping found for IP", zap.String("ip", ip)) + return k8sclient.PodMetadata{} + } + metadata := pm.(k8sclient.PodMetadata) + e.logger.Debug("GetPodMetadata: found metadata", + zap.String("ip", ip), + zap.String("workload", metadata.Workload), + zap.String("namespace", metadata.Namespace), + zap.String("node", metadata.Node), + ) + return metadata +} diff --git a/extension/k8smetadata/extension_test.go b/extension/k8smetadata/extension_test.go new file mode 100644 index 0000000000..02aa7655d3 --- /dev/null +++ b/extension/k8smetadata/extension_test.go @@ -0,0 +1,39 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: MIT + +package k8smetadata + +import ( + "sync" + "testing" + + "github.com/stretchr/testify/assert" + "go.uber.org/zap" + + "github.com/aws/amazon-cloudwatch-agent/internal/k8sCommon/k8sclient" +) + +func TestKubernetesMetadata_GetPodMetadata(t *testing.T) { + esw := &k8sclient.EndpointSliceWatcher{ + IPToPodMetadata: &sync.Map{}, + } + + const testIP = "1.2.3.4" + expected := k8sclient.PodMetadata{ + Workload: "my-workload", + Namespace: "my-namespace", + Node: "my-node", + } + esw.IPToPodMetadata.Store(testIP, expected) + + kMeta := &KubernetesMetadata{ + logger: zap.NewNop(), + endpointSliceWatcher: esw, + } + + got := kMeta.GetPodMetadata(testIP) + assert.Equal(t, expected, got, "GetPodMetadata should return the stored PodMetadata for %s", testIP) + + unknown := kMeta.GetPodMetadata("9.9.9.9") + assert.Equal(t, k8sclient.PodMetadata{}, unknown, "GetPodMetadata should return empty if the IP is not present") +} diff --git a/extension/k8smetadata/factory.go b/extension/k8smetadata/factory.go new file mode 100644 index 0000000000..25d77edf1b --- /dev/null +++ b/extension/k8smetadata/factory.go @@ -0,0 +1,50 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: MIT + +package k8smetadata + +import ( + "context" + "sync" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/extension" +) + +var ( + TypeStr, _ = component.NewType("k8smetadata") + kubernetesMetadata *KubernetesMetadata + mutex sync.RWMutex +) + +func GetKubernetesMetadata() *KubernetesMetadata { + mutex.RLock() + defer mutex.RUnlock() + if kubernetesMetadata != nil && kubernetesMetadata.ready.Load() { + return kubernetesMetadata + } + return nil +} + +func NewFactory() extension.Factory { + return extension.NewFactory( + TypeStr, + createDefaultConfig, + createExtension, + component.StabilityLevelAlpha, + ) +} + +func createDefaultConfig() component.Config { + return &Config{} +} + +func createExtension(_ context.Context, settings extension.Settings, cfg component.Config) (extension.Extension, error) { + mutex.Lock() + defer mutex.Unlock() + kubernetesMetadata = &KubernetesMetadata{ + logger: settings.Logger, + config: cfg.(*Config), + } + return kubernetesMetadata, nil +} diff --git a/extension/k8smetadata/factory_test.go b/extension/k8smetadata/factory_test.go new file mode 100644 index 0000000000..08a91a80f6 --- /dev/null +++ b/extension/k8smetadata/factory_test.go @@ -0,0 +1,26 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: MIT + +package k8smetadata + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + "go.opentelemetry.io/collector/component/componenttest" + "go.opentelemetry.io/collector/extension/extensiontest" +) + +func TestCreateDefaultConfig(t *testing.T) { + cfg := NewFactory().CreateDefaultConfig() + assert.Equal(t, &Config{}, cfg) + assert.NoError(t, componenttest.CheckConfigStruct(cfg)) +} + +func TestCreate(t *testing.T) { + cfg := &Config{} + got, err := NewFactory().Create(context.Background(), extensiontest.NewNopSettings(), cfg) + assert.NoError(t, err) + assert.NotNil(t, got) +} diff --git a/extension/server/extension.go b/extension/server/extension.go index 4a34b6602f..3937293627 100644 --- a/extension/server/extension.go +++ b/extension/server/extension.go @@ -51,7 +51,7 @@ func NewServer(logger *zap.Logger, config *Config) *Server { // Initialize a new cert watcher with cert/key pair watcher, err := tlsInternal.NewCertWatcher(config.TLSCertPath, config.TLSKeyPath, config.TLSCAPath, logger) if err != nil { - s.logger.Error("failed to initialize cert watcher", zap.Error(err)) + s.logger.Debug("failed to initialize cert watcher", zap.Error(err)) return s } @@ -86,7 +86,7 @@ func (s *Server) Start(context.Context, component.Host) error { go func() { err := s.httpsServer.ListenAndServeTLS("", "") if err != nil { - s.logger.Error("failed to serve and listen", zap.Error(err)) + s.logger.Debug("failed to serve and listen", zap.Error(err)) } }() } diff --git a/extension/server/factory.go b/extension/server/factory.go index 9699366849..2b590e868d 100644 --- a/extension/server/factory.go +++ b/extension/server/factory.go @@ -27,6 +27,6 @@ func createDefaultConfig() component.Config { return &Config{} } -func createExtension(_ context.Context, settings extension.CreateSettings, cfg component.Config) (extension.Extension, error) { +func createExtension(_ context.Context, settings extension.Settings, cfg component.Config) 
(extension.Extension, error) { return NewServer(settings.Logger, cfg.(*Config)), nil } diff --git a/extension/server/factory_test.go b/extension/server/factory_test.go index 0d571fd36e..229ef9d3e8 100644 --- a/extension/server/factory_test.go +++ b/extension/server/factory_test.go @@ -20,16 +20,16 @@ func TestCreateDefaultConfig(t *testing.T) { func TestCreateExtension(t *testing.T) { cfg := &Config{} - got, err := NewFactory().CreateExtension(context.Background(), extensiontest.NewNopCreateSettings(), cfg) + got, err := NewFactory().Create(context.Background(), extensiontest.NewNopSettings(), cfg) assert.NoError(t, err) assert.NotNil(t, got) } -func TestCreateExtensionWithConfig(t *testing.T) { +func TestCreateWithConfig(t *testing.T) { cfg := &Config{ListenAddress: ":8080", TLSCertPath: "./testdata/example-server-cert.pem", TLSKeyPath: "./testdata/example-server-key.pem", TLSCAPath: "./testdata/example-CA-cert.pem"} - got, err := NewFactory().CreateExtension(context.Background(), extensiontest.NewNopCreateSettings(), cfg) + got, err := NewFactory().Create(context.Background(), extensiontest.NewNopSettings(), cfg) assert.NoError(t, err) assert.NotNil(t, got) } diff --git a/go.mod b/go.mod index 249d8e4521..5cbc2f98ec 100644 --- a/go.mod +++ b/go.mod @@ -7,43 +7,45 @@ replace github.com/influxdata/telegraf => github.com/aws/telegraf v0.10.2-0.2025 // Replace with https://github.com/amazon-contributing/opentelemetry-collector-contrib, there are no requirements for all receivers/processors/exporters // to be all replaced since there are some changes that will always be from upstream replace ( - github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awscloudwatchlogsexporter => github.com/amazon-contributing/opentelemetry-collector-contrib/exporter/awscloudwatchlogsexporter v0.0.0-20241216205413-8e059f1441db - github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsemfexporter => 
github.com/amazon-contributing/opentelemetry-collector-contrib/exporter/awsemfexporter v0.0.0-20250116001040-07637c7e4577 - github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsxrayexporter => github.com/amazon-contributing/opentelemetry-collector-contrib/exporter/awsxrayexporter v0.0.0-20241216205413-8e059f1441db + github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awscloudwatchlogsexporter => github.com/amazon-contributing/opentelemetry-collector-contrib/exporter/awscloudwatchlogsexporter v0.0.0-20250225223951-40b46bce466c + github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsemfexporter => github.com/amazon-contributing/opentelemetry-collector-contrib/exporter/awsemfexporter v0.0.0-20250225223951-40b46bce466c + github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsxrayexporter => github.com/amazon-contributing/opentelemetry-collector-contrib/exporter/awsxrayexporter v0.0.0-20250225223951-40b46bce466c ) -replace github.com/open-telemetry/opentelemetry-collector-contrib/extension/awsproxy => github.com/amazon-contributing/opentelemetry-collector-contrib/extension/awsproxy v0.0.0-20241216205413-8e059f1441db - replace ( - github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/awsutil => github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/awsutil v0.0.0-20241216205413-8e059f1441db - github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/containerinsight => github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/containerinsight v0.0.0-20241216205413-8e059f1441db - github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/cwlogs => github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/cwlogs v0.0.0-20241216205413-8e059f1441db - github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/k8s => github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/k8s 
v0.0.0-20241216205413-8e059f1441db - github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/proxy => github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/proxy v0.0.0-20241216205413-8e059f1441db - github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/xray => github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/xray v0.0.0-20241216205413-8e059f1441db - github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal => github.com/amazon-contributing/opentelemetry-collector-contrib/internal/coreinternal v0.0.0-20241216205413-8e059f1441db - github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8sconfig => github.com/amazon-contributing/opentelemetry-collector-contrib/internal/k8sconfig v0.0.0-20241216205413-8e059f1441db - github.com/open-telemetry/opentelemetry-collector-contrib/internal/kubelet => github.com/amazon-contributing/opentelemetry-collector-contrib/internal/kubelet v0.0.0-20241216205413-8e059f1441db - github.com/open-telemetry/opentelemetry-collector-contrib/internal/metadataproviders => github.com/amazon-contributing/opentelemetry-collector-contrib/internal/metadataproviders v0.0.0-20241216205413-8e059f1441db + github.com/amazon-contributing/opentelemetry-collector-contrib/extension/awsmiddleware => github.com/amazon-contributing/opentelemetry-collector-contrib/extension/awsmiddleware v0.0.0-20250225223951-40b46bce466c + github.com/open-telemetry/opentelemetry-collector-contrib/extension/awsproxy => github.com/amazon-contributing/opentelemetry-collector-contrib/extension/awsproxy v0.0.0-20250225223951-40b46bce466c +) +replace ( + github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/awsutil => github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/awsutil v0.0.0-20250225223951-40b46bce466c + github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/containerinsight => 
github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/containerinsight v0.0.0-20250225223951-40b46bce466c + github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/cwlogs => github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/cwlogs v0.0.0-20250225223951-40b46bce466c + github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/k8s => github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/k8s v0.0.0-20250225223951-40b46bce466c + github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/proxy => github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/proxy v0.0.0-20250225223951-40b46bce466c + github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/xray => github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/xray v0.0.0-20250225223951-40b46bce466c + github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal => github.com/amazon-contributing/opentelemetry-collector-contrib/internal/coreinternal v0.0.0-20250225223951-40b46bce466c + github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8sconfig => github.com/amazon-contributing/opentelemetry-collector-contrib/internal/k8sconfig v0.0.0-20250225223951-40b46bce466c + github.com/open-telemetry/opentelemetry-collector-contrib/internal/kubelet => github.com/amazon-contributing/opentelemetry-collector-contrib/internal/kubelet v0.0.0-20250225223951-40b46bce466c + github.com/open-telemetry/opentelemetry-collector-contrib/internal/metadataproviders => github.com/amazon-contributing/opentelemetry-collector-contrib/internal/metadataproviders v0.0.0-20250225223951-40b46bce466c ) replace ( // For clear resource attributes after copy functionality https://github.com/amazon-contributing/opentelemetry-collector-contrib/pull/148 - github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourcetotelemetry => 
github.com/amazon-contributing/opentelemetry-collector-contrib/pkg/resourcetotelemetry v0.0.0-20241216205413-8e059f1441db - github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza => github.com/amazon-contributing/opentelemetry-collector-contrib/pkg/stanza v0.0.0-20241216205413-8e059f1441db + github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourcetotelemetry => github.com/amazon-contributing/opentelemetry-collector-contrib/pkg/resourcetotelemetry v0.0.0-20250225223951-40b46bce466c + github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza => github.com/amazon-contributing/opentelemetry-collector-contrib/pkg/stanza v0.0.0-20250225223951-40b46bce466c // Replace with contrib to revert upstream change https://github.com/open-telemetry/opentelemetry-collector-contrib/pull/20519 - github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus => github.com/amazon-contributing/opentelemetry-collector-contrib/pkg/translator/prometheus v0.0.0-20241216205413-8e059f1441db + github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus => github.com/amazon-contributing/opentelemetry-collector-contrib/pkg/translator/prometheus v0.0.0-20250225223951-40b46bce466c ) -replace github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor => github.com/amazon-contributing/opentelemetry-collector-contrib/processor/resourcedetectionprocessor v0.0.0-20241216205413-8e059f1441db +replace github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor => github.com/amazon-contributing/opentelemetry-collector-contrib/processor/resourcedetectionprocessor v0.0.0-20250225223951-40b46bce466c replace ( - github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver => github.com/amazon-contributing/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver v0.0.0-20241216205413-8e059f1441db - 
github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awscontainerinsightskueuereceiver => github.com/amazon-contributing/opentelemetry-collector-contrib/receiver/awscontainerinsightskueuereceiver v0.0.0-20241216205413-8e059f1441db - github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awsxrayreceiver => github.com/amazon-contributing/opentelemetry-collector-contrib/receiver/awsxrayreceiver v0.0.0-20241216205413-8e059f1441db - github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jmxreceiver => github.com/amazon-contributing/opentelemetry-collector-contrib/receiver/jmxreceiver v0.0.0-20241216205413-8e059f1441db - github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver => github.com/amazon-contributing/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.0.0-20241216205413-8e059f1441db + github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver => github.com/amazon-contributing/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver v0.0.0-20250225223951-40b46bce466c + github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awscontainerinsightskueuereceiver => github.com/amazon-contributing/opentelemetry-collector-contrib/receiver/awscontainerinsightskueuereceiver v0.0.0-20250225223951-40b46bce466c + github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awsxrayreceiver => github.com/amazon-contributing/opentelemetry-collector-contrib/receiver/awsxrayreceiver v0.0.0-20250225223951-40b46bce466c + github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jmxreceiver => github.com/amazon-contributing/opentelemetry-collector-contrib/receiver/jmxreceiver v0.0.0-20250225223951-40b46bce466c + github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver => github.com/amazon-contributing/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.0.0-20250225223951-40b46bce466c ) // 
Temporary fix, pending PR https://github.com/shirou/gopsutil/pull/957 @@ -94,12 +96,12 @@ replace github.com/aws/aws-sdk-go => github.com/aws/aws-sdk-go v1.48.6 require ( github.com/BurntSushi/toml v1.3.2 github.com/Jeffail/gabs v1.4.0 - github.com/amazon-contributing/opentelemetry-collector-contrib/extension/awsmiddleware v0.0.0-20241216205413-8e059f1441db - github.com/aws/aws-sdk-go v1.53.11 + github.com/amazon-contributing/opentelemetry-collector-contrib/extension/awsmiddleware v0.115.0 + github.com/aws/aws-sdk-go v1.55.5 github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.30.2 github.com/bigkevmcd/go-configparser v0.0.0-20200217161103-d137835d2579 github.com/deckarep/golang-set/v2 v2.3.1 - github.com/fsnotify/fsnotify v1.7.0 + github.com/fsnotify/fsnotify v1.8.0 github.com/gin-gonic/gin v1.10.0 github.com/go-kit/log v0.2.1 github.com/go-playground/validator/v10 v10.20.0 @@ -107,92 +109,87 @@ require ( github.com/gobwas/glob v0.2.3 github.com/google/go-cmp v0.6.0 github.com/google/uuid v1.6.0 - github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd // indirect github.com/hashicorp/golang-lru v1.0.2 github.com/influxdata/telegraf v0.0.0-00010101000000-000000000000 github.com/influxdata/wlog v0.0.0-20160411224016-7c63b0a71ef8 - github.com/jellydator/ttlcache/v3 v3.2.0 + github.com/jellydator/ttlcache/v3 v3.3.0 github.com/json-iterator/go v1.1.12 github.com/kardianos/service v1.2.1 // Keep this pinned to v1.2.1. 
v1.2.2 causes the agent to not register as a service on Windows github.com/knadh/koanf v1.5.0 - github.com/knadh/koanf/v2 v2.1.1 + github.com/knadh/koanf/v2 v2.1.2 github.com/kr/pretty v0.3.1 github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c github.com/oklog/run v1.1.0 - github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awscloudwatchlogsexporter v0.103.0 - github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsemfexporter v0.103.0 - github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsxrayexporter v0.103.0 - github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter v0.103.0 - github.com/open-telemetry/opentelemetry-collector-contrib/extension/awsproxy v0.103.0 - github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension v0.103.0 - github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/ecsobserver v0.103.0 - github.com/open-telemetry/opentelemetry-collector-contrib/extension/pprofextension v0.103.0 - github.com/open-telemetry/opentelemetry-collector-contrib/extension/sigv4authextension v0.103.0 - github.com/open-telemetry/opentelemetry-collector-contrib/extension/storage/filestorage v0.103.0 - github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourcetotelemetry v0.103.0 - github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza v0.103.0 - github.com/open-telemetry/opentelemetry-collector-contrib/processor/attributesprocessor v0.103.0 - github.com/open-telemetry/opentelemetry-collector-contrib/processor/cumulativetodeltaprocessor v0.103.0 - github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatorateprocessor v0.103.0 - github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor v0.103.0 - github.com/open-telemetry/opentelemetry-collector-contrib/processor/groupbytraceprocessor v0.103.0 - 
github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sattributesprocessor v0.103.0 - github.com/open-telemetry/opentelemetry-collector-contrib/processor/metricsgenerationprocessor v0.103.0 - github.com/open-telemetry/opentelemetry-collector-contrib/processor/metricstransformprocessor v0.103.0 - github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor v0.103.0 - github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor v0.103.0 - github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourceprocessor v0.103.0 - github.com/open-telemetry/opentelemetry-collector-contrib/processor/spanprocessor v0.103.0 - github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor v0.103.0 - github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor v0.103.0 - github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver v0.103.0 - github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awscontainerinsightskueuereceiver v0.0.0-20241216205413-8e059f1441db - github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awsecscontainermetricsreceiver v0.103.0 - github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awsxrayreceiver v0.103.0 - github.com/open-telemetry/opentelemetry-collector-contrib/receiver/filelogreceiver v0.103.0 - github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver v0.103.0 - github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jmxreceiver v0.103.0 - github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver v0.103.0 - github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.103.0 - github.com/open-telemetry/opentelemetry-collector-contrib/receiver/statsdreceiver v0.103.0 - 
github.com/open-telemetry/opentelemetry-collector-contrib/receiver/tcplogreceiver v0.103.0 - github.com/open-telemetry/opentelemetry-collector-contrib/receiver/udplogreceiver v0.103.0 - github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver v0.103.0 + github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awscloudwatchlogsexporter v0.115.0 + github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsemfexporter v0.115.0 + github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsxrayexporter v0.115.0 + github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter v0.115.0 + github.com/open-telemetry/opentelemetry-collector-contrib/extension/awsproxy v0.115.0 + github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension v0.115.0 + github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/ecsobserver v0.115.0 + github.com/open-telemetry/opentelemetry-collector-contrib/extension/pprofextension v0.115.0 + github.com/open-telemetry/opentelemetry-collector-contrib/extension/sigv4authextension v0.115.0 + github.com/open-telemetry/opentelemetry-collector-contrib/extension/storage/filestorage v0.115.0 + github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourcetotelemetry v0.115.0 + github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza v0.115.0 + github.com/open-telemetry/opentelemetry-collector-contrib/processor/attributesprocessor v0.115.0 + github.com/open-telemetry/opentelemetry-collector-contrib/processor/cumulativetodeltaprocessor v0.115.0 + github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatorateprocessor v0.115.0 + github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor v0.115.0 + github.com/open-telemetry/opentelemetry-collector-contrib/processor/groupbytraceprocessor v0.115.0 + 
github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sattributesprocessor v0.115.0 + github.com/open-telemetry/opentelemetry-collector-contrib/processor/metricsgenerationprocessor v0.115.0 + github.com/open-telemetry/opentelemetry-collector-contrib/processor/metricstransformprocessor v0.115.0 + github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor v0.115.0 + github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor v0.115.0 + github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourceprocessor v0.115.0 + github.com/open-telemetry/opentelemetry-collector-contrib/processor/spanprocessor v0.115.0 + github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor v0.115.0 + github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor v0.115.0 + github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver v0.115.0 + github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awscontainerinsightskueuereceiver v0.0.0-20250225223951-40b46bce466c + github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awsecscontainermetricsreceiver v0.115.0 + github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awsxrayreceiver v0.115.0 + github.com/open-telemetry/opentelemetry-collector-contrib/receiver/filelogreceiver v0.115.0 + github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver v0.115.0 + github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jmxreceiver v0.115.0 + github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver v0.115.0 + github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.115.0 + github.com/open-telemetry/opentelemetry-collector-contrib/receiver/statsdreceiver v0.115.0 + 
github.com/open-telemetry/opentelemetry-collector-contrib/receiver/tcplogreceiver v0.115.0 + github.com/open-telemetry/opentelemetry-collector-contrib/receiver/udplogreceiver v0.115.0 + github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver v0.115.0 github.com/pkg/errors v0.9.1 - github.com/prometheus/client_golang v1.19.1 - github.com/prometheus/common v0.55.0 - github.com/prometheus/prometheus v0.51.2-0.20240405174432-b4a973753c6e + github.com/prometheus/client_golang v1.20.5 + github.com/prometheus/common v0.61.0 + github.com/prometheus/prometheus v0.54.1 github.com/shirou/gopsutil v3.21.11+incompatible github.com/shirou/gopsutil/v3 v3.24.5 - github.com/stretchr/testify v1.9.0 + github.com/stretchr/testify v1.10.0 github.com/xeipuuv/gojsonschema v1.2.0 - go.opentelemetry.io/collector/component v0.103.0 - go.opentelemetry.io/collector/config/configauth v0.103.0 - go.opentelemetry.io/collector/config/confighttp v0.103.0 - go.opentelemetry.io/collector/config/configopaque v1.10.0 - go.opentelemetry.io/collector/config/configtelemetry v0.103.0 - go.opentelemetry.io/collector/config/configtls v0.103.0 - go.opentelemetry.io/collector/confmap v0.103.0 - go.opentelemetry.io/collector/confmap/converter/expandconverter v0.103.0 - go.opentelemetry.io/collector/confmap/provider/envprovider v0.103.0 - go.opentelemetry.io/collector/confmap/provider/fileprovider v0.103.0 - go.opentelemetry.io/collector/consumer v0.103.0 - go.opentelemetry.io/collector/exporter v0.103.0 - go.opentelemetry.io/collector/exporter/debugexporter v0.103.0 - go.opentelemetry.io/collector/exporter/nopexporter v0.103.0 - go.opentelemetry.io/collector/extension v0.103.0 - go.opentelemetry.io/collector/extension/ballastextension v0.103.0 - go.opentelemetry.io/collector/extension/zpagesextension v0.103.0 - go.opentelemetry.io/collector/otelcol v0.103.0 - go.opentelemetry.io/collector/pdata v1.10.0 - go.opentelemetry.io/collector/processor v0.103.0 - 
go.opentelemetry.io/collector/processor/batchprocessor v0.103.0 - go.opentelemetry.io/collector/processor/memorylimiterprocessor v0.103.0 - go.opentelemetry.io/collector/receiver v0.103.0 - go.opentelemetry.io/collector/receiver/nopreceiver v0.103.0 - go.opentelemetry.io/collector/receiver/otlpreceiver v0.103.0 - go.opentelemetry.io/collector/semconv v0.103.0 - go.opentelemetry.io/collector/service v0.103.0 + go.opentelemetry.io/collector/component v0.115.0 + go.opentelemetry.io/collector/config/configauth v0.115.0 + go.opentelemetry.io/collector/config/confighttp v0.115.0 + go.opentelemetry.io/collector/config/configopaque v1.21.0 + go.opentelemetry.io/collector/config/configtls v1.21.0 + go.opentelemetry.io/collector/confmap v1.21.0 + go.opentelemetry.io/collector/confmap/provider/envprovider v1.21.0 + go.opentelemetry.io/collector/confmap/provider/fileprovider v1.21.0 + go.opentelemetry.io/collector/consumer v1.22.0 + go.opentelemetry.io/collector/exporter v0.115.0 + go.opentelemetry.io/collector/exporter/debugexporter v0.115.0 + go.opentelemetry.io/collector/exporter/nopexporter v0.115.0 + go.opentelemetry.io/collector/extension v0.115.0 + go.opentelemetry.io/collector/extension/zpagesextension v0.115.0 + go.opentelemetry.io/collector/otelcol v0.115.0 + go.opentelemetry.io/collector/processor v0.115.0 + go.opentelemetry.io/collector/processor/batchprocessor v0.115.0 + go.opentelemetry.io/collector/processor/memorylimiterprocessor v0.115.0 + go.opentelemetry.io/collector/receiver v0.115.0 + go.opentelemetry.io/collector/receiver/nopreceiver v0.115.0 + go.opentelemetry.io/collector/receiver/otlpreceiver v0.115.0 + go.opentelemetry.io/collector/semconv v0.115.0 + go.opentelemetry.io/collector/service v0.115.0 go.uber.org/atomic v1.11.0 go.uber.org/multierr v1.11.0 go.uber.org/zap v1.27.0 @@ -206,59 +203,75 @@ require ( gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 gopkg.in/yaml.v3 v3.0.1 gotest.tools/v3 v3.2.0 - k8s.io/api v0.30.0 - k8s.io/apimachinery 
v0.30.0 - k8s.io/client-go v0.30.0 - k8s.io/klog/v2 v2.120.1 + k8s.io/api v0.31.3 + k8s.io/apimachinery v0.31.3 + k8s.io/client-go v0.31.3 + k8s.io/klog/v2 v2.130.1 +) + +require ( + github.com/aws/aws-sdk-go-v2 v1.32.6 + go.opentelemetry.io/collector/component/componenttest v0.115.0 + go.opentelemetry.io/collector/config/configtelemetry v0.115.0 + go.opentelemetry.io/collector/confmap/converter/expandconverter v0.113.0 + go.opentelemetry.io/collector/consumer/consumertest v0.115.0 + go.opentelemetry.io/collector/exporter/exportertest v0.115.0 + go.opentelemetry.io/collector/extension/extensiontest v0.115.0 + go.opentelemetry.io/collector/otelcol/otelcoltest v0.115.0 + go.opentelemetry.io/collector/pdata v1.22.0 + go.opentelemetry.io/collector/pipeline v0.115.0 + go.opentelemetry.io/collector/processor/processortest v0.115.0 + go.opentelemetry.io/collector/receiver/receivertest v0.115.0 + go.opentelemetry.io/collector/scraper v0.115.0 ) require ( - cloud.google.com/go v0.112.1 // indirect - cloud.google.com/go/compute/metadata v0.3.0 // indirect + cloud.google.com/go/auth v0.9.5 // indirect + cloud.google.com/go/auth/oauth2adapt v0.2.4 // indirect + cloud.google.com/go/compute/metadata v0.5.2 // indirect collectd.org v0.4.0 // indirect github.com/Azure/azure-sdk-for-go v67.1.0+incompatible // indirect - github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 // indirect - github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.6.0 // indirect - github.com/Azure/azure-sdk-for-go/sdk/internal v1.8.0 // indirect - github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.5.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/azcore v1.14.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7.0 // indirect 
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.3.0 // indirect github.com/Azure/go-autorest/autorest v0.11.29 // indirect github.com/Azure/go-autorest/autorest/adal v0.9.23 // indirect github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect - github.com/Code-Hex/go-generics-cache v1.3.1 // indirect - github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.23.0 // indirect - github.com/IBM/sarama v1.43.2 // indirect + github.com/Code-Hex/go-generics-cache v1.5.1 // indirect + github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.25.0 // indirect + github.com/IBM/sarama v1.43.3 // indirect github.com/Microsoft/go-winio v0.6.2 // indirect - github.com/Microsoft/hcsshim v0.12.0-rc.3 // indirect + github.com/Microsoft/hcsshim v0.12.9 // indirect github.com/Showmax/go-fqdn v1.0.0 // indirect github.com/alecthomas/participle v0.4.1 // indirect github.com/alecthomas/participle/v2 v2.1.1 // indirect - github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9 // indirect + github.com/alecthomas/units v0.0.0-20240626203959-61d1e3462e30 // indirect github.com/amazon-contributing/opentelemetry-collector-contrib/override/aws v0.0.0-20241216205413-8e059f1441db // indirect github.com/antchfx/jsonquery v1.1.5 // indirect - github.com/antchfx/xmlquery v1.3.9 // indirect - github.com/antchfx/xpath v1.2.0 // indirect + github.com/antchfx/xmlquery v1.4.2 // indirect + github.com/antchfx/xpath v1.3.2 // indirect github.com/apache/arrow/go/v12 v12.0.1 // indirect - github.com/apache/arrow/go/v14 v14.0.2 // indirect - github.com/apache/thrift v0.20.0 // indirect + github.com/apache/arrow/go/v15 v15.0.2 // indirect + github.com/apache/thrift v0.21.0 // indirect github.com/armon/go-metrics v0.4.1 // indirect - github.com/aws/aws-sdk-go-v2 v1.27.0 // indirect - github.com/aws/aws-sdk-go-v2/config v1.27.16 // indirect - github.com/aws/aws-sdk-go-v2/credentials v1.17.16 // indirect - 
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.3 // indirect - github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.7 // indirect - github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.7 // indirect - github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.2 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.9 // indirect - github.com/aws/aws-sdk-go-v2/service/sso v1.20.9 // indirect - github.com/aws/aws-sdk-go-v2/service/ssooidc v1.24.3 // indirect - github.com/aws/aws-sdk-go-v2/service/sts v1.28.10 // indirect - github.com/aws/smithy-go v1.20.2 // indirect + github.com/aws/aws-sdk-go-v2/config v1.28.6 // indirect + github.com/aws/aws-sdk-go-v2/credentials v1.17.47 // indirect + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.21 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.25 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.25 // indirect + github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.1 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.6 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.24.7 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.6 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.33.2 // indirect + github.com/aws/smithy-go v1.22.1 // indirect github.com/benbjohnson/clock v1.3.0 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/blang/semver/v4 v4.0.0 // indirect - github.com/bmatcuk/doublestar/v4 v4.6.1 // indirect + github.com/bmatcuk/doublestar/v4 v4.7.1 // indirect github.com/bytedance/sonic v1.11.6 // indirect github.com/bytedance/sonic/loader v0.1.1 // indirect github.com/caio/go-tdigest v3.1.0+incompatible // indirect @@ -268,33 +281,39 @@ require ( github.com/cilium/ebpf v0.11.0 // indirect github.com/cloudwego/base64x 
v0.1.4 // indirect github.com/cloudwego/iasm v0.2.0 // indirect - github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b // indirect + github.com/cncf/xds/go v0.0.0-20240905190251-b4127c9b8d78 // indirect github.com/containerd/cgroups/v3 v3.0.3 // indirect - github.com/containerd/console v1.0.3 // indirect - github.com/containerd/errdefs v0.1.0 // indirect - github.com/containerd/ttrpc v1.2.4 // indirect + github.com/containerd/console v1.0.4 // indirect + github.com/containerd/errdefs v0.3.0 // indirect + github.com/containerd/errdefs/pkg v0.3.0 // indirect + github.com/containerd/log v0.1.0 // indirect + github.com/containerd/ttrpc v1.2.5 // indirect + github.com/containerd/typeurl/v2 v2.2.0 // indirect github.com/coreos/go-semver v0.3.0 // indirect github.com/coreos/go-systemd/v22 v22.5.0 // indirect - github.com/cyphar/filepath-securejoin v0.2.4 // indirect + github.com/cyphar/filepath-securejoin v0.2.5 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/dennwc/varint v1.0.0 // indirect - github.com/digitalocean/godo v1.109.0 // indirect + github.com/digitalocean/godo v1.126.0 // indirect github.com/distribution/reference v0.6.0 // indirect - github.com/docker/docker v26.1.5+incompatible // indirect + github.com/docker/docker v27.3.1+incompatible // indirect github.com/docker/go-connections v0.5.0 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/doclambda/protobufquery v0.0.0-20210317203640-88ffabe06a60 // indirect - github.com/eapache/go-resiliency v1.6.0 // indirect + github.com/eapache/go-resiliency v1.7.0 // indirect github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3 // indirect github.com/eapache/queue v1.1.0 // indirect + github.com/ebitengine/purego v0.8.1 // indirect + github.com/elastic/go-grok v0.3.1 // indirect + github.com/elastic/lunes v0.1.0 // indirect github.com/emicklei/go-restful/v3 v3.11.0 // indirect - github.com/envoyproxy/go-control-plane v0.12.0 // 
indirect - github.com/envoyproxy/protoc-gen-validate v1.0.4 // indirect + github.com/envoyproxy/go-control-plane v0.13.0 // indirect + github.com/envoyproxy/protoc-gen-validate v1.1.0 // indirect github.com/euank/go-kmsg-parser v2.0.0+incompatible // indirect - github.com/evanphx/json-patch v5.6.0+incompatible // indirect github.com/expr-lang/expr v1.16.9 // indirect github.com/fatih/color v1.16.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect + github.com/fxamacker/cbor/v2 v2.7.0 // indirect github.com/gabriel-vasile/mimetype v1.4.3 // indirect github.com/gin-contrib/sse v0.1.0 // indirect github.com/go-logfmt/logfmt v0.6.0 // indirect @@ -306,10 +325,10 @@ require ( github.com/go-openapi/swag v0.22.9 // indirect github.com/go-playground/locales v0.14.1 // indirect github.com/go-playground/universal-translator v0.18.1 // indirect - github.com/go-resty/resty/v2 v2.12.0 // indirect - github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1 // indirect - github.com/go-zookeeper/zk v1.0.3 // indirect - github.com/goccy/go-json v0.10.2 // indirect + github.com/go-resty/resty/v2 v2.13.1 // indirect + github.com/go-viper/mapstructure/v2 v2.2.1 // indirect + github.com/go-zookeeper/zk v1.0.4 // indirect + github.com/goccy/go-json v0.10.4 // indirect github.com/godbus/dbus/v5 v5.1.0 // indirect github.com/gogo/googleapis v1.4.1 // indirect github.com/gogo/protobuf v1.3.2 // indirect @@ -322,15 +341,16 @@ require ( github.com/google/gnostic-models v0.6.8 // indirect github.com/google/go-querystring v1.1.0 // indirect github.com/google/gofuzz v1.2.0 // indirect - github.com/google/s2a-go v0.1.7 // indirect - github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect - github.com/googleapis/gax-go/v2 v2.12.2 // indirect - github.com/gophercloud/gophercloud v1.8.0 // indirect + github.com/google/s2a-go v0.1.8 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.3.4 // indirect + github.com/googleapis/gax-go/v2 v2.13.0 // indirect + 
github.com/gophercloud/gophercloud v1.14.1 // indirect github.com/gorilla/mux v1.8.1 // indirect github.com/gorilla/websocket v1.5.0 // indirect github.com/gosnmp/gosnmp v1.34.0 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect - github.com/hashicorp/consul/api v1.29.1 // indirect + github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0 // indirect + github.com/hashicorp/consul/api v1.30.0 // indirect github.com/hashicorp/cronexpr v1.1.2 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect @@ -342,16 +362,16 @@ require ( github.com/hashicorp/go-uuid v1.0.3 // indirect github.com/hashicorp/go-version v1.7.0 // indirect github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect - github.com/hashicorp/nomad/api v0.0.0-20240306004928-3e7191ccb702 // indirect + github.com/hashicorp/nomad/api v0.0.0-20240717122358-3d93bd3778f3 // indirect github.com/hashicorp/serf v0.10.1 // indirect - github.com/hetznercloud/hcloud-go/v2 v2.6.0 // indirect + github.com/hetznercloud/hcloud-go/v2 v2.13.1 // indirect github.com/iancoleman/strcase v0.3.0 // indirect github.com/imdario/mergo v0.3.16 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/influxdata/line-protocol/v2 v2.2.1 // indirect github.com/influxdata/toml v0.0.0-20190415235208-270119a8ce65 // indirect - github.com/ionos-cloud/sdk-go/v6 v6.1.11 // indirect - github.com/jaegertracing/jaeger v1.58.0 // indirect + github.com/ionos-cloud/sdk-go/v6 v6.2.1 // indirect + github.com/jaegertracing/jaeger v1.62.0 // indirect github.com/jcmturner/aescts/v2 v2.0.0 // indirect github.com/jcmturner/dnsutils/v2 v2.0.0 // indirect github.com/jcmturner/gofork v1.7.6 // indirect @@ -359,26 +379,28 @@ require ( github.com/jcmturner/rpc/v2 v2.0.3 // indirect github.com/jhump/protoreflect v1.8.3-0.20210616212123-6cc1efa697ca // indirect github.com/jmespath/go-jmespath 
v0.4.0 // indirect + github.com/jonboulle/clockwork v0.4.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/jpillora/backoff v1.0.0 // indirect github.com/karrick/godirwalk v1.17.0 // indirect - github.com/klauspost/compress v1.17.9 // indirect + github.com/klauspost/compress v1.17.11 // indirect github.com/klauspost/cpuid/v2 v2.2.7 // indirect github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b // indirect github.com/kr/text v0.2.0 // indirect github.com/kylelemons/godebug v1.1.0 // indirect - github.com/leodido/go-syslog/v4 v4.1.0 // indirect + github.com/leodido/go-syslog/v4 v4.2.0 // indirect github.com/leodido/go-urn v1.4.0 // indirect github.com/leodido/ragel-machinery v0.0.0-20190525184631-5f46317e436b // indirect github.com/lightstep/go-expohisto v1.0.0 // indirect - github.com/linode/linodego v1.33.0 // indirect + github.com/linode/linodego v1.41.0 // indirect github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c // indirect + github.com/magefile/mage v1.15.0 // indirect github.com/magiconair/properties v1.8.7 // indirect github.com/mailru/easyjson v0.7.7 // indirect github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-isatty v0.0.20 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect - github.com/miekg/dns v1.1.58 // indirect + github.com/miekg/dns v1.1.62 // indirect github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/go-homedir v1.1.0 // indirect @@ -392,32 +414,33 @@ require ( github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect github.com/naoina/go-stringutil v0.1.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter v0.103.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/awsutil 
v0.103.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/containerinsight v0.103.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/cwlogs v0.103.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/ecsutil v0.103.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/k8s v0.103.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/metrics v0.103.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/proxy v0.103.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/xray v0.103.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.103.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.103.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter v0.103.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8sconfig v0.103.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/internal/kafka v0.103.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/internal/kubelet v0.103.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/internal/metadataproviders v0.103.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/internal/pdatautil v0.103.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/pkg/batchpersignal v0.103.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl v0.103.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.103.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/pkg/sampling v0.103.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/azure 
v0.103.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.103.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus v0.103.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheusremotewrite v0.103.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin v0.103.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter v0.115.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/awsutil v0.115.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/containerinsight v0.115.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/cwlogs v0.115.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/ecsutil v0.115.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/k8s v0.115.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/metrics v0.115.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/proxy v0.115.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/xray v0.115.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.115.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.115.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter v0.115.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8sconfig v0.115.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/internal/kafka v0.115.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/internal/kubelet v0.115.0 // indirect + 
github.com/open-telemetry/opentelemetry-collector-contrib/internal/metadataproviders v0.115.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/internal/pdatautil v0.115.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/pkg/batchpersignal v0.115.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/pkg/kafka/topic v0.115.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl v0.115.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.115.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/pkg/sampling v0.115.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/azure v0.115.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.115.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus v0.115.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheusremotewrite v0.115.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin v0.115.0 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect github.com/opencontainers/image-spec v1.1.0 // indirect github.com/opencontainers/runc v1.1.14 // indirect @@ -426,110 +449,135 @@ require ( github.com/openshift/api v3.9.0+incompatible // indirect github.com/openshift/client-go v0.0.0-20210521082421-73d9475a9142 // indirect github.com/openzipkin/zipkin-go v0.4.3 // indirect - github.com/ovh/go-ovh v1.4.3 // indirect + github.com/ovh/go-ovh v1.6.0 // indirect github.com/pelletier/go-toml/v2 v2.2.2 // indirect github.com/philhofer/fwd v1.1.1 // indirect github.com/pierrec/lz4/v4 v4.1.21 // indirect github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect + github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect 
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c // indirect github.com/prometheus/client_model v0.6.1 // indirect github.com/prometheus/common/sigv4 v0.1.0 // indirect github.com/prometheus/procfs v0.15.1 // indirect github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect - github.com/relvacode/iso8601 v1.4.0 // indirect - github.com/rogpeppe/go-internal v1.12.0 // indirect - github.com/rs/cors v1.11.0 // indirect + github.com/relvacode/iso8601 v1.6.0 // indirect + github.com/rogpeppe/go-internal v1.13.1 // indirect + github.com/rs/cors v1.11.1 // indirect github.com/safchain/ethtool v0.0.0-20210803160452-9aa261dae9b1 // indirect - github.com/scaleway/scaleway-sdk-go v1.0.0-beta.25 // indirect + github.com/scaleway/scaleway-sdk-go v1.0.0-beta.30 // indirect github.com/seccomp/libseccomp-golang v0.9.2-0.20220502022130-f33da4d89646 // indirect - github.com/shirou/gopsutil/v4 v4.24.5 // indirect - github.com/shoenig/go-m1cpu v0.1.6 // indirect + github.com/shirou/gopsutil/v4 v4.24.11 // indirect github.com/sirupsen/logrus v1.9.3 // indirect github.com/sleepinggenius2/gosmi v0.4.4 // indirect - github.com/spf13/cobra v1.8.0 // indirect + github.com/spf13/cobra v1.8.1 // indirect github.com/spf13/pflag v1.0.5 // indirect github.com/stretchr/objx v0.5.2 // indirect github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 // indirect - github.com/tidwall/gjson v1.10.2 // indirect + github.com/tidwall/gjson v1.14.2 // indirect github.com/tidwall/match v1.1.1 // indirect github.com/tidwall/pretty v1.2.0 // indirect github.com/tidwall/tinylru v1.1.0 // indirect - github.com/tidwall/wal v1.1.7 // indirect + github.com/tidwall/wal v1.1.8 // indirect github.com/tinylib/msgp v1.1.6 // indirect github.com/tklauser/go-sysconf v0.3.12 // indirect github.com/tklauser/numcpus v0.6.1 // indirect github.com/twitchyliquid64/golang-asm v0.15.1 // indirect + 
github.com/ua-parser/uap-go v0.0.0-20240611065828-3a4781585db6 // indirect github.com/ugorji/go/codec v1.2.12 // indirect github.com/valyala/fastjson v1.6.4 // indirect - github.com/vishvananda/netlink v1.2.1-beta.2 // indirect + github.com/vishvananda/netlink v1.3.0 // indirect github.com/vishvananda/netns v0.0.4 // indirect github.com/vjeantet/grok v1.0.1 // indirect github.com/vultr/govultr/v2 v2.17.2 // indirect github.com/wavefronthq/wavefront-sdk-go v0.9.10 // indirect + github.com/x448/float16 v0.8.4 // indirect github.com/xdg-go/pbkdf2 v1.0.0 // indirect github.com/xdg-go/scram v1.1.2 // indirect github.com/xdg-go/stringprep v1.0.4 // indirect github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect github.com/yusufpapurcu/wmi v1.2.4 // indirect - go.etcd.io/bbolt v1.3.10 // indirect + go.etcd.io/bbolt v1.3.11 // indirect go.opencensus.io v0.24.0 // indirect - go.opentelemetry.io/collector v0.103.0 // indirect - go.opentelemetry.io/collector/config/configcompression v1.10.0 // indirect - go.opentelemetry.io/collector/config/configgrpc v0.103.0 // indirect - go.opentelemetry.io/collector/config/confignet v0.103.0 // indirect - go.opentelemetry.io/collector/config/configretry v0.103.0 // indirect - go.opentelemetry.io/collector/config/internal v0.103.0 // indirect - go.opentelemetry.io/collector/confmap/provider/httpprovider v0.103.0 // indirect - go.opentelemetry.io/collector/confmap/provider/httpsprovider v0.103.0 // indirect - go.opentelemetry.io/collector/confmap/provider/yamlprovider v0.103.0 // indirect - go.opentelemetry.io/collector/connector v0.103.0 // indirect - go.opentelemetry.io/collector/extension/auth v0.103.0 // indirect - go.opentelemetry.io/collector/featuregate v1.10.0 // indirect - go.opentelemetry.io/collector/pdata/testdata v0.103.0 // indirect - go.opentelemetry.io/contrib/config v0.7.0 // indirect - 
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.52.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0 // indirect - go.opentelemetry.io/contrib/propagators/b3 v1.27.0 // indirect - go.opentelemetry.io/contrib/zpages v0.52.0 // indirect - go.opentelemetry.io/otel v1.27.0 // indirect - go.opentelemetry.io/otel/bridge/opencensus v1.27.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.27.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.27.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.27.0 // indirect - go.opentelemetry.io/otel/exporters/prometheus v0.49.0 // indirect - go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.27.0 // indirect - go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.27.0 // indirect - go.opentelemetry.io/otel/metric v1.27.0 // indirect - go.opentelemetry.io/otel/sdk v1.27.0 // indirect - go.opentelemetry.io/otel/sdk/metric v1.27.0 // indirect - go.opentelemetry.io/otel/trace v1.27.0 // indirect - go.opentelemetry.io/proto/otlp v1.2.0 // indirect + go.opentelemetry.io/collector v0.115.0 // indirect + go.opentelemetry.io/collector/client v1.21.0 // indirect + go.opentelemetry.io/collector/component/componentstatus v0.115.0 // indirect + go.opentelemetry.io/collector/config/configcompression v1.21.0 // indirect + go.opentelemetry.io/collector/config/configgrpc v0.115.0 // indirect + go.opentelemetry.io/collector/config/confignet v1.21.0 // indirect + go.opentelemetry.io/collector/config/configretry v1.22.0 // indirect + go.opentelemetry.io/collector/config/internal v0.115.0 // indirect + go.opentelemetry.io/collector/confmap/provider/httpprovider v1.21.0 // indirect + 
go.opentelemetry.io/collector/confmap/provider/yamlprovider v1.21.0 // indirect + go.opentelemetry.io/collector/connector v0.115.0 // indirect + go.opentelemetry.io/collector/connector/connectorprofiles v0.115.0 // indirect + go.opentelemetry.io/collector/connector/connectortest v0.115.0 // indirect + go.opentelemetry.io/collector/consumer/consumererror v0.115.0 // indirect + go.opentelemetry.io/collector/consumer/consumererror/consumererrorprofiles v0.115.0 // indirect + go.opentelemetry.io/collector/consumer/consumerprofiles v0.115.0 // indirect + go.opentelemetry.io/collector/exporter/exporterhelper/exporterhelperprofiles v0.115.0 // indirect + go.opentelemetry.io/collector/exporter/exporterprofiles v0.115.0 // indirect + go.opentelemetry.io/collector/extension/auth v0.115.0 // indirect + go.opentelemetry.io/collector/extension/experimental/storage v0.115.0 // indirect + go.opentelemetry.io/collector/extension/extensioncapabilities v0.115.0 // indirect + go.opentelemetry.io/collector/featuregate v1.22.0 // indirect + go.opentelemetry.io/collector/internal/fanoutconsumer v0.115.0 // indirect + go.opentelemetry.io/collector/internal/memorylimiter v0.115.0 // indirect + go.opentelemetry.io/collector/internal/sharedcomponent v0.115.0 // indirect + go.opentelemetry.io/collector/pdata/pprofile v0.115.0 // indirect + go.opentelemetry.io/collector/pdata/testdata v0.115.0 // indirect + go.opentelemetry.io/collector/pipeline/pipelineprofiles v0.115.0 // indirect + go.opentelemetry.io/collector/processor/processorhelper/processorhelperprofiles v0.115.0 // indirect + go.opentelemetry.io/collector/processor/processorprofiles v0.115.0 // indirect + go.opentelemetry.io/collector/receiver/receiverprofiles v0.115.0 // indirect + go.opentelemetry.io/contrib/bridges/otelzap v0.6.0 // indirect + go.opentelemetry.io/contrib/config v0.10.0 // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.56.0 // indirect + 
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0 // indirect + go.opentelemetry.io/contrib/propagators/b3 v1.31.0 // indirect + go.opentelemetry.io/contrib/zpages v0.56.0 // indirect + go.opentelemetry.io/otel v1.32.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.7.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.32.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.32.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.31.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.31.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.31.0 // indirect + go.opentelemetry.io/otel/exporters/prometheus v0.54.0 // indirect + go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.7.0 // indirect + go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.32.0 // indirect + go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.31.0 // indirect + go.opentelemetry.io/otel/log v0.8.0 // indirect + go.opentelemetry.io/otel/metric v1.32.0 // indirect + go.opentelemetry.io/otel/sdk v1.32.0 // indirect + go.opentelemetry.io/otel/sdk/log v0.7.0 // indirect + go.opentelemetry.io/otel/sdk/metric v1.32.0 // indirect + go.opentelemetry.io/otel/trace v1.32.0 // indirect + go.opentelemetry.io/proto/otlp v1.3.1 // indirect golang.org/x/arch v0.8.0 // indirect golang.org/x/crypto v0.31.0 // indirect - golang.org/x/mod v0.17.0 // indirect - golang.org/x/oauth2 v0.21.0 // indirect + golang.org/x/mod v0.21.0 // indirect + golang.org/x/oauth2 v0.24.0 // indirect golang.org/x/term v0.27.0 // indirect - golang.org/x/time v0.5.0 // indirect - golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d // indirect - gonum.org/v1/gonum v0.15.0 // indirect - google.golang.org/api v0.169.0 // indirect - google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de // indirect - 
google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157 // indirect - google.golang.org/grpc v1.65.0 // indirect - google.golang.org/protobuf v1.34.2 // indirect + golang.org/x/time v0.6.0 // indirect + golang.org/x/tools v0.26.0 // indirect + gonum.org/v1/gonum v0.15.1 // indirect + google.golang.org/api v0.199.0 // indirect + google.golang.org/genproto v0.0.0-20240903143218-8af14fe29dc1 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28 // indirect + google.golang.org/grpc v1.68.1 // indirect + google.golang.org/protobuf v1.35.2 // indirect + gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect k8s.io/kubelet v0.30.0 // indirect - k8s.io/utils v0.0.0-20230726121419-3b25d923346b // indirect + k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 // indirect modernc.org/sqlite v1.21.2 // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect - sigs.k8s.io/yaml v1.3.0 // indirect + sigs.k8s.io/yaml v1.4.0 // indirect ) diff --git a/go.sum b/go.sum index a2b8af2db7..e3553220ee 100644 --- a/go.sum +++ b/go.sum @@ -13,30 +13,34 @@ cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKV cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= -cloud.google.com/go v0.112.1 h1:uJSeirPke5UNZHIb4SxfZklVSiWWVqW4oXlETwZziwM= -cloud.google.com/go v0.112.1/go.mod 
h1:+Vbu+Y1UU+I1rjmzeMOb/8RfkKJK2Gyxi1X6jJCZLo4= +cloud.google.com/go v0.115.1 h1:Jo0SM9cQnSkYfp44+v+NQXHpcHqlnRJk2qxh6yvxxxQ= +cloud.google.com/go v0.115.1/go.mod h1:DuujITeaufu3gL68/lOFIirVNJwQeyf5UXyi+Wbgknc= +cloud.google.com/go/auth v0.9.5 h1:4CTn43Eynw40aFVr3GpPqsQponx2jv0BQpjvajsbbzw= +cloud.google.com/go/auth v0.9.5/go.mod h1:Xo0n7n66eHyOWWCnitop6870Ilwo3PiZyodVkkH1xWM= +cloud.google.com/go/auth/oauth2adapt v0.2.4 h1:0GWE/FUsXhf6C+jAkWgYm7X9tK8cuEIfy19DBn6B6bY= +cloud.google.com/go/auth/oauth2adapt v0.2.4/go.mod h1:jC/jOpwFP6JBxhB3P5Rr0a9HLMC/Pe3eaL4NmdvqPtc= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/bigquery v1.59.1 h1:CpT+/njKuKT3CEmswm6IbhNu9u35zt5dO4yPDLW+nG4= -cloud.google.com/go/bigquery v1.59.1/go.mod h1:VP1UJYgevyTwsV7desjzNzDND5p6hZB+Z8gZJN1GQUc= -cloud.google.com/go/compute/metadata v0.3.0 h1:Tz+eQXMEqDIKRsmY3cHTL6FVaynIjX2QxYC4trgAKZc= -cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= +cloud.google.com/go/bigquery v1.62.0 h1:SYEA2f7fKqbSRRBHb7g0iHTtZvtPSPYdXfmqsjpsBwo= +cloud.google.com/go/bigquery v1.62.0/go.mod h1:5ee+ZkF1x/ntgCsFQJAQTM3QkAZOecfCmvxhkJsWRSA= +cloud.google.com/go/compute/metadata v0.5.2 h1:UxK4uu/Tn+I3p2dYWTfiX4wva7aYlKixAHn3fyqngqo= +cloud.google.com/go/compute/metadata v0.5.2/go.mod h1:C66sj2AluDcIqakBq/M8lw8/ybHgOZqin2obFxa/E5k= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod 
h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/iam v1.1.6 h1:bEa06k05IO4f4uJonbB5iAgKTPpABy1ayxaIZV/GHVc= -cloud.google.com/go/iam v1.1.6/go.mod h1:O0zxdPeGBoFdWW3HWmBxJsk0pfvNM/p/qa82rWOGTwI= -cloud.google.com/go/monitoring v1.18.0 h1:NfkDLQDG2UR3WYZVQE8kwSbUIEyIqJUPl+aOQdFH1T4= -cloud.google.com/go/monitoring v1.18.0/go.mod h1:c92vVBCeq/OB4Ioyo+NbN2U7tlg5ZH41PZcdvfc+Lcg= +cloud.google.com/go/iam v1.2.0 h1:kZKMKVNk/IsSSc/udOb83K0hL/Yh/Gcqpz+oAkoIFN8= +cloud.google.com/go/iam v1.2.0/go.mod h1:zITGuWgsLZxd8OwAlX+eMFgZDXzBm7icj1PVTYG766Q= +cloud.google.com/go/monitoring v1.21.0 h1:EMc0tB+d3lUewT2NzKC/hr8cSR9WsUieVywzIHetGro= +cloud.google.com/go/monitoring v1.21.0/go.mod h1:tuJ+KNDdJbetSsbSGTqnaBvbauS5kr3Q/koy3Up6r+4= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= -cloud.google.com/go/pubsub v1.36.1 h1:dfEPuGCHGbWUhaMCTHUFjfroILEkx55iUmKBZTP5f+Y= -cloud.google.com/go/pubsub v1.36.1/go.mod h1:iYjCa9EzWOoBiTdd4ps7QoMtMln5NwaZQpK1hbRfBDE= +cloud.google.com/go/pubsub v1.42.0 h1:PVTbzorLryFL5ue8esTS2BfehUs0ahyNOY9qcd+HMOs= +cloud.google.com/go/pubsub v1.42.0/go.mod h1:KADJ6s4MbTwhXmse/50SebEhE4SmUwHi48z3/dHar1Y= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= @@ -63,14 +67,14 @@ github.com/Azure/azure-pipeline-go v0.2.3 h1:7U9HBg1JFK3jHl5qmo4CTZKFTVgMwdFHMVt github.com/Azure/azure-pipeline-go v0.2.3/go.mod h1:x841ezTBIMG6O3lAcl8ATHnsOPVl2bqk7S3ta6S6u4k= github.com/Azure/azure-sdk-for-go v67.1.0+incompatible 
h1:oziYcaopbnIKfM69DL05wXdypiqfrUKdxUKrKpynJTw= github.com/Azure/azure-sdk-for-go v67.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 h1:E+OJmp2tPvt1W+amx48v1eqbjDYsgN+RzP4q16yV5eM= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1/go.mod h1:a6xsAQUZg+VsS3TJ05SRp524Hs4pZ/AeFSr5ENf0Yjo= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.6.0 h1:U2rTu3Ef+7w9FHKIAXM6ZyqF3UOWJZ12zIm8zECAFfg= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.6.0/go.mod h1:9kIvujWAA58nmPmWB1m23fyWic1kYZMxD9CxaWn4Qpg= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.8.0 h1:jBQA3cKT4L2rWMpgE7Yt3Hwh2aUj8KXjIGLxjHeYNNo= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.8.0/go.mod h1:4OG6tQ9EOP/MT0NMjDlRzWoVFxfu9rN9B2X+tlSVktg= -github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.5.0 h1:MxA59PGoCFb+vCwRQi3PhQEwHj4+r2dhuv9HG+vM7iM= -github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.5.0/go.mod h1:uYt4CfhkJA9o0FN7jfE5minm/i4nUE4MjGUJkzB6Zs8= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.14.0 h1:nyQWyZvwGTvunIMxi1Y9uXkcyr+I7TeNrr/foo4Kpk8= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.14.0/go.mod h1:l38EPgmsp71HHLq9j7De57JcKOWPyhrsW1Awm1JS6K0= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0 h1:tfLQ34V6F7tVSwoTf/4lH5sE0o6eCJuNDTmH09nDpbc= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0/go.mod h1:9kIvujWAA58nmPmWB1m23fyWic1kYZMxD9CxaWn4Qpg= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 h1:ywEEhmNahHBihViHepv3xPBn1663uRv2t2q/ESv9seY= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0/go.mod h1:iZDifYGJTIgIIkYRNWPENUnqx6bJ2xnSDFI2tjwZNuY= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7.0 h1:LkHbJbgF3YyvC53aqYGR+wWQDn2Rdp9AQdGndf9QvY4= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7.0/go.mod h1:QyiQdW4f4/BIfB8ZutZ2s+28RAgfa/pT+zS++ZHyM1I= 
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/v2 v2.0.0 h1:PTFGRSlMKCQelWwxUyYVEUqseBJVemLyqWJjvMyt0do= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/v2 v2.0.0/go.mod h1:LRr2FzBTQlONPPa5HREE5+RjSCTXl7BwOvYOaWTqCaI= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.3.0 h1:bXwSugBiSbgtz7rOtbfGf+woewp4f06orW9OP5BjHLA= @@ -123,16 +127,16 @@ github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbi github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/ClickHouse/clickhouse-go v1.5.4 h1:cKjXeYLNWVJIx2J1K6H2CqyRmfwVJVY1OV1coaaFcI0= github.com/ClickHouse/clickhouse-go v1.5.4/go.mod h1:EaI/sW7Azgz9UATzd5ZdZHRUhHgv5+JMS9NSr2smCJI= -github.com/Code-Hex/go-generics-cache v1.3.1 h1:i8rLwyhoyhaerr7JpjtYjJZUcCbWOdiYO3fZXLiEC4g= -github.com/Code-Hex/go-generics-cache v1.3.1/go.mod h1:qxcC9kRVrct9rHeiYpFWSoW1vxyillCVzX13KZG8dl4= +github.com/Code-Hex/go-generics-cache v1.5.1 h1:6vhZGc5M7Y/YD8cIUcY8kcuQLB4cHR7U+0KMqAA0KcU= +github.com/Code-Hex/go-generics-cache v1.5.1/go.mod h1:qxcC9kRVrct9rHeiYpFWSoW1vxyillCVzX13KZG8dl4= github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= -github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.23.0 h1:yRhWveg9NbJcJYoJL4FoSauT2dxnt4N9MIAJ7tvU/mQ= -github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.23.0/go.mod h1:p2puVVSKjQ84Qb1gzw2XHLs34WQyHTYFZLaVxypAFYs= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.25.0 h1:3c8yed4lgqTt+oTQ+JNMDo+F4xprBf+O/il4ZC0nRLw= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.25.0/go.mod h1:obipzmGjfSjam60XLwGfqUkJsfiheAl+TUjG+4yzyPM= github.com/HdrHistogram/hdrhistogram-go v1.1.0/go.mod h1:yDgFjdqOqDEKOvasDdhWNXYg9BVp4O+o5f6V/ehm6Oo= github.com/HdrHistogram/hdrhistogram-go v1.1.2 
h1:5IcZpTvzydCQeHzK4Ef/D5rrSqwxob0t8PQPMybUNFM= github.com/HdrHistogram/hdrhistogram-go v1.1.2/go.mod h1:yDgFjdqOqDEKOvasDdhWNXYg9BVp4O+o5f6V/ehm6Oo= -github.com/IBM/sarama v1.43.2 h1:HABeEqRUh32z8yzY2hGB/j8mHSzC/HA9zlEjqFNCzSw= -github.com/IBM/sarama v1.43.2/go.mod h1:Kyo4WkF24Z+1nz7xeVUFWIuKVV8RS3wM8mkvPKMdXFQ= +github.com/IBM/sarama v1.43.3 h1:Yj6L2IaNvb2mRBop39N7mmJAHBVY3dTPncr3qGVkxPA= +github.com/IBM/sarama v1.43.3/go.mod h1:FVIRaLrhK3Cla/9FfRF5X9Zua2KpS3SYIXxhac1H+FQ= github.com/Jeffail/gabs v1.4.0 h1://5fYRRTq1edjfIrQGvdkcd22pkYUrHZ5YC/H2GJVAo= github.com/Jeffail/gabs v1.4.0/go.mod h1:6xMvQMK4k33lb7GUUpaAPh6nKMmemQeg5d4gn7/bOXc= github.com/JohnCGriffin/overflow v0.0.0-20211019200055-46fa312c352c h1:RGWPOewvKIROun94nF7v2cua9qP+thov/7M50KEoeSU= @@ -142,8 +146,8 @@ github.com/Mellanox/rdmamap v0.0.0-20191106181932-7c3c4763a6ee h1:atI/FFjXh6hIVl github.com/Mellanox/rdmamap v0.0.0-20191106181932-7c3c4763a6ee/go.mod h1:jDA6v0TUYrFEIAE5uGJ29LQOeONIgMdP4Rkqb8HUnPM= github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= -github.com/Microsoft/hcsshim v0.12.0-rc.3 h1:5GNGrobGs/sN/0nFO21W9k4lFn+iXXZAE8fCZbmdRak= -github.com/Microsoft/hcsshim v0.12.0-rc.3/go.mod h1:WuNfcaYNaw+KpCEsZCIM6HCEmu0c5HfXpi+dDSmveP0= +github.com/Microsoft/hcsshim v0.12.9 h1:2zJy5KA+l0loz1HzEGqyNnjd3fyZA31ZBCGKacp6lLg= +github.com/Microsoft/hcsshim v0.12.9/go.mod h1:fJ0gkFAna6ukt0bLdKB8djt4XIJhF/vEPuoIWYVvZ8Y= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= @@ -176,78 +180,79 @@ github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuy github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod 
h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= -github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9 h1:ez/4by2iGztzR4L0zgAOR8lTQK9VlyBVVd7G4omaOQs= -github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= +github.com/alecthomas/units v0.0.0-20240626203959-61d1e3462e30 h1:t3eaIm0rUkzbrIewtiFmMK5RXHej2XnoXNhxVsAYUfg= +github.com/alecthomas/units v0.0.0-20240626203959-61d1e3462e30/go.mod h1:fvzegU4vN3H1qMT+8wDmzjAcDONcgo2/SZ/TyfdUOFs= github.com/aliyun/alibaba-cloud-sdk-go v1.61.1483 h1:J8HaD+Zpfi1gcel3HCKpoHHEsrcuRrZlSnx7R9SCf5I= github.com/aliyun/alibaba-cloud-sdk-go v1.61.1483/go.mod h1:RcDobYh8k5VP6TNybz9m++gL3ijVI5wueVr0EM10VsU= -github.com/amazon-contributing/opentelemetry-collector-contrib/exporter/awscloudwatchlogsexporter v0.0.0-20241216205413-8e059f1441db h1:M3mCeHB5pVhpGdFkx9qJlBtUURxkg6fEQUNNJdYLk3I= -github.com/amazon-contributing/opentelemetry-collector-contrib/exporter/awscloudwatchlogsexporter v0.0.0-20241216205413-8e059f1441db/go.mod h1:f9JwM/LQdKH8ZbYoH9TO35rmjM6WoTNlhWtYJ3YZucc= -github.com/amazon-contributing/opentelemetry-collector-contrib/exporter/awsemfexporter v0.0.0-20250116001040-07637c7e4577 h1:+OuAFQweLKuerVx77Z4zACf/qFITKdJWXO55K3Y6vAY= -github.com/amazon-contributing/opentelemetry-collector-contrib/exporter/awsemfexporter v0.0.0-20250116001040-07637c7e4577/go.mod h1:LPWBVdTSNbZkk80v6aCUthS59cnR4VauVRdAIE3ifaY= -github.com/amazon-contributing/opentelemetry-collector-contrib/exporter/awsxrayexporter v0.0.0-20241216205413-8e059f1441db h1:L+xPXimFZAtTgougLt59HvivqCyBgh066rv/XbenHUM= -github.com/amazon-contributing/opentelemetry-collector-contrib/exporter/awsxrayexporter v0.0.0-20241216205413-8e059f1441db/go.mod 
h1:GNeNylfr5KMt55XowzSdgbP7z8CkIAfIHtWSd+xxtws= -github.com/amazon-contributing/opentelemetry-collector-contrib/extension/awsmiddleware v0.0.0-20241216205413-8e059f1441db h1:rdQRfKoW60Aomz9Tc8fF0Ld5x/Q96vNz8idPQF/qG9Q= -github.com/amazon-contributing/opentelemetry-collector-contrib/extension/awsmiddleware v0.0.0-20241216205413-8e059f1441db/go.mod h1:/RaNSxxO06niapGT00snMdgFfjjjW/kV3TZGX8kHuwM= -github.com/amazon-contributing/opentelemetry-collector-contrib/extension/awsproxy v0.0.0-20241216205413-8e059f1441db h1:hz8QaW7a5RhFo5/9XRACEcEtEp/D6xElN9jW092NU3o= -github.com/amazon-contributing/opentelemetry-collector-contrib/extension/awsproxy v0.0.0-20241216205413-8e059f1441db/go.mod h1:hRZt1DsvoLDIYBwjFvjwg/9IkaBXeCPG0QI57wbj98Q= -github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/awsutil v0.0.0-20241216205413-8e059f1441db h1:q+FeNooNk9UHEkwGqQB++KanhE0Plu9yDYtNxSN4i7w= -github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/awsutil v0.0.0-20241216205413-8e059f1441db/go.mod h1:YL1Y62PxJ7dem1ZBUqCfvbnePaGr5p7DTSyOXSCi6O4= -github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/containerinsight v0.0.0-20241216205413-8e059f1441db h1:xnj2m4/8b/B84g6AkH5fxhuVvuyeCun7OohdcFd396U= -github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/containerinsight v0.0.0-20241216205413-8e059f1441db/go.mod h1:LT+qAyMutoADv2qezO+vkm/BkxR88qEfXdF2d13mV+E= -github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/cwlogs v0.0.0-20241216205413-8e059f1441db h1:zlFQw3BjI/5iYOmfvec3tzD9uO7k5IcYSAYvJpD8iuU= -github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/cwlogs v0.0.0-20241216205413-8e059f1441db/go.mod h1:SkscNdWANcuDJ7PkjS5wurSTAuY69nqP0I+cEVY9Ryw= -github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/k8s v0.0.0-20241216205413-8e059f1441db h1:hKeTI5bm2QKvFrsZDI4MNmj5Q86rGdxlucKwTF+2MzY= 
-github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/k8s v0.0.0-20241216205413-8e059f1441db/go.mod h1:/TOECDME2jYRPY21CrpTX2eMADJdkmBFBXc1lV/nRZA= -github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/proxy v0.0.0-20241216205413-8e059f1441db h1:TTp+wkHzVY6SHL6Cj4O7XnAxLL7iYf7rZ2pDx6ILQgk= -github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/proxy v0.0.0-20241216205413-8e059f1441db/go.mod h1:J08A2gx8VFQfuoBiEfZ6uHIkMtVLd0OuRe5pP88b3I0= -github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/xray v0.0.0-20241216205413-8e059f1441db h1:zeZ/RfbeKYNUzglQs3JcXOkeHMndO8mCUfoNddTJnW0= -github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/xray v0.0.0-20241216205413-8e059f1441db/go.mod h1:+w647+1nLYvZWdk24gZWvdl/kFowbe2iDyISXLDYdmQ= -github.com/amazon-contributing/opentelemetry-collector-contrib/internal/coreinternal v0.0.0-20241216205413-8e059f1441db h1:y7Fd6vE3G6bxB/S6a1yli54aA2hLNk7SxzjJ6X3H9iU= -github.com/amazon-contributing/opentelemetry-collector-contrib/internal/coreinternal v0.0.0-20241216205413-8e059f1441db/go.mod h1:Ai4BsM7C05bEQYO2O272S1LmsyIhO5r0iLmXF5NN5so= -github.com/amazon-contributing/opentelemetry-collector-contrib/internal/k8sconfig v0.0.0-20241216205413-8e059f1441db h1:GFcv7W5ZCkQ7JZd2rqGXmu8IQJNQux1L0e+NWETnk3c= -github.com/amazon-contributing/opentelemetry-collector-contrib/internal/k8sconfig v0.0.0-20241216205413-8e059f1441db/go.mod h1:VS66oUydCMwiWl1BFmLs7iNy4lGsfVYsriXr/d1fpAk= -github.com/amazon-contributing/opentelemetry-collector-contrib/internal/kubelet v0.0.0-20241216205413-8e059f1441db h1:USnGN4kAHNum+M14jp7HOGe8IZIw+LrA668XyiL3UPk= -github.com/amazon-contributing/opentelemetry-collector-contrib/internal/kubelet v0.0.0-20241216205413-8e059f1441db/go.mod h1:4qvmHiXPOkOXJdpmmxMqprb2BXxOGPgOG45BwLdipUM= -github.com/amazon-contributing/opentelemetry-collector-contrib/internal/metadataproviders v0.0.0-20241216205413-8e059f1441db 
h1:mYtRMWfhv2wGWFhA/3vsX1DwUKgrLSC21a/NZR69fTQ= -github.com/amazon-contributing/opentelemetry-collector-contrib/internal/metadataproviders v0.0.0-20241216205413-8e059f1441db/go.mod h1:l/vd01pHoBByPjOL0xXK7Qg3+qIpPb0RGcpORNciJJM= +github.com/amazon-contributing/opentelemetry-collector-contrib/exporter/awscloudwatchlogsexporter v0.0.0-20250225223951-40b46bce466c h1:nBs2sLiADY50ZUWFiKKAEWGXf90lCKo3lz+OMGSgdcM= +github.com/amazon-contributing/opentelemetry-collector-contrib/exporter/awscloudwatchlogsexporter v0.0.0-20250225223951-40b46bce466c/go.mod h1:xdbVYSrv7X2GXPKfEeWYhCPkC5ch4h4JKn6uzn5ytqg= +github.com/amazon-contributing/opentelemetry-collector-contrib/exporter/awsemfexporter v0.0.0-20250225223951-40b46bce466c h1:okYla9HcZGef/6GCnBFseok8fVM8MWrvKYd6Pf3PpSI= +github.com/amazon-contributing/opentelemetry-collector-contrib/exporter/awsemfexporter v0.0.0-20250225223951-40b46bce466c/go.mod h1:4vjaqZfmvaZ0rj37LrPAwQpo/sgNLA+H65eUN0U+3fk= +github.com/amazon-contributing/opentelemetry-collector-contrib/exporter/awsxrayexporter v0.0.0-20250225223951-40b46bce466c h1:0mjNGkuv04wIgJ2DdssTivhax40q9Dur7m59u1q1kLU= +github.com/amazon-contributing/opentelemetry-collector-contrib/exporter/awsxrayexporter v0.0.0-20250225223951-40b46bce466c/go.mod h1:MeHyDYE/w6i9P0HNCql6L+/CJyCgitCnyk0MLfSc+9g= +github.com/amazon-contributing/opentelemetry-collector-contrib/extension/awsmiddleware v0.0.0-20250225223951-40b46bce466c h1:U9NOathV45gdIHMvCWJXo8/romtfJpiqlWD+z52QLLs= +github.com/amazon-contributing/opentelemetry-collector-contrib/extension/awsmiddleware v0.0.0-20250225223951-40b46bce466c/go.mod h1:0iKQnuCWzB/79ZmfGSVrX+0MiLEQiivaziieLQSP6cM= +github.com/amazon-contributing/opentelemetry-collector-contrib/extension/awsproxy v0.0.0-20250225223951-40b46bce466c h1:vwoskKLH2YVu6kH80FWSsTOvLt5dyBCnnaMuX5SNiIo= +github.com/amazon-contributing/opentelemetry-collector-contrib/extension/awsproxy v0.0.0-20250225223951-40b46bce466c/go.mod h1:e/jLBoFSMhTOjmXiLvawbpjJwKG+UgWqS1tC02t/9pk= 
+github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/awsutil v0.0.0-20250225223951-40b46bce466c h1:NWzqw0DCm9qMg0mYIGJiZ4unVwLSihlqtqXtFH8LAiY= +github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/awsutil v0.0.0-20250225223951-40b46bce466c/go.mod h1:NwKx37vwAIGpgEqo5v1AdZS8UKV0e2lX/yUTDgQjrls= +github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/containerinsight v0.0.0-20250225223951-40b46bce466c h1:pSyAKMMtIFkUlk3p3yNP8EmpcVUjA1lEnzf3pyC0Xw0= +github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/containerinsight v0.0.0-20250225223951-40b46bce466c/go.mod h1:4HebVM9TmMpsxZAXLX5om998dTm1JUndjcpmhrnGkx4= +github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/cwlogs v0.0.0-20250225223951-40b46bce466c h1:+fXYl+APm6Zl2t2GH1MZjJLwYW5nCFpyHYGpmCiMU1k= +github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/cwlogs v0.0.0-20250225223951-40b46bce466c/go.mod h1:sCx+2x6y1jjWGcXbgyP97Q+Himx84LlPZ9yWLgcV4vo= +github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/k8s v0.0.0-20250225223951-40b46bce466c h1:ubwzwKLO0CGRCz5Eitu2a+vWYgs64EFljsshGMD3Dis= +github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/k8s v0.0.0-20250225223951-40b46bce466c/go.mod h1:kxuPS8JKnLVWie4PnL8iCoFx/mr89yR4VYFvJhlQfpY= +github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/proxy v0.0.0-20250225223951-40b46bce466c h1:ScdJbwMFelWZJzIKr1V6qjGxM8lMHInVbqFRQzXWROY= +github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/proxy v0.0.0-20250225223951-40b46bce466c/go.mod h1:jNjrbc2MpIBzlaEnOVdlxniDzIUgzD80fm6wkBs0flw= +github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/xray v0.0.0-20250225223951-40b46bce466c h1:R9mNRPbPnKmIORMltSvxh02LnwJlujIvbnSL44vvij0= +github.com/amazon-contributing/opentelemetry-collector-contrib/internal/aws/xray 
v0.0.0-20250225223951-40b46bce466c/go.mod h1:wrCFYGt2D/jFRN3Q94vEFVJTv7DxmDM07aHN7b74KeU= +github.com/amazon-contributing/opentelemetry-collector-contrib/internal/coreinternal v0.0.0-20250225223951-40b46bce466c h1:Tqtfs7EkihaIeyVEfG2EkIkUXl1Ft5AzBnPLjIpwdKM= +github.com/amazon-contributing/opentelemetry-collector-contrib/internal/coreinternal v0.0.0-20250225223951-40b46bce466c/go.mod h1:r5/40YO1eSP5ZreOmRzVOUtDr7YG39ZIUcVjHd+9Izc= +github.com/amazon-contributing/opentelemetry-collector-contrib/internal/k8sconfig v0.0.0-20250225223951-40b46bce466c h1:D7QbMvlVZSn4NvkZsJKljnJCc4vNCv1Rf+67VrqH2hE= +github.com/amazon-contributing/opentelemetry-collector-contrib/internal/k8sconfig v0.0.0-20250225223951-40b46bce466c/go.mod h1:Hp9uSq3qNJqdxu24u7RWyuPT9x1GgEUSx9US1LLeLi0= +github.com/amazon-contributing/opentelemetry-collector-contrib/internal/kubelet v0.0.0-20250225223951-40b46bce466c h1:knIaRqu5bwQR+waO3gveFruf9PXNqxSN7gxv39xOClw= +github.com/amazon-contributing/opentelemetry-collector-contrib/internal/kubelet v0.0.0-20250225223951-40b46bce466c/go.mod h1:DeEH8iR0kq3UaS4NZowwJnOYWGF09qtQ6kipUSFGKjI= +github.com/amazon-contributing/opentelemetry-collector-contrib/internal/metadataproviders v0.0.0-20250225223951-40b46bce466c h1:ReDQl2rgqcbvx6khwB8P2wXNyh0d/3cUPWc7FmaGQyY= +github.com/amazon-contributing/opentelemetry-collector-contrib/internal/metadataproviders v0.0.0-20250225223951-40b46bce466c/go.mod h1:cKGsnFWzTlbvm1m7J6d4hchm18LM/h7BCdyWj+sO81M= github.com/amazon-contributing/opentelemetry-collector-contrib/override/aws v0.0.0-20241216205413-8e059f1441db h1:Lqq98S/zDfQ+y0OJu9EIwdmgFf5gCJ1chDZJWqpydVs= github.com/amazon-contributing/opentelemetry-collector-contrib/override/aws v0.0.0-20241216205413-8e059f1441db/go.mod h1:t/hYoRTnlPuRjh8y0BwVGgNvNIXpU2QJME5YVppUUHQ= -github.com/amazon-contributing/opentelemetry-collector-contrib/pkg/resourcetotelemetry v0.0.0-20241216205413-8e059f1441db h1:cHgCAM78RpGlltSVJ905LD/JZqYfEpc4Gb6xZvPh50U= 
-github.com/amazon-contributing/opentelemetry-collector-contrib/pkg/resourcetotelemetry v0.0.0-20241216205413-8e059f1441db/go.mod h1:Rr5b3hr6Jy9w/zTjsOl3vcyDDusc90P+iGdOd0UCYOo= -github.com/amazon-contributing/opentelemetry-collector-contrib/pkg/stanza v0.0.0-20241216205413-8e059f1441db h1:eSQHA4NrEITpzRdsu5vDJO2Cuuu8Dn8dr0augxc+Oyk= -github.com/amazon-contributing/opentelemetry-collector-contrib/pkg/stanza v0.0.0-20241216205413-8e059f1441db/go.mod h1:2NSghK+mafMGxM8c4Gff8qcprdMD3YQebZtD9UAdB3E= -github.com/amazon-contributing/opentelemetry-collector-contrib/pkg/translator/prometheus v0.0.0-20241216205413-8e059f1441db h1:X6bJy+ZTMbyFx5+JlJ4OkIBhjN7byZ1FLz2BbxBVOGc= -github.com/amazon-contributing/opentelemetry-collector-contrib/pkg/translator/prometheus v0.0.0-20241216205413-8e059f1441db/go.mod h1:21nuEQl7YYeLkVrGGvxPXkljqjR40teBCG5trGZ5LxM= -github.com/amazon-contributing/opentelemetry-collector-contrib/processor/resourcedetectionprocessor v0.0.0-20241216205413-8e059f1441db h1:hLUqHFJ1WXLugc2UaX921NREVt5Z97LtKhsCvbIp5M4= -github.com/amazon-contributing/opentelemetry-collector-contrib/processor/resourcedetectionprocessor v0.0.0-20241216205413-8e059f1441db/go.mod h1:mwpBj24vuuuvs11sJpGWtWLtd9Ou7RizcuXc+Uofhi8= -github.com/amazon-contributing/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver v0.0.0-20241216205413-8e059f1441db h1:kfh9b7QjmRIkT6ytj/GcpQXn+zQtIaxMtfzfyTiChr8= -github.com/amazon-contributing/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver v0.0.0-20241216205413-8e059f1441db/go.mod h1:dnULh8T9lt2RWMwLd3DdTPiTwniRrHlOQLcdVDn9/aQ= -github.com/amazon-contributing/opentelemetry-collector-contrib/receiver/awscontainerinsightskueuereceiver v0.0.0-20241216205413-8e059f1441db h1:MLRpuOmiOtN/vzFQCSaO+HrcZPOi4GW2wZvRQonJsLo= -github.com/amazon-contributing/opentelemetry-collector-contrib/receiver/awscontainerinsightskueuereceiver v0.0.0-20241216205413-8e059f1441db/go.mod h1:QWPqmqbzXUZtrh4I0uCJXzDnvUMdxDd9UGjdZLGxf68= 
-github.com/amazon-contributing/opentelemetry-collector-contrib/receiver/awsxrayreceiver v0.0.0-20241216205413-8e059f1441db h1:QMTBAyheWMMOEDzLd9srk9relhYBnGhr3gQCHX+g/AA= -github.com/amazon-contributing/opentelemetry-collector-contrib/receiver/awsxrayreceiver v0.0.0-20241216205413-8e059f1441db/go.mod h1:igQaQJt7eA/y3dZ2VLXVql+6k/ZXBgrAa2y9FrMMIKQ= -github.com/amazon-contributing/opentelemetry-collector-contrib/receiver/jmxreceiver v0.0.0-20241216205413-8e059f1441db h1:v5kOaxE58eP9KZTl3YhbcHjkdDFRf3Sqjxl0HAT0HxE= -github.com/amazon-contributing/opentelemetry-collector-contrib/receiver/jmxreceiver v0.0.0-20241216205413-8e059f1441db/go.mod h1:hRUrYatVP/GFNxHn2yW1gJcnPyGtdlTXyebpzzzjZeU= -github.com/amazon-contributing/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.0.0-20241216205413-8e059f1441db h1:LCYWqC+KCiq3gCRDGK0NAaQEoGWawghXcT6dkQLOYKs= -github.com/amazon-contributing/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.0.0-20241216205413-8e059f1441db/go.mod h1:/R2tBPLPR8dLZA0BQI2ZA7IB/zU7Q+Ghp+ujiPacVkA= +github.com/amazon-contributing/opentelemetry-collector-contrib/pkg/resourcetotelemetry v0.0.0-20250225223951-40b46bce466c h1:Jiz83DiSSR7OMBb01/NiLBv7/7bszsdGX7F6nrjgjAw= +github.com/amazon-contributing/opentelemetry-collector-contrib/pkg/resourcetotelemetry v0.0.0-20250225223951-40b46bce466c/go.mod h1:hW2AaybTRcwxJySGLC3Fh1vd2VDaQhRBfa7O7w30NS8= +github.com/amazon-contributing/opentelemetry-collector-contrib/pkg/stanza v0.0.0-20250225223951-40b46bce466c h1:LVoLf97WeWAy9LbNvNlUisbdJ/nmTVbPmu8LXsU28TI= +github.com/amazon-contributing/opentelemetry-collector-contrib/pkg/stanza v0.0.0-20250225223951-40b46bce466c/go.mod h1:iqgJP7+N03pOIOqYaKjVWYoIKweNdFivsvWJfFw6MTQ= +github.com/amazon-contributing/opentelemetry-collector-contrib/pkg/translator/prometheus v0.0.0-20250225223951-40b46bce466c h1:dGucyN2phPSPEK4AAArzCCtZLtJYrVBXZAZ2qc+0g7I= +github.com/amazon-contributing/opentelemetry-collector-contrib/pkg/translator/prometheus 
v0.0.0-20250225223951-40b46bce466c/go.mod h1:cW/BaYE6Uo7ZYHbmT0wVBktHP0SfeLqGHMf0qks7rOE= +github.com/amazon-contributing/opentelemetry-collector-contrib/processor/resourcedetectionprocessor v0.0.0-20250225223951-40b46bce466c h1:IoqQNbkGTGLe7LXw8bvB5gGXk86jMfm2eyDZwG5MKFk= +github.com/amazon-contributing/opentelemetry-collector-contrib/processor/resourcedetectionprocessor v0.0.0-20250225223951-40b46bce466c/go.mod h1:yyT23VxqHJZaOR1wSO4GqO4Nr8omwlNUL/ouxPmjlvM= +github.com/amazon-contributing/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver v0.0.0-20250225223951-40b46bce466c h1:9HDD0C6K2RwOEtXijUuZa5on3xCN1UKA/76O+3JZ4JA= +github.com/amazon-contributing/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver v0.0.0-20250225223951-40b46bce466c/go.mod h1:CevoQ0mSIcnS/eKfBqZnYEUeMEe/slvtpOhlWb6KnDM= +github.com/amazon-contributing/opentelemetry-collector-contrib/receiver/awscontainerinsightskueuereceiver v0.0.0-20250225223951-40b46bce466c h1:4S+4WJzISU8TfF1Q2+9KtbwLgrESDgfaEOwnhn2nLn4= +github.com/amazon-contributing/opentelemetry-collector-contrib/receiver/awscontainerinsightskueuereceiver v0.0.0-20250225223951-40b46bce466c/go.mod h1:48Qw63SpQsWKxh+cHz77YEtndVIAsHegsRpo/Mi4wp8= +github.com/amazon-contributing/opentelemetry-collector-contrib/receiver/awsxrayreceiver v0.0.0-20250225223951-40b46bce466c h1:sYK3SH8mYA/Ie5oP5bdI1F1Kr4P3PfbwI4fGZZBh8jA= +github.com/amazon-contributing/opentelemetry-collector-contrib/receiver/awsxrayreceiver v0.0.0-20250225223951-40b46bce466c/go.mod h1:BD+tS0/kaanAgD/slvuEMoXOnKD1mqWQwPD4nPyeoI8= +github.com/amazon-contributing/opentelemetry-collector-contrib/receiver/jmxreceiver v0.0.0-20250225223951-40b46bce466c h1:Luly8B0rR7y7m1ku5mPVTa7MsDjchLr9MXQlY1zyncg= +github.com/amazon-contributing/opentelemetry-collector-contrib/receiver/jmxreceiver v0.0.0-20250225223951-40b46bce466c/go.mod h1:yjmFxatKvpGO4X9MTqeIcoWkSKykjFH+83VpNhWlUN0= 
+github.com/amazon-contributing/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.0.0-20250225223951-40b46bce466c h1:ZXYCfMPAj1zKUdGleRtLafbEnRT0ertHjeZ4Jo+jDho= +github.com/amazon-contributing/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.0.0-20250225223951-40b46bce466c/go.mod h1:x4hCznyUolxGt5cE/uXWRCckdIDrUYqH5hJddvdKZd4= github.com/amir/raidman v0.0.0-20170415203553-1ccc43bfb9c9 h1:FXrPTd8Rdlc94dKccl7KPmdmIbVh/OjelJ8/vgMRzcQ= github.com/amir/raidman v0.0.0-20170415203553-1ccc43bfb9c9/go.mod h1:eliMa/PW+RDr2QLWRmLH1R1ZA4RInpmvOzDDXtaIZkc= github.com/andybalholm/brotli v1.0.5 h1:8uQZIdzKmjc/iuPu7O2ioW48L81FgatrcpfFmiq/cCs= github.com/andybalholm/brotli v1.0.5/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig= github.com/antchfx/jsonquery v1.1.5 h1:1YWrNFYCcIuJPIjFeOP5b6TXbLSUYY8qqxWbuZOB1qE= github.com/antchfx/jsonquery v1.1.5/go.mod h1:RtMzTHohKaAerkfslTNjr3Y9MdxjKlSgIgaVjVKNiug= -github.com/antchfx/xmlquery v1.3.9 h1:Y+zyMdiUZ4fasTQTkDb3DflOXP7+obcYEh80SISBmnQ= -github.com/antchfx/xmlquery v1.3.9/go.mod h1:wojC/BxjEkjJt6dPiAqUzoXO5nIMWtxHS8PD8TmN4ks= +github.com/antchfx/xmlquery v1.4.2 h1:MZKd9+wblwxfQ1zd1AdrTsqVaMjMCwow3IqkCSe00KA= +github.com/antchfx/xmlquery v1.4.2/go.mod h1:QXhvf5ldTuGqhd1SHNvvtlhhdQLks4dD0awIVhXIDTA= github.com/antchfx/xpath v1.1.7/go.mod h1:Yee4kTMuNiPYJ7nSNorELQMr1J33uOpXDMByNYhvtNk= -github.com/antchfx/xpath v1.2.0 h1:mbwv7co+x0RwgeGAOHdrKy89GvHaGvxxBtPK0uF9Zr8= github.com/antchfx/xpath v1.2.0/go.mod h1:i54GszH55fYfBmoZXapTHN8T8tkcHfRgLyVwwqzXNcs= +github.com/antchfx/xpath v1.3.2 h1:LNjzlsSjinu3bQpw9hWMY9ocB80oLOWuQqFvO6xt51U= +github.com/antchfx/xpath v1.3.2/go.mod h1:i54GszH55fYfBmoZXapTHN8T8tkcHfRgLyVwwqzXNcs= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/apache/arrow/go/v12 v12.0.1 h1:JsR2+hzYYjgSUkBSaahpqCetqZMr76djX80fF/DiJbg= github.com/apache/arrow/go/v12 v12.0.1/go.mod h1:weuTY7JvTG/HDPtMQxEUp7pU73vkLWMLpY67QwZ/WWw= 
-github.com/apache/arrow/go/v14 v14.0.2 h1:N8OkaJEOfI3mEZt07BIkvo4sC6XDbL+48MBPWO5IONw= -github.com/apache/arrow/go/v14 v14.0.2/go.mod h1:u3fgh3EdgN/YQ8cVQRguVW3R+seMybFg8QBQ5LU+eBY= -github.com/apache/thrift v0.20.0 h1:631+KvYbsBZxmuJjYwhezVsrfc/TbqtZV4QcxOX1fOI= -github.com/apache/thrift v0.20.0/go.mod h1:hOk1BQqcp2OLzGsyVXdfMk7YFlMxK3aoEVhjD06QhB8= +github.com/apache/arrow/go/v15 v15.0.2 h1:60IliRbiyTWCWjERBCkO1W4Qun9svcYoZrSLcyOsMLE= +github.com/apache/arrow/go/v15 v15.0.2/go.mod h1:DGXsR3ajT524njufqf95822i+KTh+yea1jass9YXgjA= +github.com/apache/thrift v0.21.0 h1:tdPmh/ptjE1IJnhbhrcl2++TauVjy242rkV/UzJChnE= +github.com/apache/thrift v0.21.0/go.mod h1:W1H8aR/QRtYNvrPeFXBtobyRkd0/YVhTc6i07XIAgDw= github.com/aristanetworks/glog v0.0.0-20191112221043-67e8567f59f3 h1:Bmjk+DjIi3tTAU0wxGaFbfjGUqlxxSXARq9A96Kgoos= github.com/aristanetworks/glog v0.0.0-20191112221043-67e8567f59f3/go.mod h1:KASm+qXFKs/xjSoWn30NrWBBvdTTQq+UjkhjEJHfSFA= github.com/aristanetworks/goarista v0.0.0-20190325233358-a123909ec740 h1:FD4/ikKOFxwP8muWDypbmBWc634+YcAs3eBrYAmRdZY= @@ -264,30 +269,30 @@ github.com/aws/aws-sdk-go v1.48.6 h1:hnL/TE3eRigirDLrdRE9AWE1ALZSVLAsC4wK8TGsMqk github.com/aws/aws-sdk-go v1.48.6/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= github.com/aws/aws-sdk-go-v2 v1.9.1/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVjy53i/mX/4= github.com/aws/aws-sdk-go-v2 v1.9.2/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVjy53i/mX/4= -github.com/aws/aws-sdk-go-v2 v1.27.0 h1:7bZWKoXhzI+mMR/HjdMx8ZCC5+6fY0lS5tr0bbgiLlo= -github.com/aws/aws-sdk-go-v2 v1.27.0/go.mod h1:ffIFB97e2yNsv4aTSGkqtHnppsIJzw7G7BReUZ3jCXM= +github.com/aws/aws-sdk-go-v2 v1.32.6 h1:7BokKRgRPuGmKkFMhEg/jSul+tB9VvXhcViILtfG8b4= +github.com/aws/aws-sdk-go-v2 v1.32.6/go.mod h1:P5WJBrYqqbWVaOxgH0X/FYYD47/nooaPOZPlQdmiN2U= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.13 h1:OPLEkmhXf6xFPiz0bLeDArZIDx1NNS4oJyG4nv3Gct0= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.13/go.mod 
h1:gpAbvyDGQFozTEmlTFO8XcQKHzubdq0LzRyJpG6MiXM= github.com/aws/aws-sdk-go-v2/config v1.8.3/go.mod h1:4AEiLtAb8kLs7vgw2ZV3p2VZ1+hBavOc84hqxVNpCyw= -github.com/aws/aws-sdk-go-v2/config v1.27.16 h1:knpCuH7laFVGYTNd99Ns5t+8PuRjDn4HnnZK48csipM= -github.com/aws/aws-sdk-go-v2/config v1.27.16/go.mod h1:vutqgRhDUktwSge3hrC3nkuirzkJ4E/mLj5GvI0BQas= +github.com/aws/aws-sdk-go-v2/config v1.28.6 h1:D89IKtGrs/I3QXOLNTH93NJYtDhm8SYa9Q5CsPShmyo= +github.com/aws/aws-sdk-go-v2/config v1.28.6/go.mod h1:GDzxJ5wyyFSCoLkS+UhGB0dArhb9mI+Co4dHtoTxbko= github.com/aws/aws-sdk-go-v2/credentials v1.4.3/go.mod h1:FNNC6nQZQUuyhq5aE5c7ata8o9e4ECGmS4lAXC7o1mQ= -github.com/aws/aws-sdk-go-v2/credentials v1.17.16 h1:7d2QxY83uYl0l58ceyiSpxg9bSbStqBC6BeEeHEchwo= -github.com/aws/aws-sdk-go-v2/credentials v1.17.16/go.mod h1:Ae6li/6Yc6eMzysRL2BXlPYvnrLLBg3D11/AmOjw50k= +github.com/aws/aws-sdk-go-v2/credentials v1.17.47 h1:48bA+3/fCdi2yAwVt+3COvmatZ6jUDNkDTIsqDiMUdw= +github.com/aws/aws-sdk-go-v2/credentials v1.17.47/go.mod h1:+KdckOejLW3Ks3b0E3b5rHsr2f9yuORBum0WPnE5o5w= github.com/aws/aws-sdk-go-v2/feature/dynamodb/attributevalue v1.2.0 h1:8kvinmbIDObqsWegKP0JjeanYPiA4GUVpAtciNWE+jw= github.com/aws/aws-sdk-go-v2/feature/dynamodb/attributevalue v1.2.0/go.mod h1:UVFtSYSWCHj2+brBLDHUdlJXmz8LxUpZhA+Ewypc+xQ= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.6.0/go.mod h1:gqlclDEZp4aqJOancXK6TN24aKhT0W0Ae9MHk3wzTMM= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.3 h1:dQLK4TjtnlRGb0czOht2CevZ5l6RSyRWAnKeGd7VAFE= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.3/go.mod h1:TL79f2P6+8Q7dTsILpiVST+AL9lkF6PPGI167Ny0Cjw= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.21 h1:AmoU1pziydclFT/xRV+xXE/Vb8fttJCLRPv8oAkprc0= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.21/go.mod h1:AjUdLYe4Tgs6kpH4Bv7uMZo7pottoyHMn4eTcIcneaY= github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.59 h1:E3Y+OfzOK1+rmRo/K2G0ml8Vs+Xqk0kOnf4nS0kUtBc= github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.59/go.mod 
h1:1M4PLSBUVfBI0aP+C9XI7SM6kZPCGYyI6izWz0TGprE= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.7 h1:lf/8VTF2cM+N4SLzaYJERKEWAXq8MOMpZfU6wEPWsPk= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.7/go.mod h1:4SjkU7QiqK2M9oozyMzfZ/23LmUY+h3oFqhdeP5OMiI= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.7 h1:4OYVp0705xu8yjdyoWix0r9wPIRXnIzzOoUpQVHIJ/g= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.7/go.mod h1:vd7ESTEvI76T2Na050gODNmNU7+OyKrIKroYTu4ABiI= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.25 h1:s/fF4+yDQDoElYhfIVvSNyeCydfbuTKzhxSXDXCPasU= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.25/go.mod h1:IgPfDv5jqFIzQSNbUEMoitNooSMXjRSDkhXv8jiROvU= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.25 h1:ZntTCl5EsYnhN/IygQEUugpdwbhdkom9uHcbCftiGgA= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.25/go.mod h1:DBdPrgeocww+CSl1C8cEV8PN1mHMBhuCDLpXezyvWkE= github.com/aws/aws-sdk-go-v2/internal/ini v1.2.4/go.mod h1:ZcBrrI3zBKlhGFNYWvju0I3TR93I7YIgAfy82Fh4lcQ= -github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 h1:hT8rVHwugYE2lEfdFE0QWVo81lF7jMrYJVDWI+f+VxU= -github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0/go.mod h1:8tu/lYfQfFe6IGnaOdrpVgEL2IrrDOf6/m9RQum4NkY= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 h1:VaRN3TlFdd6KxX1x3ILT5ynH6HvKgqdiXoTxAF4HQcQ= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1/go.mod h1:FbtygfRFze9usAadmnGJNc8KsP346kEe+y2/oyhGAGc= github.com/aws/aws-sdk-go-v2/internal/v4a v1.1.4 h1:6lJvvkQ9HmbHZ4h/IEwclwv2mrTW8Uq1SOB/kXy0mfw= github.com/aws/aws-sdk-go-v2/internal/v4a v1.1.4/go.mod h1:1PrKYwxTM+zjpw9Y41KFtoJCQrJ34Z47Y4VgVbfndjo= github.com/aws/aws-sdk-go-v2/service/appconfig v1.4.2/go.mod h1:FZ3HkCe+b10uFZZkFdvf98LHW21k49W8o8J366lqVKY= @@ -300,15 +305,15 @@ github.com/aws/aws-sdk-go-v2/service/dynamodb v1.14.0 h1:P+eF8PKkeaiTfN/VBe5GI3u github.com/aws/aws-sdk-go-v2/service/dynamodb v1.14.0/go.mod h1:15NiwrGGBpsC7C3zScmoaqNo1QJ9SRjdM5jxEPnCUR8= 
github.com/aws/aws-sdk-go-v2/service/dynamodbstreams v1.4.0 h1:QbFWJr2SAyVYvyoOHvJU6sCGLnqNT94ZbWElJMEI1JY= github.com/aws/aws-sdk-go-v2/service/dynamodbstreams v1.4.0/go.mod h1:bYsEP8w5YnbYyrx/Zi5hy4hTwRRQISSJS3RWrsGRijg= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.2 h1:Ji0DY1xUsUr3I8cHps0G+XM3WWU16lP6yG8qu1GAZAs= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.2/go.mod h1:5CsjAbs3NlGQyZNFACh+zztPDI7fU6eW9QsxjfnuBKg= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.1 h1:iXtILhvDxB6kPvEXgsDhGaZCSC6LQET5ZHSdJozeI0Y= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.1/go.mod h1:9nu0fVANtYiAePIBh2/pFUSwtJ402hLnp854CNoDOeE= github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.36 h1:eev2yZX7esGRjqRbnVk1UxMLw4CyVZDpZXRCcy75oQk= github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.36/go.mod h1:lGnOkH9NJATw0XEPcAknFBj3zzNTEGRHtSw+CwC1YTg= github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.7.6 h1:JGrc3+kkyr848/wpG2+kWuzHK3H4Fyxj2jnXj8ijQ/Y= github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.7.6/go.mod h1:zwvTysbXES8GDwFcwCPB8NkC+bCdio1abH+E+BRe/xg= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.3.2/go.mod h1:72HRZDLMtmVQiLG2tLfQcaWLCssELvGl+Zf2WVxMmR8= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.9 h1:Wx0rlZoEJR7JwlSZcHnEa7CNjrSIyVxMFWGAaXy4fJY= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.9/go.mod h1:aVMHdE0aHO3v+f/iw01fmXV/5DbfQ3Bi9nN7nd9bE9Y= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.6 h1:50+XsN70RS7dwJ2CkVNXzj7U2L1HKP8nqTd3XWEXBN4= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.6/go.mod h1:WqgLmwY7so32kG01zD8CPTJWVWM+TzJoOVHwTg4aPug= github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.15.4 h1:v0jkRigbSD6uOdwcaUQmgEwG1BkPfAPDqaeNt/29ghg= github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.15.4/go.mod 
h1:LhTyt8J04LL+9cIt7pYJ5lbS/U98ZmXovLOR/4LUsk8= github.com/aws/aws-sdk-go-v2/service/kinesis v1.13.0 h1:wqLvwC4qdrrGikudu8Z9X2sb79BYUYWAgMF5BGFQJY8= @@ -316,18 +321,18 @@ github.com/aws/aws-sdk-go-v2/service/kinesis v1.13.0/go.mod h1:RCOtKdXlUfirtaxlH github.com/aws/aws-sdk-go-v2/service/s3 v1.40.0 h1:wl5dxN1NONhTDQD9uaEvNsDRX29cBmGED/nl0jkWlt4= github.com/aws/aws-sdk-go-v2/service/s3 v1.40.0/go.mod h1:rDGMZA7f4pbmTtPOk5v5UM2lmX6UAbRnMDJeDvnH7AM= github.com/aws/aws-sdk-go-v2/service/sso v1.4.2/go.mod h1:NBvT9R1MEF+Ud6ApJKM0G+IkPchKS7p7c2YPKwHmBOk= -github.com/aws/aws-sdk-go-v2/service/sso v1.20.9 h1:aD7AGQhvPuAxlSUfo0CWU7s6FpkbyykMhGYMvlqTjVs= -github.com/aws/aws-sdk-go-v2/service/sso v1.20.9/go.mod h1:c1qtZUWtygI6ZdvKppzCSXsDOq5I4luJPZ0Ud3juFCA= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.24.3 h1:Pav5q3cA260Zqez42T9UhIlsd9QeypszRPwC9LdSSsQ= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.24.3/go.mod h1:9lmoVDVLz/yUZwLaQ676TK02fhCu4+PgRSmMaKR1ozk= +github.com/aws/aws-sdk-go-v2/service/sso v1.24.7 h1:rLnYAfXQ3YAccocshIH5mzNNwZBkBo+bP6EhIxak6Hw= +github.com/aws/aws-sdk-go-v2/service/sso v1.24.7/go.mod h1:ZHtuQJ6t9A/+YDuxOLnbryAmITtr8UysSny3qcyvJTc= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.6 h1:JnhTZR3PiYDNKlXy50/pNeix9aGMo6lLpXwJ1mw8MD4= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.6/go.mod h1:URronUEGfXZN1VpdktPSD1EkAL9mfrV+2F4sjH38qOY= github.com/aws/aws-sdk-go-v2/service/sts v1.7.2/go.mod h1:8EzeIqfWt2wWT4rJVu3f21TfrhJ8AEMzVybRNSb/b4g= -github.com/aws/aws-sdk-go-v2/service/sts v1.28.10 h1:69tpbPED7jKPyzMcrwSvhWcJ9bPnZsZs18NT40JwM0g= -github.com/aws/aws-sdk-go-v2/service/sts v1.28.10/go.mod h1:0Aqn1MnEuitqfsCNyKsdKLhDUOr4txD/g19EfiUqgws= +github.com/aws/aws-sdk-go-v2/service/sts v1.33.2 h1:s4074ZO1Hk8qv65GqNXqDjmkf4HSQqJukaLuuW0TpDA= +github.com/aws/aws-sdk-go-v2/service/sts v1.33.2/go.mod h1:mVggCnIWoM09jP71Wh+ea7+5gAp53q+49wDFs1SW5z8= github.com/aws/aws-sdk-go-v2/service/timestreamwrite v1.13.6 
h1:HwNzaXr3lHe3YPEyyx7Fh41CZplz6G1YqB3OR0FJ2iw= github.com/aws/aws-sdk-go-v2/service/timestreamwrite v1.13.6/go.mod h1:akrYtxss20JAwAF/VzsUJRHf210HwuLZpUy1Njrgpe0= github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= -github.com/aws/smithy-go v1.20.2 h1:tbp628ireGtzcHDDmLT/6ADHidqnwgF57XOXZe6tp4Q= -github.com/aws/smithy-go v1.20.2/go.mod h1:krry+ya/rV9RDcV/Q16kpu6ypI4K2czasz0NC3qS14E= +github.com/aws/smithy-go v1.22.1 h1:/HPHZQ0g7f4eUeK6HKglFz8uwVfZKgoI25rb/J+dnro= +github.com/aws/smithy-go v1.22.1/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg= github.com/aws/telegraf v0.10.2-0.20250113150713-a2dfaa4cdf6d h1:WQ+YwEMyIpKQOGtAtUyqq18RhERiAQGPYSXsSckRrsg= github.com/aws/telegraf v0.10.2-0.20250113150713-a2dfaa4cdf6d/go.mod h1:jrcGlkX91pXbwi4r7gpfd7UPQGMJDdv6tw1mCrzmIOE= github.com/aws/telegraf/patches/gopsutil/v3 v3.0.0-20250113150713-a2dfaa4cdf6d h1:nCzziuk/prBwzhbEyP9onARem7LriIxqGrTFB9D532g= @@ -350,8 +355,8 @@ github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= github.com/bmatcuk/doublestar/v3 v3.0.0 h1:TQtVPlDnAYwcrVNB2JiGuMc++H5qzWZd9PhkNo5WyHI= github.com/bmatcuk/doublestar/v3 v3.0.0/go.mod h1:6PcTVMw80pCY1RVuoqu3V++99uQB3vsSYKPTd8AWA0k= -github.com/bmatcuk/doublestar/v4 v4.6.1 h1:FH9SifrbvJhnlQpztAx++wlkk70QBf0iBWDwNy7PA4I= -github.com/bmatcuk/doublestar/v4 v4.6.1/go.mod h1:xBQ8jztBU6kakFMg+8WGxn0c6z1fTSPVIjEY1Wr7jzc= +github.com/bmatcuk/doublestar/v4 v4.7.1 h1:fdDeAqgT47acgwd9bd9HxJRDmc9UAmPpc+2m0CXv75Q= +github.com/bmatcuk/doublestar/v4 v4.7.1/go.mod h1:xBQ8jztBU6kakFMg+8WGxn0c6z1fTSPVIjEY1Wr7jzc= github.com/bytedance/sonic v1.11.6 h1:oUp34TzMlL+OY1OUWxHqsdkgC/Zfc85zGqw9siXjrc0= github.com/bytedance/sonic v1.11.6/go.mod h1:LysEHSvpvDySVdC2f87zGWf6CIKJcAvqab1ZaiQtds4= github.com/bytedance/sonic/loader v0.1.1 h1:c+e5Pt1k/cy5wMveRDyk2X4B9hF4g7an8N3zCYjJFNM= @@ -392,22 +397,26 @@ github.com/cloudwego/iasm 
v0.2.0/go.mod h1:8rXZaNYT2n95jn+zTI1sDr+IgcD2GVs0nlbbQ github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b h1:ga8SEFjZ60pxLcmhnThWgvH2wg8376yUJmPhEH4H3kw= -github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= +github.com/cncf/xds/go v0.0.0-20240905190251-b4127c9b8d78 h1:QVw89YDxXxEe+l8gU8ETbOasdwEV+avkR75ZzsVV9WI= +github.com/cncf/xds/go v0.0.0-20240905190251-b4127c9b8d78/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= github.com/containerd/cgroups/v3 v3.0.3 h1:S5ByHZ/h9PMe5IOQoN7E+nMc2UcLEM/V48DGDJ9kip0= github.com/containerd/cgroups/v3 v3.0.3/go.mod h1:8HBe7V3aWGLFPd/k03swSIsGjZhHI2WzJmticMgVuz0= -github.com/containerd/console v1.0.3 h1:lIr7SlA5PxZyMV30bDW0MGbiOPXwc63yRuCP0ARubLw= -github.com/containerd/console v1.0.3/go.mod h1:7LqA/THxQ86k76b8c/EMSiaJ3h1eZkMkXar0TQ1gf3U= -github.com/containerd/containerd v1.7.15 h1:afEHXdil9iAm03BmhjzKyXnnEBtjaLJefdU7DV0IFes= -github.com/containerd/containerd v1.7.15/go.mod h1:ISzRRTMF8EXNpJlTzyr2XMhN+j9K302C21/+cr3kUnY= -github.com/containerd/errdefs v0.1.0 h1:m0wCRBiu1WJT/Fr+iOoQHMQS/eP5myQ8lCv4Dz5ZURM= -github.com/containerd/errdefs v0.1.0/go.mod h1:YgWiiHtLmSeBrvpw+UfPijzbLaB77mEG1WwJTDETIV0= +github.com/containerd/console v1.0.4 h1:F2g4+oChYvBTsASRTz8NP6iIAi97J3TtSAsLbIFn4ro= +github.com/containerd/console v1.0.4/go.mod h1:YynlIjWYF8myEu6sdkwKIvGQq+cOckRm6So2avqoYAk= +github.com/containerd/errdefs v0.3.0 h1:FSZgGOeK4yuT/+DnF07/Olde/q4KBoMsaamhXxIMDp4= +github.com/containerd/errdefs v0.3.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M= +github.com/containerd/errdefs/pkg v0.3.0 
h1:9IKJ06FvyNlexW690DXuQNx2KA2cUJXx151Xdx3ZPPE= +github.com/containerd/errdefs/pkg v0.3.0/go.mod h1:NJw6s9HwNuRhnjJhM7pylWwMyAkmCQvQ4GpJHEqRLVk= github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= -github.com/containerd/ttrpc v1.2.4 h1:eQCQK4h9dxDmpOb9QOOMh2NHTfzroH1IkmHiKZi05Oo= -github.com/containerd/ttrpc v1.2.4/go.mod h1:ojvb8SJBSch0XkqNO0L0YX/5NxR3UnVk2LzFKBK0upc= +github.com/containerd/platforms v0.2.1 h1:zvwtM3rz2YHPQsF2CHYM8+KtB5dvhISiXh5ZpSBQv6A= +github.com/containerd/platforms v0.2.1/go.mod h1:XHCb+2/hzowdiut9rkudds9bE5yJ7npe7dG/wG+uFPw= +github.com/containerd/ttrpc v1.2.5 h1:IFckT1EFQoFBMG4c3sMdT8EP3/aKfumK1msY+Ze4oLU= +github.com/containerd/ttrpc v1.2.5/go.mod h1:YCXHsb32f+Sq5/72xHubdiJRQY9inL4a4ZQrAbN1q9o= github.com/containerd/typeurl v1.0.2 h1:Chlt8zIieDbzQFzXzAeBEF92KhExuE4p9p92/QmY7aY= github.com/containerd/typeurl v1.0.2/go.mod h1:9trJWW2sRlGub4wZJRTW83VtbOLS6hwcDZXTn6oPz9s= +github.com/containerd/typeurl/v2 v2.2.0 h1:6NBDbQzr7I5LHgp34xAXYF5DOTQDn05X58lsPEmzLso= +github.com/containerd/typeurl/v2 v2.2.0/go.mod h1:8XOOxnyatxSWuG8OfsZXVnAF4iZfedjS/8UHSPJnX4g= github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= @@ -419,12 +428,12 @@ github.com/couchbase/gomemcached v0.1.3 h1:HIc5qMYNbuhB7zNaiEtj61DCYkquAwrQlf64q github.com/couchbase/gomemcached v0.1.3/go.mod h1:mxliKQxOv84gQ0bJWbI+w9Wxdpt9HjDvgW9MjCym5Vo= github.com/couchbase/goutils v0.1.0 h1:0WLlKJilu7IBm98T8nS9+J36lBFVLRUSIUtyD/uWpAE= github.com/couchbase/goutils v0.1.0/go.mod h1:BQwMFlJzDjFDG3DJUdU0KORxn88UlsOULuxLExMh3Hs= -github.com/cpuguy83/dockercfg v0.3.1 h1:/FpZ+JaygUR/lZP2NlFI2DVfrOEMAIKP5wWEJdoYe9E= -github.com/cpuguy83/dockercfg v0.3.1/go.mod 
h1:sugsbF4//dDlL/i+S+rtpIWp+5h0BHJHfjj5/jFyUJc= -github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/cpuguy83/dockercfg v0.3.2 h1:DlJTyZGBDlXqUZ2Dk2Q3xHs/FtnooJJVaad2S9GKorA= +github.com/cpuguy83/dockercfg v0.3.2/go.mod h1:sugsbF4//dDlL/i+S+rtpIWp+5h0BHJHfjj5/jFyUJc= +github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg= -github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= +github.com/cyphar/filepath-securejoin v0.2.5 h1:6iR5tXJ/e6tJZzzdMc1km3Sa7RRIVBKAK32O2s7AYfo= +github.com/cyphar/filepath-securejoin v0.2.5/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= github.com/danieljoos/wincred v1.1.2 h1:QLdCxFs1/Yl4zduvBdcHB8goaYk9RARS2SgLLRuAyr0= github.com/danieljoos/wincred v1.1.2/go.mod h1:GijpziifJoIBfYh+S7BbkdUTU4LfM+QnGqR5Vl2tAx0= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -439,8 +448,8 @@ github.com/dennwc/varint v1.0.0 h1:kGNFFSSw8ToIy3obO/kKr8U9GZYUAxQEVuix4zfDWzE= github.com/dennwc/varint v1.0.0/go.mod h1:hnItb35rvZvJrbTALZtY/iQfDs48JKRG1RPpgziApxA= github.com/devigned/tab v0.1.1 h1:3mD6Kb1mUOYeLpJvTVSDwSg5ZsfSxfvxGRTxRsJsITA= github.com/devigned/tab v0.1.1/go.mod h1:XG9mPq0dFghrYvoBF3xdRrJzSTX1b7IQrvaL9mzjeJY= -github.com/digitalocean/godo v1.109.0 h1:4W97RJLJSUQ3veRZDNbp1Ol3Rbn6Lmt9bKGvfqYI5SU= -github.com/digitalocean/godo v1.109.0/go.mod h1:R6EmmWI8CT1+fCtjWY9UCB+L5uufuZH13wk3YhxycCs= +github.com/digitalocean/godo v1.126.0 h1:+Znh7VMQj/E8ArbjWnc7OKGjWfzC+I8OCSRp7r1MdD8= +github.com/digitalocean/godo v1.126.0/go.mod h1:PU8JB6I1XYkQIdHFop8lLAY9ojp6M0XcU0TWaQSxbrc= github.com/dimchansky/utfbom v1.1.1 h1:vV6w1AhK4VMnhBno/TPVCoK9U/LP0PkLCS9tbxHdi/U= github.com/dimchansky/utfbom v1.1.1/go.mod 
h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/j0mACtrfE= github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= @@ -449,8 +458,8 @@ github.com/djherbis/times v1.5.0 h1:79myA211VwPhFTqUk8xehWrsEO+zcIZj0zT8mXPVARU= github.com/djherbis/times v1.5.0/go.mod h1:5q7FDLvbNg1L/KaBmPcWlVR9NmoKo3+ucqUA3ijQhA0= github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= -github.com/docker/docker v26.1.5+incompatible h1:NEAxTwEjxV6VbBMBoGG3zPqbiJosIApZjxlbrG9q3/g= -github.com/docker/docker v26.1.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v27.3.1+incompatible h1:KttF0XoteNTicmUtBO0L2tP+J7FGRFTjaEF4k6WdhfI= +github.com/docker/docker v27.3.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= @@ -466,16 +475,22 @@ github.com/dvsekhvalnov/jose2go v1.6.0/go.mod h1:QsHjhyTlD/lAVqn/NSbVZmSCGeDehTB github.com/dynatrace-oss/dynatrace-metric-utils-go v0.3.0 h1:q2Ayh9s6Cr75bS5URiOUAoyFXemgKQaBJphbhAaJHCY= github.com/dynatrace-oss/dynatrace-metric-utils-go v0.3.0/go.mod h1:qw0E9EJ0PnSlhWawDNuqE0zhc1hqOBUCFIAj3dd9DNw= github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= -github.com/eapache/go-resiliency v1.6.0 h1:CqGDTLtpwuWKn6Nj3uNUdflaq+/kIPsg0gfNzHton30= -github.com/eapache/go-resiliency v1.6.0/go.mod h1:5yPzW0MIvSe0JDsv0v+DvcjEv2FyD6iZYSs1ZI+iQho= +github.com/eapache/go-resiliency v1.7.0 h1:n3NRTnBn5N0Cbi/IeOHuQn9s2UwVUH7Ga0ZWcP+9JTA= +github.com/eapache/go-resiliency v1.7.0/go.mod h1:5yPzW0MIvSe0JDsv0v+DvcjEv2FyD6iZYSs1ZI+iQho= github.com/eapache/go-xerial-snappy 
v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3 h1:Oy0F4ALJ04o5Qqpdz8XLIpNA3WM/iSIXqxtqo7UGVws= github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3/go.mod h1:YvSRo5mw33fLEx1+DlK6L2VV43tJt5Eyel9n9XBcR+0= github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc= github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= +github.com/ebitengine/purego v0.8.1 h1:sdRKd6plj7KYW33EH5As6YKfe8m9zbN9JMrOjNVF/BE= +github.com/ebitengine/purego v0.8.1/go.mod h1:iIjxzd6CiRiOG0UyXP+V1+jWqUXVjPKLAI0mRfJZTmQ= github.com/eclipse/paho.mqtt.golang v1.3.5 h1:sWtmgNxYM9P2sP+xEItMozsR3w0cqZFlqnNN1bdl41Y= github.com/eclipse/paho.mqtt.golang v1.3.5/go.mod h1:eTzb4gxwwyWpqBUHGQZ4ABAV7+Jgm1PklsYT/eo8Hcc= github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= +github.com/elastic/go-grok v0.3.1 h1:WEhUxe2KrwycMnlvMimJXvzRa7DoByJB4PVUIE1ZD/U= +github.com/elastic/go-grok v0.3.1/go.mod h1:n38ls8ZgOboZRgKcjMY8eFeZFMmcL9n2lP0iHhIDk64= +github.com/elastic/lunes v0.1.0 h1:amRtLPjwkWtzDF/RKzcEPMvSsSseLDLW+bnhfNSLRe4= +github.com/elastic/lunes v0.1.0/go.mod h1:xGphYIt3XdZRtyWosHQTErsQTd4OP1p9wsbVoHelrd4= github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= @@ -487,16 +502,14 @@ github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1m github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod 
h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= -github.com/envoyproxy/go-control-plane v0.12.0 h1:4X+VP1GHd1Mhj6IB5mMeGbLCleqxjletLK6K0rbxyZI= -github.com/envoyproxy/go-control-plane v0.12.0/go.mod h1:ZBTaoJ23lqITozF0M6G4/IragXCQKCnYbmlmtHvwRG0= +github.com/envoyproxy/go-control-plane v0.13.0 h1:HzkeUz1Knt+3bK+8LG1bxOO/jzWZmdxpwC51i202les= +github.com/envoyproxy/go-control-plane v0.13.0/go.mod h1:GRaKG3dwvFoTg4nj7aXdZnvMg4d7nvT/wl9WgVXn3Q8= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/envoyproxy/protoc-gen-validate v1.0.4 h1:gVPz/FMfvh57HdSJQyvBtF00j8JU4zdyUgIUNhlgg0A= -github.com/envoyproxy/protoc-gen-validate v1.0.4/go.mod h1:qys6tmnRsYrQqIhm2bvKZH4Blx/1gTIZ2UKVY1M+Yew= +github.com/envoyproxy/protoc-gen-validate v1.1.0 h1:tntQDh69XqOCOZsDz0lVJQez/2L6Uu2PdjCQwWCJ3bM= +github.com/envoyproxy/protoc-gen-validate v1.1.0/go.mod h1:sXRDRVmzEbkM7CVcM06s9shE/m23dg3wzjl0UWqJ2q4= github.com/euank/go-kmsg-parser v2.0.0+incompatible h1:cHD53+PLQuuQyLZeriD1V/esuG4MuU0Pjs5y6iknohY= github.com/euank/go-kmsg-parser v2.0.0+incompatible/go.mod h1:MhmAMZ8V4CYH4ybgdRwPr2TU5ThnS43puaKEMpja1uw= github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U= -github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/expr-lang/expr v1.16.9 h1:WUAzmR0JNI9JCiF0/ewwHB1gmcGw5wW7nWt8gc6PpCI= github.com/expr-lang/expr v1.16.9/go.mod h1:8/vRC7+7HBzESEqt5kKpYXxrxkr31SaO8r40VO/1IT4= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= @@ -523,8 +536,10 @@ github.com/frankban/quicktest v1.14.5 h1:dfYrrRyLtiqT9GyKXgdh+k4inNeTvmGbuSgZ3lx github.com/frankban/quicktest 
v1.14.5/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= -github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= -github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= +github.com/fsnotify/fsnotify v1.8.0 h1:dAwr6QBTBZIkG8roQaJjGof0pp0EeF+tNV7YBP3F/8M= +github.com/fsnotify/fsnotify v1.8.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= +github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= +github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= github.com/gabriel-vasile/mimetype v1.4.3 h1:in2uUcidCuFcDKtdcBxlR0rJ1+fsokWf+uqxgUFjbI0= github.com/gabriel-vasile/mimetype v1.4.3/go.mod h1:d8uq/6HKRL6CGdk+aubisF/M5GcPfT7nKyLpA0lbSSk= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= @@ -590,8 +605,8 @@ github.com/go-playground/validator/v10 v10.20.0 h1:K9ISHbSaI0lyB2eWMPJo+kOS/FBEx github.com/go-playground/validator/v10 v10.20.0/go.mod h1:dbuPbCMFw/DrkbEynArYaCwl3amGuJotoKCe95atGMM= github.com/go-redis/redis v6.15.9+incompatible h1:K0pv1D7EQUjfyoMql+r/jZqCLizCGKFlFgcHWWmHQjg= github.com/go-redis/redis v6.15.9+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA= -github.com/go-resty/resty/v2 v2.12.0 h1:rsVL8P90LFvkUYq/V5BTVe203WfRIU4gvcf+yfzJzGA= -github.com/go-resty/resty/v2 v2.12.0/go.mod h1:o0yGPrkS3lOe1+eFajk6kBW8ScXzwU3hD69/gt2yB/0= +github.com/go-resty/resty/v2 v2.13.1 h1:x+LHXBI2nMB1vqndymf26quycC4aggYJ7DECYbiz03g= +github.com/go-resty/resty/v2 v2.13.1/go.mod h1:GznXlLxkq6Nh4sU59rPmUw3VtgpO3aS96ORAI6Q7d+0= github.com/go-sql-driver/mysql v1.6.0 h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfCHuOE= github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= 
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= @@ -599,18 +614,19 @@ github.com/go-stack/stack v1.8.1 h1:ntEHSVwIt7PNXNpgPmVfMrNhLtgjlmnZha2kOpuRiDw= github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= -github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= +github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= +github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= github.com/go-test/deep v1.0.2-0.20181118220953-042da051cf31 h1:28FVBuwkwowZMjbA7M0wXsI6t3PYulRTMio3SO+eKCM= github.com/go-test/deep v1.0.2-0.20181118220953-042da051cf31/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= -github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1 h1:TQcrn6Wq+sKGkpyPvppOz99zsMBaUOKXq6HSv655U1c= -github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= +github.com/go-viper/mapstructure/v2 v2.2.1 h1:ZAaOCxANMuZx5RCeg0mBdEZk7DZasvvZIxtHqx8aGss= +github.com/go-viper/mapstructure/v2 v2.2.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= github.com/go-zookeeper/zk v1.0.2/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw= -github.com/go-zookeeper/zk v1.0.3 h1:7M2kwOsc//9VeeFiPtf+uSJlVpU66x9Ba5+8XK7/TDg= -github.com/go-zookeeper/zk v1.0.3/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw= +github.com/go-zookeeper/zk v1.0.4 h1:DPzxraQx7OrPyXq2phlGlNSIyWEsAox0RJmjTseMV6I= +github.com/go-zookeeper/zk v1.0.4/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw= github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= github.com/gobwas/glob v0.2.3/go.mod 
h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= -github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU= -github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= +github.com/goccy/go-json v0.10.4 h1:JSwxQzIqKfmFX1swYPpUThQZp/Ka4wzJdK0LWVytLPM= +github.com/goccy/go-json v0.10.4/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M= github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2 h1:ZpnhV/YsD2/4cESfV5+Hoeu/iUR3ruzNvZ+yQfO03a0= github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= @@ -712,31 +728,31 @@ github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20240227163752-401108e1b7e7 h1:y3N7Bm7Y9/CtpiVkw/ZWj6lSlDF3F74SfKwfTCer72Q= -github.com/google/pprof v0.0.0-20240227163752-401108e1b7e7/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik= +github.com/google/pprof v0.0.0-20240711041743-f6c9dda6c6da h1:xRmpO92tb8y+Z85iUOMOicpCfaYcv7o3Cg3wKrIpg8g= +github.com/google/pprof v0.0.0-20240711041743-f6c9dda6c6da/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o= -github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw= +github.com/google/s2a-go v0.1.8 h1:zZDs9gcbt9ZPLV0ndSyQk6Kacx2g/X+SKYovpnz3SMM= +github.com/google/s2a-go v0.1.8/go.mod 
h1:6iNWHTpQ+nfNRN5E00MSdfDwVesa8hhS32PhPO8deJA= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs= -github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= +github.com/googleapis/enterprise-certificate-proxy v0.3.4 h1:XYIDZApgAnrN1c855gTgghdIA6Stxb52D5RnLI1SLyw= +github.com/googleapis/enterprise-certificate-proxy v0.3.4/go.mod h1:YKe7cfqYXjKGpGvmSg28/fFvhNzinZQm8DGnaburhGA= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/googleapis/gax-go/v2 v2.12.2 h1:mhN09QQW1jEWeMF74zGR81R30z4VJzjZsfkUhuHF+DA= -github.com/googleapis/gax-go/v2 v2.12.2/go.mod h1:61M8vcyyXR2kqKFxKrfA22jaA8JGF7Dc8App1U3H6jc= +github.com/googleapis/gax-go/v2 v2.13.0 h1:yitjD5f7jQHhyDsnhKEBU52NdvvdSeGzlAnDPT0hH1s= +github.com/googleapis/gax-go/v2 v2.13.0/go.mod h1:Z/fvTZXF8/uw7Xu5GuslPw+bplx6SS338j1Is2S+B7A= github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= github.com/gopcua/opcua v0.3.1 h1:BS1TRJUdsPSwU0mlfc8Dffchh0jTw9lWchmF4HFRo2w= github.com/gopcua/opcua v0.3.1/go.mod h1:rdqS1oF5s/+Ko4SnhZA+3tgK4MQuXDzH3KgnnLDaCCQ= -github.com/gophercloud/gophercloud v1.8.0 h1:TM3Jawprb2NrdOnvcHhWJalmKmAmOGgfZElM/3oBYCk= -github.com/gophercloud/gophercloud v1.8.0/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM= +github.com/gophercloud/gophercloud v1.14.1 h1:DTCNaTVGl8/cFu58O1JwWgis9gtISAFONqpMKNg/Vpw= +github.com/gophercloud/gophercloud v1.14.1/go.mod 
h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gordonklaus/ineffassign v0.0.0-20200309095847-7953dde2c7bf/go.mod h1:cuNKsD1zp2v6XfE/orVX2QE1LC+i254ceGcVeDT3pTU= github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= -github.com/gorilla/handlers v1.5.1 h1:9lRY6j8DEeeBT10CvO9hGW0gmky0BprnvDI5vfhUHH4= -github.com/gorilla/handlers v1.5.1/go.mod h1:t8XrUpc4KVXb7HGyJ4/cEnwQiaxrX/hz1Zv/4g96P1Q= +github.com/gorilla/handlers v1.5.2 h1:cLTUSsNkgcwhgRqvCNmdbRWG0A3N4F+M2nWKdScwyEE= +github.com/gorilla/handlers v1.5.2/go.mod h1:dX+xVpaxdSw+q0Qek8SSsl3dfMk3jNddUkMzo0GtH0w= github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= @@ -747,19 +763,19 @@ github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWm github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gosnmp/gosnmp v1.34.0 h1:p96iiNTTdL4ZYspPC3leSKXiHfE1NiIYffMu9100p5E= github.com/gosnmp/gosnmp v1.34.0/go.mod h1:QWTRprXN9haHFof3P96XTDYc46boCGAh5IXp0DniEx4= -github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd h1:PpuIBO5P3e9hpqBD0O/HjhShYuM6XE0i/lbE6J94kww= -github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd/go.mod h1:M5qHK+eWfAv8VR/265dIuEpL3fNfeC21tXXp9itM24A= +github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc h1:GN2Lv3MGO7AS6PrRoT6yV5+wkrOpcszoIsO4+4ds248= +github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc/go.mod h1:+JKpmjMGhpgPL+rXZ5nsZieVzvarn86asRlBg4uNGnk= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grid-x/modbus v0.0.0-20211113184042-7f2251c342c9 
h1:Q7e9kXS3sRbTjsNDKazbcbDSGAKjFdk096M3qYbwNpE= github.com/grid-x/modbus v0.0.0-20211113184042-7f2251c342c9/go.mod h1:qVX2WhsI5xyAoM6I/MV1bXSKBPdLAjp7pCvieO/S0AY= github.com/grid-x/serial v0.0.0-20211107191517-583c7356b3aa h1:Rsn6ARgNkXrsXJIzhkE4vQr5Gbx2LvtEMv4BJOK4LyU= github.com/grid-x/serial v0.0.0-20211107191517-583c7356b3aa/go.mod h1:kdOd86/VGFWRrtkNwf1MPk0u1gIjc4Y7R2j7nhwc7Rk= -github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 h1:UH//fgunKIs4JdUbpDl1VZCDaL56wXCB/5+wF6uHfaI= -github.com/grpc-ecosystem/go-grpc-middleware v1.4.0/go.mod h1:g5qyo/la0ALbONm6Vbp88Yd8NsDy6rZz+RcrMPxvld8= +github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.1.0 h1:pRhl55Yx1eC7BZ1N+BBWwnKaMyD8uC+34TLdndZMAKk= +github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.1.0/go.mod h1:XKMd7iuf/RGPSMJ/U4HP0zS2Z9Fh8Ps9a+6X26m/tmI= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0 h1:ad0vkEBuk23VJzZR9nkLVG0YAoN9coASF1GusYX6AlU= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0/go.mod h1:igFoXX2ELCW06bol23DWPB5BEWfZISOzSP5K2sbLea0= github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c h1:6rhixN/i8ZofjG1Y75iExal34USq5p+wiN1tpie8IrU= github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c/go.mod h1:NMPJylDgVpX0MLRlPy15sqSwOFv/U1GZ2m21JhFfek0= github.com/gwos/tcg/sdk v0.0.0-20211223101342-35fbd1ae683c h1:befb5xGUwNCoBuN/akLFCKekUzr0ixyws3aAX/7TaOk= @@ -770,10 +786,8 @@ github.com/harlow/kinesis-consumer v0.3.6-0.20211204214318-c2b9f79d7ab6 h1:38nI+ github.com/harlow/kinesis-consumer v0.3.6-0.20211204214318-c2b9f79d7ab6/go.mod 
h1:hNEr2hL0WPpm4BSILcClbOE/+Tew0JJnqCbTlc6jCUc= github.com/hashicorp/consul/api v1.10.1/go.mod h1:XjsvQN+RJGWI2TWy1/kqaE16HrR2J/FWgkYjdZQsX9M= github.com/hashicorp/consul/api v1.13.0/go.mod h1:ZlVrynguJKcYr54zGaDbaL3fOvKC9m72FhPvA8T35KQ= -github.com/hashicorp/consul/api v1.29.1 h1:UEwOjYJrd3lG1x5w7HxDRMGiAUPrb3f103EoeKuuEcc= -github.com/hashicorp/consul/api v1.29.1/go.mod h1:lumfRkY/coLuqMICkI7Fh3ylMG31mQSRZyef2c5YvJI= -github.com/hashicorp/consul/proto-public v0.6.1 h1:+uzH3olCrksXYWAYHKqK782CtK9scfqH+Unlw3UHhCg= -github.com/hashicorp/consul/proto-public v0.6.1/go.mod h1:cXXbOg74KBNGajC+o8RlA502Esf0R9prcoJgiOX/2Tg= +github.com/hashicorp/consul/api v1.30.0 h1:ArHVMMILb1nQv8vZSGIwwQd2gtc+oSQZ6CalyiyH2XQ= +github.com/hashicorp/consul/api v1.30.0/go.mod h1:B2uGchvaXVW2JhFoS8nqTxMD5PBykr4ebY4JWHTTeLM= github.com/hashicorp/consul/sdk v0.8.0/go.mod h1:GBvyrGALthsZObzUGsfgHZQDXjg4lOjagTIwIR1vPms= github.com/hashicorp/consul/sdk v0.16.1 h1:V8TxTnImoPD5cj0U9Spl0TUxcytjcbbJeADFF07KdHg= github.com/hashicorp/consul/sdk v0.16.1/go.mod h1:fSXvwxB2hmh1FMZCNl6PwX0Q/1wdWtHJcZ7Ea5tns0s= @@ -838,8 +852,8 @@ github.com/hashicorp/memberlist v0.2.2/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOn github.com/hashicorp/memberlist v0.3.0/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= github.com/hashicorp/memberlist v0.5.0 h1:EtYPN8DpAURiapus508I4n9CzHs2W+8NZGbmmR/prTM= github.com/hashicorp/memberlist v0.5.0/go.mod h1:yvyXLpo0QaGE59Y7hDTsTzDD25JYBZ4mHgHUZ8lrOI0= -github.com/hashicorp/nomad/api v0.0.0-20240306004928-3e7191ccb702 h1:fI1LXuBaS1d9z1kmb++Og6YD8uMRwadXorCwE+xgOFA= -github.com/hashicorp/nomad/api v0.0.0-20240306004928-3e7191ccb702/go.mod h1:z71gkJdrkAt/Rl6C7Q79VE7AwJ5lUF+M+fzFTyIHYB0= +github.com/hashicorp/nomad/api v0.0.0-20240717122358-3d93bd3778f3 h1:fgVfQ4AC1avVOnu2cfms8VAiD8lUq3vWI8mTocOXN/w= +github.com/hashicorp/nomad/api v0.0.0-20240717122358-3d93bd3778f3/go.mod h1:svtxn6QnrQ69P23VvIWMR34tg3vmwLz4UdUzm1dSCgE= github.com/hashicorp/serf v0.9.5/go.mod 
h1:UWDWwZeL5cuWDJdl0C6wrvrUwEqtQ4ZKBKKENpqIUyk= github.com/hashicorp/serf v0.9.6/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpTwn9UV4= github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY= @@ -848,8 +862,8 @@ github.com/hashicorp/vault/api v1.0.4/go.mod h1:gDcqh3WGcR1cpF5AJz/B1UFheUEneMoI github.com/hashicorp/vault/sdk v0.1.13/go.mod h1:B+hVj7TpuQY1Y/GPbCpffmgd+tSEwvhkWnjtSYCaS2M= github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= -github.com/hetznercloud/hcloud-go/v2 v2.6.0 h1:RJOA2hHZ7rD1pScA4O1NF6qhkHyUdbbxjHgFNot8928= -github.com/hetznercloud/hcloud-go/v2 v2.6.0/go.mod h1:4J1cSE57+g0WS93IiHLV7ubTHItcp+awzeBp5bM9mfA= +github.com/hetznercloud/hcloud-go/v2 v2.13.1 h1:jq0GP4QaYE5d8xR/Zw17s9qoaESRJMXfGmtD1a/qckQ= +github.com/hetznercloud/hcloud-go/v2 v2.13.1/go.mod h1:dhix40Br3fDiBhwaSG/zgaYOFFddpfBm/6R1Zz0IiF0= github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM= github.com/hexops/gotextdiff v1.0.3/go.mod h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSow5/V2vxeg= github.com/hjson/hjson-go/v4 v4.0.0/go.mod h1:KaYt3bTw3zhBjYqnXkYywcYctk0A2nxeEFTse3rH13E= @@ -887,8 +901,8 @@ github.com/influxdata/wlog v0.0.0-20160411224016-7c63b0a71ef8 h1:W2IgzRCb0L9VzMu github.com/influxdata/wlog v0.0.0-20160411224016-7c63b0a71ef8/go.mod h1:/2NMgWB1DHM1ti/gqhOlg+LJeBVk6FqR5aVGYY0hlwI= github.com/intel/iaevents v1.0.0 h1:J8lETV13FMImV0VbOrKhkA790z7+cAHQ/28gbiefu7E= github.com/intel/iaevents v1.0.0/go.mod h1:nFsAQmrbF6MoZUomrSl4jgmHhe0SrLxTGtyqvqU2X9Y= -github.com/ionos-cloud/sdk-go/v6 v6.1.11 h1:J/uRN4UWO3wCyGOeDdMKv8LWRzKu6UIkLEaes38Kzh8= -github.com/ionos-cloud/sdk-go/v6 v6.1.11/go.mod h1:EzEgRIDxBELvfoa/uBN0kOQaqovLjUWEB7iW4/Q+t4k= +github.com/ionos-cloud/sdk-go/v6 v6.2.1 h1:mxxN+frNVmbFrmmFfXnBC3g2USYJrl6mc1LW2iNYbFY= +github.com/ionos-cloud/sdk-go/v6 
v6.2.1/go.mod h1:SXrO9OGyWjd2rZhAhEpdYN6VUAODzzqRdqA9BCviQtI= github.com/jackc/chunkreader/v2 v2.0.1 h1:i+RDz65UE+mmpjTfyz0MoVTnzeYxroil2G82ki7MGG8= github.com/jackc/chunkreader/v2 v2.0.1/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= github.com/jackc/pgconn v1.14.3 h1:bVoTr12EGANZz66nZPkMInAV/KHD2TxH9npjXXgiB3w= @@ -905,8 +919,8 @@ github.com/jackc/pgtype v1.14.0 h1:y+xUdabmyMkJLyApYuPj38mW+aAIqCe5uuBB51rH3Vw= github.com/jackc/pgtype v1.14.0/go.mod h1:LUMuVrfsFfdKGLw+AFFVv6KtHOFMwRgDDzBt76IqCA4= github.com/jackc/pgx/v4 v4.18.3 h1:dE2/TrEsGX3RBprb3qryqSV9Y60iZN1C6i8IrmW9/BA= github.com/jackc/pgx/v4 v4.18.3/go.mod h1:Ey4Oru5tH5sB6tV7hDmfWFahwF15Eb7DNXlRKx2CkVw= -github.com/jaegertracing/jaeger v1.58.0 h1:aslb9VilVaddzHUA618PUtAaO3GblA7hlyItfwtzAe0= -github.com/jaegertracing/jaeger v1.58.0/go.mod h1:2qpJpm9BzpbxNpaillaCA4pvdAIRTJT0ZRxrzMglBlo= +github.com/jaegertracing/jaeger v1.62.0 h1:YoaJ2e8oVz5sqGGlVAKSUCED8DzJ1q7PojBmZFNKoJA= +github.com/jaegertracing/jaeger v1.62.0/go.mod h1:jhEIHazwyb+a6xlRBi+p96BAvTYTSmGkghcwdQfV7FM= github.com/james4k/rcon v0.0.0-20120923215419-8fbb8268b60a h1:JxcWget6X/VfBMKxPIc28Jel37LGREut2fpV+ObkwJ0= github.com/james4k/rcon v0.0.0-20120923215419-8fbb8268b60a/go.mod h1:1qNVsDcmNQDsAXYfUuF/Z0rtK5eT8x9D6Pi7S3PjXAg= github.com/jarcoal/httpmock v1.3.1 h1:iUx3whfZWVf3jT01hQTO/Eo5sAYtB2/rqaUuOtpInww= @@ -923,8 +937,8 @@ github.com/jcmturner/gokrb5/v8 v8.4.4 h1:x1Sv4HaTpepFkXbt2IkL29DXRf8sOfZXo8eRKh6 github.com/jcmturner/gokrb5/v8 v8.4.4/go.mod h1:1btQEpgT6k+unzCwX1KdWMEwPPkkgBtP+F6aCACiMrs= github.com/jcmturner/rpc/v2 v2.0.3 h1:7FXXj8Ti1IaVFpSAziCZWNzbNuZmnvw/i6CqLNdWfZY= github.com/jcmturner/rpc/v2 v2.0.3/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc= -github.com/jellydator/ttlcache/v3 v3.2.0 h1:6lqVJ8X3ZaUwvzENqPAobDsXNExfUJd61u++uW8a3LE= -github.com/jellydator/ttlcache/v3 v3.2.0/go.mod h1:hi7MGFdMAwZna5n2tuvh63DvFLzVKySzCVW6+0gA2n4= +github.com/jellydator/ttlcache/v3 v3.3.0 h1:BdoC9cE81qXfrxeb9eoJi9dWrdhSuwXMAnHTbnBm4Wc= 
+github.com/jellydator/ttlcache/v3 v3.3.0/go.mod h1:bj2/e0l4jRnQdrnSTaGTsh4GSXvMjQcy41i7th0GVGw= github.com/jhump/protoreflect v1.8.3-0.20210616212123-6cc1efa697ca h1:a0GZUdb+qnutF8shJxr2qs2qT3fnF+ptxTxPB8+oIvk= github.com/jhump/protoreflect v1.8.3-0.20210616212123-6cc1efa697ca/go.mod h1:7GcYQDdMU/O/BBrl/cX6PNHpXh6cenjd8pneu5yW7Tg= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= @@ -932,6 +946,8 @@ github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHW github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= +github.com/jonboulle/clockwork v0.4.0 h1:p4Cf1aMWXnXAUh8lVfewRBx1zaTSYKrKMF2g3ST4RZ4= +github.com/jonboulle/clockwork v0.4.0/go.mod h1:xgRqUGwRcjKCO1vbZUEtSLrqKoPSsUpK7fnezOII0kc= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA= @@ -959,15 +975,15 @@ github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+o github.com/klauspost/asmfmt v1.3.2 h1:4Ri7ox3EwapiOjCki+hw14RyKk201CN4rzyCJRFLpK4= github.com/klauspost/asmfmt v1.3.2/go.mod h1:AG8TuvYojzulgDAMCnYn50l/5QV3Bs/tp6j0HLHbNSE= github.com/klauspost/compress v1.14.4/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA= -github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= +github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc= +github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0= 
github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.2.7 h1:ZWSB3igEs+d0qvnxR/ZBzXVmxkgt8DdzP6m9pfuVLDM= github.com/klauspost/cpuid/v2 v2.2.7/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= github.com/knadh/koanf v1.5.0 h1:q2TSd/3Pyc/5yP9ldIrSdIz26MCcyNQzW0pEAugLPNs= github.com/knadh/koanf v1.5.0/go.mod h1:Hgyjp4y8v44hpZtPzs7JZfRAW5AhN7KfZcwv1RYggDs= -github.com/knadh/koanf/v2 v2.1.1 h1:/R8eXqasSTsmDCsAyYj+81Wteg8AqrV9CP6gvsTsOmM= -github.com/knadh/koanf/v2 v2.1.1/go.mod h1:4mnTRbZCK+ALuBXHZMjDfG9y714L7TykVnZkXbMU3Es= +github.com/knadh/koanf/v2 v2.1.2 h1:I2rtLRqXRy1p01m/utEtpZSSA6dcJbgGVuE27kW2PzQ= +github.com/knadh/koanf/v2 v2.1.2/go.mod h1:Gphfaen0q1Fc1HTgJgSTC4oRX9R2R5ErYMZJy8fLJBo= github.com/knz/go-libedit v1.10.1/go.mod h1:MZTVkCWyz0oBc7JOWP3wNAzd002ZbM/5hgShxwh4x8M= github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b h1:udzkj9S/zlT5X367kqJis0QP7YMxobob6zhzq6Yre00= github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b/go.mod h1:pcaDhQK0/NJZEvtCO0qQPPropqV0sJOJ6YW7X+9kRwM= @@ -988,19 +1004,21 @@ github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0 github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/leesper/go_rng v0.0.0-20190531154944-a612b043e353 h1:X/79QL0b4YJVO5+OsPH9rF2u428CIrGL/jLmPsoOQQ4= github.com/leesper/go_rng v0.0.0-20190531154944-a612b043e353/go.mod h1:N0SVk0uhy+E1PZ3C9ctsPRlvOPAFPkCNlcPBDkt0N3U= -github.com/leodido/go-syslog/v4 v4.1.0 h1:Wsl194qyWXr7V6DrGWC3xmxA9Ra6XgWO+toNt2fmCaI= -github.com/leodido/go-syslog/v4 v4.1.0/go.mod h1:eJ8rUfDN5OS6dOkCOBYlg2a+hbAg6pJa99QXXgMrd98= +github.com/leodido/go-syslog/v4 v4.2.0 h1:A7vpbYxsO4e2E8udaurkLlxP5LDpDbmPMsGnuhb7jVk= +github.com/leodido/go-syslog/v4 v4.2.0/go.mod h1:eJ8rUfDN5OS6dOkCOBYlg2a+hbAg6pJa99QXXgMrd98= github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ= github.com/leodido/go-urn v1.4.0/go.mod 
h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI= github.com/leodido/ragel-machinery v0.0.0-20190525184631-5f46317e436b h1:11UHH39z1RhZ5dc4y4r/4koJo6IYFgTRMe/LlwRTEw0= github.com/leodido/ragel-machinery v0.0.0-20190525184631-5f46317e436b/go.mod h1:WZxr2/6a/Ar9bMDc2rN/LJrE/hF6bXE4LPyDSIxwAfg= github.com/lightstep/go-expohisto v1.0.0 h1:UPtTS1rGdtehbbAF7o/dhkWLTDI73UifG8LbfQI7cA4= github.com/lightstep/go-expohisto v1.0.0/go.mod h1:xDXD0++Mu2FOaItXtdDfksfgxfV0z1TMPa+e/EUd0cs= -github.com/linode/linodego v1.33.0 h1:cX2FYry7r6CA1ujBMsdqiM4VhvIQtnWsOuVblzfBhCw= -github.com/linode/linodego v1.33.0/go.mod h1:dSJJgIwqZCF5wnpuC6w5cyIbRtcexAm7uVvuJopGB40= +github.com/linode/linodego v1.41.0 h1:GcP7JIBr9iLRJ9FwAtb9/WCT1DuPJS/xUApapfdjtiY= +github.com/linode/linodego v1.41.0/go.mod h1:Ow4/XZ0yvWBzt3iAHwchvhSx30AyLintsSMvvQ2/SJY= github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c h1:VtwQ41oftZwlMnOEbMWQtSEUgU64U4s+GHk7hZK+jtY= github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c/go.mod h1:JKx41uQRwqlTZabZc+kILPrO/3jlKnQ2Z8b7YiVw5cE= +github.com/magefile/mage v1.15.0 h1:BvGheCMAsG3bWUDbZ8AyXXpCNwU9u5CB6sM+HNb9HYg= +github.com/magefile/mage v1.15.0/go.mod h1:z5UZb/iS3GoOSn0JgWuiw7dxlurVYTu+/jHXqQg881A= github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= @@ -1040,8 +1058,8 @@ github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3N github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= github.com/miekg/dns v1.1.43/go.mod h1:+evo5L0630/F6ca/Z9+GAqzhjGyn8/c+TBaOyfEl0V4= 
-github.com/miekg/dns v1.1.58 h1:ca2Hdkz+cDg/7eNF6V56jjzuZ4aCAE+DbVkILdQWG/4= -github.com/miekg/dns v1.1.58/go.mod h1:Ypv+3b/KadlvW9vJfXOTf300O4UqaHFzFCuHz+rPkBY= +github.com/miekg/dns v1.1.62 h1:cN8OuEF1/x5Rq6Np+h1epln8OiyPWV+lROx9LxcGgIQ= +github.com/miekg/dns v1.1.62/go.mod h1:mvDlcItzm+br7MToIKqkglaGhlFMHJ9DTNNWONWXbNQ= github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8 h1:AMFGa4R4MiIpspGNG7Z948v4n35fFGB3RR3G/ry4FWs= github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8/go.mod h1:mC1jAcsrzbxHt8iiaC+zU4b1ylILSosueou12R++wfY= github.com/minio/c2goasm v0.0.0-20190812172519-36a3d3bbc4f3 h1:+n/aFZefKZp7spd8DFdX7uMikMLXX4oubIzJF4kv/wI= @@ -1085,6 +1103,8 @@ github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5 github.com/moby/sys/sequential v0.5.0/go.mod h1:tH2cOOs5V9MlPiXcQzRC+eEyab644PWKGRYaaV5ZZlo= github.com/moby/sys/user v0.3.0 h1:9ni5DlcW5an3SvRSx4MouotOygvzaXbaSrc/wGDFWPo= github.com/moby/sys/user v0.3.0/go.mod h1:bG+tYYYJgaMtRKgEmuueC0hJEAZWwtIbZTB+85uoHjs= +github.com/moby/sys/userns v0.1.0 h1:tVLXkFOxVu9A64/yh59slHVv9ahO9UIev4JZusOLG/g= +github.com/moby/sys/userns v0.1.0/go.mod h1:IHUYgu/kao6N8YZlp9Cf444ySSvCmDlmzUcYfDHOl28= github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -1152,109 +1172,111 @@ github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+ github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.16.2/go.mod h1:CObGmKUOKaSC0RjmoAK7tKyn4Azo5P2IWuoMnvwxz1E= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= -github.com/onsi/ginkgo/v2 v2.15.0 h1:79HwNRBAZHOEwrczrgSOPy+eFTTlIGELKy5as+ClttY= -github.com/onsi/ginkgo/v2 v2.15.0/go.mod 
h1:HlxMHtYF57y6Dpf+mc5529KKmSq9h2FpCF+/ZkwUxKM= +github.com/onsi/ginkgo/v2 v2.19.0 h1:9Cnnf7UHo57Hy3k6/m5k3dRfGTMXGvxhHFvkDTCTpvA= +github.com/onsi/ginkgo/v2 v2.19.0/go.mod h1:rlwLi9PilAFJ8jCg9UE1QP6VBpd6/xj3SRC0d6TU0To= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.13.0/go.mod h1:lRk9szgn8TxENtWd0Tp4c3wjlRfMTMH27I+3Je41yGY= -github.com/onsi/gomega v1.31.0 h1:54UJxxj6cPInHS3a35wm6BK/F9nHYueZ1NVujHDrnXE= -github.com/onsi/gomega v1.31.0/go.mod h1:DW9aCi7U6Yi40wNVAvT6kzFnEVEI5n3DloYBiKiT6zk= +github.com/onsi/gomega v1.27.10 h1:naR28SdDFlqrG6kScpT8VWpu1xWY5nJRCF3XaYyBjhI= +github.com/onsi/gomega v1.27.10/go.mod h1:RsS8tutOdbdgzbPtzzATp12yT7kM5I5aElG3evPbQ0M= github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= -github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter v0.103.0 h1:N4+Kxr4WZ4HNuU334NaqAAjngG/IRkSTGCl9c5H+QY0= -github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter v0.103.0/go.mod h1:3rtBpjlTpg3s+bXPNM/7o7IQZQYtwytrz9PEF+ISz8E= -github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter v0.103.0 h1:blcAZWoZ9vqDvr1pT/Q5RfYYNOcOd71oaKFI2m4P4Hc= -github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter v0.103.0/go.mod h1:FXy3BBa2RwAVHa8M5pTX6vFLouFAh3Ly9bAvnTAp0nk= -github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension v0.103.0 h1:TV8mINcQEHPk0J2Kd7XtfAfRliG+3s6ZMh7y2Lcqpz8= 
-github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension v0.103.0/go.mod h1:sH1bocIM8yLuCVVla8O5ks4zP4Dkqc5opUVAGHlYTRg= -github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/ecsobserver v0.103.0 h1:lPWHqPCvbxKgUTt3S3+iOyOmXmVbyAGnTsxwaMsrZ/o= -github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/ecsobserver v0.103.0/go.mod h1:/z9fz5nO64sd85uaT4HGkGjvDjXXVIBM6pd6Tu2z3yY= -github.com/open-telemetry/opentelemetry-collector-contrib/extension/pprofextension v0.103.0 h1:gVV1vTJwKddA4hTjtgCbrEzk2kqwzf06jpNhDx6LM1M= -github.com/open-telemetry/opentelemetry-collector-contrib/extension/pprofextension v0.103.0/go.mod h1:bTvEM8TGaBfTKk9bCBOwmLgIU6yn/eaYhF6aMxFABBw= -github.com/open-telemetry/opentelemetry-collector-contrib/extension/sigv4authextension v0.103.0 h1:IxdgFhb+aksBh6C/o43Xg+avMKFh7l8lD9bTnt7Sl3M= -github.com/open-telemetry/opentelemetry-collector-contrib/extension/sigv4authextension v0.103.0/go.mod h1:z0RN7piB4aQju5UCqqU0h5Xm73K5fwCDrNfpohEaR7U= -github.com/open-telemetry/opentelemetry-collector-contrib/extension/storage v0.103.0 h1:txNSkgEEmDyrasuGO9egQ+58i+7P/mJKdHmSHg0sO4c= -github.com/open-telemetry/opentelemetry-collector-contrib/extension/storage v0.103.0/go.mod h1:XtRqfouM/9owDyCIi2TksmSc1sfLGmLRiFhyQ7KFS6w= -github.com/open-telemetry/opentelemetry-collector-contrib/extension/storage/filestorage v0.103.0 h1:iEreRkHc4UI6cKeTMqFKhCusa7q8BnmJcHFNmJFaBwk= -github.com/open-telemetry/opentelemetry-collector-contrib/extension/storage/filestorage v0.103.0/go.mod h1:piJxxco+5cKjsAEWv5hh9YLK4FZbajXXNW5hTxptEL4= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/ecsutil v0.103.0 h1:hdSxvwF/v3XJMWvYGlBrhewIW5zsbNLmwHeEJdTBJuU= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/ecsutil v0.103.0/go.mod h1:oGXp5qTdFmHgnm+J8v9wyjRAozZg11M8puiOZa0d+W8= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/metrics v0.103.0 
h1:zsfP29smagu0yh4nan/xq2WKYOegjNvSrbj6mTErnaA= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/metrics v0.103.0/go.mod h1:8IP4A6OLAjEqKAl6zuwzFSUI+iH8eCkpkV2aRyd+guk= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.103.0 h1:olbjLOuCgVCiRNzdg0I9Lv7VuUHFbEuqP31fdriyqi0= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.103.0/go.mod h1:2Zksmb1fF+4ksNsOxSVrHz5IoC/2ljvtqJGzy7qe8+E= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter v0.103.0 h1:HIhw3poxC0r5NyOOvO5m+yCnXWBaOvRfkq3XGs+Ag58= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter v0.103.0/go.mod h1:5Y5wUGq+NNIgvkj3qNSsS3QJx3KiU4DfUFbMrlMlPsQ= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8stest v0.103.0 h1:vSHoHLveqT9NULaIdIbfPgEvJ0e+UkBitb3dnawmunc= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8stest v0.103.0/go.mod h1:hCzgpzXbUUDUlETMDKYOpPaOhPjR9T6M3W3nAW5cGX4= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/kafka v0.103.0 h1:gksMpwy9oZN14E4kRBfLtVkE8YJn++7woM1b3hQboJg= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/kafka v0.103.0/go.mod h1:OLpJzn4LgzICAnyu+7mJ33rM2WOgPWpjXvZ6YKrJ6yM= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/pdatautil v0.103.0 h1:5PHQ2a5VsqXDZBmso84fLDzT66GV0mqDT5JCB07W8Ws= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/pdatautil v0.103.0/go.mod h1:oU1oils/dQVq3E3h7e3afFEXfCSmwniEK/NlEByaJWY= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/batchpersignal v0.103.0 h1:JjHxUTE7cbrqddMLueLhVcWbxxr9INLf5sIsRBcuPUE= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/batchpersignal v0.103.0/go.mod h1:DQUw8EmCHCqTIBfHo5Fe7MyYLXSWdInDrKkINhOMuPk= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/golden v0.103.0 h1:GMx3/P287NuWOJH22eLCtaJ1ATW8BoOZL6GjINUAVYI= 
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/golden v0.103.0/go.mod h1:xGM9aD/2Dww/WKg5ei7a43TRSWuVwgMO9NPjPbgtmT8= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl v0.103.0 h1:angDCU0CPBLu2fAi7r7yCEWxRKthhNnYnMugdiULmvw= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl v0.103.0/go.mod h1:N80rYgsjtq+G/DJQVCLCkxmpCIA/mnQTRUJxa8mMNsw= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.103.0 h1:XYkQCe4UPFy7qmBs1zhknhe1wzPnAff4Rb4WtnD8aAQ= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.103.0/go.mod h1:hmeKPJaZjzOjcndDxpWnjt0781EMqvj3or01baNVoRI= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.103.0 h1:1cZyMMLSpSWFdfITyVc9Bb+8rn175/GGwtWZQ3nClpc= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.103.0/go.mod h1:o8BPP4DM2SkdkPJxJOdmgxKz5DftGcuyUXgqf5MoWAw= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/sampling v0.103.0 h1:L1DCWusakqlxHC/5yfAfq4c5og1kFdJKV0jcw7FDdoE= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/sampling v0.103.0/go.mod h1:Ok6bUxGfoGMUBvO0Lwgp3xsHHKqe0TMq4juIl7X3Oxo= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/azure v0.103.0 h1:GiP0syg12+MrI5IpL8Qt+rQktWDMsP0/8Nu9qmMtscw= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/azure v0.103.0/go.mod h1:akYmkj+fLA32/LkQTJM9KIqaOvtsyjLV1NZkFU1E6w8= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.103.0 h1:diJ27fBrfu/oOW0bv8q3BWbVmjNLMBJF2RgzSq6KRgI= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.103.0/go.mod h1:u8Ell52AGRzNA5RjfaunzYQWBb+7tKb3ZlAQJyQzNXM= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheusremotewrite v0.103.0 h1:MquBpfLZRaGnQeeIF3Kn+bQNXRLZtKTWGEysbNnxcRo= 
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheusremotewrite v0.103.0/go.mod h1:+3rCl7Z3Zm4ZZOyX9lMmaAoN8NeeCXUOmR/tysR631g= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin v0.103.0 h1:2Fxuu4RbroXQFpUB/ZuZXyUb4QoL8XgOe7KlkK3TxHU= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin v0.103.0/go.mod h1:o3ju9lTtG+PnSjffls/sEGvgfYMG1jZZeT1h0rzI+T4= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/attributesprocessor v0.103.0 h1:c5sOZWBMD/gyKxqwGgS6cwLdvkg6W8I3/0RZOm84xA0= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/attributesprocessor v0.103.0/go.mod h1:2CG+06d3sh+O36maAs2TPAbNRXZ0zcqY2kH5pEABrzg= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/cumulativetodeltaprocessor v0.103.0 h1:xerHvPmP765eM4U5eaSvfRpXiV3ioCJjLR5ydi6BgVs= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/cumulativetodeltaprocessor v0.103.0/go.mod h1:5yUxKVs+fx/MtDkGKlJWOTnOZIgOYdE+Zpya3mAdaQw= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatorateprocessor v0.103.0 h1:ZFyDkChccGaT717u7CnMaMOPERr0d2tlJvOrSlDGNbM= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatorateprocessor v0.103.0/go.mod h1:cElQF63Syrv/yugPAe5ho/BofKJoy0YSLuH8JYEQEvc= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor v0.103.0 h1:u2ZcBCuOXpD7FIm+LZrhEMhi/Ny/mhivAfuCUpPzrUg= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor v0.103.0/go.mod h1:9n3OvWN10XXyg+o71jeuO94u8mEWOI90Vg8y3M7zpE0= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/groupbytraceprocessor v0.103.0 h1:Y8sviHpNWaWnCZe1d/1myw5hvFnfoa+PFQCyNvGt7Sc= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/groupbytraceprocessor v0.103.0/go.mod h1:fcpYaH7c0/yT5dg2ACJdqv7w/sT/MB9azoB9kApYzJg= 
-github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sattributesprocessor v0.103.0 h1:v19lfgrsflmimm0LGOZoB51eNO0305Yud6LCnhS7LPY= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sattributesprocessor v0.103.0/go.mod h1:Z8OvUd7UJJpyXT2O5ma8QT42NfkP/7Q0l1oxStJ/5MY= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/metricsgenerationprocessor v0.103.0 h1:nGxoV0yVFNJOS7jGRAdkSHEUBONHp7Sd8kbP/WP64go= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/metricsgenerationprocessor v0.103.0/go.mod h1:T1+fAkPFvDPsOxmyrNSm8t5yAsvM6S1k3Fh1N2WiWas= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/metricstransformprocessor v0.103.0 h1:2IanPrPrTN3YYbxGSiF8axwE0+C+bJ9rKO2hvuG9uf8= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/metricstransformprocessor v0.103.0/go.mod h1:AH3r6xo/87nV1QNMDNamuFYIMwEfSLma06KdjEzxRdU= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor v0.103.0 h1:70OJ27PhAZ7R846ppiimDFqIzOzdwjyG8+lT/+yzWIE= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor v0.103.0/go.mod h1:3WvuN4AKL9K70la+X2y0d+kKLrydOfDp5y1WgUyZSgE= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourceprocessor v0.103.0 h1:Gg2j3yN2iCQYwK+464RyVvmX+QyjjdXL3JpG+6BfDAQ= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourceprocessor v0.103.0/go.mod h1:4nxV+BIA4LLyjhFeuUuLlY7iza+PyySjKz4PfL44SRQ= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/spanprocessor v0.103.0 h1:DPhJng0ihuSPKxfBxU5eb1Af4Poc8GhrLTWlM+VA8w8= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/spanprocessor v0.103.0/go.mod h1:26cGSuPjOkGmHT9n54eHLRwKl9vU/Ave0UlWo13Pqfs= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor v0.103.0 h1:5VFkBkdrGkxBmIL1bpjVHpReGD9Piu1LdH9oJagSnLU= 
-github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor v0.103.0/go.mod h1:rSWs+3K3m8AnBBOpl8mu8tY+8ieey/93ZPLiYIAiy5k= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor v0.103.0 h1:+JRtnmcLjvG0BESXWCQ9PECtTisWtnIT1/DutAdr38U= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor v0.103.0/go.mod h1:0mYHrHaJZiWfEcYxJCMTs7AlXmnIsC3cDNbSq5e255Q= -github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awsecscontainermetricsreceiver v0.103.0 h1:InzQu+unKUDU/vMkDogPiyN4U/ihdqwxk+5hH0Pwy+M= -github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awsecscontainermetricsreceiver v0.103.0/go.mod h1:lIiRC8VVNxW/ibQcu0UnrV4fg6DWUjoaL2M2PWYhEng= -github.com/open-telemetry/opentelemetry-collector-contrib/receiver/filelogreceiver v0.103.0 h1:QUsI2cTLs4CLk/TBll1llkzzchmqr2TRANdgMy0Nz/k= -github.com/open-telemetry/opentelemetry-collector-contrib/receiver/filelogreceiver v0.103.0/go.mod h1:cdJpaBiLfV16LteiKj8iP7snJ6alJy1dVa7FTcC915c= -github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver v0.103.0 h1:Hblr03Vde7jlnAsLSayhq1VG+gpTfVJNVvZqnx0fwDY= -github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver v0.103.0/go.mod h1:kY7NC2dfScT9zmss53yuhZNkv7XQGl46yXxTr+xEYPo= -github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver v0.103.0 h1:/BabP7nnV4cyI1JZNQn5zzCrJymzPpCjCMnCXex0/kU= -github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver v0.103.0/go.mod h1:TGJchv8xra/30Kz8oaOgvF1XyARtOSYqWowAJ5NxQyI= -github.com/open-telemetry/opentelemetry-collector-contrib/receiver/statsdreceiver v0.103.0 h1:OQKqCw+kH0uBF4bT+eRwciTKdFcOsa/5zWZr9qHgzkI= -github.com/open-telemetry/opentelemetry-collector-contrib/receiver/statsdreceiver v0.103.0/go.mod h1:Abr+XCGgD/kZLgA50UBg0vzntqFSeCAzfjtANH02018= 
-github.com/open-telemetry/opentelemetry-collector-contrib/receiver/tcplogreceiver v0.103.0 h1:cFvf9IuS10isVLVtQWcXdMVe51btEL1SIHA+RYok5yw= -github.com/open-telemetry/opentelemetry-collector-contrib/receiver/tcplogreceiver v0.103.0/go.mod h1:uU1jJXA3/AIxn+yuBIJ+X8RI2wAp3cH25wiRSYY45cM= -github.com/open-telemetry/opentelemetry-collector-contrib/receiver/udplogreceiver v0.103.0 h1:aMe9gL0XnYcrCp83Yt/87vQc+GqSdLOfS+B2AZUcYQo= -github.com/open-telemetry/opentelemetry-collector-contrib/receiver/udplogreceiver v0.103.0/go.mod h1:XZRd7kTx2Yc+4bfvGjiLwYiYluFrMPRDiqgsCCoPOZ8= -github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver v0.103.0 h1:AvfCPJtrzfAmoF58vTA3pehns51NUEOmulPxrYALroY= -github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver v0.103.0/go.mod h1:WzOea7GxCLC23+HBXHoYwahGC+YJsu066pV4MzUL0fg= +github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter v0.115.0 h1:ERZZn6Z3/nQVyzIWCSfGSH8im5v+NS3eOjc+F24ljvs= +github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter v0.115.0/go.mod h1:PetcN/RbgdGEXdKy543/rwr/Vw1grrWJW1Fnf9szf6U= +github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter v0.115.0 h1:u7Ht+E1ghQESffcjyaxWrXGsfSWa1VE9LKC4f2PPx84= +github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter v0.115.0/go.mod h1:r3iS2mDYu+cnGjgNc8TgvuUUAN6A6/1BvR1e1YJBrqM= +github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension v0.115.0 h1:51D/x3xIAnWgVrY0lgdU+b+yb2aWd72uDqu9GhjRcNI= +github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension v0.115.0/go.mod h1:nLau1YUdjhtLrk4jXLPb2l9riQ1Ap4xytTLl7MBedBg= +github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/ecsobserver v0.115.0 h1:BtYrSkQSYGJufsmbqqrpzb+BJXH2S4CKL14i1bxOFCU= 
+github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/ecsobserver v0.115.0/go.mod h1:4LQ1S3eBu+MyCNaCkBk0hIoAhvJJS851i/tY45FtDf4= +github.com/open-telemetry/opentelemetry-collector-contrib/extension/pprofextension v0.115.0 h1:HVGG31WeB6Fn2+il2/ycWj9tDP0fxOeOqD1rKCjsBSc= +github.com/open-telemetry/opentelemetry-collector-contrib/extension/pprofextension v0.115.0/go.mod h1:2hYojHs5daPVWECuZsPViKwty0ojuHUEmk8GEuaFqO0= +github.com/open-telemetry/opentelemetry-collector-contrib/extension/sigv4authextension v0.115.0 h1:pZcKgTxza29XcYkfT4fwIRhg+ASz7kY64R3CVCohqIQ= +github.com/open-telemetry/opentelemetry-collector-contrib/extension/sigv4authextension v0.115.0/go.mod h1:tAZJhSdNKRnR4kVXnoVc3IlOl2RuE8tK+uv60ez5Q1E= +github.com/open-telemetry/opentelemetry-collector-contrib/extension/storage v0.115.0 h1:4Ycg73pYVdiF+oq+BmUq7Dkg0WKeKvBSk9AOKvBe4LU= +github.com/open-telemetry/opentelemetry-collector-contrib/extension/storage v0.115.0/go.mod h1:l2Q+MmYk2ZRDSbhX9GlJYvBXC51AqhDJAj2ne290Xik= +github.com/open-telemetry/opentelemetry-collector-contrib/extension/storage/filestorage v0.115.0 h1:Jh3XgGs4YBz0zCj6HU49gspyAjJUHf5DVCQTyw69FDw= +github.com/open-telemetry/opentelemetry-collector-contrib/extension/storage/filestorage v0.115.0/go.mod h1:biiJzDxPevfbrnGaTZOU2I0f1zT3DWUGkpXdH/+uQ8k= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/ecsutil v0.115.0 h1:SF3gOOEkfntE3zEhY80yO7BVQ5CkaK8ecic2U2AZPHE= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/ecsutil v0.115.0/go.mod h1:jeBzX5m8O9X0LQxiryV9sJUIrn+QAwOnCBE2wZWIltQ= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/metrics v0.115.0 h1:FjVyTJm0UQBMhWeuYOmK3BJtf93AbDKRScKGZ9Fk9ek= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/metrics v0.115.0/go.mod h1:G56rS4nL0VypkD7a94UaQmIjO5t0kffVcjbhpvSogww= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.115.0 
h1:vRQQFD4YpasQFUAdF030UWtaflSYFXK542bfWMGhOK0= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.115.0/go.mod h1:BZ7DT+0VkKR7P3I9PGEDfVa0GdB0ty41eEcejIUXF9A= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter v0.115.0 h1:h6zEsBtuZalQu7lKYf6ZCcj8fTocT+zxdmuOou9515Q= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter v0.115.0/go.mod h1:6QU/K0dGCGYorkOvJmhbDFCspy4RPxRkFjf9I64y6I0= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8stest v0.115.0 h1:vXDJE8YHfAoYIAlPRtODchlqb6lWnGhJxPaT2ljvN7I= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8stest v0.115.0/go.mod h1:f3IgMFHIjEUEI/I+5e3KWMPq9h2PSMy9WovmvPdmlb0= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/kafka v0.115.0 h1:yXfwbXOAOEjaICnOb2raWD17jy9fwTXNoblS+3dBYZc= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/kafka v0.115.0/go.mod h1:5kMXT8uFeGzsIIocU1PjfzYwakbCbQquiferXmkw38g= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/pdatautil v0.115.0 h1:8A+iBT5G23zvBPqYx32Qh4800jHFo4X9T1fpQKVQ+4E= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/pdatautil v0.115.0/go.mod h1:AhdPvwYKu7G8LKRWzHTNQYBq27RinsMm5qSanwSA/rU= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/batchpersignal v0.115.0 h1:5mo0RdP0Hs3/QLoTwf9ilHc8x0CsfdQzGzMoGdDxiSQ= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/batchpersignal v0.115.0/go.mod h1:iYgX5DFrPGvWSmKy4jOUanlsS3Rf/pV5vdX0jNFqs4s= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/golden v0.115.0 h1:Z9p78zj9Qblw472mGkPieuX7mqduAp47rzMbFfq5evI= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/golden v0.115.0/go.mod h1:mtxUxJEIQy27MaGR1yzcn/OK8NoddEgb7fumpEbKYss= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/kafka/topic v0.115.0 h1:7wjWEkvtP1oU2TNZzjo+5sY3GRvwhMhOJ9zFAeGGgo8= 
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/kafka/topic v0.115.0/go.mod h1:1q/L2R/28emNCz0EHfxEw853I6lPxTcHTqS+UrMea0k= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl v0.115.0 h1:qdZ9EqmdM19pWhPoFA7VivBTdzP2HvNwXa3CCMHYoDQ= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl v0.115.0/go.mod h1:mrL1MNrcg0zYAJ+aK9WtOH062dl2wN9DDG7mZk9H8v4= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.115.0 h1:MerLKMrkM4YoGF6Di0D9yMXO02yCX8mrZAi/+jJVVeI= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.115.0/go.mod h1:R8AkVWe9G5Q0oMOapvm9HNS076E3Min8SVlmhBL3QD0= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.115.0 h1:WEqcnWSy9dNSlGb8pYRBX7zhaz2ReyaeImlenbzNTB4= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.115.0/go.mod h1:6Mk71CakHUA3I6oM9hARDiyQypYyOolvb+4PFYyVEFg= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/sampling v0.115.0 h1:R9MRrO+dSkAHBQLZjuwjv2RHXHQqF2Wtm1Ki0VKD5cs= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/sampling v0.115.0/go.mod h1:rKXLXmwdUVcUHwTilroKSejbg3KSwLeYzNPSpkIEnv4= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/azure v0.115.0 h1:PmYiiOKopMdXl/WA0SxBEZFTiJVBuIpZrmRi4xvDzHI= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/azure v0.115.0/go.mod h1:qBPBqhO1kCBCCu05bTbWCxAUNDqzPn6oALLIrQyHmZE= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.115.0 h1:rrIm0dyEdaHmQo6udPK1V3opkzEKa0PrZzSdY5oGqmQ= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.115.0/go.mod h1:AMeisxL/9gs0bzozaymUqI1/EJ9GPvtnLh/BtqtjSF8= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheusremotewrite v0.115.0 h1:ioGiKiO0WqT3PxkzanuJsPVA24FItH6nTJeDeSMFpYA= 
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheusremotewrite v0.115.0/go.mod h1:x1W4J+pzK/Bi9jjYBYESTsPq0nRJJLZoN7cPNd0vYSU= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin v0.115.0 h1:A9zqBtUJZ5J/0VI+B1dxuQhc2iVYpD9c54SgaKtFIN8= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin v0.115.0/go.mod h1:hG7GOrBBux/cg1fAUzvSlzYY02ekxjF9IvH4ls/nGXA= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/attributesprocessor v0.115.0 h1:hAsK9I081ShnSDSKPVEHB3TLawyOmbR6bPDiQEkgo2Y= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/attributesprocessor v0.115.0/go.mod h1:z8XdvlhXSYVboxS3TPGembE9kfxLAYH2PxPLMvf8wTk= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/cumulativetodeltaprocessor v0.115.0 h1:t3BGnPpmeuxW51vISSu51PrAs49ACBCa1Yl1NfZGE5Y= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/cumulativetodeltaprocessor v0.115.0/go.mod h1:jQLYyroEYEV1kWJApmGBgVuGUd73v+Q6EUJ6Wy7N508= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatorateprocessor v0.115.0 h1:X6rEs7IxDpcDDBOCmkA3xHmc373UxHchH7BykK3Ao+o= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatorateprocessor v0.115.0/go.mod h1:fmLLh7jL0uK/t8op9TieOz7pwxItl4hdFo2fX7U0Etg= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor v0.115.0 h1:ficXJmB6l6kfiu+R6CmggtnlQWMHUNzu2csDYA4CFSs= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor v0.115.0/go.mod h1:ykraxSeEVCuA43oqlMWnex78+vNQ+1dBTJUeInkqIpA= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/groupbytraceprocessor v0.115.0 h1:QlisfzozR6a7wF+iIIjJC9YG1/btqrJANhrwUc3JiWI= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/groupbytraceprocessor v0.115.0/go.mod h1:2DNBNDZbbk0rp3VDp1QcOfc5etsM1DnpFb7011rr2gg= 
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sattributesprocessor v0.115.0 h1:6RGhDlZkekmp12EvK6JV9fiIwrdZBOJID6/Ts9tXzL4= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sattributesprocessor v0.115.0/go.mod h1:qZRQtGr/DAjuBqAuKJMN2cWvc9RI94lB0Oq8UyGAduo= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/metricsgenerationprocessor v0.115.0 h1:XUstEF2/Rrx4aWwyke0QbXxyMsD/dN3Ej8sxxz3bE1g= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/metricsgenerationprocessor v0.115.0/go.mod h1:JnloihIXG9SsX8ITiGsNqviZvZTi2WBO23NVtdrVrks= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/metricstransformprocessor v0.115.0 h1:4qQKdEV5b2Mr6U1FYdOHY5ewVnNyNok/unHXPznwYXA= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/metricstransformprocessor v0.115.0/go.mod h1:FF5YKDPWjQp/D7jLSgs5fl1iAJ51gZ8eegR1yfieGRM= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor v0.115.0 h1:vwZQ7k8oqlK0bdZYTsjP/59zjQQfjSD4fNsWIWsTu2w= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor v0.115.0/go.mod h1:5ObSa9amrbzbYTdAK1Qhv3D/YqCxxnQhP0sk2eWB7Oo= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourceprocessor v0.115.0 h1:KbfjEsr2d/5TGWHvcaBC3lOpYAnquEraLXcis4IamAs= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourceprocessor v0.115.0/go.mod h1:fmtZPK5RIz+2Lcm9xQZuwiM+M8/juSSeJufSxUT+J9w= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/spanprocessor v0.115.0 h1:iT04H54PeygRCV/pUe3Rndai2Uf3LyiXsjueeEaq3vU= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/spanprocessor v0.115.0/go.mod h1:9mCy+uOqybQlQSuR+KGxWFgdk+/YzHvHMz/k/ph1ncE= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor v0.115.0 h1:olyiml73slGYORDjZNViW3nKiysC+K+h5yPsSBjUxQ4= 
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor v0.115.0/go.mod h1:N00k1mTxzfS2clqxSP4Dxk7iX8GWbbuCq6LF8/ECk/M= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor v0.115.0 h1:sLRTfXUFiqJ5Qe/NN5MUJxTaFt46E0Y/xjSY+KesCQc= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor v0.115.0/go.mod h1:361IqXD4jnfs6G+Yn7978uv1UNozhZo4yBYy4p6Nqzc= +github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awsecscontainermetricsreceiver v0.115.0 h1:9UHE7iTYDxUjKVd56bPqeMcci8T8M12RuAZZpijkQu0= +github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awsecscontainermetricsreceiver v0.115.0/go.mod h1:LDlwPxalBANVRNnAqHJfIp0XQ8TrQC9tuTKcYAl1Ns4= +github.com/open-telemetry/opentelemetry-collector-contrib/receiver/filelogreceiver v0.115.0 h1:JSFnfWwlVGLul8p9DE6Sk6E0zaqCvbys7CqvJQD4MIs= +github.com/open-telemetry/opentelemetry-collector-contrib/receiver/filelogreceiver v0.115.0/go.mod h1:cw0qzwXzKKxM7QyDcNSp9OSDLySVXyaSrgdqWPqlDk8= +github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver v0.115.0 h1:hYNlyUj3F43cuv1ap19NlEEchQfs91vYeNoQ1+nswLo= +github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver v0.115.0/go.mod h1:1o6wF5HJdpb2hd2eGMoQhGuTKb4F2+j/IHBJJSPdM2w= +github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver v0.115.0 h1:satJaDP5SjwFKLPa5UxBg3MIUJQ2NQ+4bTO88n0SW0g= +github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver v0.115.0/go.mod h1:1NZE+dWHRux5kN0fI1w57AbHzqL4xVniD0X6HPeFwHU= +github.com/open-telemetry/opentelemetry-collector-contrib/receiver/statsdreceiver v0.115.0 h1:zBzHPDgDHLmFyJzjiBq7Z7IwbcHiotmnBJNP29Hxm1Q= +github.com/open-telemetry/opentelemetry-collector-contrib/receiver/statsdreceiver v0.115.0/go.mod h1:Utdu9cbK7qZ1cDsvBCttJyoEe12fx62GbzCB6/hl4FQ= 
+github.com/open-telemetry/opentelemetry-collector-contrib/receiver/tcplogreceiver v0.115.0 h1:nTPnLX+KPeG4/YHSGkry7QDwNR6W1OTfnLozKvvey2Q= +github.com/open-telemetry/opentelemetry-collector-contrib/receiver/tcplogreceiver v0.115.0/go.mod h1:OoSL6tya9EITzSXn2YoKnYnCZGuxi0MhmJkcT8Y8oac= +github.com/open-telemetry/opentelemetry-collector-contrib/receiver/udplogreceiver v0.115.0 h1:QqOsBCJejzQwOYdiyEMa2WsJLyb/m86LzFyEAraUo3s= +github.com/open-telemetry/opentelemetry-collector-contrib/receiver/udplogreceiver v0.115.0/go.mod h1:wDTa8bGTdXIchhbX5b0pPekJMTEbDMVQE1I+Yel5z2I= +github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver v0.115.0 h1:h/HAHLIZnIyu85l8wOeggOyiI8z8citNAqxQktVKUpk= +github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver v0.115.0/go.mod h1:iEU0NA/i2sUREqD19JYmjKwrjMUTcddad/h1LGdSMHw= github.com/openconfig/gnmi v0.0.0-20180912164834-33a1865c3029 h1:lXQqyLroROhwR2Yq/kXbLzVecgmVeZh2TFLg6OxCd+w= github.com/openconfig/gnmi v0.0.0-20180912164834-33a1865c3029/go.mod h1:t+O9It+LKzfOAhKTT5O0ehDix+MTqbtT0T9t+7zzOvc= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= @@ -1278,8 +1300,8 @@ github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYr github.com/openzipkin/zipkin-go v0.2.5/go.mod h1:KpXfKdgRDnnhsxw4pNIH9Md5lyFqKUa4YDFlwRYAMyE= github.com/openzipkin/zipkin-go v0.4.3 h1:9EGwpqkgnwdEIJ+Od7QVSEIH+ocmm5nPat0G7sjsSdg= github.com/openzipkin/zipkin-go v0.4.3/go.mod h1:M9wCJZFWCo2RiY+o1eBCEMe0Dp2S5LDHcMZmk3RmK7c= -github.com/ovh/go-ovh v1.4.3 h1:Gs3V823zwTFpzgGLZNI6ILS4rmxZgJwJCz54Er9LwD0= -github.com/ovh/go-ovh v1.4.3/go.mod h1:AkPXVtgwB6xlKblMjRKJJmjRp+ogrE7fz2lVgcQY8SY= +github.com/ovh/go-ovh v1.6.0 h1:ixLOwxQdzYDx296sXcgS35TOPEahJkpjMGtzPadCjQI= +github.com/ovh/go-ovh v1.6.0/go.mod h1:cTVDnl94z4tl8pP1uZ/8jlVxntjSIf09bNcQ5TJSC7c= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod 
h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= @@ -1310,6 +1332,8 @@ github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA= +github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 h1:GFCKgmp0tecUJ0sJuv4pzYCqS9+RGSn52M3FUwPs+uo= +github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= @@ -1324,8 +1348,8 @@ github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3O github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= -github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= +github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y= +github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= github.com/prometheus/client_model 
v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= @@ -1338,8 +1362,8 @@ github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB8 github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/common v0.30.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= -github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc= -github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8= +github.com/prometheus/common v0.61.0 h1:3gv/GThfX0cV2lpO7gkTUwZru38mxevy90Bj8YFSRQQ= +github.com/prometheus/common v0.61.0/go.mod h1:zr29OCN/2BsJRaFwG8QOBr41D6kkchKbpeNH7pAjb/s= github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4= github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= @@ -1350,14 +1374,14 @@ github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1 github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= -github.com/prometheus/prometheus v0.51.2-0.20240405174432-b4a973753c6e h1:UmqAuY2OyDoog8+l5FybViJE5B2r+UxVGCUwFTsY5AA= -github.com/prometheus/prometheus v0.51.2-0.20240405174432-b4a973753c6e/go.mod 
h1:+0ld+ozir7zWFcHA2vVpWAKxXakIioEjPPNOqH+J3ZA= +github.com/prometheus/prometheus v0.54.1 h1:vKuwQNjnYN2/mDoWfHXDhAsz/68q/dQDb+YbcEqU7MQ= +github.com/prometheus/prometheus v0.54.1/go.mod h1:xlLByHhk2g3ycakQGrMaU8K7OySZx98BzeCR99991NY= github.com/rabbitmq/amqp091-go v1.2.0/go.mod h1:ogQDLSOACsLPsIq0NpbtiifNZi2YOz0VTJ0kHRghqbM= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM= github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= -github.com/relvacode/iso8601 v1.4.0 h1:GsInVSEJfkYuirYFxa80nMLbH2aydgZpIf52gYZXUJs= -github.com/relvacode/iso8601 v1.4.0/go.mod h1:FlNp+jz+TXpyRqgmM7tnzHHzBnz776kmAH2h3sZCn0I= +github.com/relvacode/iso8601 v1.6.0 h1:eFXUhMJN3Gz8Rcq82f9DTMW0svjtAVuIEULglM7QHTU= +github.com/relvacode/iso8601 v1.6.0/go.mod h1:FlNp+jz+TXpyRqgmM7tnzHHzBnz776kmAH2h3sZCn0I= github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE= github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= github.com/rhnvrm/simples3 v0.6.1/go.mod h1:Y+3vYm2V7Y4VijFoJHHTrja6OgPrJ2cBti8dPGkC3sA= @@ -1366,10 +1390,10 @@ github.com/riemann/riemann-go-client v0.5.0/go.mod h1:FMiaOL8dgBnRfgwENzV0xlYJ2e github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= -github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= -github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= -github.com/rs/cors v1.11.0 
h1:0B9GE/r9Bc2UxRMMtymBkHTenPkHDv0CW4Y98GBY+po= -github.com/rs/cors v1.11.0/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= +github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= +github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= +github.com/rs/cors v1.11.1 h1:eU3gRzXLRK57F5rKMGMZURNdIG4EoAmX8k94r9wXWHA= +github.com/rs/cors v1.11.1/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= @@ -1382,18 +1406,16 @@ github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6g github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ= github.com/samuel/go-zookeeper v0.0.0-20200724154423-2164a8ac840e h1:CGjiMQ0wMH4wtNWrlj6kiTbkPt2F3rbYnhGX6TWLfco= github.com/samuel/go-zookeeper v0.0.0-20200724154423-2164a8ac840e/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= -github.com/scaleway/scaleway-sdk-go v1.0.0-beta.25 h1:/8rfZAdFfafRXOgz+ZpMZZWZ5pYggCY9t7e/BvjaBHM= -github.com/scaleway/scaleway-sdk-go v1.0.0-beta.25/go.mod h1:fCa7OJZ/9DRTnOKmxvT6pn+LPWUptQAmHF/SBJUGEcg= +github.com/scaleway/scaleway-sdk-go v1.0.0-beta.30 h1:yoKAVkEVwAqbGbR8n87rHQ1dulL25rKloGadb3vm770= +github.com/scaleway/scaleway-sdk-go v1.0.0-beta.30/go.mod h1:sH0u6fq6x4R5M7WxkoQFY/o7UaiItec0o1LinLCJNq8= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/seccomp/libseccomp-golang v0.9.2-0.20220502022130-f33da4d89646 h1:RpforrEYXWkmGwJHIGnLZ3tTWStkjVVstwzNGqxX2Ds= 
github.com/seccomp/libseccomp-golang v0.9.2-0.20220502022130-f33da4d89646/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg= github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI= github.com/shirou/gopsutil v3.21.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= -github.com/shirou/gopsutil/v4 v4.24.5 h1:gGsArG5K6vmsh5hcFOHaPm87UD003CaDMkAOweSQjhM= -github.com/shirou/gopsutil/v4 v4.24.5/go.mod h1:aoebb2vxetJ/yIDZISmduFvVNPHqXQ9SEJwRXxkf0RA= -github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFtM= -github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ= +github.com/shirou/gopsutil/v4 v4.24.11 h1:WaU9xqGFKvFfsUv94SXcUPD7rCkU0vr/asVdQOBZNj8= +github.com/shirou/gopsutil/v4 v4.24.11/go.mod h1:s4D/wg+ag4rG0WO7AiTj2BeYCRhym0vM7DHbZRxnIT8= github.com/shoenig/test v1.7.1 h1:UJcjSAI3aUKx52kfcfhblgyhZceouhvvs3OYdWgn+PY= github.com/shoenig/test v1.7.1/go.mod h1:UxJ6u/x2v/TNs/LoLxBNJRV9DiwBBKYxXSyczsBHFoI= github.com/showwin/speedtest-go v1.1.4 h1:pcY1W5LYZu44lH6Fuu80nu/Pj67n//VArlZudbAgR6E= @@ -1427,8 +1449,8 @@ github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0= github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= -github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0= -github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho= +github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= +github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= 
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= @@ -1458,24 +1480,26 @@ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= -github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= +github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 h1:kdXcSzyDtseVEc4yCz2qF8ZrQvIDBJLl4S1c3GCXmoI= github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= -github.com/testcontainers/testcontainers-go v0.31.0 h1:W0VwIhcEVhRflwL9as3dhY6jXjVCA27AkmbnZ+UTh3U= -github.com/testcontainers/testcontainers-go v0.31.0/go.mod h1:D2lAoA0zUFiSY+eAflqK5mcUx/A5hrrORaEQrd0SefI= -github.com/tidwall/gjson v1.10.2 h1:APbLGOM0rrEkd8WBw9C24nllro4ajFuJu0Sc9hRz8Bo= +github.com/testcontainers/testcontainers-go v0.34.0 h1:5fbgF0vIN5u+nD3IWabQwRybuB4GY8G2HHgCkbMzMHo= +github.com/testcontainers/testcontainers-go v0.34.0/go.mod h1:6P/kMkQe8yqPHfPWNulFGdFHTD8HB2vLq/231xY2iPQ= github.com/tidwall/gjson v1.10.2/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= +github.com/tidwall/gjson v1.14.2 h1:6BBkirS0rAHjumnjHF6qgy5d2YAJ1TLIaFE2lzfOLqo= +github.com/tidwall/gjson v1.14.2/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= github.com/tidwall/match 
v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA= github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= github.com/tidwall/pretty v1.2.0 h1:RWIZEg2iJ8/g6fDDYzMpobmaoGh5OLl4AXtGUGPcqCs= github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= github.com/tidwall/tinylru v1.1.0 h1:XY6IUfzVTU9rpwdhKUF6nQdChgCdGjkMfLzbWyiau6I= github.com/tidwall/tinylru v1.1.0/go.mod h1:3+bX+TJ2baOLMWTnlyNWHh4QMnFyARg2TLTQ6OFbzw8= -github.com/tidwall/wal v1.1.7 h1:emc1TRjIVsdKKSnpwGBAcsAGg0767SvUk8+ygx7Bb+4= -github.com/tidwall/wal v1.1.7/go.mod h1:r6lR1j27W9EPalgHiB7zLJDYu3mzW5BQP5KrzBpYY/E= +github.com/tidwall/wal v1.1.8 h1:2qDSGdAdjaY3PEvHRva+9UFqgk+ef7cOiW1Qn5JH1y0= +github.com/tidwall/wal v1.1.8/go.mod h1:r6lR1j27W9EPalgHiB7zLJDYu3mzW5BQP5KrzBpYY/E= github.com/tinylib/msgp v1.1.6 h1:i+SbKraHhnrf9M5MYmvQhFnbLhAXSDWF8WWsuyRdocw= github.com/tinylib/msgp v1.1.6/go.mod h1:75BAfg2hauQhs3qedfdDZmWAPcFMAvJE5b9rGOMufyw= github.com/tklauser/go-sysconf v0.3.10/go.mod h1:C8XykCvCb+Gn0oNCWPIlcb0RuglQTYaQ2hGm7jmxEFk= @@ -1489,15 +1513,16 @@ github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08= github.com/twmb/murmur3 v1.1.7 h1:ULWBiM04n/XoN3YMSJ6Z2pHDFLf+MeIVQU71ZPrvbWg= github.com/twmb/murmur3 v1.1.7/go.mod h1:Qq/R7NUyOfr65zD+6Q5IHKsJLwP7exErjN6lyyq3OSQ= +github.com/ua-parser/uap-go v0.0.0-20240611065828-3a4781585db6 h1:SIKIoA4e/5Y9ZOl0DCe3eVMLPOQzJxgZpfdHHeauNTM= +github.com/ua-parser/uap-go v0.0.0-20240611065828-3a4781585db6/go.mod h1:BUbeWZiieNxAuuADTBNb3/aeje6on3DhU3rpWsQSB1E= github.com/ugorji/go/codec v1.2.12 h1:9LC83zGrHhuUA9l16C9AHXAqEV/2wBQ4nkvumAE65EE= github.com/ugorji/go/codec v1.2.12/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg= github.com/valyala/fastjson v1.6.4 h1:uAUNq9Z6ymTgGhcm0UynUAB6tlbakBrz6CQFax3BXVQ= github.com/valyala/fastjson v1.6.4/go.mod 
h1:CLCAqky6SMuOcxStkYQvblddUtoRxhYMGLrsQns1aXY= github.com/vapourismo/knx-go v0.0.0-20211128234507-8198fa17db36 h1:JBj2CqnFwBhI3XsdMNn9MjKvehog+p5QZihotqq0Zuo= github.com/vapourismo/knx-go v0.0.0-20211128234507-8198fa17db36/go.mod h1:AslkIOXnEbVmvzc8uqDjm8ZyIqNJcEPiFRqlokmqr2o= -github.com/vishvananda/netlink v1.2.1-beta.2 h1:Llsql0lnQEbHj0I1OuKyp8otXp0r3q0mPkuhwHfStVs= -github.com/vishvananda/netlink v1.2.1-beta.2/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho= -github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= +github.com/vishvananda/netlink v1.3.0 h1:X7l42GfcV4S6E4vHTsw48qbrV+9PVojNfIhZcwQdrZk= +github.com/vishvananda/netlink v1.3.0/go.mod h1:i6NetklAujEcC6fK0JPjT8qSwWyO0HLn4UKG+hGqeJs= github.com/vishvananda/netns v0.0.4 h1:Oeaw1EM2JMxD51g9uhtC0D7erkIjgmj8+JZc26m1YX8= github.com/vishvananda/netns v0.0.4/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM= github.com/vjeantet/grok v1.0.1 h1:2rhIR7J4gThTgcZ1m2JY4TrJZNgjn985U28kT2wQrJ4= @@ -1512,6 +1537,8 @@ github.com/wvanbergen/kafka v0.0.0-20171203153745-e2edea948ddf h1:TOV5PC6fIWwFOF github.com/wvanbergen/kafka v0.0.0-20171203153745-e2edea948ddf/go.mod h1:nxx7XRXbR9ykhnC8lXqQyJS0rfvJGxKyKw/sT1YOttg= github.com/wvanbergen/kazoo-go v0.0.0-20180202103751-f72d8611297a h1:ILoU84rj4AQ3q6cjQvtb9jBjx4xzR/Riq/zYhmDQiOk= github.com/wvanbergen/kazoo-go v0.0.0-20180202103751-f72d8611297a/go.mod h1:vQQATAGxVK20DC1rRubTJbZDDhhpA4QfU02pMdPxGO4= +github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= +github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= github.com/xdg-go/pbkdf2 v1.0.0 h1:Su7DPu48wXMwC3bs7MCNG+z4FhcyEuz5dlvchbq0B0c= github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= github.com/xdg-go/scram v1.1.2 h1:FHX5I5B4i4hKRVRBCFRxq1iQRej7WO3hhBuJf+UUySY= @@ -1544,8 +1571,8 @@ github.com/yusufpapurcu/wmi v1.2.4 
h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= github.com/zeebo/xxh3 v1.0.2 h1:xZmwmqxHZA8AI603jOQ0tMqmBr9lPeFwGg6d+xy9DC0= github.com/zeebo/xxh3 v1.0.2/go.mod h1:5NWz9Sef7zIDm2JHfFlcQvNekmcEl9ekUZQQKCYaDcA= -go.etcd.io/bbolt v1.3.10 h1:+BqfJTcCzTItrop8mq/lbzL8wSGtj94UO/3U31shqG0= -go.etcd.io/bbolt v1.3.10/go.mod h1:bK3UQLPJZly7IlNmV7uVHJDxfe5aK9Ll93e/74Y9oEQ= +go.etcd.io/bbolt v1.3.11 h1:yGEzV1wPz2yVCLsD8ZAiGHhHVlczyC9d1rP43/VCRJ0= +go.etcd.io/bbolt v1.3.11/go.mod h1:dksAq7YMXoljX0xu6VF5DMZGbhYYoLUalEiSySYAS4I= go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= go.etcd.io/etcd/api/v3 v3.5.4/go.mod h1:5GB2vv4A4AOn3yk7MftYGHkUfGtDHnEraIjym4dYz5A= go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= @@ -1563,129 +1590,191 @@ go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/collector v0.103.0 h1:mssWo1y31p1F/SRsSBnVUX6YocgawCqM1blpE+hkWog= -go.opentelemetry.io/collector v0.103.0/go.mod h1:mgqdTFB7QCYiOeEdJSSEktovPqy+2fw4oTKJzyeSB0U= -go.opentelemetry.io/collector/component v0.103.0 h1:j52YAsp8EmqYUotVUwhovkqFZGuxArEkk65V4TI46NE= -go.opentelemetry.io/collector/component v0.103.0/go.mod h1:jKs19tGtCO8Hr5/YM0F+PoFcl8SVe/p4Ge30R6srkbc= -go.opentelemetry.io/collector/config/configauth v0.103.0 h1:tv2Ilj0X9T8ZsDd4mB8Sl+nXQ8CG8MJVQ1Lo4mmE0Pk= -go.opentelemetry.io/collector/config/configauth v0.103.0/go.mod h1:VIo8DpFeyOOCMUVoQsBdq3t2snUiBBECP0UxW1bwz/o= -go.opentelemetry.io/collector/config/configcompression v1.10.0 h1:ClkAY1rzaxFawmC53BUf3TjTWKOGx+2xnpqOJIkg6Tk= -go.opentelemetry.io/collector/config/configcompression v1.10.0/go.mod 
h1:6+m0GKCv7JKzaumn7u80A2dLNCuYf5wdR87HWreoBO0= -go.opentelemetry.io/collector/config/configgrpc v0.103.0 h1:H1TXxUwxaZINmAzuehP/8ExKhJKzuw/oBGc7juzwloo= -go.opentelemetry.io/collector/config/configgrpc v0.103.0/go.mod h1:1FG873Wpw9AWANjXBvOd59noWY3dZoU6WkMWLJDx5FQ= -go.opentelemetry.io/collector/config/confighttp v0.103.0 h1:tgCWMKuIorSr4+iQOv0A8Ya/8do73hiG5KHinWaz63Q= -go.opentelemetry.io/collector/config/confighttp v0.103.0/go.mod h1:xMXoLsTGTJlftu+VAL3iadEs4gkmqFrvuPPnpNi6ETo= -go.opentelemetry.io/collector/config/confignet v0.103.0 h1:A2/8y2oEFaJbmtl+r1JIP0y+281vmmcPp0P51xcSn5s= -go.opentelemetry.io/collector/config/confignet v0.103.0/go.mod h1:pfOrCTfSZEB6H2rKtx41/3RN4dKs+X2EKQbw3MGRh0E= -go.opentelemetry.io/collector/config/configopaque v1.10.0 h1:FAxj6ggLpJE/kFnR1ezYwjRdo6gHo2+CjlIsHVCFVnQ= -go.opentelemetry.io/collector/config/configopaque v1.10.0/go.mod h1:0xURn2sOy5j4fbaocpEYfM97HPGsiffkkVudSPyTJlM= -go.opentelemetry.io/collector/config/configretry v0.103.0 h1:AwWGdeYJvUCZfh127Jb8uj7kuoPrJ3t2iZGwQUxmb7c= -go.opentelemetry.io/collector/config/configretry v0.103.0/go.mod h1:P+RA0IA+QoxnDn4072uyeAk1RIoYiCbxYsjpKX5eFC4= -go.opentelemetry.io/collector/config/configtelemetry v0.103.0 h1:KLbhkFqdw9D31t0IhJ/rnhMRvz/s14eie0fKfm5xWns= -go.opentelemetry.io/collector/config/configtelemetry v0.103.0/go.mod h1:WxWKNVAQJg/Io1nA3xLgn/DWLE/W1QOB2+/Js3ACi40= -go.opentelemetry.io/collector/config/configtls v0.103.0 h1:nbk8sJIHoYYQbpZtUkUQceTbjC4wEjoePKJ15v8cCcU= -go.opentelemetry.io/collector/config/configtls v0.103.0/go.mod h1:046dfdfHW8wWCMhzUaWJo7guRiCoSz5QzVjCSDzymdU= -go.opentelemetry.io/collector/config/internal v0.103.0 h1:pimS3uLHfOBbConZrviGoTwu+bkTNDoQBtbeWCg8U8k= -go.opentelemetry.io/collector/config/internal v0.103.0/go.mod h1:kJRkB+PgamWqPi/GWbYWvnRzVzS1rwDUh6+VSz4C7NQ= -go.opentelemetry.io/collector/confmap v0.103.0 h1:qKKZyWzropSKfgtGv12JzADOXNgThqH1Vx6qzblBE24= -go.opentelemetry.io/collector/confmap v0.103.0/go.mod 
h1:TlOmqe/Km3K6WgxyhEAdCb/V1Yp6eSU76fCoiluEa88= -go.opentelemetry.io/collector/confmap/converter/expandconverter v0.103.0 h1:zApcKLSosuu9I/4IRHTqlE1H6XNiZNAgd26YbzHwkto= -go.opentelemetry.io/collector/confmap/converter/expandconverter v0.103.0/go.mod h1:hoel+3CPjRhPSHzCrE1E+wCyoSLHlgW7662Ntwx2ujM= -go.opentelemetry.io/collector/confmap/provider/envprovider v0.103.0 h1:0XHQ/ffxSUx3sMbnYSf8a4jnVYLUrxo+/XwdhXkizgs= -go.opentelemetry.io/collector/confmap/provider/envprovider v0.103.0/go.mod h1:NiE4Fe42Sew1TyXuU1YEd0xZBDNI+w6IRkC2OTlJUak= -go.opentelemetry.io/collector/confmap/provider/fileprovider v0.103.0 h1:5dB2G7d6RKmWS8ptuAWvAEKGYODk2DTRm84bU9HooLQ= -go.opentelemetry.io/collector/confmap/provider/fileprovider v0.103.0/go.mod h1:GT/GBk17lDhc27762w6PNHvKYbA+TnHvNEyQHUsjKpY= -go.opentelemetry.io/collector/confmap/provider/httpprovider v0.103.0 h1:Hrp+nw4W9/jeJfi3GfJW6EYh7DeNkaC1wojOh4x8CbI= -go.opentelemetry.io/collector/confmap/provider/httpprovider v0.103.0/go.mod h1:kUst0pGVBlKDSlvJYDclrsApbkMv7ahRDh6/pE4LsBc= -go.opentelemetry.io/collector/confmap/provider/httpsprovider v0.103.0 h1:JUDRYsMOhkIBxZqZli0BU+64zahIUgnEPZSe9wo2T0Q= -go.opentelemetry.io/collector/confmap/provider/httpsprovider v0.103.0/go.mod h1:+mUrWjpdGIdSKMeeplLO+qXFSBc287as2oIPVdKMTxc= -go.opentelemetry.io/collector/confmap/provider/yamlprovider v0.103.0 h1:boTv+ZRkn1h5eUbt5sLSU5lCrItCCxCen/laRmsHLyg= -go.opentelemetry.io/collector/confmap/provider/yamlprovider v0.103.0/go.mod h1:0pZ7RD7SPg+yklgGPN+74Zzbps4R9x5bRPZX1D1gtGM= -go.opentelemetry.io/collector/connector v0.103.0 h1:jwmrgCT6ftz3U4o8mAqP+/yaQ5KsLMFXo2+OHXhy+tE= -go.opentelemetry.io/collector/connector v0.103.0/go.mod h1:6RDaeDMiXTKEXSy1eIaO0EiM+/91NVHdBxOc9e2++2A= -go.opentelemetry.io/collector/consumer v0.103.0 h1:L/7SA/U2ua5L4yTLChnI9I+IFGKYU5ufNQ76QKYcPYs= -go.opentelemetry.io/collector/consumer v0.103.0/go.mod h1:7jdYb9kSSOsu2R618VRX0VJ+Jt3OrDvvUsDToHTEOLI= -go.opentelemetry.io/collector/exporter v0.103.0 
h1:g0nF/FAwuA7tTJf5uo1PvlQl7xFqCgvfH+FYqufBSiw= -go.opentelemetry.io/collector/exporter v0.103.0/go.mod h1:PC2OvciPEew2kaA/ZMyxRqfeOW8Wgi0CYR614PEyS/w= -go.opentelemetry.io/collector/exporter/debugexporter v0.103.0 h1:jwZHoXvp3vdQ3obtnU+Vav5ChTCUBSC6mvlOZJ8doCU= -go.opentelemetry.io/collector/exporter/debugexporter v0.103.0/go.mod h1:kzmBnKxsLNVBRGS8nwu497SvHspzyeiV06+LiPHktto= -go.opentelemetry.io/collector/exporter/nopexporter v0.103.0 h1:QaxkFbHSSYj2RRgkIhB6lDjJHFSGr71WlLk46fG0mAo= -go.opentelemetry.io/collector/exporter/nopexporter v0.103.0/go.mod h1:/wopRTmGS20A2Ihxcuj8M4j4VWMG6AFwmrt0eT6rDNg= -go.opentelemetry.io/collector/extension v0.103.0 h1:vTsd+GElvT7qKk9Y9d6UKuuT2Ngx0mai8Q48hkKQMwM= -go.opentelemetry.io/collector/extension v0.103.0/go.mod h1:rp2l3xskNKWv0yBCyU69Pv34TnP1QVD1ijr0zSndnsM= -go.opentelemetry.io/collector/extension/auth v0.103.0 h1:i7cQl+Ewpve/DIN4rFMg1GiyUPE14LZsYWrJ1RqtP84= -go.opentelemetry.io/collector/extension/auth v0.103.0/go.mod h1:JdYBS/EkPAz2APAi8g7xTiSRlZTc7c4H82AQM9epzxw= -go.opentelemetry.io/collector/extension/ballastextension v0.103.0 h1:U8f6b6xnkD04HNIOgqLAWXfCcezSAU2vMq5SMtKjjbo= -go.opentelemetry.io/collector/extension/ballastextension v0.103.0/go.mod h1:/B3xE2YhUgGXSsFH8Qg0kq4WpfkuTyWDJGNyfRgMax8= -go.opentelemetry.io/collector/extension/zpagesextension v0.103.0 h1:jgSEQY++zOI6hFQygwuvS6ulJ/Yu4xXgUg+Ijoxx51I= -go.opentelemetry.io/collector/extension/zpagesextension v0.103.0/go.mod h1:2OUi0Hp+3zPUJmi7goJ6d1/kGgFAw3SDESRX7xQ0QHE= -go.opentelemetry.io/collector/featuregate v1.10.0 h1:krSqokHTp7JthgmtewysqHuOAkcuuZl7G2n91s7HygE= -go.opentelemetry.io/collector/featuregate v1.10.0/go.mod h1:PsOINaGgTiFc+Tzu2K/X2jP+Ngmlp7YKGV1XrnBkH7U= +go.opentelemetry.io/collector v0.115.0 h1:qUZ0bTeNBudMxNQ7FJKS//TxTjeJ7tfU/z22mcFavWU= +go.opentelemetry.io/collector v0.115.0/go.mod h1:66qx0xKnVvdwq60e1DEfb4e+zmM9szhPsv2hxZ/Mpj4= +go.opentelemetry.io/collector/client v1.21.0 h1:3Kes8lOFMYVxoxeAmX+DTEAkuS1iTA3NkSfqzGmygJA= 
+go.opentelemetry.io/collector/client v1.21.0/go.mod h1:jYJGiL0UA975OOyHmjbQSokNWt1OiviI5KjPOMUMGwc= +go.opentelemetry.io/collector/component v0.115.0 h1:iLte1oCiXzjiCnaOBKdsXacfFiECecpWxW3/LeriMoo= +go.opentelemetry.io/collector/component v0.115.0/go.mod h1:oIUFiH7w1eOimdeYhFI+gAIxYSiLDocKVJ0PTvX7d6s= +go.opentelemetry.io/collector/component/componentstatus v0.115.0 h1:pbpUIL+uKDfEiSgKK+S5nuSL6MDIIQYsp4b65ZGVb9M= +go.opentelemetry.io/collector/component/componentstatus v0.115.0/go.mod h1:36A+9XSiOz0Cdhq+UwwPRlEr5CYuSkEnVO9om4BH7d0= +go.opentelemetry.io/collector/component/componenttest v0.115.0 h1:9URDJ9VyP6tuij+YHjp/kSSMecnZOd7oGvzu+rw9SJY= +go.opentelemetry.io/collector/component/componenttest v0.115.0/go.mod h1:PzXvNqKLCiSADZGZFKH+IOHMkaQ0GTHuzysfVbTPKYY= +go.opentelemetry.io/collector/config/configauth v0.115.0 h1:xa+ALdyPgva3rZnLBh1H2oS5MsHP6JxSqMtQmcELnys= +go.opentelemetry.io/collector/config/configauth v0.115.0/go.mod h1:C7anpb3Rf4KswMT+dgOzkW9UX0z/65PLORpUw3p0VYc= +go.opentelemetry.io/collector/config/configcompression v1.21.0 h1:0zbPdZAgPFMAarwJEC4gaR6f/JBP686A3TYSgb3oa+E= +go.opentelemetry.io/collector/config/configcompression v1.21.0/go.mod h1:LvYG00tbPTv0NOLoZN0wXq1F5thcxvukO8INq7xyfWU= +go.opentelemetry.io/collector/config/configgrpc v0.115.0 h1:gZzXSFe6hB3RUcEeAYqk1yT+TBa+X9tp6/1x29Yg2yk= +go.opentelemetry.io/collector/config/configgrpc v0.115.0/go.mod h1:107lRZ5LdQPMdGJGd4m1GhyKxyH0az2cUOqrJgTEN8E= +go.opentelemetry.io/collector/config/confighttp v0.115.0 h1:BIy394oNXnqySJwrCqgAJu4gWgAV5aQUDD6k1hy6C8o= +go.opentelemetry.io/collector/config/confighttp v0.115.0/go.mod h1:Wr50ut12NmCEAl4bWLJryw2EjUmJTtYRg89560Q51wc= +go.opentelemetry.io/collector/config/confignet v1.21.0 h1:PeQ5YrMnfftysFL/WVaSrjPOWjD6DfeABY50pf9CZxU= +go.opentelemetry.io/collector/config/confignet v1.21.0/go.mod h1:ZppUH1hgUJOubawEsxsQ9MzEYFytqo2GnVSS7d4CVxc= +go.opentelemetry.io/collector/config/configopaque v1.21.0 h1:PcvRGkBk4Px8BQM7tX+kw4i3jBsfAHGoGQbtZg6Ox7U= 
+go.opentelemetry.io/collector/config/configopaque v1.21.0/go.mod h1:sW0t0iI/VfRL9VYX7Ik6XzVgPcR+Y5kejTLsYcMyDWs= +go.opentelemetry.io/collector/config/configretry v1.22.0 h1:gKZeYPvCho1+pO6ePRXkloA2nKUUFnA+yBUSHfOzJPU= +go.opentelemetry.io/collector/config/configretry v1.22.0/go.mod h1:cleBc9I0DIWpTiiHfu9v83FUaCTqcPXmebpLxjEIqro= +go.opentelemetry.io/collector/config/configtelemetry v0.115.0 h1:U07FinCDop+r2RjWQ3aP9ZWONC7r7kQIp1GkXQi6nsI= +go.opentelemetry.io/collector/config/configtelemetry v0.115.0/go.mod h1:SlBEwQg0qly75rXZ6W1Ig8jN25KBVBkFIIAUI1GiAAE= +go.opentelemetry.io/collector/config/configtls v1.21.0 h1:ZfrlAYgBD8lzp04W0GxwiDmUbrvKsvDYJi+wkyiXlpA= +go.opentelemetry.io/collector/config/configtls v1.21.0/go.mod h1:5EsNefPfVCMOTlOrr3wyj7LrsOgY7V8iqRl8oFZEqtw= +go.opentelemetry.io/collector/config/internal v0.115.0 h1:eVk57iufZpUXyPJFKTb1Ebx5tmcCyroIlt427r5pxS8= +go.opentelemetry.io/collector/config/internal v0.115.0/go.mod h1:OVkadRWlKAoWjHslqjWtBLAne8ceQm8WYT71ZcBWLFc= +go.opentelemetry.io/collector/confmap v1.21.0 h1:1tIcx2/Suwg8VhuPmQw87ba0ludPmumpFCFRZZa6RXA= +go.opentelemetry.io/collector/confmap v1.21.0/go.mod h1:Rrhs+MWoaP6AswZp+ReQ2VO9dfOfcUjdjiSHBsG+nec= +go.opentelemetry.io/collector/confmap/converter/expandconverter v0.113.0 h1:DBjWXlVzdwVbs1ZOH+k1vmoBt3TLx8NvRK8ZK3nKdmo= +go.opentelemetry.io/collector/confmap/converter/expandconverter v0.113.0/go.mod h1:/YDWibNLalyfd0BR0V5ixiParsNCvVKkA58f3bcu/AA= +go.opentelemetry.io/collector/confmap/provider/envprovider v1.21.0 h1:YLf++Z8CMp86AanfOCWUiE7vKbb1kSjgC3a9VJoxbD4= +go.opentelemetry.io/collector/confmap/provider/envprovider v1.21.0/go.mod h1:aSWLYcmgZZJDNtWN1M8JKQuehoGgOxibl1KuvKTar4M= +go.opentelemetry.io/collector/confmap/provider/fileprovider v1.21.0 h1:+zukkM+3l426iGoJkXTpLB2Z8QnZFu26TkGPjh5Rn/4= +go.opentelemetry.io/collector/confmap/provider/fileprovider v1.21.0/go.mod h1:BXBpQhF3n4CNLYO2n/mWZPd2U9ekpbLXLRGZrun1VfI= +go.opentelemetry.io/collector/confmap/provider/httpprovider v1.21.0 
h1:NYYGM+SgIlTuNGjd8eGzDr8DkvOe4q7cXon8djF9yyI= +go.opentelemetry.io/collector/confmap/provider/httpprovider v1.21.0/go.mod h1:XRYbuwqq1awFuNhLDUv4aSvn6MzqX+abcevx1O+APJI= +go.opentelemetry.io/collector/confmap/provider/yamlprovider v1.21.0 h1:P3Q9RytCMY76ORPCnkkjOa4fkuFqmZiQRor+F/nPlYE= +go.opentelemetry.io/collector/confmap/provider/yamlprovider v1.21.0/go.mod h1:xhYhHK3yLQ78tsoaKPIGUfFulgy961ImOe2gATH3RQc= +go.opentelemetry.io/collector/connector v0.115.0 h1:4Kkm3HQFzNT1eliMOB8FbIn+PLMRJ2qQku5Vmy3V8Ko= +go.opentelemetry.io/collector/connector v0.115.0/go.mod h1:+ByuAmYLrYHoKh9B+LGqUc0N2kXcN2l8Dea8Mp6brZ8= +go.opentelemetry.io/collector/connector/connectorprofiles v0.115.0 h1:aW1f4Az0I+QJyImFccNWAXqik80bnNu27aQqi2hFfD8= +go.opentelemetry.io/collector/connector/connectorprofiles v0.115.0/go.mod h1:lmynB1CucydOsHa8RSSBh5roUZPfuiv65imXhtNzClM= +go.opentelemetry.io/collector/connector/connectortest v0.115.0 h1:GjtourFr0MJmlbtEPAZ/1BZCxkNAeJ0aMTlrxwftJ0k= +go.opentelemetry.io/collector/connector/connectortest v0.115.0/go.mod h1:f3KQXXNlh/XuV8elmnuVVyfY92dJCAovz10gD72OH0k= +go.opentelemetry.io/collector/consumer v1.22.0 h1:QmfnNizyNZFt0uK3GG/EoT5h6PvZJ0dgVTc5hFEc1l0= +go.opentelemetry.io/collector/consumer v1.22.0/go.mod h1:tiz2khNceFAPokxxfzAuFfIpShBasMT2AL2Sbc7+m0I= +go.opentelemetry.io/collector/consumer/consumererror v0.115.0 h1:yli//xBCQMPZKXNgNlXemo4dvqhnFrAmCZ11DvQgmcY= +go.opentelemetry.io/collector/consumer/consumererror v0.115.0/go.mod h1:LwVzAvQ6ZVNG7mbOvurbAo+W/rKws0IcjOwriuZXqPE= +go.opentelemetry.io/collector/consumer/consumererror/consumererrorprofiles v0.115.0 h1:gaIhzpaGFWauiyznrQ3f++TbcdXxA5rpsX3L9uGjMM8= +go.opentelemetry.io/collector/consumer/consumererror/consumererrorprofiles v0.115.0/go.mod h1:7oXvuGBSawS5bc413lh1KEMcXkqBcrCqZQahOdnE24U= +go.opentelemetry.io/collector/consumer/consumerprofiles v0.115.0 h1:H3fDuyQW1t2HWHkz96WMBQJKUevypOCjBqnqtaAWyoA= +go.opentelemetry.io/collector/consumer/consumerprofiles v0.115.0/go.mod 
h1:IzEmZ91Tp7TBxVDq8Cc9xvLsmO7H08njr6Pu9P5d9ns= +go.opentelemetry.io/collector/consumer/consumertest v0.115.0 h1:hru0I2447y0TluCdwlKYFFtgcpyCnlM+LiOK1JZyA70= +go.opentelemetry.io/collector/consumer/consumertest v0.115.0/go.mod h1:ybjALRJWR6aKNOzEMy1T1ruCULVDEjj4omtOJMrH/kU= +go.opentelemetry.io/collector/exporter v0.115.0 h1:JnxfpOnsuqhTPKJXVKJLS1Cv3BiVrVLzpHOjJEQw+xw= +go.opentelemetry.io/collector/exporter v0.115.0/go.mod h1:xof3fHQK8wADhaKLIJcQ7ChZaFLNC+haRdPN0wgl6kY= +go.opentelemetry.io/collector/exporter/debugexporter v0.115.0 h1:gb9VMQhcbvYqp0SJ4Hp8R9XqOLNLsoTgNJCPKpNEaVc= +go.opentelemetry.io/collector/exporter/debugexporter v0.115.0/go.mod h1:H/HS1UJlcZPNBbOcrsGZc2sPdQDHtbOjHOxMtJkmlcU= +go.opentelemetry.io/collector/exporter/exporterhelper/exporterhelperprofiles v0.115.0 h1:fetbc740pODH6JW+H49SW0hiAJwQE+/B0SbuIlaY2rg= +go.opentelemetry.io/collector/exporter/exporterhelper/exporterhelperprofiles v0.115.0/go.mod h1:oEKZ/d5BeaCK6Made9iwaeqmlT4lRbJSlW9nhIn/TwM= +go.opentelemetry.io/collector/exporter/exporterprofiles v0.115.0 h1:lSQEleCn/q9eFufcuK61NdFKU70ZlgI9dBjPCO/4CrE= +go.opentelemetry.io/collector/exporter/exporterprofiles v0.115.0/go.mod h1:7l5K2AecimX2kx+nZC1gKG3QkP247CO1+SodmJ4fFkQ= +go.opentelemetry.io/collector/exporter/exportertest v0.115.0 h1:P9SMTUXQOtcaq40bGtnnAe14zRmR4/yUgj/Tb2BEf/k= +go.opentelemetry.io/collector/exporter/exportertest v0.115.0/go.mod h1:1jMZ9gFGXglb8wfNrBZIgd+RvpZhSyFwdfE+Jtf9w4U= +go.opentelemetry.io/collector/exporter/nopexporter v0.115.0 h1:ufwLbNp7mfoSxWJcoded3D9f/nIVvCwNa/0+ZqxzkzU= +go.opentelemetry.io/collector/exporter/nopexporter v0.115.0/go.mod h1:iIJgru1t+VJVVCE5KMAKjXbq9RkK4/5FCClnWnAlGtc= +go.opentelemetry.io/collector/extension v0.115.0 h1:/cBb8AUdD0KMWC6V3lvCC16eP9Fg0wd1Upcp5rgvuGI= +go.opentelemetry.io/collector/extension v0.115.0/go.mod h1:HI7Ak6loyi6ZrZPsQJW1OO1wbaAW8OqXLFNQlTZnreQ= +go.opentelemetry.io/collector/extension/auth v0.115.0 h1:TTMokbBsSHZRFH48PvGSJmgSS8F3Rkr9MWGHZn8eJDk= 
+go.opentelemetry.io/collector/extension/auth v0.115.0/go.mod h1:3w+2mzeb2OYNOO4Bi41TUo4jr32ap2y7AOq64IDpxQo= +go.opentelemetry.io/collector/extension/auth/authtest v0.115.0 h1:OZe7dKbZ01qodSpZU0ZYzI6zpmmzJ3UvfdBSFAbSgDw= +go.opentelemetry.io/collector/extension/auth/authtest v0.115.0/go.mod h1:fk9WCXP0x91Q64Z8HZKWTHh9PWtgoWE1KXe3n2Bff3U= +go.opentelemetry.io/collector/extension/experimental/storage v0.115.0 h1:sZXw0+77092pq24CkUoTRoHQPLQUsDq6HFRNB0g5yR4= +go.opentelemetry.io/collector/extension/experimental/storage v0.115.0/go.mod h1:qjFH7Y3QYYs88By2ZB5GMSUN5k3ul4Brrq2J6lKACA0= +go.opentelemetry.io/collector/extension/extensioncapabilities v0.115.0 h1:/g25Hp5aoCNKdDjIb3Fc7XRglO8yaBRFLO/IUNPnqNI= +go.opentelemetry.io/collector/extension/extensioncapabilities v0.115.0/go.mod h1:EQx7ETiy330O6q05S2KRZsRNDg0aQEeJmVl7Ipx+Fcw= +go.opentelemetry.io/collector/extension/extensiontest v0.115.0 h1:GBVFxFEskR8jSdu9uaQh2qpXnN5VNXhXjpJ2UjxtE8I= +go.opentelemetry.io/collector/extension/extensiontest v0.115.0/go.mod h1:eu1ecbz5mT+cHoH2H3GmD/rOO0WsicSJD2RLrYuOmRA= +go.opentelemetry.io/collector/extension/zpagesextension v0.115.0 h1:zYrZZocc7n0ZuDyXNkIaX0P0qk2fjMQj7NegwBJZA4k= +go.opentelemetry.io/collector/extension/zpagesextension v0.115.0/go.mod h1:OaXwNHF3MAcInBzCXrhXbTNHfIi9b7YGhXjtCFZqxNY= +go.opentelemetry.io/collector/featuregate v1.22.0 h1:1TUcdqA5VpEsX1Lrr6GG15CptZxDXxiu5AXgwpeNSR4= +go.opentelemetry.io/collector/featuregate v1.22.0/go.mod h1:3GaXqflNDVwWndNGBJ1+XJFy3Fv/XrFgjMN60N3z7yg= +go.opentelemetry.io/collector/internal/fanoutconsumer v0.115.0 h1:6DRiSECeApFq6Jj5ug77rG53R6FzJEZBfygkyMEXdpg= +go.opentelemetry.io/collector/internal/fanoutconsumer v0.115.0/go.mod h1:vgQf5HQdmLQqpDHpDq2S3nTRoUuKtRcZpRTsy+UiwYw= +go.opentelemetry.io/collector/internal/memorylimiter v0.115.0 h1:U07IJxyHZXM6eLn8cOq/Lycx6DhQZhpDOuYtIRw/d6I= +go.opentelemetry.io/collector/internal/memorylimiter v0.115.0/go.mod h1:KNcU8WVpW5y7Ij6CGnsefb7q1UZT7VvrTDhe5FKNOA4= 
+go.opentelemetry.io/collector/internal/sharedcomponent v0.115.0 h1:9TL6T6ALqDpumUJ0tYIuPIg5LGo4r6eoqlNArYX116o= +go.opentelemetry.io/collector/internal/sharedcomponent v0.115.0/go.mod h1:SgBLKMh11bOTPR1bdDZbi5MlqsoDBBFI3uBIwnei+0k= go.opentelemetry.io/collector/model v0.44.0 h1:I+M6X2NANYChOGYrpbxCoEYJah3eHdMvumKjothIAtA= go.opentelemetry.io/collector/model v0.44.0/go.mod h1:4jo1R8uBDspLCxUGhQ0k3v/EFXFbW7s0AIy3LuGLbcU= -go.opentelemetry.io/collector/otelcol v0.103.0 h1:Skqnc2mxDdk3eiYioUuG7ST6ur5k83SOv7mIBt60fBw= -go.opentelemetry.io/collector/otelcol v0.103.0/go.mod h1:iJF3ghCv+nRZI6+hI7z3kGRZrgH///Fd9tNXY82X90g= -go.opentelemetry.io/collector/pdata v1.10.0 h1:oLyPLGvPTQrcRT64ZVruwvmH/u3SHTfNo01pteS4WOE= -go.opentelemetry.io/collector/pdata v1.10.0/go.mod h1:IHxHsp+Jq/xfjORQMDJjSH6jvedOSTOyu3nbxqhWSYE= -go.opentelemetry.io/collector/pdata/testdata v0.103.0 h1:iI6NOE0L2je/bxlWzAWHQ/yCtnGupgv42Hl9Al1q/g4= -go.opentelemetry.io/collector/pdata/testdata v0.103.0/go.mod h1:tLzRhb/h37/9wFRQVr+CxjKi5qmhSRpCAiOlhwRkeEk= -go.opentelemetry.io/collector/processor v0.103.0 h1:YZ+LRuHKtOam7SCeLkJAP6bS1d6XxeYP22OyMN3VP0s= -go.opentelemetry.io/collector/processor v0.103.0/go.mod h1:/mxyh0NpJgpZycm7iHDpM7i5PdtWvKKdCZf0cyADJfU= -go.opentelemetry.io/collector/processor/batchprocessor v0.103.0 h1:vunxXGq5Pzcawj4QbXKrIOoXLHpPbRbwNBFPR80X0R4= -go.opentelemetry.io/collector/processor/batchprocessor v0.103.0/go.mod h1:c5nh1LHVlBFQajCnm/5hwKqAvOLpTTOd2GQyB7lT75E= -go.opentelemetry.io/collector/processor/memorylimiterprocessor v0.103.0 h1:ZwPULpXaIOmY/Niaia2aNyKc40KZE9jorhN+zm6zijw= -go.opentelemetry.io/collector/processor/memorylimiterprocessor v0.103.0/go.mod h1:BvAZflYYV3/FoHjVDKYfeyx5/bIqJDaeAaf/JtDmc8w= -go.opentelemetry.io/collector/receiver v0.103.0 h1:V3JBKkX+7e/NYpDDZVyeu2VQB1/lLFuoJFPfupdCcZs= -go.opentelemetry.io/collector/receiver v0.103.0/go.mod h1:Yybv4ynKFdMOYViWWPMmjkugR89FSQN0P37wP6mX6qM= -go.opentelemetry.io/collector/receiver/nopreceiver v0.103.0 
h1:GgeYAKOaHWDm+8JVN63y/0elp1uTOF+XqDQfXWm2i1A= -go.opentelemetry.io/collector/receiver/nopreceiver v0.103.0/go.mod h1:Hwoaia7m3+5qVtZyXb5/qSlFFfDP0Wd0F/2yKC/LFiw= -go.opentelemetry.io/collector/receiver/otlpreceiver v0.103.0 h1:TycVVl4AWioV6kWeFcCIk2QuKfXOzn88yw989opsMdE= -go.opentelemetry.io/collector/receiver/otlpreceiver v0.103.0/go.mod h1:jAbzL5lwOGG93YbcPZ6aFZIZq+tjYQ+BS3vKKT2nRgw= -go.opentelemetry.io/collector/semconv v0.103.0 h1:5tlVoZlo9USHAU2Bz4YrEste0Vm5AMufXkYJhAVve1Q= -go.opentelemetry.io/collector/semconv v0.103.0/go.mod h1:yMVUCNoQPZVq/IPfrHrnntZTWsLf5YGZ7qwKulIl5hw= -go.opentelemetry.io/collector/service v0.103.0 h1:e4Eri4jo+YOuEK0+/JE9SUdT/NZaJ2jz/ROJlmLn96s= -go.opentelemetry.io/collector/service v0.103.0/go.mod h1:p1mlniiC1MuPN5FANYJYgf5V5CGFP0hNqWfI8t7Aw8M= -go.opentelemetry.io/contrib/config v0.7.0 h1:b1rK5tGTuhhPirJiMxOcyQfZs76j2VapY6ODn3b2Dbs= -go.opentelemetry.io/contrib/config v0.7.0/go.mod h1:8tdiFd8N5etOi3XzBmAoMxplEzI3TcL8dU5rM5/xcOQ= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.52.0 h1:vS1Ao/R55RNV4O7TA2Qopok8yN+X0LIP6RVWLFkprck= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.52.0/go.mod h1:BMsdeOxN04K0L5FNUBfjFdvwWGNe/rkmSwH4Aelu/X0= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0 h1:9l89oX4ba9kHbBol3Xin3leYJ+252h0zszDtBwyKe2A= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0/go.mod h1:XLZfZboOJWHNKUv7eH0inh0E9VV6eWDFB/9yJyTLPp0= -go.opentelemetry.io/contrib/propagators/b3 v1.27.0 h1:IjgxbomVrV9za6bRi8fWCNXENs0co37SZedQilP2hm0= -go.opentelemetry.io/contrib/propagators/b3 v1.27.0/go.mod h1:Dv9obQz25lCisDvvs4dy28UPh974CxkahRDUPsY7y9E= -go.opentelemetry.io/contrib/zpages v0.52.0 h1:MPgkMy0Cp3O5EdfVXP0ss3ujhEibysTM4eszx7E7d+E= -go.opentelemetry.io/contrib/zpages v0.52.0/go.mod h1:fqG5AFdoYru3A3DnhibVuaaEfQV2WKxE7fYE1jgDRwk= -go.opentelemetry.io/otel v1.27.0 h1:9BZoF3yMK/O1AafMiQTVu0YDj5Ea4hPhxCs7sGva+cg= 
-go.opentelemetry.io/otel v1.27.0/go.mod h1:DMpAK8fzYRzs+bi3rS5REupisuqTheUlSZJ1WnZaPAQ= -go.opentelemetry.io/otel/bridge/opencensus v1.27.0 h1:ao9aGGHd+G4YfjBpGs6vbkvt5hoC67STlJA9fCnOAcs= -go.opentelemetry.io/otel/bridge/opencensus v1.27.0/go.mod h1:uRvWtAAXzyVOST0WMPX5JHGBaAvBws+2F8PcC5gMnTk= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.27.0 h1:bFgvUr3/O4PHj3VQcFEuYKvRZJX1SJDQ+11JXuSB3/w= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.27.0/go.mod h1:xJntEd2KL6Qdg5lwp97HMLQDVeAhrYxmzFseAMDPQ8I= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.27.0 h1:CIHWikMsN3wO+wq1Tp5VGdVRTcON+DmOJSfDjXypKOc= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.27.0/go.mod h1:TNupZ6cxqyFEpLXAZW7On+mLFL0/g0TE3unIYL91xWc= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0 h1:R9DE4kQ4k+YtfLI2ULwX82VtNQ2J8yZmA7ZIF/D+7Mc= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0/go.mod h1:OQFyQVrDlbe+R7xrEyDr/2Wr67Ol0hRUgsfA+V5A95s= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 h1:qFffATk0X+HD+f1Z8lswGiOQYKHRlzfmdJm0wEaVrFA= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0/go.mod h1:MOiCmryaYtc+V0Ei+Tx9o5S1ZjA7kzLucuVuyzBZloQ= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.27.0 h1:QY7/0NeRPKlzusf40ZE4t1VlMKbqSNT7cJRYzWuja0s= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.27.0/go.mod h1:HVkSiDhTM9BoUJU8qE6j2eSWLLXvi1USXjyd2BXT8PY= -go.opentelemetry.io/otel/exporters/prometheus v0.49.0 h1:Er5I1g/YhfYv9Affk9nJLfH/+qCCVVg1f2R9AbJfqDQ= -go.opentelemetry.io/otel/exporters/prometheus v0.49.0/go.mod h1:KfQ1wpjf3zsHjzP149P4LyAwWRupc6c7t1ZJ9eXpKQM= -go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.27.0 h1:/jlt1Y8gXWiHG9FBx6cJaIC5hYx5Fe64nC8w5Cylt/0= -go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.27.0/go.mod h1:bmToOGOBZ4hA9ghphIc1PAf66VA8KOtsuy3+ScStG20= 
-go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.27.0 h1:/0YaXu3755A/cFbtXp+21lkXgI0QE5avTWA2HjU9/WE= -go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.27.0/go.mod h1:m7SFxp0/7IxmJPLIY3JhOcU9CoFzDaCPL6xxQIxhA+o= -go.opentelemetry.io/otel/metric v1.27.0 h1:hvj3vdEKyeCi4YaYfNjv2NUje8FqKqUY8IlF0FxV/ik= -go.opentelemetry.io/otel/metric v1.27.0/go.mod h1:mVFgmRlhljgBiuk/MP/oKylr4hs85GZAylncepAX/ak= -go.opentelemetry.io/otel/sdk v1.27.0 h1:mlk+/Y1gLPLn84U4tI8d3GNJmGT/eXe3ZuOXN9kTWmI= -go.opentelemetry.io/otel/sdk v1.27.0/go.mod h1:Ha9vbLwJE6W86YstIywK2xFfPjbWlCuwPtMkKdz/Y4A= -go.opentelemetry.io/otel/sdk/metric v1.27.0 h1:5uGNOlpXi+Hbo/DRoI31BSb1v+OGcpv2NemcCrOL8gI= -go.opentelemetry.io/otel/sdk/metric v1.27.0/go.mod h1:we7jJVrYN2kh3mVBlswtPU22K0SA+769l93J6bsyvqw= -go.opentelemetry.io/otel/trace v1.27.0 h1:IqYb813p7cmbHk0a5y6pD5JPakbVfftRXABGt5/Rscw= -go.opentelemetry.io/otel/trace v1.27.0/go.mod h1:6RiD1hkAprV4/q+yd2ln1HG9GoPx39SuvvstaLBl+l4= +go.opentelemetry.io/collector/otelcol v0.115.0 h1:wZhFGrSCZcTQ4qw4ePjI2PaSrOCejoQKAjprKD/xavs= +go.opentelemetry.io/collector/otelcol v0.115.0/go.mod h1:iK8DPvaizirIYKDl1zZG7DDYUj6GkkH4KHifVVM88vk= +go.opentelemetry.io/collector/otelcol/otelcoltest v0.115.0 h1:HNlFpQujlnvawBk8nvMGxzjDHWDCfSprxem/EpQn4u8= +go.opentelemetry.io/collector/otelcol/otelcoltest v0.115.0/go.mod h1:WsMbqYl2rm3nPFbdxQqyLXf4iu97nYLeuQ1seZIpV3Y= +go.opentelemetry.io/collector/pdata v1.22.0 h1:3yhjL46NLdTMoP8rkkcE9B0pzjf2973crn0KKhX5UrI= +go.opentelemetry.io/collector/pdata v1.22.0/go.mod h1:nLLf6uDg8Kn5g3WNZwGyu8+kf77SwOqQvMTb5AXEbEY= +go.opentelemetry.io/collector/pdata/pprofile v0.115.0 h1:NI89hy13vNDw7EOnQf7Jtitks4HJFO0SUWznTssmP94= +go.opentelemetry.io/collector/pdata/pprofile v0.115.0/go.mod h1:jGzdNfO0XTtfLjXCL/uCC1livg1LlfR+ix2WE/z3RpQ= +go.opentelemetry.io/collector/pdata/testdata v0.115.0 h1:Rblz+AKXdo3fG626jS+KSd0OSA4uMXcTQfpwed6P8LI= +go.opentelemetry.io/collector/pdata/testdata v0.115.0/go.mod 
h1:inNnRt6S2Nn260EfCBEcjesjlKOSsr0jPwkPqpBkt4s= +go.opentelemetry.io/collector/pipeline v0.115.0 h1:bmACBqb0e8U9ag+vGGHUP7kCfAO7HHROdtzIEg8ulus= +go.opentelemetry.io/collector/pipeline v0.115.0/go.mod h1:qE3DmoB05AW0C3lmPvdxZqd/H4po84NPzd5MrqgtL74= +go.opentelemetry.io/collector/pipeline/pipelineprofiles v0.115.0 h1:3l9ruCAOrssTUDnyChKNzHWOdTtfThnYaoPZ1/+5sD0= +go.opentelemetry.io/collector/pipeline/pipelineprofiles v0.115.0/go.mod h1:2Myg+law/5lcezo9PhhZ0wjCaLYdGK24s1jDWbSW9VY= +go.opentelemetry.io/collector/processor v0.115.0 h1:+fveHGRe24PZPv/F5taahGuZ9HdNW44hgNWEJhIUdyc= +go.opentelemetry.io/collector/processor v0.115.0/go.mod h1:/oLHBlLsm7tFb7zOIrA5C0j14yBtjXKAgxJJ2Bktyk4= +go.opentelemetry.io/collector/processor/batchprocessor v0.115.0 h1:dgw1jcE/YVFTs41b3Y7SerU3BBSyMEE93AYV+BAxR8E= +go.opentelemetry.io/collector/processor/batchprocessor v0.115.0/go.mod h1:imG1kDEq14UGlxyCjSCf1TUEFdSWRvF7tLoYX9nixEQ= +go.opentelemetry.io/collector/processor/memorylimiterprocessor v0.115.0 h1:LCA2jwxy1PRc7X/AtRJfMdOANh5rVLdwo5PAM+gAuyo= +go.opentelemetry.io/collector/processor/memorylimiterprocessor v0.115.0/go.mod h1:gPcHyza7Rek3jfrQFxw99fcWBDkkRqBaMHcUz9yYv5I= +go.opentelemetry.io/collector/processor/processorhelper/processorhelperprofiles v0.115.0 h1:r1UF8LPICTRXBL0685zV/CC8J4sWg/qm1g+sHOYMq2Y= +go.opentelemetry.io/collector/processor/processorhelper/processorhelperprofiles v0.115.0/go.mod h1:3erq5umu5a7DKXo4PBm4I5yJjc6r0aJNvBV2nVSPDuE= +go.opentelemetry.io/collector/processor/processorprofiles v0.115.0 h1:cCZAs+FXaebZPppqAN3m+X3etoSBL6NvyQo8l0hOZoo= +go.opentelemetry.io/collector/processor/processorprofiles v0.115.0/go.mod h1:kMxF0gknlWX4duuAJFi2/HuIRi6C3w95tOenRa0GKOY= +go.opentelemetry.io/collector/processor/processortest v0.115.0 h1:j9HEaYFOeOB6VYl9zGhBnhQbTkqGBa2udUvu5NTh6hc= +go.opentelemetry.io/collector/processor/processortest v0.115.0/go.mod h1:Gws+VEnp/eW3qAqPpqbKsrbnnxxNfyDjqrfUXbZfZic= +go.opentelemetry.io/collector/receiver v0.115.0 
h1:55Q3Jvj6zHCIA1psKqi/3kEMJO4OqUF5tNAEYNdB1U8= +go.opentelemetry.io/collector/receiver v0.115.0/go.mod h1:nBSCh2O/WUcfgpJ+Jpz+B0z0Hn5jHeRvF2WmLij5EIY= +go.opentelemetry.io/collector/receiver/nopreceiver v0.115.0 h1:87dxAcHekbXqLtjcQjnK1An2PWkWAhTly+EXzPEgYOE= +go.opentelemetry.io/collector/receiver/nopreceiver v0.115.0/go.mod h1:Llu88KNSNwvmYPRr2PMDDbVY9zHfHEbPPB4yTjjQQe0= +go.opentelemetry.io/collector/receiver/otlpreceiver v0.115.0 h1:NqMWsGuVy6y6VKTaPeJS7NZ9KAxhE/xyGUC7GaLYm/o= +go.opentelemetry.io/collector/receiver/otlpreceiver v0.115.0/go.mod h1:9ituzngnjsh/YvO+Phayq9BTk/nw0rgK5ZVvX1oxULk= +go.opentelemetry.io/collector/receiver/receiverprofiles v0.115.0 h1:R9JLaj2Al93smIPUkbJshAkb/cY0H5JBOxIx+Zu0NG4= +go.opentelemetry.io/collector/receiver/receiverprofiles v0.115.0/go.mod h1:05E5hGujWeeXJmzKZwTdHyZ/+rRyrQlQB5p5Q2XY39M= +go.opentelemetry.io/collector/receiver/receivertest v0.115.0 h1:OiB684SbHQi6/Pd3ZH0cXjYvCpBS9ilQBfTQx0wVXHg= +go.opentelemetry.io/collector/receiver/receivertest v0.115.0/go.mod h1:Y8Z9U/bz9Xpyt8GI8DxZZgryw3mnnIw+AeKVLTD2cP8= +go.opentelemetry.io/collector/scraper v0.115.0 h1:hbfebO7x1Xm96OwqeuLz5w7QAaB3ZMlwOkUo0XzPadc= +go.opentelemetry.io/collector/scraper v0.115.0/go.mod h1:7YoCO6/4PeExLiX1FokcydJGCQUa7lUqZsqXokJ5VZ4= +go.opentelemetry.io/collector/semconv v0.115.0 h1:SoqMvg4ZEB3mz2EdAb6XYa+TuMo5Mir5FRBr3nVFUDY= +go.opentelemetry.io/collector/semconv v0.115.0/go.mod h1:N6XE8Q0JKgBN2fAhkUQtqK9LT7rEGR6+Wu/Rtbal1iI= +go.opentelemetry.io/collector/service v0.115.0 h1:k4GAOiI5tZgB2QKgwA6c3TeAVr7QL/ft5cOQbzUr8Iw= +go.opentelemetry.io/collector/service v0.115.0/go.mod h1:DKde9LMhNebdREecDSsqiTFLI2wRc+IoV4/wGxU6goY= +go.opentelemetry.io/contrib/bridges/otelzap v0.6.0 h1:j8icMXyyqNf6HGuwlYhniPnVsbJIq7n+WirDu3VAJdQ= +go.opentelemetry.io/contrib/bridges/otelzap v0.6.0/go.mod h1:evIOZpl+kAlU5IsaYX2Siw+IbpacAZvXemVsgt70uvw= +go.opentelemetry.io/contrib/config v0.10.0 h1:2JknAzMaYjxrHkTnZh3eOme/Y2P5eHE2SWfhfV6Xd6c= +go.opentelemetry.io/contrib/config 
v0.10.0/go.mod h1:aND2M6/KfNkntI5cyvHriR/zvZgPf8j9yETdSmvpfmc= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.56.0 h1:yMkBS9yViCc7U7yeLzJPM2XizlfdVvBRSmsQDWu6qc0= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.56.0/go.mod h1:n8MR6/liuGB5EmTETUBeU5ZgqMOlqKRxUaqPQBOANZ8= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0 h1:UP6IpuHFkUgOQL9FFQFrZ+5LiwhhYRbi7VZSIx6Nj5s= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0/go.mod h1:qxuZLtbq5QDtdeSHsS7bcf6EH6uO6jUAgk764zd3rhM= +go.opentelemetry.io/contrib/propagators/b3 v1.31.0 h1:PQPXYscmwbCp76QDvO4hMngF2j8Bx/OTV86laEl8uqo= +go.opentelemetry.io/contrib/propagators/b3 v1.31.0/go.mod h1:jbqfV8wDdqSDrAYxVpXQnpM0XFMq2FtDesblJ7blOwQ= +go.opentelemetry.io/contrib/zpages v0.56.0 h1:W7vP6s3juzL5KiHpr41zLNmsJ0QAZudYu8ay0zGAoko= +go.opentelemetry.io/contrib/zpages v0.56.0/go.mod h1:IxPRP4TYHw9jLeaEOSDIiA9zmyJNZNO6sbW55iMvSXs= +go.opentelemetry.io/otel v1.32.0 h1:WnBN+Xjcteh0zdk01SVqV55d/m62NJLJdIyb4y/WO5U= +go.opentelemetry.io/otel v1.32.0/go.mod h1:00DCVSB0RQcnzlwyTfqtxSm+DRr9hpYrHjNGiBHVQIg= +go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.7.0 h1:mMOmtYie9Fx6TSVzw4W+NTpvoaS1JWWga37oI1a/4qQ= +go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.7.0/go.mod h1:yy7nDsMMBUkD+jeekJ36ur5f3jJIrmCwUrY67VFhNpA= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.32.0 h1:j7ZSD+5yn+lo3sGV69nW04rRR0jhYnBwjuX3r0HvnK0= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.32.0/go.mod h1:WXbYJTUaZXAbYd8lbgGuvih0yuCfOFC5RJoYnoLcGz8= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.32.0 h1:t/Qur3vKSkUCcDVaSumWF2PKHt85pc7fRvFuoVT8qFU= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.32.0/go.mod h1:Rl61tySSdcOJWoEgYZVtmnKdA0GeKrSqkHC1t+91CH8= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.31.0 
h1:K0XaT3DwHAcV4nKLzcQvwAgSyisUghWoY20I7huthMk= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.31.0/go.mod h1:B5Ki776z/MBnVha1Nzwp5arlzBbE3+1jk+pGmaP5HME= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.31.0 h1:FFeLy03iVTXP6ffeN2iXrxfGsZGCjVx0/4KlizjyBwU= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.31.0/go.mod h1:TMu73/k1CP8nBUpDLc71Wj/Kf7ZS9FK5b53VapRsP9o= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.31.0 h1:lUsI2TYsQw2r1IASwoROaCnjdj2cvC2+Jbxvk6nHnWU= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.31.0/go.mod h1:2HpZxxQurfGxJlJDblybejHB6RX6pmExPNe517hREw4= +go.opentelemetry.io/otel/exporters/prometheus v0.54.0 h1:rFwzp68QMgtzu9PgP3jm9XaMICI6TsofWWPcBDKwlsU= +go.opentelemetry.io/otel/exporters/prometheus v0.54.0/go.mod h1:QyjcV9qDP6VeK5qPyKETvNjmaaEc7+gqjh4SS0ZYzDU= +go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.7.0 h1:TwmL3O3fRR80m8EshBrd8YydEZMcUCsZXzOUlnFohwM= +go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.7.0/go.mod h1:tH98dDv5KPmPThswbXA0fr0Lwfs+OhK8HgaCo7PjRrk= +go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.32.0 h1:SZmDnHcgp3zwlPBS2JX2urGYe/jBKEIT6ZedHRUyCz8= +go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.32.0/go.mod h1:fdWW0HtZJ7+jNpTKUR0GpMEDP69nR8YBJQxNiVCE3jk= +go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.31.0 h1:UGZ1QwZWY67Z6BmckTU+9Rxn04m2bD3gD6Mk0OIOCPk= +go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.31.0/go.mod h1:fcwWuDuaObkkChiDlhEpSq9+X1C0omv+s5mBtToAQ64= +go.opentelemetry.io/otel/log v0.8.0 h1:egZ8vV5atrUWUbnSsHn6vB8R21G2wrKqNiDt3iWertk= +go.opentelemetry.io/otel/log v0.8.0/go.mod h1:M9qvDdUTRCopJcGRKg57+JSQ9LgLBrwwfC32epk5NX8= +go.opentelemetry.io/otel/metric v1.32.0 h1:xV2umtmNcThh2/a/aCP+h64Xx5wsj8qqnkYZktzNa0M= +go.opentelemetry.io/otel/metric v1.32.0/go.mod h1:jH7CIbbK6SH2V2wE16W05BHCtIDzauciCRLoc/SyMv8= +go.opentelemetry.io/otel/sdk v1.32.0 
h1:RNxepc9vK59A8XsgZQouW8ue8Gkb4jpWtJm9ge5lEG4= +go.opentelemetry.io/otel/sdk v1.32.0/go.mod h1:LqgegDBjKMmb2GC6/PrTnteJG39I8/vJCAP9LlJXEjU= +go.opentelemetry.io/otel/sdk/log v0.7.0 h1:dXkeI2S0MLc5g0/AwxTZv6EUEjctiH8aG14Am56NTmQ= +go.opentelemetry.io/otel/sdk/log v0.7.0/go.mod h1:oIRXpW+WD6M8BuGj5rtS0aRu/86cbDV/dAfNaZBIjYM= +go.opentelemetry.io/otel/sdk/metric v1.32.0 h1:rZvFnvmvawYb0alrYkjraqJq0Z4ZUJAiyYCU9snn1CU= +go.opentelemetry.io/otel/sdk/metric v1.32.0/go.mod h1:PWeZlq0zt9YkYAp3gjKZ0eicRYvOh1Gd+X99x6GHpCQ= +go.opentelemetry.io/otel/trace v1.32.0 h1:WIC9mYrXf8TmY/EXuULKc8hR17vE+Hjv2cssQDe03fM= +go.opentelemetry.io/otel/trace v1.32.0/go.mod h1:+i4rkvCraA+tG6AzwloGaCtkx53Fa+L+V8e9a7YvhT8= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= -go.opentelemetry.io/proto/otlp v1.2.0 h1:pVeZGk7nXDC9O2hncA6nHldxEjm6LByfA2aN8IOkz94= -go.opentelemetry.io/proto/otlp v1.2.0/go.mod h1:gGpR8txAl5M03pDhMC79G6SdqNV26naRm/KDsgaHD8A= +go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= +go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= @@ -1723,7 +1812,7 @@ golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90/go.mod h1:IxCIyHEi3zRg3s0 golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= -golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= +golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U= 
golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -1767,8 +1856,8 @@ golang.org/x/mod v0.3.1-0.20200828183125-ce943fd02449/go.mod h1:s0Qsj1ACt9ePp/hM golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= -golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0= +golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1801,7 +1890,6 @@ golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/ golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod 
h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= @@ -1818,7 +1906,7 @@ golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= -golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= +golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I= golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -1827,8 +1915,8 @@ golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4Iltr golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs= -golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/oauth2 v0.24.0 h1:KTBBxWqUa0ykRPLtV69rRto9TLXcqYkeswu48x/gvNE= +golang.org/x/oauth2 v0.24.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1882,7 +1970,6 @@ golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7w 
golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1893,7 +1980,6 @@ golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201015000850-e3ed0017c211/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1921,13 +2007,15 @@ golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= @@ -1938,7 +2026,7 @@ golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= -golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= +golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q= golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1956,6 +2044,7 @@ golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text 
v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -1963,8 +2052,9 @@ golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxb golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20211116232009-f0f3c7e86c11/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/time v0.6.0 h1:eTDhh4ZXt5Qf0augr54TN6suAUudPcawVZeIAPU7D4U= +golang.org/x/time v0.6.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -2023,8 +2113,8 @@ golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools 
v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg= -golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= +golang.org/x/tools v0.26.0 h1:v/60pFQmzmT9ExmjDv2gGIfi3OqfKoEP6I5+umXlbnQ= +golang.org/x/tools v0.26.0/go.mod h1:TPVVj70c7JJ3WCazhD8OdXcZg/og+b9+tH/KxylGwH0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -2033,8 +2123,8 @@ golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3j golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= gonum.org/v1/gonum v0.8.2/go.mod h1:oe/vMfY3deqTw+1EZJhuvEW2iwGF1bW9wwu7XCu0+v0= -gonum.org/v1/gonum v0.15.0 h1:2lYxjRbTYyxkJxlhC+LvJIx3SsANPdRybu1tGj9/OrQ= -gonum.org/v1/gonum v0.15.0/go.mod h1:xzZVBJBtS+Mz4q0Yl2LJTk+OxOg4jiXZ7qBoM0uISGo= +gonum.org/v1/gonum v0.15.1 h1:FNy7N6OUZVUaWG9pTiD+jlhdQ3lMP+/LcTpJ6+a8sQ0= +gonum.org/v1/gonum v0.15.1/go.mod h1:eZTZuRFrzu5pcyjN5wJhcIhnUdNijYxX1T2IcrOGY0o= gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= @@ -2053,8 +2143,8 @@ google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0M google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= 
google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= -google.golang.org/api v0.169.0 h1:QwWPy71FgMWqJN/l6jVlFHUa29a7dcUy02I8o799nPY= -google.golang.org/api v0.169.0/go.mod h1:gpNOiMA2tZ4mf5R9Iwf4rK/Dcz0fbdIgWYWVoxmsyLg= +google.golang.org/api v0.199.0 h1:aWUXClp+VFJmqE0JPvpZOK3LDQMyFKYIow4etYd9qxs= +google.golang.org/api v0.199.0/go.mod h1:ohG4qSztDJmZdjK/Ar6MhbAmb/Rpi4JHOqagsh90K28= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -2094,12 +2184,12 @@ google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= google.golang.org/genproto v0.0.0-20210917145530-b395a37504d4/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de h1:F6qOa9AZTYJXOUEr4jDysRDLrm4PHePlge4v4TGAlxY= -google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:VUhTRKeHn9wwcdrk73nvdC9gF178Tzhmt/qyaFcPLSo= -google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157 h1:7whR9kGa5LUwFtpLm2ArCEejtnxlGeLbAyjFY8sGNFw= -google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157/go.mod h1:99sLkeliLXfdj2J75X3Ho+rrVCaJze0uwN7zDDkjPVU= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157 h1:Zy9XzmMEflZ/MAaA7vNcoebnRAld7FsPW1EeBB7V0m8= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157/go.mod h1:EfXuqaE1J41VCDicxHzUDm+8rk+7ZdXzHV0IhO/I6s0= +google.golang.org/genproto v0.0.0-20240903143218-8af14fe29dc1 
h1:BulPr26Jqjnd4eYDVe+YvyR7Yc2vJGkO5/0UxD0/jZU= +google.golang.org/genproto v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:hL97c3SYopEHblzpxRL4lSs523++l8DYxGM1FQiYmb4= +google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28 h1:M0KvPgPmDZHPlbRbaNU1APr28TvwvvdUPlSv7PUvy8g= +google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28/go.mod h1:dguCy7UOdZhTvLzDyt15+rOrawrpM4q7DD9dQ1P11P4= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28 h1:XVhgTWWV3kGQlwJHR3upFWZeTsei6Oks1apkZSeonIE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= @@ -2119,8 +2209,8 @@ google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc= -google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ= +google.golang.org/grpc v1.68.1 h1:oI5oTa11+ng8r8XMMN7jAOmWfPZWbYpCFaMUTACxkM0= +google.golang.org/grpc v1.68.1/go.mod h1:+q1XYFJjShcqn0QZHvCyeR4CXPA+llXIeUIfIe00waw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -2135,8 
+2225,8 @@ google.golang.org/protobuf v1.25.1-0.20200805231151-a709e31e5d12/go.mod h1:9JNX7 google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= -google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= +google.golang.org/protobuf v1.35.2 h1:8Ar7bF+apOIoThw1EdZl0p1oWvMqTHmpA2fRTyZO8io= +google.golang.org/protobuf v1.35.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -2146,6 +2236,8 @@ gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8 gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4= +gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= gopkg.in/fatih/pool.v2 v2.0.0 h1:xIFeWtxifuQJGk/IEPKsTduEKcKvPmhoiVDGpC40nKg= gopkg.in/fatih/pool.v2 v2.0.0/go.mod h1:8xVGeu1/2jr2wm5V9SPuMht2H5AEmf5aFMGSQixtjTY= gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= @@ -2191,30 +2283,30 @@ honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt honnef.co/go/tools v0.0.1-2020.1.3/go.mod 
h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= k8s.io/api v0.21.1/go.mod h1:FstGROTmsSHBarKc8bylzXih8BLNYTiS3TZcsoEDg2s= -k8s.io/api v0.30.0 h1:siWhRq7cNjy2iHssOB9SCGNCl2spiF1dO3dABqZ8niA= -k8s.io/api v0.30.0/go.mod h1:OPlaYhoHs8EQ1ql0R/TsUgaRPhpKNxIMrKQfWUp8QSE= +k8s.io/api v0.31.3 h1:umzm5o8lFbdN/hIXbrK9oRpOproJO62CV1zqxXrLgk8= +k8s.io/api v0.31.3/go.mod h1:UJrkIp9pnMOI9K2nlL6vwpxRzzEX5sWgn8kGQe92kCE= k8s.io/apimachinery v0.21.1/go.mod h1:jbreFvJo3ov9rj7eWT7+sYiRx+qZuCYXwWT1bcDswPY= -k8s.io/apimachinery v0.30.0 h1:qxVPsyDM5XS96NIh9Oj6LavoVFYff/Pon9cZeDIkHHA= -k8s.io/apimachinery v0.30.0/go.mod h1:iexa2somDaxdnj7bha06bhb43Zpa6eWH8N8dbqVjTUc= +k8s.io/apimachinery v0.31.3 h1:6l0WhcYgasZ/wk9ktLq5vLaoXJJr5ts6lkaQzgeYPq4= +k8s.io/apimachinery v0.31.3/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo= k8s.io/client-go v0.21.1/go.mod h1:/kEw4RgW+3xnBGzvp9IWxKSNA+lXn3A7AuH3gdOAzLs= -k8s.io/client-go v0.30.0 h1:sB1AGGlhY/o7KCyCEQ0bPWzYDL0pwOZO4vAtTSh/gJQ= -k8s.io/client-go v0.30.0/go.mod h1:g7li5O5256qe6TYdAMyX/otJqMhIiGgTapdLchhmOaY= +k8s.io/client-go v0.31.3 h1:CAlZuM+PH2cm+86LOBemaJI/lQ5linJ6UFxKX/SoG+4= +k8s.io/client-go v0.31.3/go.mod h1:2CgjPUTpv3fE5dNygAr2NcM8nhHzXvxB8KL5gYc3kJs= k8s.io/code-generator v0.21.1/go.mod h1:hUlps5+9QaTrKx+jiM4rmq7YmH8wPOIko64uZCHDh6Q= k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20201214224949-b6c5ce23f027/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/klog/v2 v2.8.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= -k8s.io/klog/v2 v2.120.1 h1:QXU6cPEOIslTGvZaXvFWiP9VKyeet3sawzTOvdXb4Vw= -k8s.io/klog/v2 v2.120.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= +k8s.io/klog/v2 v2.130.1 
h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= +k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= k8s.io/kube-openapi v0.0.0-20210305001622-591a79e4bda7/go.mod h1:wXW5VT87nVfh/iLV8FpR2uDvrFyomxbtb1KivDbvPTE= k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag= k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98= k8s.io/kubelet v0.30.0 h1:/pqHVR2Rn8ExCpn211wL3pMtqRFpcBcJPl4+1INbIMk= k8s.io/kubelet v0.30.0/go.mod h1:WukdKqbQxnj+csn3K8XOKeX7Sh60J/da25IILjvvB5s= k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI= -k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 h1:pUdcCO1Lk/tbT5ztQWOBi5HBgbBP1J8+AsQnQCKsi8A= +k8s.io/utils v0.0.0-20240711033017-18e509b52bc8/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= lukechampine.com/uint128 v1.3.0 h1:cDdUVfRwDUDovz610ABgFD17nXD4/uDgVHl2sC3+sbo= lukechampine.com/uint128 v1.3.0/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= modernc.org/cc/v3 v3.40.0 h1:P3g79IUS/93SYhtoeaHW+kRCIrYaxJ27MFPv+7kaTOw= @@ -2247,5 +2339,5 @@ sigs.k8s.io/structured-merge-diff/v4 v4.1.0/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= -sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= -sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= +sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= +sigs.k8s.io/yaml v1.4.0/go.mod 
h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= diff --git a/internal/k8sCommon/k8sclient/endpointslicewatcher.go b/internal/k8sCommon/k8sclient/endpointslicewatcher.go new file mode 100644 index 0000000000..764fa6520b --- /dev/null +++ b/internal/k8sCommon/k8sclient/endpointslicewatcher.go @@ -0,0 +1,307 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: MIT + +package k8sclient + +import ( + "fmt" + "sync" + + "go.uber.org/zap" + discv1 "k8s.io/api/discovery/v1" + "k8s.io/client-go/informers" + "k8s.io/client-go/tools/cache" +) + +// EndpointSliceWatcher watches EndpointSlices and builds: +// 1. ip/ip:port -> {"workload", "namespace", "node"} +type EndpointSliceWatcher struct { + logger *zap.Logger + informer cache.SharedIndexInformer + IPToPodMetadata *sync.Map // key: "ip" or "ip:port", val: {"workload", "namespace", "node"} + + // For bookkeeping, so we can remove old mappings upon EndpointSlice deletion + sliceToKeysMap sync.Map // map[sliceUID string] -> []string of keys we inserted, which are "ip" or "ip:port" + deleter Deleter +} + +// PodMetadata holds {"workload", "namespace", "node"} +type PodMetadata struct { + Workload string + Namespace string + Node string +} + +// kvPair holds one mapping from key -> value. The isService flag +// indicates whether this key is for a Service or for an IP/IP:port. +type kvPair struct { + key string // key: "ip" or "ip:port" + value PodMetadata // value: {"workload", "namespace", "node"} +} + +// NewEndpointSliceWatcher creates an EndpointSlice watcher for the new approach (when USE_LIST_POD=false). 
+func NewEndpointSliceWatcher( + logger *zap.Logger, + factory informers.SharedInformerFactory, + deleter Deleter, +) *EndpointSliceWatcher { + + esInformer := factory.Discovery().V1().EndpointSlices().Informer() + err := esInformer.SetTransform(minimizeEndpointSlice) + if err != nil { + logger.Error("failed to minimize Service objects", zap.Error(err)) + } + + return &EndpointSliceWatcher{ + logger: logger, + informer: esInformer, + IPToPodMetadata: &sync.Map{}, + deleter: deleter, + } +} + +// run starts the endpointSliceWatcher. +func (w *EndpointSliceWatcher) Run(stopCh chan struct{}) { + w.informer.AddEventHandler(cache.ResourceEventHandlerFuncs{ + AddFunc: func(obj interface{}) { + w.handleSliceAdd(obj) + }, + UpdateFunc: func(oldObj, newObj interface{}) { + w.handleSliceUpdate(newObj, oldObj) + }, + DeleteFunc: func(obj interface{}) { + w.handleSliceDelete(obj) + }, + }) + go w.informer.Run(stopCh) +} + +func (w *EndpointSliceWatcher) WaitForCacheSync(stopCh chan struct{}) { + if !cache.WaitForNamedCacheSync("endpointSliceWatcher", stopCh, w.informer.HasSynced) { + w.logger.Fatal("timed out waiting for endpointSliceWatcher cache to sync") + } + w.logger.Info("endpointSliceWatcher: Cache synced") +} + +// extractEndpointSliceKeyValuePairs computes the relevant mappings from an EndpointSlice. +// +// It returns a list of kvPair: +// - All IP and IP:port keys -> {"workload", "namespace", "node"} +// +// This function does NOT modify ipToPodMetadata It's purely for computing +// the pairs, so it can be reused by both add and update methods. 
+func (w *EndpointSliceWatcher) extractEndpointSliceKeyValuePairs(slice *discv1.EndpointSlice) []kvPair { + var pairs []kvPair + svcName := slice.Labels["kubernetes.io/service-name"] + + for _, endpoint := range slice.Endpoints { + if endpoint.TargetRef != nil { + if endpoint.TargetRef.Kind != "Pod" { + continue + } + + podName := endpoint.TargetRef.Name + ns := endpoint.TargetRef.Namespace + + var nodeName string + if endpoint.NodeName != nil { + nodeName = *endpoint.NodeName + } + + w.logger.Debug("Processing endpoint", + zap.String("podName", podName), + zap.String("namespace", ns), + zap.String("nodeName", nodeName), + ) + + derivedWorkload := inferWorkloadName(podName, svcName) + if derivedWorkload == "" { + w.logger.Warn("failed to infer workload name from Pod name", zap.String("podName", podName)) + continue + } + fullWl := + PodMetadata{ + Workload: derivedWorkload, + Namespace: ns, + Node: nodeName, + } + + // Build IP and IP:port pairs + for _, addr := range endpoint.Addresses { + // "ip" -> {"workload", "namespace", "node"} + pairs = append(pairs, kvPair{ + key: addr, + value: fullWl, + }) + + // "ip:port" -> {"workload", "namespace", "node"} for each port + for _, portDef := range slice.Ports { + if portDef.Port != nil { + ipPort := fmt.Sprintf("%s:%d", addr, *portDef.Port) + pairs = append(pairs, kvPair{ + key: ipPort, + value: fullWl, + }) + } + } + } + } + + } + + return pairs +} + +// handleSliceAdd handles a new EndpointSlice that wasn't seen before. +// It computes all keys and directly stores them. Then it records those keys +// in sliceToKeysMap so that we can remove them later upon deletion. 
+func (w *EndpointSliceWatcher) handleSliceAdd(obj interface{}) { + newSlice := obj.(*discv1.EndpointSlice) + w.logger.Debug("Received EndpointSlice Add", + zap.String("sliceName", newSlice.Name), + zap.String("uid", string(newSlice.UID)), + zap.String("namespace", newSlice.Namespace), + ) + sliceUID := string(newSlice.UID) + + // Compute all key-value pairs for this new slice + pairs := w.extractEndpointSliceKeyValuePairs(newSlice) + w.logger.Debug("Extracted pairs from new slice", + zap.Int("pairsCount", len(pairs)), + ) + + // Insert them into our ipToWorkload / serviceToWorkload, and track the keys. + keys := make([]string, 0, len(pairs)) + for _, kv := range pairs { + w.IPToPodMetadata.Store(kv.key, kv.value) + keys = append(keys, kv.key) + } + + // Save these keys so we can remove them on delete + w.sliceToKeysMap.Store(sliceUID, keys) +} + +// handleSliceUpdate handles an update from oldSlice -> newSlice. +// Instead of blindly removing all old keys and adding new ones, it diffs them: +// - remove only keys that no longer exist, +// - add only new keys that didn't exist before, +// - keep those that haven't changed. +func (w *EndpointSliceWatcher) handleSliceUpdate(oldObj, newObj interface{}) { + oldSlice := oldObj.(*discv1.EndpointSlice) + newSlice := newObj.(*discv1.EndpointSlice) + + w.logger.Debug("Received EndpointSlice Update", + zap.String("oldSliceUID", string(oldSlice.UID)), + zap.String("newSliceUID", string(newSlice.UID)), + zap.String("name", newSlice.Name), + zap.String("namespace", newSlice.Namespace), + ) + + oldUID := string(oldSlice.UID) + newUID := string(newSlice.UID) + + // 1) Fetch old keys from sliceToKeysMap (if present). + var oldKeys []string + if val, ok := w.sliceToKeysMap.Load(oldUID); ok { + oldKeys = val.([]string) + } + + // 2) Compute fresh pairs (and thus keys) from the new slice. 
+ newPairs := w.extractEndpointSliceKeyValuePairs(newSlice) + var newKeys []string + for _, kv := range newPairs { + newKeys = append(newKeys, kv.key) + } + + // Convert oldKeys/newKeys to sets for easy diff + oldKeysSet := make(map[string]struct{}, len(oldKeys)) + for _, k := range oldKeys { + oldKeysSet[k] = struct{}{} + } + newKeysSet := make(map[string]struct{}, len(newKeys)) + for _, k := range newKeys { + newKeysSet[k] = struct{}{} + } + + // 3) For each key in oldKeys that doesn't exist in newKeys, remove it + for k := range oldKeysSet { + if _, stillPresent := newKeysSet[k]; !stillPresent { + w.deleter.DeleteWithDelay(w.IPToPodMetadata, k) + } + } + + // 4) For each key in newKeys that wasn't in oldKeys, we need to store it + // in the appropriate sync.Map. We'll look up the value from newPairs. + for _, kv := range newPairs { + if _, alreadyHad := oldKeysSet[kv.key]; !alreadyHad { + w.IPToPodMetadata.Store(kv.key, kv.value) + } + } + + // 5) Update sliceToKeysMap for the new slice UID + // (Often the UID doesn't change across updates, but we'll handle it properly.) + w.sliceToKeysMap.Delete(oldUID) + w.sliceToKeysMap.Store(newUID, newKeys) + + w.logger.Debug("Finished handling EndpointSlice Update", + zap.String("sliceUID", string(newSlice.UID))) +} + +// handleSliceDelete removes any IP->workload or service->workload keys that were created by this slice. 
+func (w *EndpointSliceWatcher) handleSliceDelete(obj interface{}) { + slice := obj.(*discv1.EndpointSlice) + w.logger.Debug("Received EndpointSlice Delete", + zap.String("uid", string(slice.UID)), + zap.String("name", slice.Name), + zap.String("namespace", slice.Namespace), + ) + w.removeSliceKeys(slice) +} + +func (w *EndpointSliceWatcher) removeSliceKeys(slice *discv1.EndpointSlice) { + sliceUID := string(slice.UID) + val, ok := w.sliceToKeysMap.Load(sliceUID) + if !ok { + return + } + + keys := val.([]string) + for _, k := range keys { + w.deleter.DeleteWithDelay(w.IPToPodMetadata, k) + } + w.sliceToKeysMap.Delete(sliceUID) +} + +// minimizeEndpointSlice removes fields that are not required by our mapping logic, +// retaining only the minimal set of fields needed (ObjectMeta.Name, Namespace, UID, Labels, +// Endpoints (with their Addresses and TargetRef) and Ports). +func minimizeEndpointSlice(obj interface{}) (interface{}, error) { + eps, ok := obj.(*discv1.EndpointSlice) + if !ok { + return obj, fmt.Errorf("object is not an EndpointSlice") + } + + // Minimize metadata: we only really need Name, Namespace, UID and Labels. + eps.Annotations = nil + eps.ManagedFields = nil + eps.Finalizers = nil + + // The watcher only uses: + // - eps.Labels["kubernetes.io/service-name"] + // - eps.Namespace (from metadata) + // - eps.UID (from metadata) + // - eps.Endpoints: for each endpoint, its Addresses and TargetRef. + // - eps.Ports: each port's Port (and optionally Name/Protocol) + // + // For each endpoint, clear fields that we don’t use. + for i := range eps.Endpoints { + // We only need Addresses and TargetRef. Hostname, and Zone are not used. + eps.Endpoints[i].Hostname = nil + eps.Endpoints[i].Zone = nil + eps.Endpoints[i].DeprecatedTopology = nil + eps.Endpoints[i].Hints = nil + } + + // No transformation is needed for eps.Ports because we use them directly. 
+ return eps, nil +} diff --git a/internal/k8sCommon/k8sclient/endpointslicewatcher_test.go b/internal/k8sCommon/k8sclient/endpointslicewatcher_test.go new file mode 100644 index 0000000000..05ef32bea1 --- /dev/null +++ b/internal/k8sCommon/k8sclient/endpointslicewatcher_test.go @@ -0,0 +1,292 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: MIT + +package k8sclient + +import ( + "fmt" + "reflect" + "sort" + "sync" + "testing" + + "github.com/stretchr/testify/assert" + "go.uber.org/zap" + v1 "k8s.io/api/core/v1" + discv1 "k8s.io/api/discovery/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" +) + +// MockDeleter deletes a key immediately, useful for testing. +type MockDeleter struct{} + +func (md *MockDeleter) DeleteWithDelay(m *sync.Map, key interface{}) { + m.Delete(key) +} + +var mockDeleter = &MockDeleter{} + +func newEndpointSliceWatcherForTest() *EndpointSliceWatcher { + return &EndpointSliceWatcher{ + logger: zap.NewNop(), + IPToPodMetadata: &sync.Map{}, + deleter: mockDeleter, + } +} + +// createTestEndpointSlice is a helper to build a minimal EndpointSlice. +// The slice will have one Endpoint (with its TargetRef) and a list of Ports. +// svcName is stored in the Labels (key "kubernetes.io/service-name") if non-empty. +func createTestEndpointSlice(uid, namespace, svcName, podName string, addresses []string, portNumbers []int32, nodeName *string) *discv1.EndpointSlice { + // Build the port list. + var ports []discv1.EndpointPort + for i, p := range portNumbers { + portVal := p + name := fmt.Sprintf("port-%d", i) + protocol := v1.ProtocolTCP + ports = append(ports, discv1.EndpointPort{ + Name: &name, + Protocol: &protocol, + Port: &portVal, + }) + } + + // Build a single endpoint with the given addresses and a TargetRef. 
+ endpoint := discv1.Endpoint{ + Addresses: addresses, + TargetRef: &v1.ObjectReference{ + Kind: "Pod", + Name: podName, + Namespace: namespace, + }, + } + if nodeName != nil { + endpoint.NodeName = nodeName + } + + labels := map[string]string{} + if svcName != "" { + labels["kubernetes.io/service-name"] = svcName + } + + return &discv1.EndpointSlice{ + ObjectMeta: metav1.ObjectMeta{ + UID: types.UID(uid), + Namespace: namespace, + Labels: labels, + }, + Endpoints: []discv1.Endpoint{endpoint}, + Ports: ports, + } +} + +// --- Tests --- + +// TestEndpointSliceAddition verifies that when a new EndpointSlice is added, +// the appropriate IP/IP:port -> PodMetadata entries are inserted into IPToPodMetadata. +func TestEndpointSliceAddition(t *testing.T) { + watcher := newEndpointSliceWatcherForTest() + + // Create a test EndpointSlice: + // - UID: "uid-1", Namespace: "testns" + // - Labels: "kubernetes.io/service-name" = "mysvc" + // - One Endpoint with TargetRef.Kind = "Pod", Name "workload-69dww", Namespace "testns" + // - Endpoint.Addresses: ["1.2.3.4"] + // - One Port with value 80 + slice := createTestEndpointSlice("uid-1", "testns", "mysvc", "workload-69dww", []string{"1.2.3.4"}, []int32{80}, nil) + + // Call the add handler. + watcher.handleSliceAdd(slice) + + // The code calls inferWorkloadName(podName, svcName) to get the Workload name. + // For "workload-69dww", it typically infers "workload". + // So expected PodMetadata is: + expectedMeta := PodMetadata{ + Workload: "workload", + Namespace: "testns", + Node: "", + } + + // We expect these keys to be present in IPToPodMetadata: + // - "1.2.3.4" + // - "1.2.3.4:80" + var expectedKeys = []string{"1.2.3.4", "1.2.3.4:80"} + + for _, key := range expectedKeys { + val, ok := watcher.IPToPodMetadata.Load(key) + assert.True(t, ok, "expected IPToPodMetadata key %s", key) + assert.Equal(t, expectedMeta, val, "IPToPodMetadata[%s] mismatch", key) + } + + // Verify that sliceToKeysMap recorded all keys. 
+ val, ok := watcher.sliceToKeysMap.Load(string(slice.UID)) + assert.True(t, ok, "expected sliceToKeysMap to contain UID %s", slice.UID) + storedKeys := val.([]string) + sort.Strings(storedKeys) + sort.Strings(expectedKeys) + assert.Equal(t, expectedKeys, storedKeys, "sliceToKeysMap keys mismatch") +} + +// TestEndpointSliceDeletion verifies that when an EndpointSlice is deleted, +// all keys that were added get removed from IPToPodMetadata. +func TestEndpointSliceDeletion(t *testing.T) { + watcher := newEndpointSliceWatcherForTest() + + // Create a test EndpointSlice (similar to the addition test). + slice := createTestEndpointSlice("uid-1", "testns", "mysvc", "workload-76977669dc-lwx64", + []string{"1.2.3.4"}, []int32{80}, nil) + watcher.handleSliceAdd(slice) + + // Now call deletion. + watcher.handleSliceDelete(slice) + + // Verify that the keys are removed from IPToPodMetadata. + removedKeys := []string{"1.2.3.4", "1.2.3.4:80"} + for _, key := range removedKeys { + _, ok := watcher.IPToPodMetadata.Load(key) + assert.False(t, ok, "expected IPToPodMetadata key %s to be deleted", key) + } + + // Also verify that sliceToKeysMap no longer contains an entry. + _, ok := watcher.sliceToKeysMap.Load(string(slice.UID)) + assert.False(t, ok, "expected sliceToKeysMap entry for UID %s to be deleted", slice.UID) +} + +// TestEndpointSliceUpdate verifies that on updates, keys are added and/or removed as appropriate. +func TestEndpointSliceUpdate(t *testing.T) { + // --- Subtest: Complete change (no overlap) --- + t.Run("complete change", func(t *testing.T) { + watcher := newEndpointSliceWatcherForTest() + + // Old slice: + // UID "uid-2", Namespace "testns", svc label "mysvc", + // One endpoint with Pod name "workload-75d9d5968d-fx8px", addresses ["1.2.3.4"], port 80. 
+ oldSlice := createTestEndpointSlice("uid-2", "testns", "mysvc", "workload-75d9d5968d-fx8px", + []string{"1.2.3.4"}, []int32{80}, nil) + watcher.handleSliceAdd(oldSlice) + + // New slice: same UID, but label changed to "othersvc" + // and a different endpoint: Pod "workload-6d9b7f8597-wbvxn", addresses ["1.2.3.5"], port 443. + newSlice := createTestEndpointSlice("uid-2", "testns", "othersvc", "workload-6d9b7f8597-wbvxn", + []string{"1.2.3.5"}, []int32{443}, nil) + + // Call update handler. + watcher.handleSliceUpdate(oldSlice, newSlice) + + // Old keys that should be removed: + // "1.2.3.4" and "1.2.3.4:80" + removedKeys := []string{"1.2.3.4", "1.2.3.4:80"} + for _, key := range removedKeys { + _, ok := watcher.IPToPodMetadata.Load(key) + assert.False(t, ok, "expected IPToPodMetadata key %s to be removed", key) + } + + // New keys that should be added: + // "1.2.3.5", "1.2.3.5:443" + // The derived workload name is "workload" (from "workload-6d9b7f8597-wbvxn"), namespace "testns". + expectedMeta := PodMetadata{ + Workload: "workload", + Namespace: "testns", + Node: "", + } + addedKeys := []string{"1.2.3.5", "1.2.3.5:443"} + for _, key := range addedKeys { + val, ok := watcher.IPToPodMetadata.Load(key) + assert.True(t, ok, "expected IPToPodMetadata key %s to be added", key) + assert.Equal(t, expectedMeta, val, "value for key %s mismatch", key) + } + + // Check that sliceToKeysMap now contains exactly the new keys. 
+ val, ok := watcher.sliceToKeysMap.Load(string(newSlice.UID)) + assert.True(t, ok, "expected sliceToKeysMap entry for UID %s", newSlice.UID) + gotKeys := val.([]string) + sort.Strings(gotKeys) + sort.Strings(addedKeys) + assert.True(t, reflect.DeepEqual(addedKeys, gotKeys), + "sliceToKeysMap keys mismatch, got: %v, want: %v", gotKeys, addedKeys) + }) + + // --- Subtest: Partial overlap --- + t.Run("partial overlap", func(t *testing.T) { + watcher := newEndpointSliceWatcherForTest() + + // Old slice: + // UID "uid-3", namespace "testns", label "mysvc", + // 1 endpoint: Pod "workload-6d9b7f8597-b5l2j", addresses ["1.2.3.4"], port 80. + oldSlice := createTestEndpointSlice("uid-3", "testns", "mysvc", "workload-6d9b7f8597-b5l2j", + []string{"1.2.3.4"}, []int32{80}, nil) + watcher.handleSliceAdd(oldSlice) + + // New slice: same UID, same label "mysvc", + // but now 2 endpoints: + // - Pod "workload-6d9b7f8597-b5l2j", addresses ["1.2.3.4"], port 80 (same as old) + // - Pod "workload-6d9b7f8597-fx8px", addresses ["1.2.3.5"], port 80 (new) + newSlice := &discv1.EndpointSlice{ + ObjectMeta: metav1.ObjectMeta{ + UID: "uid-3", + Namespace: "testns", + Labels: map[string]string{ + "kubernetes.io/service-name": "mysvc", + }, + }, + Endpoints: []discv1.Endpoint{ + { + Addresses: []string{"1.2.3.4"}, + TargetRef: &v1.ObjectReference{ + Kind: "Pod", + Name: "workload-6d9b7f8597-b5l2j", + Namespace: "testns", + }, + }, + { + Addresses: []string{"1.2.3.5"}, + TargetRef: &v1.ObjectReference{ + Kind: "Pod", + Name: "workload-6d9b7f8597-fx8px", + Namespace: "testns", + }, + }, + }, + Ports: []discv1.EndpointPort{ + { + Name: func() *string { s := "port-0"; return &s }(), + Protocol: func() *v1.Protocol { p := v1.ProtocolTCP; return &p }(), + Port: func() *int32 { p := int32(80); return &p }(), + }, + }, + } + + watcher.handleSliceUpdate(oldSlice, newSlice) + + // We expect: + // - "1.2.3.4" & "1.2.3.4:80" remain + // - "1.2.3.5" & "1.2.3.5:80" get added + expectedMeta := PodMetadata{ + 
Workload: "workload", // from "workload-6d9b7f8597-..." + Namespace: "testns", + Node: "", + } + + expectedKeys := []string{ + "1.2.3.4", + "1.2.3.4:80", + "1.2.3.5", + "1.2.3.5:80", + } + for _, key := range expectedKeys { + val, ok := watcher.IPToPodMetadata.Load(key) + assert.True(t, ok, "expected IPToPodMetadata key %s", key) + assert.Equal(t, expectedMeta, val, "IPToPodMetadata[%s] mismatch", key) + } + + // Check that sliceToKeysMap has the union of all keys. + val, ok := watcher.sliceToKeysMap.Load("uid-3") + assert.True(t, ok, "expected sliceToKeysMap entry for uid-3") + gotKeys := val.([]string) + sort.Strings(gotKeys) + sort.Strings(expectedKeys) + assert.True(t, reflect.DeepEqual(expectedKeys, gotKeys), + "sliceToKeysMap keys mismatch, got: %v, want: %v", gotKeys, expectedKeys) + }) +} diff --git a/internal/k8sCommon/k8sclient/kubernetes_utils.go b/internal/k8sCommon/k8sclient/kubernetes_utils.go new file mode 100644 index 0000000000..fffd3faf58 --- /dev/null +++ b/internal/k8sCommon/k8sclient/kubernetes_utils.go @@ -0,0 +1,120 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: MIT + +package k8sclient + +import ( + "fmt" + "regexp" + "sync" + "time" +) + +const ( + // kubeAllowedStringAlphaNums holds the characters allowed in replicaset names from as parent deployment + // https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/util/rand/rand.go#L121 + kubeAllowedStringAlphaNums = "bcdfghjklmnpqrstvwxz2456789" +) + +var ( + // ReplicaSet name = Deployment name + "-" + up to 10 alphanumeric characters string, if the ReplicaSet was created through a deployment + // The suffix string of the ReplicaSet name is an int32 number (0 to 4,294,967,295) that is cast to a string and then + // mapped to an alphanumeric value with only the following characters allowed: "bcdfghjklmnpqrstvwxz2456789". + // The suffix string length is therefore nondeterministic. 
The regex accepts a suffix of length 6-10 to account for + // ReplicaSets not managed by deployments that may have similar names. + // Suffix Generation: https://github.com/kubernetes/kubernetes/blob/master/pkg/controller/controller_utils.go#L1201 + // Alphanumeric Mapping: https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/util/rand/rand.go#L121) + replicaSetWithDeploymentNamePattern = fmt.Sprintf(`^(.+)-[%s]{6,10}$`, kubeAllowedStringAlphaNums) + deploymentFromReplicaSetPattern = regexp.MustCompile(replicaSetWithDeploymentNamePattern) + // if a pod is launched directly by a replicaSet or daemonSet (with a given name by users), its name has the following pattern: + // Pod name = ReplicaSet name + 5 alphanumeric characters long string + // some code reference for daemon set: + // 1. daemonset uses the strategy to create pods: https://github.com/kubernetes/kubernetes/blob/82e3a671e79d1740ab9a3b3fac8a3bb7d065a6fb/pkg/registry/apps/daemonset/strategy.go#L46 + // 2. the strategy uses SimpleNameGenerator to create names: https://github.com/kubernetes/kubernetes/blob/82e3a671e79d1740ab9a3b3fac8a3bb7d065a6fb/staging/src/k8s.io/apiserver/pkg/storage/names/generate.go#L53 + // 3. the random name generator only use non vowels char + numbers: https://github.com/kubernetes/kubernetes/blob/82e3a671e79d1740ab9a3b3fac8a3bb7d065a6fb/staging/src/k8s.io/apimachinery/pkg/util/rand/rand.go#L83 + podWithSuffixPattern = fmt.Sprintf(`^(.+)-[%s]{5}$`, kubeAllowedStringAlphaNums) + replicaSetOrDaemonSetFromPodPattern = regexp.MustCompile(podWithSuffixPattern) + + // Pattern for StatefulSet: - + reStatefulSet = regexp.MustCompile(`^(.+)-(\d+)$`) +) + +// InferWorkloadName tries to parse the given podName to find the top-level workload name. +// +// 1) If it matches -, return . +// 2) If it matches -<5charSuffix>: +// - If is -<6–10charSuffix>, return . +// - Else return (likely a bare ReplicaSet or DaemonSet). 
+// +// 3) If no pattern matches, return the original podName. +// +// Caveat: You can't reliably distinguish DaemonSet vs. bare ReplicaSet by name alone. +// In some edge cases when the deployment name is longer than 47 characters, The regex pattern is +// not reliable. See reference: +// - https://pauldally.medium.com/why-you-try-to-keep-your-deployment-names-to-47-characters-or-less-1f93a848d34c +// - https://github.com/kubernetes/kubernetes/issues/116447#issuecomment-1530652258 +// +// For that, we fall back to use service name as last defense. +func inferWorkloadName(podName, fallbackServiceName string) string { + // 1) Check if it's a StatefulSet pod: - + if matches := reStatefulSet.FindStringSubmatch(podName); matches != nil { + return matches[1] // e.g. "mysql-0" => "mysql" + } + + // 2) Check if it's a Pod with a 5-char random suffix: -<5Chars> + if matches := replicaSetOrDaemonSetFromPodPattern.FindStringSubmatch(podName); matches != nil { + parentName := matches[1] + + // If parentName ends with 6–10 random chars, that parent is a Deployment-based ReplicaSet. + // So the top-level workload is the first part before that suffix. + if rsMatches := deploymentFromReplicaSetPattern.FindStringSubmatch(parentName); rsMatches != nil { + return rsMatches[1] // e.g. "nginx-a2b3c4" => "nginx" + } + + // Otherwise, it's a "bare" ReplicaSet or DaemonSet—just return parentName. 
+ return parentName + } + + // 3) If none of the patterns matched, return the service name as fallback + if fallbackServiceName != "" { + return fallbackServiceName + } + + // 4) Finally return the full pod name (I don't think this will happen) + return podName +} + +// a safe channel which can be closed multiple times +type SafeChannel struct { + sync.Mutex + + Ch chan struct{} + Closed bool +} + +func (sc *SafeChannel) Close() { + sc.Lock() + defer sc.Unlock() + + if !sc.Closed { + close(sc.Ch) + sc.Closed = true + } +} + +// Deleter represents a type that can delete a key from a map after a certain delay. +type Deleter interface { + DeleteWithDelay(m *sync.Map, key interface{}) +} + +// TimedDeleter deletes a key after a specified delay. +type TimedDeleter struct { + Delay time.Duration +} + +func (td *TimedDeleter) DeleteWithDelay(m *sync.Map, key interface{}) { + go func() { + time.Sleep(td.Delay) + m.Delete(key) + }() +} diff --git a/internal/k8sCommon/k8sclient/kubernetes_utils_test.go b/internal/k8sCommon/k8sclient/kubernetes_utils_test.go new file mode 100644 index 0000000000..21f3b0f3d5 --- /dev/null +++ b/internal/k8sCommon/k8sclient/kubernetes_utils_test.go @@ -0,0 +1,82 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: MIT + +package k8sclient + +import ( + "testing" +) + +func TestInferWorkloadName(t *testing.T) { + testCases := []struct { + name string + podName string + service string + expected string + }{ + { + name: "StatefulSet single digit", + podName: "mysql-0", + service: "fallback-service", + expected: "mysql", + }, + { + name: "StatefulSet multiple digits", + podName: "mysql-10", + service: "fallback-service", + expected: "mysql", + }, + { + name: "ReplicaSet or DaemonSet bare pod (5 char suffix)", + podName: "nginx-b2dfg", + service: "fallback-service", + expected: "nginx", + }, + { + name: "Deployment-based ReplicaSet pod (two-level suffix)", + podName: "nginx-76977669dc-lwx64", + service: "fallback-service", + expected: "nginx", + }, + { + name: "Non matching, fallback to service", + podName: "simplepod", + service: "my-service", + expected: "my-service", + }, + { + name: "ReplicaSet name with some numeric part, still 5 char suffix", + podName: "nginx-123-d9stt", + service: "my-service", + expected: "nginx-123", + }, + { + name: "Confusing case but still matches a deployment-based RS suffix", + podName: "nginx-245678-d9stt", + service: "nginx-service", + expected: "nginx", + }, + { + name: "Confusing case not matching any known pattern, fallback to service if none matched fully", + podName: "nginx-123456-d9stt", + service: "nginx-service", + expected: "nginx-123456", + }, + { + name: "Empty Pod name, fallback to service", + podName: "", + service: "service", + expected: "service", + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + got := inferWorkloadName(tc.podName, tc.service) + if got != tc.expected { + t.Errorf("inferWorkloadName(%q, %q) = %q; want %q", + tc.podName, tc.service, got, tc.expected) + } + }) + } +} diff --git a/internal/tls/testdata/.gitignore b/internal/tls/testdata/.gitignore new file mode 100644 index 0000000000..d472f47f0a --- /dev/null +++ b/internal/tls/testdata/.gitignore @@ -0,0 
+1,4 @@ +# ignore autogenerated files +server.crt +server.key +tls-ca.crt \ No newline at end of file diff --git a/internal/tls/testdata/server.crt b/internal/tls/testdata/server.crt deleted file mode 100644 index 12424f7b27..0000000000 --- a/internal/tls/testdata/server.crt +++ /dev/null @@ -1,24 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIEGDCCAgCgAwIBAgIQDfInHXLoKYcZoMZe0q/N9TANBgkqhkiG9w0BAQsFADAS -MRAwDgYDVQQKEwdSb290IENBMB4XDTI0MTAyMjIyMzYzNloXDTI0MTAyMjIzMzYz -NlowFTETMBEGA1UEChMKS3ViZXJuZXRlczCCASIwDQYJKoZIhvcNAQEBBQADggEP -ADCCAQoCggEBAP3MeHLv7sragkzD8iOj75YCJvOoys4Iy+EVwZhLAdtx+K89IOJr -6EKknoI0/FZowg5xuz4sE3sK8uQVAjtN0u4Mu6oQm94uSB5RxGvkBV6vn+3JxUdC -+fj+KiTg0x+pEoDxVXSrL3gF2ZtvfNdC05+FCk39pdEPe5tbnh+IPtcXSqWmtWEB -LiHPhSU0HN5JWsfQZ2VkB8rFStQ8CwFG0DW9i6GSVsN1zmmzLQdVPAyP4Uzy8844 -/ceZsmlkIe6uk3BiRRNThUcJKlFJJroCBJ8y7AJA8s3teLWskRLik+0xintked6z -fMaQRgzOSPDc062QTODHB2IkShVZAeLh7OcCAwEAAaNnMGUwDgYDVR0PAQH/BAQD -AgWgMBMGA1UdJQQMMAoGCCsGAQUFBwMBMAwGA1UdEwEB/wQCMAAwHwYDVR0jBBgw -FoAUhscCtqKglSlRCx9YNN1D/Xz5MpMwDwYDVR0RBAgwBocEfwAAATANBgkqhkiG -9w0BAQsFAAOCAgEAcxCGmWNtuM9V2yi0SOSLfxlSK7F1OS5qQuLWkZSDB1f+XG82 -PNDwSrV4J42qZcFfKrEt1HtS/Ws6VRlLrSnYArNSyQvNTeNf0q/rX9yO8wRcCM5K -sb8uv27xdawjN6F3z59rpJOf9ldX4ASrRZHzX/ttvvNcUdbCZsF5+h2EhcwNVVUa -Q6vlAW8u4Ik9HXxJ+W2HEdlSYWcCdTK4hDCtJhwqEBua1VdKwSdrlDZJPswAdCzv -CR3Fj4NsmPRDw8uCPIL4hwk5fbffcn3rZrsOXSMTKxvdyP+XBPKWEoIs6cxvdE3o -sG65EZeGpj0vLN+rLEbumkvgCAHACgVxvyDeJnajoA6lPNxfzby+9CmhLZfgaVbE -nVQTj8hkUW1gtvnAP3v1Zbe3QMhlqWqwfwD5MgtdQZQ1IwAojaHwt105amQLxESe -vxAQdabKNkW/HUOjKmxz0XtadCLAwPBhP9/j6rQrDVH62zjHmEOjU/WsJQflgLfh -VJ/oIhLTDdGHhamJabpBr6RroxJVXo+vjfUf5LUc5vw54emx7PWOW4opuL3cE6J/ -URmA9CJX4wBeYyEHHoHFlPD9vV5GxOFQ57UimQK/bNl5jh1zxPPAY0j5OwReJRGH -1mrzt6pfGQeYFHMXbkFA5HU5P7Q5VF0cDCef9s1bxglSiMsEDaLOOyKvJ/w= ------END CERTIFICATE----- diff --git a/internal/tls/testdata/server.key b/internal/tls/testdata/server.key deleted file mode 100644 index 0efcb40bb3..0000000000 --- a/internal/tls/testdata/server.key +++ /dev/null 
@@ -1,28 +0,0 @@ ------BEGIN PRIVATE KEY----- -MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQD9zHhy7+7K2oJM -w/Ijo++WAibzqMrOCMvhFcGYSwHbcfivPSDia+hCpJ6CNPxWaMIOcbs+LBN7CvLk -FQI7TdLuDLuqEJveLkgeUcRr5AVer5/tycVHQvn4/iok4NMfqRKA8VV0qy94Bdmb -b3zXQtOfhQpN/aXRD3ubW54fiD7XF0qlprVhAS4hz4UlNBzeSVrH0GdlZAfKxUrU -PAsBRtA1vYuhklbDdc5psy0HVTwMj+FM8vPOOP3HmbJpZCHurpNwYkUTU4VHCSpR -SSa6AgSfMuwCQPLN7Xi1rJES4pPtMYp7ZHnes3zGkEYMzkjw3NOtkEzgxwdiJEoV -WQHi4eznAgMBAAECggEAGv7z1O32jXc+ouG40NewNVmXQRW0NMQ0w4Vn6UYZNXlj -BWjQJaVquCQAhEMUkDBma9jnHM7dZ5obie0+Joa5p/6Mu6M2oSR1IVx7Myq284Jk -1Ys/w7u5ESYf33pWmqiGQlbpSxamXvLoWaM7OT5veilRlkgjqiAmerj9EceRP5mR -aheVE5ctY5oZmdnvA9OcN6B8Oxk59EWEOECs8qAF/ChYfC8pWKOp4U0RILQ/9jXD -lu4p0C2XyadrJReer3whudUADWa/WSxgVCrx85/g/RTKs38KTPy7W4g3bv8KBRhc -bjLzoYTl6esRAi8nMG5Fc/85t07hkVv+j5a1NccxgQKBgQD/y/tdH90C8CrAn9y5 -mgr+1QcqJeJWaBpNf+yBqtLbybl6uLY2OzbZtzllU+TDUVlKZB36t2GnG6EIRUcd -WTHUVq8zEIRtDBa12SZlJ08FrNFyKfBByehEOvMazTxb7rdGbiv5+XztCrcPUn1f -cu30v5nBRjqDxtiAbKccFcZPpwKBgQD+ABUQ0h3aX5qDqzhvSLNuqy+hOZPLsqva -VxsDsIlzzzGwmdf+m2Dn9JR/xJNMPf717LJs8IYIqW4KX/HW0pC3Y8fu/6h4fFM9 -ZpJkM2hjRu0uLxDWLc+AombMavFtkZ1tffKxv6mG7Ud163ulBIt24b701XfhR5UX -RWvWI50gwQKBgQCFa1i9luwJJ0m1VOyk5kML7gMhqcbneL8XYzzx2S7IMuyKpSNt -H++ZGWdXga2Vbq3bDmNQrSvDJLcWgEP6e9ZwwZH6WYgo9KA3036iTiF6fUx1doh5 -WB3M0M6SUTBFZzqzAq3vYYEWhns7A7Se/2w8N1+0HrRQnXu5aHK1RGo+iwKBgEST -hRx7fi/dK/xsl9oDyN4SPdPLlcmjPZ6/cb23RgUMZaAGiThmfu1hLU6pphMpkdKX -yzx6W9Wu2NTYPpT/WK8Ks4olYDjXaCnlrZR8BKz5E0Qq1OLej21ta0+5d+FbNSPA -o2u2EXEqUubVYxaUeYrpPAMiNzGNgAU+avTvvJaBAoGBAKely84irwZicpex79T/ -KYV3VOMYs6wETNSLdKkR9G2WEI4OsEw0qGm9pZXzS6lKOSXxW6fOzwI0/vhnmT4O -hBxeR+PriPFqD80ASXeVeVOEjMXQEjZ8Po+FfUvVu8zMNIPDwJ+hy94FE5kkDBte -vy6FnjMquTvekIsRwo/OOdz3 ------END PRIVATE KEY----- diff --git a/internal/tls/testdata/tls-ca.crt b/internal/tls/testdata/tls-ca.crt deleted file mode 100644 index 4b861f4f40..0000000000 --- a/internal/tls/testdata/tls-ca.crt +++ /dev/null @@ -1,29 +0,0 @@ ------BEGIN CERTIFICATE----- 
-MIIE9jCCAt6gAwIBAgIBATANBgkqhkiG9w0BAQsFADASMRAwDgYDVQQKEwdSb290 -IENBMB4XDTI0MTAyMjIyMzYzOFoXDTM0MTAyMjIyMzYzOFowEjEQMA4GA1UEChMH -Um9vdCBDQTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAJrhBEDtGS+b -WAVvkFJuQAkDbCCToy39eJ+51ZS/wA1yuVde8JvtX95dZC0STNm7GnxfIwH8tOut -gc82vz2Bim2K1N9uzBDu6flGWXpzXZSSuMAj1q8MRhEs1OpfbLuqxMs/TAbGYGqa -6FPlUWCwMZvpiRpV+hGxRIp9OsAYn/oVUvuACXnADEwBUYnGL3c9FYPn+kkjpsfH -vaH5kY8uTpKNIbmerYBCIt7X0QXcLOxk2CdnapKuIjaTML8og1/rMPbRsnGvIebh -FZsA60QnhuNiL6MowdJn17/Stl6Rs7cNV5zq3/WmaiipOoTcrKIMgA5ci23A69Pz -0WRfomoFsstRWHhimGrKrT5ewznEitvnWWxfiblmy21LTTJf6nrf7cD1B7Xlrfq+ -BORuHwSxyTapXw0HKSYPwjiSiijywWInS3QR3b5uvpyFy8rGfXI2PSTpnAsK/nws -38jUO7qPsj3AnwHpgLZe0XpGFqmemSmlPif0VT3Pn6CwsCpnEf7vFNYu+rmVjrqT -sBXv7qJMryXL4MDQcTsrX0+XDVWGlKrhVemPI94go86IuqASx7BldMounk9Pra70 -oRUZXsEbzBMGOyF/U3ZmCyJSryV4S+tkUckb/VInpmex1pkx15Q3EGmpRwTQ4/M/ -hGz0foplN1HGRQsVxuPt58Wj7/EW6BDrAgMBAAGjVzBVMA4GA1UdDwEB/wQEAwIC -hDATBgNVHSUEDDAKBggrBgEFBQcDATAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW -BBSWf0MJGMR6k7iAcZMQBOIFr+DoGjANBgkqhkiG9w0BAQsFAAOCAgEAHQib14b2 -y1b9Xa+lvvpyho887AF5LJMvcAaFA2qrZTgsct9YAUyP4jwHSMa82F34GF5zWm96 -wKGa2V7rykKPLtL2o0ilfnz3bJndL2MUjgVqK7GzkOGIhhXreG6/4WZ+oNReMzjw -uZ6zWKhUrTKfvt5J70Nzk+aPrwEzOOcb078QfrFvxElrkoyBz5LGF6HVdIVYUO94 -1DW1egaduIZCcmavO/CJ5QvzXMyiHkpuT0SoltvVFskoP9aS0OxnaQIrmNx+vajT -9zd/WMroxBh+z4Y6sQmy6zZF+rAPItgZNHncMt8AJBGUlAhjgtiP37v/+w5zKpOD -nXPKVf9mOPNdIDUBpzWxRXwkE1GI8kFjQsuiBD0RGNj1Db/m9lnxOX2B1vKOHRtg -Q3LQURMd3oNj2L4dnJG4SbfXi6CdxRPPtvAb2TJh8qIsL7WxDRey1uhmQWknYXso -kiQ1VgEBVwOXR3NGqlDboWYma/wDOtbaMSd0dl2m2PP7FXJNakUmrATNdwrWjuto -onSHblkkDI5NvA+xUPXm4n7os3DHl6upvToVRTIDaWDBJLGyWEVw87y8+VPR0l5g -XpYDatVLlgC/Nz3Ggn3vepGPV25d7COORXI1EXfjn2PUx7lEK0MbaPfkqPC43Q1g -qubuxdWE+FRLgfs3Ywv1JckNAec7Jg0FA2w= ------END CERTIFICATE----- diff --git a/plugins/inputs/logfile/README.md b/plugins/inputs/logfile/README.md index e514ce75c4..8e2af5f67c 100644 --- a/plugins/inputs/logfile/README.md +++ b/plugins/inputs/logfile/README.md @@ -47,6 +47,7 @@ The 
plugin expects messages in one of the timestamp_regex = "^(\\d{2} \\w{3} \\d{4} \\d{2}:\\d{2}:\\d{2}).*$" timestamp_layout = ["_2 Jan 2006 15:04:05"] timezone = "UTC" + trim_timestamp = false multi_line_start_pattern = "{timestamp_regex}" ## Read file from beginning. from_beginning = false @@ -65,6 +66,7 @@ The plugin expects messages in one of the timestamp_regex = "^(\\d{2} \\w{3} \\d{4} \\d{2}:\\d{2}:\\d{2}).*$" timestamp_layout = ["_2 Jan 2006 15:04:05"] timezone = "UTC" + trim_timestamp = true multi_line_start_pattern = "{timestamp_regex}" ## Read file from beginning. from_beginning = false diff --git a/plugins/inputs/logfile/fileconfig.go b/plugins/inputs/logfile/fileconfig.go index 1f41abe033..fe0b8d8d17 100644 --- a/plugins/inputs/logfile/fileconfig.go +++ b/plugins/inputs/logfile/fileconfig.go @@ -48,6 +48,8 @@ type FileConfig struct { TimestampLayout []string `toml:"timestamp_layout"` //The time zone used to parse the timestampFromLogLine in the log entry. Timezone string `toml:"timezone"` + //Trim timestamp from log line + TrimTimestamp bool `toml:"trim_timestamp"` //Indicate whether it is a start of multiline. //If this config is not present, it means the multiline mode is disabled. @@ -171,9 +173,9 @@ func (config *FileConfig) init() error { // Try to parse the timestampFromLogLine value from the log entry line. // The parser logic will be based on the timestampFromLogLine regex, and time zone info. // If the parsing operation encounters any issue, int64(0) is returned. -func (config *FileConfig) timestampFromLogLine(logValue string) time.Time { +func (config *FileConfig) timestampFromLogLine(logValue string) (time.Time, string) { if config.TimestampRegexP == nil { - return time.Time{} + return time.Time{}, logValue } index := config.TimestampRegexP.FindStringSubmatchIndex(logValue) if len(index) > 3 { @@ -196,7 +198,7 @@ func (config *FileConfig) timestampFromLogLine(logValue string) time.Time { } if err != nil { log.Printf("E! 
Error parsing timestampFromLogLine: %s", err) - return time.Time{} + return time.Time{}, logValue } if timestamp.Year() == 0 { now := time.Now() @@ -208,9 +210,14 @@ func (config *FileConfig) timestampFromLogLine(logValue string) time.Time { timestamp = timestamp.AddDate(-1, 0, 0) } } - return timestamp + if config.TrimTimestamp { + // Trim the entire timestamp portion and leading whitespaces + // The whitespace characters being removed are: space, tab, newline, and carriage return ( " \t\n\r") + return timestamp, strings.TrimLeft(logValue[:index[0]]+logValue[index[1]:], " \t\n\r") + } + return timestamp, logValue } - return time.Time{} + return time.Time{}, logValue } // This method determine whether the line is a start line for multiline log entry. diff --git a/plugins/inputs/logfile/fileconfig_test.go b/plugins/inputs/logfile/fileconfig_test.go index 7baa7cbbea..5315fb182a 100644 --- a/plugins/inputs/logfile/fileconfig_test.go +++ b/plugins/inputs/logfile/fileconfig_test.go @@ -130,15 +130,34 @@ func TestTimestampParser(t *testing.T) { expectedTimestamp := time.Unix(1497882318, 0) timestampString := "19 Jun 2017 14:25:18" logEntry := fmt.Sprintf("%s [INFO] This is a test message.", timestampString) - timestamp := fileConfig.timestampFromLogLine(logEntry) + timestamp, modifiedLogEntry := fileConfig.timestampFromLogLine(logEntry) assert.Equal(t, expectedTimestamp.UnixNano(), timestamp.UnixNano(), fmt.Sprintf("The timestampFromLogLine value %v is not the same as expected %v.", timestamp, expectedTimestamp)) + assert.Equal(t, logEntry, modifiedLogEntry) // Test regex match for multiline, the first timestamp in multiline should be matched logEntry = fmt.Sprintf("%s [INFO] This is the first line.\n19 Jun 2017 14:25:19 [INFO] This is the second line.\n", timestampString) - timestamp = fileConfig.timestampFromLogLine(logEntry) + timestamp, modifiedLogEntry = fileConfig.timestampFromLogLine(logEntry) assert.Equal(t, expectedTimestamp.UnixNano(), timestamp.UnixNano(), 
fmt.Sprintf("The timestampFromLogLine value %v is not the same as expected %v.", timestamp, expectedTimestamp)) + assert.Equal(t, logEntry, modifiedLogEntry) + + // Test TrimTimeStamp for single line + fileConfig.TrimTimestamp = true + logEntry = fmt.Sprintf("%s [INFO] This is a test message.", timestampString) + trimmedTimestampString := "[INFO] This is a test message." + timestamp, modifiedLogEntry = fileConfig.timestampFromLogLine(logEntry) + assert.Equal(t, expectedTimestamp.UnixNano(), timestamp.UnixNano(), + fmt.Sprintf("The timestampFromLogLine value %v is not the same as expected %v.", timestamp, expectedTimestamp)) + assert.Equal(t, trimmedTimestampString, modifiedLogEntry) + + // Test TrimTimeStamp for multiline, the first timestamp in multiline should be matched + logEntry = fmt.Sprintf("%s [INFO] This is the first line.\n19 Jun 2017 14:25:19 [INFO] This is the second line.\n", timestampString) + trimmedTimestampString = "[INFO] This is the first line.\n19 Jun 2017 14:25:19 [INFO] This is the second line.\n" + timestamp, modifiedLogEntry = fileConfig.timestampFromLogLine(logEntry) + assert.Equal(t, expectedTimestamp.UnixNano(), timestamp.UnixNano(), + fmt.Sprintf("The timestampFromLogLine value %v is not the same as expected %v.", timestamp, expectedTimestamp)) + assert.Equal(t, trimmedTimestampString, modifiedLogEntry) } func TestTimestampParserWithPadding(t *testing.T) { @@ -155,15 +174,39 @@ func TestTimestampParserWithPadding(t *testing.T) { Timezone: timezone, TimezoneLoc: timezoneLoc} - logEntry := fmt.Sprintf(" 2 1 07:10:06 instance-id: i-02fce21a425a2efb3") - timestamp := fileConfig.timestampFromLogLine(logEntry) + logEntry := " 2 1 07:10:06 instance-id: i-02fce21a425a2efb3" + timestamp, modifiedLogEntry := fileConfig.timestampFromLogLine(logEntry) assert.Equal(t, 7, timestamp.Hour(), fmt.Sprintf("Timestamp does not match: %v, act: %v", "7", timestamp.Hour())) assert.Equal(t, 10, timestamp.Minute(), fmt.Sprintf("Timestamp does not match: %v, act: 
%v", "10", timestamp.Minute())) + assert.Equal(t, logEntry, modifiedLogEntry) - logEntry = fmt.Sprintf("2 1 07:10:06 instance-id: i-02fce21a425a2efb3") - timestamp = fileConfig.timestampFromLogLine(logEntry) + logEntry = "2 1 07:10:06 instance-id: i-02fce21a425a2efb3" + timestamp, modifiedLogEntry = fileConfig.timestampFromLogLine(logEntry) assert.Equal(t, 7, timestamp.Hour(), fmt.Sprintf("Timestamp does not match: %v, act: %v", "7", timestamp.Hour())) assert.Equal(t, 10, timestamp.Minute(), fmt.Sprintf("Timestamp does not match: %v, act: %v", "10", timestamp.Minute())) + assert.Equal(t, logEntry, modifiedLogEntry) + + //Test when TrimTimeStamp is enabled + fileConfig.TrimTimestamp = true + logEntry = " 2 1 07:10:06 instance-id: i-02fce21a425a2efb3" + trimmedTimestampString := "instance-id: i-02fce21a425a2efb3" + timestamp, modifiedLogEntry = fileConfig.timestampFromLogLine(logEntry) + assert.Equal(t, 7, timestamp.Hour(), fmt.Sprintf("Timestamp does not match: %v, act: %v", "7", timestamp.Hour())) + assert.Equal(t, 10, timestamp.Minute(), fmt.Sprintf("Timestamp does not match: %v, act: %v", "10", timestamp.Minute())) + assert.Equal(t, trimmedTimestampString, modifiedLogEntry) + + logEntry = "2 1 07:10:06 instance-id: i-02fce21a425a2efb3" + timestamp, modifiedLogEntry = fileConfig.timestampFromLogLine(logEntry) + assert.Equal(t, 7, timestamp.Hour(), fmt.Sprintf("Timestamp does not match: %v, act: %v", "7", timestamp.Hour())) + assert.Equal(t, 10, timestamp.Minute(), fmt.Sprintf("Timestamp does not match: %v, act: %v", "10", timestamp.Minute())) + assert.Equal(t, trimmedTimestampString, modifiedLogEntry) + + logEntry = " instance-id: i-02fce21a425a2efb3 2 1 07:10:06" + trimmedTimestampString = "instance-id: i-02fce21a425a2efb3 " + timestamp, modifiedLogEntry = fileConfig.timestampFromLogLine(logEntry) + assert.Equal(t, 7, timestamp.Hour(), fmt.Sprintf("Timestamp does not match: %v, act: %v", "7", timestamp.Hour())) + assert.Equal(t, 10, timestamp.Minute(), 
fmt.Sprintf("Timestamp does not match: %v, act: %v", "10", timestamp.Minute())) + assert.Equal(t, trimmedTimestampString, modifiedLogEntry) } func TestTimestampParserDefault(t *testing.T) { @@ -183,26 +226,56 @@ func TestTimestampParserDefault(t *testing.T) { TimezoneLoc: timezoneLoc} // make sure layout is compatible for "Sep 9", "Sep 9" , "Sep 09", "Sep 09" options - logEntry := fmt.Sprintf("Sep 9 02:00:43 ip-10-4-213-132 \n") - timestamp := fileConfig.timestampFromLogLine(logEntry) + logEntry := "Sep 9 02:00:43 ip-10-4-213-132 \n" + timestamp, modifiedLogEntry := fileConfig.timestampFromLogLine(logEntry) + assert.Equal(t, 02, timestamp.Hour()) + assert.Equal(t, 00, timestamp.Minute()) + assert.Equal(t, logEntry, modifiedLogEntry) + + logEntry = "Sep 9 02:00:43 ip-10-4-213-132 \n" + timestamp, modifiedLogEntry = fileConfig.timestampFromLogLine(logEntry) + assert.Equal(t, 02, timestamp.Hour()) + assert.Equal(t, 00, timestamp.Minute()) + assert.Equal(t, logEntry, modifiedLogEntry) + + logEntry = "Sep 09 02:00:43 ip-10-4-213-132 \n" + timestamp, modifiedLogEntry = fileConfig.timestampFromLogLine(logEntry) assert.Equal(t, 02, timestamp.Hour()) assert.Equal(t, 00, timestamp.Minute()) + assert.Equal(t, logEntry, modifiedLogEntry) - logEntry = fmt.Sprintf("Sep 9 02:00:43 ip-10-4-213-132 \n") - timestamp = fileConfig.timestampFromLogLine(logEntry) + logEntry = "Sep 09 02:00:43 ip-10-4-213-132 \n" + timestamp, modifiedLogEntry = fileConfig.timestampFromLogLine(logEntry) assert.Equal(t, 02, timestamp.Hour()) assert.Equal(t, 00, timestamp.Minute()) + assert.Equal(t, logEntry, modifiedLogEntry) - logEntry = fmt.Sprintf("Sep 09 02:00:43 ip-10-4-213-132 \n") - timestamp = fileConfig.timestampFromLogLine(logEntry) + // When TrimTimestamp is enabled, make sure layout is compatible for "Sep 9", "Sep 9" , "Sep 09", "Sep 09" options and log value is trimmed correctly + fileConfig.TrimTimestamp = true + logEntry = "Sep 9 02:00:43 ip-10-4-213-132 \n" + trimmedTimestampString := 
"ip-10-4-213-132 \n" + timestamp, modifiedLogEntry = fileConfig.timestampFromLogLine(logEntry) assert.Equal(t, 02, timestamp.Hour()) assert.Equal(t, 00, timestamp.Minute()) + assert.Equal(t, trimmedTimestampString, modifiedLogEntry) - logEntry = fmt.Sprintf("Sep 09 02:00:43 ip-10-4-213-132 \n") - timestamp = fileConfig.timestampFromLogLine(logEntry) + logEntry = "Sep 9 02:00:43 ip-10-4-213-132 \n" + timestamp, modifiedLogEntry = fileConfig.timestampFromLogLine(logEntry) assert.Equal(t, 02, timestamp.Hour()) assert.Equal(t, 00, timestamp.Minute()) + assert.Equal(t, trimmedTimestampString, modifiedLogEntry) + logEntry = "Sep 09 02:00:43 ip-10-4-213-132 \n" + timestamp, modifiedLogEntry = fileConfig.timestampFromLogLine(logEntry) + assert.Equal(t, 02, timestamp.Hour()) + assert.Equal(t, 00, timestamp.Minute()) + assert.Equal(t, trimmedTimestampString, modifiedLogEntry) + + logEntry = "Sep 09 02:00:43 ip-10-4-213-132 \n" + timestamp, modifiedLogEntry = fileConfig.timestampFromLogLine(logEntry) + assert.Equal(t, 02, timestamp.Hour()) + assert.Equal(t, 00, timestamp.Minute()) + assert.Equal(t, trimmedTimestampString, modifiedLogEntry) } func TestTimestampParserWithFracSeconds(t *testing.T) { @@ -222,15 +295,34 @@ func TestTimestampParserWithFracSeconds(t *testing.T) { expectedTimestamp := time.Unix(1497882318, 234000000) timestampString := "19 Jun 2017 14:25:18,234088 UTC" logEntry := fmt.Sprintf("%s [INFO] This is a test message.", timestampString) - timestamp := fileConfig.timestampFromLogLine(logEntry) + timestamp, modifiedLogEntry := fileConfig.timestampFromLogLine(logEntry) assert.Equal(t, expectedTimestamp.UnixNano(), timestamp.UnixNano(), fmt.Sprintf("The timestampFromLogLine value %v is not the same as expected %v.", timestamp, expectedTimestamp)) + assert.Equal(t, logEntry, modifiedLogEntry) // Test regex match for multiline, the first timestamp in multiline should be matched logEntry = fmt.Sprintf("%s [INFO] This is the first line.\n19 Jun 2017 14:25:19,123456 
UTC [INFO] This is the second line.\n", timestampString) - timestamp = fileConfig.timestampFromLogLine(logEntry) + timestamp, modifiedLogEntry = fileConfig.timestampFromLogLine(logEntry) + assert.Equal(t, expectedTimestamp.UnixNano(), timestamp.UnixNano(), + fmt.Sprintf("The timestampFromLogLine value %v is not the same as expected %v.", timestamp, expectedTimestamp)) + assert.Equal(t, logEntry, modifiedLogEntry) + + // Test TrimTimeStamp for single line + fileConfig.TrimTimestamp = true + logEntry = fmt.Sprintf("%s [INFO] This is a test message.", timestampString) + trimmedTimestampString := "[INFO] This is a test message." + timestamp, modifiedLogEntry = fileConfig.timestampFromLogLine(logEntry) + assert.Equal(t, expectedTimestamp.UnixNano(), timestamp.UnixNano(), + fmt.Sprintf("The timestampFromLogLine value %v is not the same as expected %v.", timestamp, expectedTimestamp)) + assert.Equal(t, trimmedTimestampString, modifiedLogEntry) + + // Test TrimTimeStamp for multiline, the first timestamp in multiline should be matched + logEntry = fmt.Sprintf("%s [INFO] This is the first line.\n19 Jun 2017 14:25:19,123456 UTC [INFO] This is the second line.\n", timestampString) + trimmedTimestampString = "[INFO] This is the first line.\n19 Jun 2017 14:25:19,123456 UTC [INFO] This is the second line.\n" + timestamp, modifiedLogEntry = fileConfig.timestampFromLogLine(logEntry) assert.Equal(t, expectedTimestamp.UnixNano(), timestamp.UnixNano(), fmt.Sprintf("The timestampFromLogLine value %v is not the same as expected %v.", timestamp, expectedTimestamp)) + assert.Equal(t, trimmedTimestampString, modifiedLogEntry) } func TestNonAllowlistedTimezone(t *testing.T) { diff --git a/plugins/inputs/logfile/logfile.go b/plugins/inputs/logfile/logfile.go index 1626957ebc..f73755f290 100644 --- a/plugins/inputs/logfile/logfile.go +++ b/plugins/inputs/logfile/logfile.go @@ -76,6 +76,7 @@ const sampleConfig = ` timestamp_regex = "^(\\d{2} \\w{3} \\d{4} \\d{2}:\\d{2}:\\d{2}).*$" 
timestamp_layout = ["_2 Jan 2006 15:04:05"] timezone = "UTC" + trim_timestamp = false multi_line_start_pattern = "{timestamp_regex}" ## Read file from beginning. from_beginning = false diff --git a/plugins/inputs/logfile/tailersrc.go b/plugins/inputs/logfile/tailersrc.go index df5b5967f9..466ae74562 100644 --- a/plugins/inputs/logfile/tailersrc.go +++ b/plugins/inputs/logfile/tailersrc.go @@ -67,7 +67,7 @@ type tailerSrc struct { stateFilePath string tailer *tail.Tail autoRemoval bool - timestampFn func(string) time.Time + timestampFn func(string) (time.Time, string) enc encoding.Encoding maxEventSize int truncateSuffix string @@ -91,7 +91,7 @@ func NewTailerSrc( autoRemoval bool, isMultilineStartFn func(string) bool, filters []*LogFilter, - timestampFn func(string) time.Time, + timestampFn func(string) (time.Time, string), enc encoding.Encoding, maxEventSize int, truncateSuffix string, @@ -195,9 +195,10 @@ func (ts *tailerSrc) runTail() { if !ok { if msgBuf.Len() > 0 { msg := msgBuf.String() + timestamp, modifiedMsg := ts.timestampFn(msg) e := &LogEvent{ - msg: msg, - t: ts.timestampFn(msg), + msg: modifiedMsg, + t: timestamp, offset: *fo, src: ts, } @@ -249,9 +250,10 @@ func (ts *tailerSrc) runTail() { if msgBuf.Len() > 0 { msg := msgBuf.String() + timestamp, modifiedMsg := ts.timestampFn(msg) e := &LogEvent{ - msg: msg, - t: ts.timestampFn(msg), + msg: modifiedMsg, + t: timestamp, offset: *fo, src: ts, } @@ -276,9 +278,10 @@ func (ts *tailerSrc) runTail() { } msg := msgBuf.String() + timestamp, modifiedMsg := ts.timestampFn(msg) e := &LogEvent{ - msg: msg, - t: ts.timestampFn(msg), + msg: modifiedMsg, + t: timestamp, offset: *fo, src: ts, } diff --git a/plugins/inputs/logfile/tailersrc_test.go b/plugins/inputs/logfile/tailersrc_test.go index 23a8ae8ba4..c29ea9aab5 100644 --- a/plugins/inputs/logfile/tailersrc_test.go +++ b/plugins/inputs/logfile/tailersrc_test.go @@ -324,7 +324,7 @@ func TestTailerSrcFiltersMultiLineLogs(t *testing.T) { 
assertExpectedLogsPublished(t, n, int(*resources.consumed)) } -func parseRFC3339Timestamp(line string) time.Time { +func parseRFC3339Timestamp(line string) (time.Time, string) { // Use RFC3339 for testing `2006-01-02T15:04:05Z07:00` re := regexp.MustCompile(`\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}[Z+\-]\d{2}:\d{2}`) tstr := re.FindString(line) @@ -332,7 +332,7 @@ func parseRFC3339Timestamp(line string) time.Time { if tstr != "" { t, _ = time.Parse(time.RFC3339, tstr) } - return t + return t, line } func logLine(s string, l int, t time.Time) string { diff --git a/plugins/inputs/prometheus/target_allocator.go b/plugins/inputs/prometheus/target_allocator.go index fc32931d27..f87a958d7e 100644 --- a/plugins/inputs/prometheus/target_allocator.go +++ b/plugins/inputs/prometheus/target_allocator.go @@ -134,7 +134,6 @@ func (tam *TargetAllocatorManager) loadManager(logLevel *promlog.AllowedLevel) { MeterProvider: nil, MetricsLevel: 0, Resource: pcommon.Resource{}, - ReportStatus: nil, }, } diff --git a/plugins/outputs/cloudwatch/factory.go b/plugins/outputs/cloudwatch/factory.go index c97c87cd8a..135920473a 100644 --- a/plugins/outputs/cloudwatch/factory.go +++ b/plugins/outputs/cloudwatch/factory.go @@ -43,14 +43,14 @@ func createDefaultConfig() component.Config { func createMetricsExporter( ctx context.Context, - settings exporter.CreateSettings, + settings exporter.Settings, config component.Config, ) (exporter.Metrics, error) { cw := &CloudWatch{ config: config.(*Config), logger: settings.Logger, } - exp, err := exporterhelper.NewMetricsExporter( + exp, err := exporterhelper.NewMetrics( ctx, settings, config, diff --git a/plugins/outputs/cloudwatch/factory_test.go b/plugins/outputs/cloudwatch/factory_test.go index 175182e18c..22bbc79b03 100644 --- a/plugins/outputs/cloudwatch/factory_test.go +++ b/plugins/outputs/cloudwatch/factory_test.go @@ -8,9 +8,9 @@ import ( "testing" "github.com/stretchr/testify/assert" - "go.opentelemetry.io/collector/component" 
"go.opentelemetry.io/collector/component/componenttest" "go.opentelemetry.io/collector/exporter/exportertest" + "go.opentelemetry.io/collector/pipeline" ) func TestCreateDefaultConfig(t *testing.T) { @@ -24,16 +24,16 @@ func TestCreateExporter(t *testing.T) { factory := NewFactory() cfg := factory.CreateDefaultConfig() - creationSet := exportertest.NewNopCreateSettings() - tExporter, err := factory.CreateTracesExporter(context.Background(), creationSet, cfg) - assert.Equal(t, err, component.ErrDataTypeIsNotSupported) + creationSet := exportertest.NewNopSettings() + tExporter, err := factory.CreateTraces(context.Background(), creationSet, cfg) + assert.Equal(t, err, pipeline.ErrSignalNotSupported) assert.Nil(t, tExporter) - mExporter, err := factory.CreateMetricsExporter(context.Background(), creationSet, cfg) + mExporter, err := factory.CreateMetrics(context.Background(), creationSet, cfg) assert.NoError(t, err) assert.NotNil(t, mExporter) - tLogs, err := factory.CreateLogsExporter(context.Background(), creationSet, cfg) - assert.Equal(t, err, component.ErrDataTypeIsNotSupported) + tLogs, err := factory.CreateLogs(context.Background(), creationSet, cfg) + assert.Equal(t, err, pipeline.ErrSignalNotSupported) assert.Nil(t, tLogs) } diff --git a/plugins/outputs/cloudwatchlogs/internal/pusher/convert_test.go b/plugins/outputs/cloudwatchlogs/internal/pusher/convert_test.go index 52b40ef71c..badedb89d1 100644 --- a/plugins/outputs/cloudwatchlogs/internal/pusher/convert_test.go +++ b/plugins/outputs/cloudwatchlogs/internal/pusher/convert_test.go @@ -4,16 +4,13 @@ package pusher import ( - "bytes" - "io" - "log" - "os" "strings" "testing" "time" - "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/assert" + + "github.com/aws/amazon-cloudwatch-agent/tool/testutil" ) type stubLogEvent struct { @@ -44,7 +41,7 @@ func newStubLogEvent(message string, timestamp time.Time) *stubLogEvent { } func TestConverter(t *testing.T) { - logger := testutil.Logger{Name: 
"converter"} + logger := testutil.NewNopLogger() target := Target{Group: "testGroup", Stream: "testStream"} t.Run("WithValidTimestamp", func(t *testing.T) { @@ -86,20 +83,19 @@ func TestConverter(t *testing.T) { t.Run("WithOldTimestampWarning", func(t *testing.T) { oldTime := time.Now().Add(-25 * time.Hour) - conv := newConverter(logger, target) + logSink := testutil.NewLogSink() + conv := newConverter(logSink, target) conv.lastValidTime = oldTime conv.lastUpdateTime = oldTime - var logbuf bytes.Buffer - log.SetOutput(io.MultiWriter(&logbuf, os.Stdout)) le := conv.convert(newStubLogEvent("Test message", time.Time{})) assert.Equal(t, oldTime, le.timestamp) assert.Equal(t, "Test message", le.message) - loglines := strings.Split(strings.TrimSpace(logbuf.String()), "\n") - assert.Len(t, loglines, 1) - logline := loglines[0] - assert.True(t, strings.Contains(logline, "W!")) - assert.True(t, strings.Contains(logline, "Unable to parse timestamp")) + logLines := logSink.Lines() + assert.Len(t, logLines, 1) + logLine := logLines[0] + assert.True(t, strings.Contains(logLine, "W!")) + assert.True(t, strings.Contains(logLine, "Unable to parse timestamp")) }) } diff --git a/plugins/outputs/cloudwatchlogs/internal/pusher/pool.go b/plugins/outputs/cloudwatchlogs/internal/pusher/pool.go index 3bf9eb7f34..f5afeb309a 100644 --- a/plugins/outputs/cloudwatchlogs/internal/pusher/pool.go +++ b/plugins/outputs/cloudwatchlogs/internal/pusher/pool.go @@ -19,6 +19,7 @@ type workerPool struct { workerCount atomic.Int32 wg sync.WaitGroup stopCh chan struct{} + stopLock sync.RWMutex } // NewWorkerPool creates a pool of workers of the specified size. @@ -53,6 +54,8 @@ func (p *workerPool) worker() { // Submit adds a task to the pool. Blocks until a worker is available to receive the task or the pool is stopped. 
func (p *workerPool) Submit(task func()) { + p.stopLock.RLock() + defer p.stopLock.RUnlock() select { case <-p.stopCh: return @@ -72,6 +75,8 @@ func (p *workerPool) WorkerCount() int32 { // Stop closes the channels and waits for the workers to stop. func (p *workerPool) Stop() { + p.stopLock.Lock() + defer p.stopLock.Unlock() select { case <-p.stopCh: return diff --git a/plugins/outputs/cloudwatchlogs/internal/pusher/pool_test.go b/plugins/outputs/cloudwatchlogs/internal/pusher/pool_test.go index d2962e85ff..b706688034 100644 --- a/plugins/outputs/cloudwatchlogs/internal/pusher/pool_test.go +++ b/plugins/outputs/cloudwatchlogs/internal/pusher/pool_test.go @@ -9,11 +9,11 @@ import ( "testing" "time" - "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "github.com/aws/amazon-cloudwatch-agent/sdk/service/cloudwatchlogs" + "github.com/aws/amazon-cloudwatch-agent/tool/testutil" ) func TestWorkerPool(t *testing.T) { @@ -104,7 +104,7 @@ func TestWorkerPool(t *testing.T) { } func TestSenderPool(t *testing.T) { - logger := testutil.Logger{Name: "test"} + logger := testutil.NewNopLogger() stop := make(chan struct{}) mockService := new(mockLogsService) mockService.On("PutLogEvents", mock.Anything).Return(&cloudwatchlogs.PutLogEventsOutput{}, nil) diff --git a/plugins/outputs/cloudwatchlogs/internal/pusher/pusher_test.go b/plugins/outputs/cloudwatchlogs/internal/pusher/pusher_test.go index 2ab3970f78..54f68621f9 100644 --- a/plugins/outputs/cloudwatchlogs/internal/pusher/pusher_test.go +++ b/plugins/outputs/cloudwatchlogs/internal/pusher/pusher_test.go @@ -9,10 +9,10 @@ import ( "testing" "time" - "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/assert" "github.com/aws/amazon-cloudwatch-agent/sdk/service/cloudwatchlogs" + "github.com/aws/amazon-cloudwatch-agent/tool/testutil" ) const eventCount = 100000 @@ -22,7 +22,7 @@ func TestPusher(t *testing.T) { t.Parallel() stop := make(chan struct{}) 
var wg sync.WaitGroup - pusher := setupPusher(t, "single", nil, stop, &wg) + pusher := setupPusher(t, nil, stop, &wg) var completed atomic.Int32 generateEvents(t, pusher, &completed) @@ -36,7 +36,7 @@ func TestPusher(t *testing.T) { stop := make(chan struct{}) var wg sync.WaitGroup wp := NewWorkerPool(5) - pusher := setupPusher(t, "pool", wp, stop, &wg) + pusher := setupPusher(t, wp, stop, &wg) _, isSenderPool := pusher.Sender.(*senderPool) assert.True(t, isSenderPool) @@ -63,9 +63,9 @@ func generateEvents(t *testing.T, pusher *Pusher, completed *atomic.Int32) { } } -func setupPusher(t *testing.T, name string, workerPool WorkerPool, stop chan struct{}, wg *sync.WaitGroup) *Pusher { +func setupPusher(t *testing.T, workerPool WorkerPool, stop chan struct{}, wg *sync.WaitGroup) *Pusher { t.Helper() - logger := testutil.Logger{Name: name} + logger := testutil.NewNopLogger() target := Target{Group: "G", Stream: "S", Retention: 7} service := new(stubLogsService) service.ple = func(*cloudwatchlogs.PutLogEventsInput) (*cloudwatchlogs.PutLogEventsOutput, error) { diff --git a/plugins/outputs/cloudwatchlogs/internal/pusher/queue.go b/plugins/outputs/cloudwatchlogs/internal/pusher/queue.go index 89f26faed4..da3a28a25a 100644 --- a/plugins/outputs/cloudwatchlogs/internal/pusher/queue.go +++ b/plugins/outputs/cloudwatchlogs/internal/pusher/queue.go @@ -33,7 +33,7 @@ type queue struct { flushCh chan struct{} resetTimerCh chan struct{} flushTimer *time.Timer - flushTimeout time.Duration + flushTimeout atomic.Value stop <-chan struct{} lastSentTime atomic.Value @@ -61,11 +61,11 @@ func newQueue( flushCh: make(chan struct{}), resetTimerCh: make(chan struct{}), flushTimer: time.NewTimer(flushTimeout), - flushTimeout: flushTimeout, stop: stop, startNonBlockCh: make(chan struct{}), wg: wg, } + q.flushTimeout.Store(flushTimeout) q.wg.Add(1) go q.start() return q @@ -112,13 +112,15 @@ func (q *queue) start() { // Merge events from both blocking and non-blocking channel go func() { + var 
nonBlockingEventsCh <-chan logs.LogEvent for { select { case e := <-q.eventsCh: mergeChan <- e - case e := <-q.nonBlockingEventsCh: + case e := <-nonBlockingEventsCh: mergeChan <- e case <-q.startNonBlockCh: + nonBlockingEventsCh = q.nonBlockingEventsCh case <-q.stop: return } @@ -141,7 +143,8 @@ func (q *queue) start() { q.batch.append(event) case <-q.flushCh: lastSentTime, _ := q.lastSentTime.Load().(time.Time) - if time.Since(lastSentTime) >= q.flushTimeout && len(q.batch.events) > 0 { + flushTimeout, _ := q.flushTimeout.Load().(time.Duration) + if time.Since(lastSentTime) >= flushTimeout && len(q.batch.events) > 0 { q.send() } else { q.resetFlushTimer() @@ -188,7 +191,9 @@ func (q *queue) manageFlushTimer() { q.flushCh <- struct{}{} case <-q.resetTimerCh: q.stopFlushTimer() - q.flushTimer.Reset(q.flushTimeout) + if flushTimeout, ok := q.flushTimeout.Load().(time.Duration); ok { + q.flushTimer.Reset(flushTimeout) + } case <-q.stop: q.stopFlushTimer() return diff --git a/plugins/outputs/cloudwatchlogs/internal/pusher/queue_test.go b/plugins/outputs/cloudwatchlogs/internal/pusher/queue_test.go index c270145ab1..d12fe0f48d 100644 --- a/plugins/outputs/cloudwatchlogs/internal/pusher/queue_test.go +++ b/plugins/outputs/cloudwatchlogs/internal/pusher/queue_test.go @@ -4,24 +4,22 @@ package pusher import ( - "bytes" "errors" "fmt" - "io" - "log" - "os" "strings" "sync" + "sync/atomic" "testing" "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/influxdata/telegraf/testutil" + "github.com/influxdata/telegraf" "github.com/stretchr/testify/require" "github.com/aws/amazon-cloudwatch-agent/logs" "github.com/aws/amazon-cloudwatch-agent/sdk/service/cloudwatchlogs" + "github.com/aws/amazon-cloudwatch-agent/tool/testutil" "github.com/aws/amazon-cloudwatch-agent/tool/util" ) @@ -30,6 +28,7 @@ type stubLogsService struct { clg func(input *cloudwatchlogs.CreateLogGroupInput) (*cloudwatchlogs.CreateLogGroupOutput, error) cls func(input 
*cloudwatchlogs.CreateLogStreamInput) (*cloudwatchlogs.CreateLogStreamOutput, error) prp func(input *cloudwatchlogs.PutRetentionPolicyInput) (*cloudwatchlogs.PutRetentionPolicyOutput, error) + dlg func(input *cloudwatchlogs.DescribeLogGroupsInput) (*cloudwatchlogs.DescribeLogGroupsOutput, error) } func (s *stubLogsService) PutLogEvents(in *cloudwatchlogs.PutLogEventsInput) (*cloudwatchlogs.PutLogEventsOutput, error) { @@ -60,11 +59,18 @@ func (s *stubLogsService) PutRetentionPolicy(in *cloudwatchlogs.PutRetentionPoli return nil, nil } +func (s *stubLogsService) DescribeLogGroups(in *cloudwatchlogs.DescribeLogGroupsInput) (*cloudwatchlogs.DescribeLogGroupsOutput, error) { + if s.dlg != nil { + return s.dlg(in) + } + return nil, nil +} + func TestAddSingleEvent_WithAccountId(t *testing.T) { t.Parallel() var wg sync.WaitGroup var s stubLogsService - called := false + var called atomic.Bool expectedEntity := &cloudwatchlogs.Entity{ Attributes: map[string]*string{ "PlatformType": aws.String("AWS::EC2"), @@ -79,7 +85,7 @@ func TestAddSingleEvent_WithAccountId(t *testing.T) { } s.ple = func(in *cloudwatchlogs.PutLogEventsInput) (*cloudwatchlogs.PutLogEventsOutput, error) { - called = true + called.Store(true) if *in.LogGroupName != "G" || *in.LogStreamName != "S" { t.Errorf("PutLogEvents called with wrong group and stream: %v/%v", *in.LogGroupName, *in.LogStreamName) @@ -93,15 +99,16 @@ func TestAddSingleEvent_WithAccountId(t *testing.T) { } ep := newMockEntityProvider(expectedEntity) - stop, q := testPreparation(-1, &s, 1*time.Hour, 2*time.Hour, ep, &wg) + stop, q := testPreparation(t, -1, &s, 1*time.Hour, 2*time.Hour, ep, &wg) q.AddEvent(newStubLogEvent("MSG", time.Now())) - require.False(t, called, "PutLogEvents has been called too fast, it should wait until FlushTimeout.") + require.False(t, called.Load(), "PutLogEvents has been called too fast, it should wait until FlushTimeout.") - q.flushTimeout = time.Second + q.flushTimeout.Store(200 * time.Millisecond) + 
time.Sleep(10 * time.Millisecond) q.resetFlushTimer() - time.Sleep(2 * time.Second) - require.True(t, called, "PutLogEvents has not been called after FlushTimeout has been reached.") + time.Sleep(time.Second) + require.True(t, called.Load(), "PutLogEvents has not been called after FlushTimeout has been reached.") close(stop) wg.Wait() @@ -111,10 +118,10 @@ func TestAddSingleEvent_WithoutAccountId(t *testing.T) { t.Parallel() var wg sync.WaitGroup var s stubLogsService - called := false + var called atomic.Bool s.ple = func(in *cloudwatchlogs.PutLogEventsInput) (*cloudwatchlogs.PutLogEventsOutput, error) { - called = true + called.Store(true) if *in.LogGroupName != "G" || *in.LogStreamName != "S" { t.Errorf("PutLogEvents called with wrong group and stream: %v/%v", *in.LogGroupName, *in.LogStreamName) @@ -128,15 +135,16 @@ func TestAddSingleEvent_WithoutAccountId(t *testing.T) { } ep := newMockEntityProvider(nil) - stop, q := testPreparation(-1, &s, 1*time.Hour, 2*time.Hour, ep, &wg) + stop, q := testPreparation(t, -1, &s, 1*time.Hour, 2*time.Hour, ep, &wg) q.AddEvent(newStubLogEvent("MSG", time.Now())) - require.False(t, called, "PutLogEvents has been called too fast, it should wait until FlushTimeout.") + require.False(t, called.Load(), "PutLogEvents has been called too fast, it should wait until FlushTimeout.") - q.flushTimeout = time.Second + q.flushTimeout.Store(time.Second) + time.Sleep(10 * time.Millisecond) q.resetFlushTimer() time.Sleep(2 * time.Second) - require.True(t, called, "PutLogEvents has not been called after FlushTimeout has been reached.") + require.True(t, called.Load(), "PutLogEvents has not been called after FlushTimeout has been reached.") close(stop) wg.Wait() @@ -146,27 +154,27 @@ func TestStopQueueWouldDoFinalSend(t *testing.T) { t.Parallel() var wg sync.WaitGroup var s stubLogsService - called := false + var called atomic.Bool s.ple = func(in *cloudwatchlogs.PutLogEventsInput) (*cloudwatchlogs.PutLogEventsOutput, error) { - called = true + 
called.Store(true) if len(in.LogEvents) != 1 { t.Errorf("PutLogEvents called with incorrect number of message, expecting 1, but %v received", len(in.LogEvents)) } return &cloudwatchlogs.PutLogEventsOutput{}, nil } - stop, q := testPreparation(-1, &s, 1*time.Hour, 2*time.Hour, nil, &wg) + stop, q := testPreparation(t, -1, &s, 1*time.Hour, 2*time.Hour, nil, &wg) q.AddEvent(newStubLogEvent("MSG", time.Now())) time.Sleep(10 * time.Millisecond) - require.False(t, called, "PutLogEvents has been called too fast, it should wait until FlushTimeout.") + require.False(t, called.Load(), "PutLogEvents has been called too fast, it should wait until FlushTimeout.") close(stop) wg.Wait() - require.True(t, called, "PutLogEvents has not been called after FlushTimeout has been reached.") + require.True(t, called.Load(), "PutLogEvents has not been called after FlushTimeout has been reached.") } func TestStopPusherWouldStopRetries(t *testing.T) { @@ -174,27 +182,27 @@ func TestStopPusherWouldStopRetries(t *testing.T) { var wg sync.WaitGroup var s stubLogsService - s.ple = func(in *cloudwatchlogs.PutLogEventsInput) (*cloudwatchlogs.PutLogEventsOutput, error) { + s.ple = func(*cloudwatchlogs.PutLogEventsInput) (*cloudwatchlogs.PutLogEventsOutput, error) { return nil, &cloudwatchlogs.ServiceUnavailableException{} } - stop, q := testPreparation(-1, &s, 1*time.Hour, 2*time.Hour, nil, &wg) + logSink := testutil.NewLogSink() + stop, q := testPreparationWithLogger(t, logSink, -1, &s, 1*time.Hour, 2*time.Hour, nil, &wg) q.AddEvent(newStubLogEvent("MSG", time.Now())) + time.Sleep(10 * time.Millisecond) - sendComplete := make(chan struct{}) - - go func() { - defer close(sendComplete) - q.send() - }() - + triggerSend(t, q) + // stop should try flushing the remaining events with retry disabled close(stop) - select { - case <-time.After(50 * time.Millisecond): - t.Errorf("send did not quit retrying after p has been Stopped.") - case <-sendComplete: - } + time.Sleep(50 * time.Millisecond) + wg.Wait() 
+ + logLines := logSink.Lines() + require.Equal(t, 3, len(logLines), fmt.Sprintf("Expecting 3 logs, but %d received", len(logLines))) + lastLine := logLines[len(logLines)-1] + require.True(t, strings.Contains(lastLine, "E!")) + require.True(t, strings.Contains(lastLine, "Stop requested after 0 retries to G/S failed for PutLogEvents, request dropped")) } func TestLongMessageGetsTruncated(t *testing.T) { @@ -223,14 +231,10 @@ func TestLongMessageGetsTruncated(t *testing.T) { return &cloudwatchlogs.PutLogEventsOutput{}, nil } - stop, q := testPreparation(-1, &s, 1*time.Hour, 2*time.Hour, nil, &wg) + stop, q := testPreparation(t, -1, &s, 1*time.Hour, 2*time.Hour, nil, &wg) q.AddEvent(newStubLogEvent(longMsg, time.Now())) - for len(q.batch.events) < 1 { - time.Sleep(10 * time.Millisecond) - } - - q.send() + triggerSend(t, q) close(stop) wg.Wait() } @@ -254,13 +258,13 @@ func TestRequestIsLessThan1MB(t *testing.T) { return &cloudwatchlogs.PutLogEventsOutput{}, nil } - stop, q := testPreparation(-1, &s, 1*time.Hour, 2*time.Hour, nil, &wg) + stop, q := testPreparation(t, -1, &s, 1*time.Hour, 2*time.Hour, nil, &wg) for i := 0; i < 8; i++ { q.AddEvent(newStubLogEvent(longMsg, time.Now())) } time.Sleep(10 * time.Millisecond) - q.send() - q.send() + triggerSend(t, q) + triggerSend(t, q) close(stop) wg.Wait() } @@ -279,13 +283,13 @@ func TestRequestIsLessThan10kEvents(t *testing.T) { return &cloudwatchlogs.PutLogEventsOutput{}, nil } - stop, q := testPreparation(-1, &s, 1*time.Hour, 2*time.Hour, nil, &wg) + stop, q := testPreparation(t, -1, &s, 1*time.Hour, 2*time.Hour, nil, &wg) for i := 0; i < 30000; i++ { q.AddEvent(newStubLogEvent(msg, time.Now())) } time.Sleep(10 * time.Millisecond) for i := 0; i < 5; i++ { - q.send() + triggerSend(t, q) } close(stop) wg.Wait() @@ -304,44 +308,41 @@ func TestTimestampPopulation(t *testing.T) { return &cloudwatchlogs.PutLogEventsOutput{}, nil } - stop, q := testPreparation(-1, &s, 1*time.Hour, 2*time.Hour, nil, &wg) + stop, q := 
testPreparation(t, -1, &s, 1*time.Hour, 2*time.Hour, nil, &wg) for i := 0; i < 3; i++ { q.AddEvent(newStubLogEvent("msg", time.Time{})) } time.Sleep(10 * time.Millisecond) for i := 0; i < 5; i++ { - q.send() + triggerSend(t, q) } close(stop) wg.Wait() } func TestIgnoreOutOfTimeRangeEvent(t *testing.T) { + t.Parallel() var wg sync.WaitGroup var s stubLogsService - s.ple = func(in *cloudwatchlogs.PutLogEventsInput) (*cloudwatchlogs.PutLogEventsOutput, error) { + s.ple = func(*cloudwatchlogs.PutLogEventsInput) (*cloudwatchlogs.PutLogEventsOutput, error) { t.Errorf("PutLogEvents should not be called for out of range events") return &cloudwatchlogs.PutLogEventsOutput{}, nil } - var logbuf bytes.Buffer - log.SetOutput(io.MultiWriter(&logbuf, os.Stdout)) - - stop, q := testPreparation(-1, &s, 10*time.Millisecond, 2*time.Hour, nil, &wg) + logSink := testutil.NewLogSink() + stop, q := testPreparationWithLogger(t, logSink, -1, &s, 10*time.Millisecond, 2*time.Hour, nil, &wg) q.AddEvent(newStubLogEvent("MSG", time.Now().Add(-15*24*time.Hour))) q.AddEventNonBlocking(newStubLogEvent("MSG", time.Now().Add(2*time.Hour+1*time.Minute))) - loglines := strings.Split(strings.TrimSpace(logbuf.String()), "\n") - require.Equal(t, 2, len(loglines), fmt.Sprintf("Expecting 2 error logs, but %d received", len(loglines))) + logLines := logSink.Lines() + require.Equal(t, 2, len(logLines), fmt.Sprintf("Expecting 2 error logs, but %d received", len(logLines))) - for _, logline := range loglines { - require.True(t, strings.Contains(logline, "E!"), fmt.Sprintf("Expecting error log with unhandled error, but received '%s' in the log", logbuf.String())) - require.True(t, strings.Contains(logline, "Discard the log entry"), fmt.Sprintf("Expecting error log with unhandled error, but received '%s' in the log", logbuf.String())) + for _, logLine := range logLines { + require.True(t, strings.Contains(logLine, "E!"), fmt.Sprintf("Expecting error log with unhandled error, but received '%s' in the log", 
logSink)) + require.True(t, strings.Contains(logLine, "Discard the log entry"), fmt.Sprintf("Expecting error log with unhandled error, but received '%s' in the log", logSink)) } - log.SetOutput(os.Stderr) - time.Sleep(20 * time.Millisecond) close(stop) wg.Wait() @@ -382,12 +383,13 @@ func TestAddMultipleEvents(t *testing.T) { )) } evts[10], evts[90] = evts[90], evts[10] // make events out of order - stop, q := testPreparation(-1, &s, 1*time.Hour, 2*time.Hour, nil, &wg) + stop, q := testPreparation(t, -1, &s, 1*time.Hour, 2*time.Hour, nil, &wg) for _, e := range evts { q.AddEvent(e) } - q.flushTimeout = 10 * time.Millisecond + q.flushTimeout.Store(10 * time.Millisecond) + time.Sleep(10 * time.Millisecond) q.resetFlushTimer() time.Sleep(time.Second) @@ -400,10 +402,10 @@ func TestSendReqWhenEventsSpanMoreThan24Hrs(t *testing.T) { t.Parallel() var wg sync.WaitGroup var s stubLogsService + var ci atomic.Int32 - ci := 0 s.ple = func(in *cloudwatchlogs.PutLogEventsInput) (*cloudwatchlogs.PutLogEventsOutput, error) { - if ci == 0 { + if ci.Load() == 0 { if len(in.LogEvents) != 3 { t.Errorf("PutLogEvents called with incorrect number of message, expecting 3, but %v received", len(in.LogEvents)) } @@ -414,9 +416,9 @@ func TestSendReqWhenEventsSpanMoreThan24Hrs(t *testing.T) { } } - ci++ + ci.Add(1) return &cloudwatchlogs.PutLogEventsOutput{}, nil - } else if ci == 1 { + } else if ci.Load() == 1 { if len(in.LogEvents) != 1 { t.Errorf("PutLogEvents called with incorrect number of message, expecting 1, but %v received", len(in.LogEvents)) } @@ -432,12 +434,13 @@ func TestSendReqWhenEventsSpanMoreThan24Hrs(t *testing.T) { return nil, nil } - stop, q := testPreparation(-1, &s, 1*time.Hour, 2*time.Hour, nil, &wg) + stop, q := testPreparation(t, -1, &s, 1*time.Hour, 2*time.Hour, nil, &wg) q.AddEvent(newStubLogEvent("MSG 25hrs ago", time.Now().Add(-25*time.Hour))) q.AddEvent(newStubLogEvent("MSG 24hrs ago", time.Now().Add(-24*time.Hour))) q.AddEvent(newStubLogEvent("MSG 23hrs ago", 
time.Now().Add(-23*time.Hour))) q.AddEvent(newStubLogEvent("MSG now", time.Now())) - q.flushTimeout = 10 * time.Millisecond + q.flushTimeout.Store(10 * time.Millisecond) + time.Sleep(10 * time.Millisecond) q.resetFlushTimer() time.Sleep(20 * time.Millisecond) close(stop) @@ -445,45 +448,43 @@ func TestSendReqWhenEventsSpanMoreThan24Hrs(t *testing.T) { } func TestUnhandledErrorWouldNotResend(t *testing.T) { + t.Parallel() var wg sync.WaitGroup var s stubLogsService + var cnt atomic.Int32 - cnt := 0 - s.ple = func(in *cloudwatchlogs.PutLogEventsInput) (*cloudwatchlogs.PutLogEventsOutput, error) { - if cnt == 0 { - cnt++ + s.ple = func(*cloudwatchlogs.PutLogEventsInput) (*cloudwatchlogs.PutLogEventsOutput, error) { + if cnt.Load() == 0 { + cnt.Add(1) return nil, errors.New("unhandled error") } t.Errorf("Pusher should not attempt a resend when an unhandled error has been returned") return &cloudwatchlogs.PutLogEventsOutput{}, nil } - var logbuf bytes.Buffer - log.SetOutput(io.MultiWriter(&logbuf, os.Stdout)) - - stop, q := testPreparation(-1, &s, 10*time.Millisecond, 2*time.Hour, nil, &wg) + logSink := testutil.NewLogSink() + stop, q := testPreparationWithLogger(t, logSink, -1, &s, 10*time.Millisecond, 2*time.Hour, nil, &wg) q.AddEvent(newStubLogEvent("msg", time.Now())) time.Sleep(2 * time.Second) - logline := logbuf.String() - require.True(t, strings.Contains(logline, "E!"), fmt.Sprintf("Expecting error log with unhandled error, but received '%s' in the log", logbuf.String())) - require.True(t, strings.Contains(logline, "unhandled error"), fmt.Sprintf("Expecting error log with unhandled error, but received '%s' in the log", logbuf.String())) - - log.SetOutput(os.Stderr) + logLine := logSink.String() + require.True(t, strings.Contains(logLine, "E!"), fmt.Sprintf("Expecting error log with unhandled error, but received '%s' in the log", logLine)) + require.True(t, strings.Contains(logLine, "unhandled error"), fmt.Sprintf("Expecting error log with unhandled error, but 
received '%s' in the log", logLine)) close(stop) wg.Wait() - require.Equal(t, 1, cnt, fmt.Sprintf("Expecting pusher to call send 1 time, but %d times called", cnt)) + require.EqualValues(t, 1, cnt.Load(), fmt.Sprintf("Expecting pusher to call send 1 time, but %d times called", cnt.Load())) } func TestCreateLogGroupAndLogStreamWhenNotFound(t *testing.T) { + t.Parallel() var wg sync.WaitGroup var s stubLogsService - var plec, clgc, clsc int - s.ple = func(in *cloudwatchlogs.PutLogEventsInput) (*cloudwatchlogs.PutLogEventsOutput, error) { + var plec, clgc, clsc atomic.Int32 + s.ple = func(*cloudwatchlogs.PutLogEventsInput) (*cloudwatchlogs.PutLogEventsOutput, error) { var e error - switch plec { + switch plec.Load() { case 0: e = &cloudwatchlogs.ResourceNotFoundException{} case 1: @@ -491,40 +492,39 @@ func TestCreateLogGroupAndLogStreamWhenNotFound(t *testing.T) { case 2: return &cloudwatchlogs.PutLogEventsOutput{}, nil default: - t.Errorf("Unexpected PutLogEvents call (%d time)", plec) + t.Errorf("Unexpected PutLogEvents call (%d time)", plec.Load()) } - plec++ + plec.Add(1) return nil, e } - s.clg = func(in *cloudwatchlogs.CreateLogGroupInput) (*cloudwatchlogs.CreateLogGroupOutput, error) { - clgc++ + s.clg = func(*cloudwatchlogs.CreateLogGroupInput) (*cloudwatchlogs.CreateLogGroupOutput, error) { + clgc.Add(1) return nil, nil } - s.cls = func(in *cloudwatchlogs.CreateLogStreamInput) (*cloudwatchlogs.CreateLogStreamOutput, error) { - clsc++ + s.cls = func(*cloudwatchlogs.CreateLogStreamInput) (*cloudwatchlogs.CreateLogStreamOutput, error) { + clsc.Add(1) return nil, nil } - var logbuf bytes.Buffer - log.SetOutput(io.MultiWriter(&logbuf, os.Stdout)) - - stop, q := testPreparation(-1, &s, 1*time.Hour, 2*time.Hour, nil, &wg) - q.AddEvent(newStubLogEvent("msg", time.Now())) + logSink := testutil.NewLogSink() + stop, q := testPreparationWithLogger(t, logSink, -1, &s, 1*time.Hour, 2*time.Hour, nil, &wg) + var eventWG sync.WaitGroup + eventWG.Add(1) + 
q.AddEvent(&stubLogEvent{message: "msg", timestamp: time.Now(), done: eventWG.Done}) time.Sleep(10 * time.Millisecond) - q.send() + triggerSend(t, q) + eventWG.Wait() foundUnknownErr := false - loglines := strings.Split(strings.TrimSpace(logbuf.String()), "\n") - for _, logline := range loglines { - if strings.Contains(logline, "E!") && strings.Contains(logline, "Unknown Error") { + logLines := logSink.Lines() + for _, logLine := range logLines { + if strings.Contains(logLine, "E!") && strings.Contains(logLine, "Unknown Error") { foundUnknownErr = true } } - require.True(t, foundUnknownErr, fmt.Sprintf("Expecting error log with unknown error, but received '%s' in the log", logbuf.String())) - - log.SetOutput(os.Stderr) + require.True(t, foundUnknownErr, fmt.Sprintf("Expecting error log with unknown error, but received '%s' in the log", logSink)) close(stop) wg.Wait() @@ -534,7 +534,7 @@ func TestLogRejectedLogEntryInfo(t *testing.T) { var wg sync.WaitGroup var s stubLogsService - s.ple = func(in *cloudwatchlogs.PutLogEventsInput) (*cloudwatchlogs.PutLogEventsOutput, error) { + s.ple = func(*cloudwatchlogs.PutLogEventsInput) (*cloudwatchlogs.PutLogEventsOutput, error) { return &cloudwatchlogs.PutLogEventsOutput{ RejectedLogEventsInfo: &cloudwatchlogs.RejectedLogEventsInfo{ TooOldLogEventEndIndex: aws.Int64(100), @@ -544,30 +544,29 @@ func TestLogRejectedLogEntryInfo(t *testing.T) { }, nil } - var logbuf bytes.Buffer - log.SetOutput(io.MultiWriter(&logbuf, os.Stdout)) - - stop, q := testPreparation(-1, &s, 1*time.Hour, 2*time.Hour, nil, &wg) - q.AddEvent(newStubLogEvent("msg", time.Now())) + logSink := testutil.NewLogSink() + stop, q := testPreparationWithLogger(t, logSink, -1, &s, 1*time.Hour, 2*time.Hour, nil, &wg) + var eventWG sync.WaitGroup + eventWG.Add(1) + q.AddEvent(&stubLogEvent{message: "msg", timestamp: time.Now(), done: eventWG.Done}) time.Sleep(10 * time.Millisecond) - q.send() + triggerSend(t, q) - loglines := 
strings.Split(strings.TrimSpace(logbuf.String()), "\n") - require.Len(t, loglines, 4, fmt.Sprintf("Expecting 3 error logs, but %d received", len(loglines))) + eventWG.Wait() + logLines := logSink.Lines() + require.Len(t, logLines, 4, fmt.Sprintf("Expecting 3 error logs, but %d received", len(logLines))) - logline := loglines[0] - require.True(t, strings.Contains(logline, "W!"), fmt.Sprintf("Expecting error log events too old, but received '%s' in the log", logbuf.String())) - require.True(t, strings.Contains(logline, "100"), fmt.Sprintf("Expecting error log events too old, but received '%s' in the log", logbuf.String())) + logLine := logLines[0] + require.True(t, strings.Contains(logLine, "W!"), fmt.Sprintf("Expecting error log events too old, but received '%s' in the log", logSink.String())) + require.True(t, strings.Contains(logLine, "100"), fmt.Sprintf("Expecting error log events too old, but received '%s' in the log", logSink.String())) - logline = loglines[1] - require.True(t, strings.Contains(logline, "W!"), fmt.Sprintf("Expecting error log events too new, but received '%s' in the log", logbuf.String())) - require.True(t, strings.Contains(logline, "200"), fmt.Sprintf("Expecting error log events too new, but received '%s' in the log", logbuf.String())) + logLine = logLines[1] + require.True(t, strings.Contains(logLine, "W!"), fmt.Sprintf("Expecting error log events too new, but received '%s' in the log", logSink.String())) + require.True(t, strings.Contains(logLine, "200"), fmt.Sprintf("Expecting error log events too new, but received '%s' in the log", logSink.String())) - logline = loglines[2] - require.True(t, strings.Contains(logline, "W!"), fmt.Sprintf("Expecting error log events too expired, but received '%s' in the log", logbuf.String())) - require.True(t, strings.Contains(logline, "300"), fmt.Sprintf("Expecting error log events too expired, but received '%s' in the log", logbuf.String())) - - log.SetOutput(os.Stderr) + logLine = logLines[2] + 
require.True(t, strings.Contains(logLine, "W!"), fmt.Sprintf("Expecting error log events too expired, but received '%s' in the log", logSink.String())) + require.True(t, strings.Contains(logLine, "300"), fmt.Sprintf("Expecting error log events too expired, but received '%s' in the log", logSink.String())) close(stop) wg.Wait() @@ -595,9 +594,7 @@ func TestAddEventNonBlocking(t *testing.T) { start.Add(time.Duration(i)*time.Millisecond), )) } - stop, q := testPreparation(-1, &s, 1*time.Hour, 2*time.Hour, nil, &wg) - q.flushTimeout = 50 * time.Millisecond - q.resetFlushTimer() + stop, q := testPreparation(t, -1, &s, 1*time.Hour, 2*time.Hour, nil, &wg) time.Sleep(200 * time.Millisecond) // Wait until pusher started, merge channel is blocked for _, e := range evts { @@ -605,49 +602,78 @@ func TestAddEventNonBlocking(t *testing.T) { } time.Sleep(time.Second) + triggerSend(t, q) + time.Sleep(20 * time.Millisecond) close(stop) wg.Wait() } func TestResendWouldStopAfterExhaustedRetries(t *testing.T) { + t.Parallel() var wg sync.WaitGroup var s stubLogsService + var cnt atomic.Int32 - cnt := 0 - s.ple = func(in *cloudwatchlogs.PutLogEventsInput) (*cloudwatchlogs.PutLogEventsOutput, error) { - cnt++ + s.ple = func(*cloudwatchlogs.PutLogEventsInput) (*cloudwatchlogs.PutLogEventsOutput, error) { + cnt.Add(1) return nil, &cloudwatchlogs.ServiceUnavailableException{} } - var logbuf bytes.Buffer - log.SetOutput(io.MultiWriter(&logbuf, os.Stdout)) - - stop, q := testPreparation(-1, &s, 10*time.Millisecond, time.Second, nil, &wg) + logSink := testutil.NewLogSink() + stop, q := testPreparationWithLogger(t, logSink, -1, &s, 10*time.Millisecond, time.Second, nil, &wg) q.AddEvent(newStubLogEvent("msg", time.Now())) time.Sleep(2 * time.Second) - loglines := strings.Split(strings.TrimSpace(logbuf.String()), "\n") - lastline := loglines[len(loglines)-1] - expected := fmt.Sprintf("All %v retries to G/S failed for PutLogEvents, request dropped.", cnt-1) - require.True(t, 
strings.HasSuffix(lastline, expected), fmt.Sprintf("Expecting error log to end with request dropped, but received '%s' in the log", logbuf.String())) - - log.SetOutput(os.Stderr) + logLines := logSink.Lines() + lastLine := logLines[len(logLines)-1] + expected := fmt.Sprintf("All %v retries to G/S failed for PutLogEvents, request dropped.", cnt.Load()-1) + require.True(t, strings.HasSuffix(lastLine, expected), fmt.Sprintf("Expecting error log to end with request dropped, but received '%s' in the log", logSink.String())) close(stop) wg.Wait() } +// Cannot call q.send() directly as it would cause a race condition. Reset last sent time and trigger flush. +func triggerSend(t *testing.T, q *queue) { + t.Helper() + q.lastSentTime.Store(time.Time{}) + q.flushCh <- struct{}{} +} + func testPreparation( + t *testing.T, + retention int, + service cloudWatchLogsService, + flushTimeout time.Duration, + retryDuration time.Duration, + entityProvider logs.LogEntityProvider, + wg *sync.WaitGroup, +) (chan struct{}, *queue) { + return testPreparationWithLogger( + t, + testutil.NewNopLogger(), + retention, + service, + flushTimeout, + retryDuration, + entityProvider, + wg, + ) +} + +func testPreparationWithLogger( + t *testing.T, + logger telegraf.Logger, retention int, - service *stubLogsService, + service cloudWatchLogsService, flushTimeout time.Duration, retryDuration time.Duration, entityProvider logs.LogEntityProvider, wg *sync.WaitGroup, ) (chan struct{}, *queue) { + t.Helper() stop := make(chan struct{}) - logger := testutil.Logger{Name: "test"} tm := NewTargetManager(logger, service) s := newSender(logger, service, tm, retryDuration, stop) q := newQueue( diff --git a/plugins/outputs/cloudwatchlogs/internal/pusher/retry.go b/plugins/outputs/cloudwatchlogs/internal/pusher/retry.go index edb12eb49d..b981211582 100644 --- a/plugins/outputs/cloudwatchlogs/internal/pusher/retry.go +++ b/plugins/outputs/cloudwatchlogs/internal/pusher/retry.go @@ -34,10 +34,6 @@ const ( maxRetryDelay 
= 1 * time.Minute ) -var ( - seededRand = rand.New(rand.NewSource(time.Now().UnixNano())) // nolint:gosec -) - type retryWaitStrategy int const ( @@ -61,7 +57,11 @@ func retryWait(baseRetryDelay time.Duration, maxBackoffRetries int, retryCount i if retryCount < maxBackoffRetries { d = baseRetryDelay * time.Duration(1< 0 { + if newGroup { + m.logger.Debugf("sending new log group %v to prp channel", target.Group) + m.prp <- target + } else { + m.logger.Debugf("sending existing log group %v to dlg channel", target.Group) + m.dlg <- target + } + } m.cache[target] = struct{}{} } return nil } -func (m *targetManager) createLogGroupAndStream(t Target) error { +func (m *targetManager) PutRetentionPolicy(target Target) { + // new pusher will call this so start with dlg + if target.Retention > 0 { + m.logger.Debugf("sending log group %v to dlg channel by pusher", target.Group) + m.dlg <- target + } +} + +func (m *targetManager) createLogGroupAndStream(t Target) (bool, error) { err := m.createLogStream(t) - if err == nil { - return nil + if m.isLogStreamCreated(err, t.Stream) { + return false, nil } - m.logger.Debugf("creating stream fail due to : %v", err) + m.logger.Debugf("creating stream %v fail due to: %v", t.Stream, err) if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == cloudwatchlogs.ErrCodeResourceNotFoundException { err = m.createLogGroup(t) // attempt to create stream again if group created successfully. - if err == nil { - m.logger.Debugf("successfully created log group %v. 
Retrying log stream %v", t.Group, t.Stream) + if m.isLogGroupCreated(err, t.Group) { + m.logger.Debugf("retrying log stream %v", t.Stream) err = m.createLogStream(t) + if m.isLogStreamCreated(err, t.Stream) { + return true, nil + } } else { - m.logger.Debugf("creating group fail due to : %v", err) + m.logger.Debugf("creating group %v fail due to: %v", t.Group, err) } } + return false, err +} + +func (m *targetManager) isLogGroupCreated(err error, group string) bool { + return m.isResourceCreated(err, fmt.Sprintf("log group %v", group)) +} + +func (m *targetManager) isLogStreamCreated(err error, stream string) bool { + return m.isResourceCreated(err, fmt.Sprintf("log stream %v", stream)) +} + +func (m *targetManager) isResourceCreated(err error, resourceName string) bool { + if err == nil { + return true + } + // if the resource already exist if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == cloudwatchlogs.ErrCodeResourceAlreadyExistsException { - m.logger.Debugf("Resource was already created. %v\n", err) - return nil // if the log group or log stream already exist, this is not worth returning an error for + m.logger.Debugf("%s was already created. 
%v\n", resourceName, err) + return true } - - return err + return false } func (m *targetManager) createLogGroup(t Target) error { - var err error + var input *cloudwatchlogs.CreateLogGroupInput if t.Class != "" { - _, err = m.service.CreateLogGroup(&cloudwatchlogs.CreateLogGroupInput{ + input = &cloudwatchlogs.CreateLogGroupInput{ LogGroupName: &t.Group, LogGroupClass: &t.Class, - }) + } } else { - _, err = m.service.CreateLogGroup(&cloudwatchlogs.CreateLogGroupInput{ + input = &cloudwatchlogs.CreateLogGroupInput{ LogGroupName: &t.Group, - }) + } + } + _, err := m.service.CreateLogGroup(input) + if err == nil { + m.logger.Debugf("successfully created log group %v", t.Group) + return nil } return err } @@ -109,26 +167,88 @@ func (m *targetManager) createLogStream(t Target) error { return err } -// PutRetentionPolicy tries to set the retention policy for a log group. Does not retry on failure. -func (m *targetManager) PutRetentionPolicy(t Target) { - if t.Retention > 0 { - i := aws.Int64(int64(t.Retention)) - putRetentionInput := &cloudwatchlogs.PutRetentionPolicyInput{ - LogGroupName: &t.Group, - RetentionInDays: i, +func (m *targetManager) processDescribeLogGroup() { + for target := range m.dlg { + for attempt := 0; attempt < numBackoffRetries; attempt++ { + currentRetention, err := m.getRetention(target) + if err != nil { + m.logger.Errorf("failed to describe log group retention for target %v: %v", target, err) + time.Sleep(m.calculateBackoff(attempt)) + continue + } + + if currentRetention != target.Retention && target.Retention > 0 { + m.logger.Debugf("queueing log group %v to update retention policy", target.Group) + m.prp <- target + } + break // no change in retention } - _, err := m.service.PutRetentionPolicy(putRetentionInput) - if err != nil { - // since this gets called both before we start pushing logs, and after we first attempt - // to push a log to a non-existent log group, we don't want to dirty the log with an error - // if the error is that the 
log group doesn't exist (yet). - if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == cloudwatchlogs.ErrCodeResourceNotFoundException { - m.logger.Debugf("Log group %v not created yet: %v", t.Group, err) - } else { - m.logger.Errorf("Unable to put retention policy for log group %v: %v ", t.Group, err) + } +} + +func (m *targetManager) getRetention(target Target) (int, error) { + input := &cloudwatchlogs.DescribeLogGroupsInput{ + LogGroupNamePrefix: aws.String(target.Group), + } + + output, err := m.service.DescribeLogGroups(input) + if err != nil { + return 0, fmt.Errorf("describe log groups failed: %w", err) + } + + for _, group := range output.LogGroups { + if *group.LogGroupName == target.Group { + if group.RetentionInDays == nil { + return 0, nil } - } else { - m.logger.Debugf("successfully updated log retention policy for log group %v", t.Group) + return int(*group.RetentionInDays), nil } } + + return 0, fmt.Errorf("log group %v not found", target.Group) +} + +func (m *targetManager) processPutRetentionPolicy() { + for target := range m.prp { + var updated bool + for attempt := 0; attempt < numBackoffRetries; attempt++ { + err := m.updateRetentionPolicy(target) + if err == nil { + updated = true + break + } + + m.logger.Debugf("retrying to update retention policy for target (%v) %v: %v", attempt, target, err) + time.Sleep(m.calculateBackoff(attempt)) + } + + if !updated { + m.logger.Errorf("failed to update retention policy for target %v after %d attempts", target, numBackoffRetries) + } + } +} + +func (m *targetManager) updateRetentionPolicy(target Target) error { + input := &cloudwatchlogs.PutRetentionPolicyInput{ + LogGroupName: aws.String(target.Group), + RetentionInDays: aws.Int64(int64(target.Retention)), + } + + _, err := m.service.PutRetentionPolicy(input) + if err != nil { + return fmt.Errorf("put retention policy failed: %w", err) + } + m.logger.Debugf("successfully updated retention policy for log group %v", target.Group) + return nil +} + 
+func (m *targetManager) calculateBackoff(retryCount int) time.Duration { + delay := baseRetryDelay + if retryCount < numBackoffRetries { + delay = baseRetryDelay * time.Duration(1< maxRetryDelayTarget { + delay = maxRetryDelayTarget + } + return withJitter(delay) } diff --git a/plugins/outputs/cloudwatchlogs/internal/pusher/target_test.go b/plugins/outputs/cloudwatchlogs/internal/pusher/target_test.go index 88a417f9e3..55e57c7b44 100644 --- a/plugins/outputs/cloudwatchlogs/internal/pusher/target_test.go +++ b/plugins/outputs/cloudwatchlogs/internal/pusher/target_test.go @@ -9,16 +9,17 @@ import ( "testing" "time" + "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "github.com/aws/amazon-cloudwatch-agent/sdk/service/cloudwatchlogs" + "github.com/aws/amazon-cloudwatch-agent/tool/testutil" ) func TestTargetManager(t *testing.T) { - logger := testutil.Logger{Name: "test"} + logger := testutil.NewNopLogger() t.Run("CreateLogStream", func(t *testing.T) { target := Target{Group: "G", Stream: "S"} @@ -31,6 +32,7 @@ func TestTargetManager(t *testing.T) { assert.NoError(t, err) mockService.AssertExpectations(t) + assertCacheLen(t, manager, 1) }) t.Run("CreateLogGroupAndStream", func(t *testing.T) { @@ -47,6 +49,58 @@ func TestTargetManager(t *testing.T) { assert.NoError(t, err) mockService.AssertExpectations(t) + assertCacheLen(t, manager, 1) + }) + + t.Run("CreateLogGroupAndStream/GroupAlreadyExists", func(t *testing.T) { + target := Target{Group: "G1", Stream: "S1"} + + mockService := new(mockLogsService) + mockService.On("CreateLogStream", mock.Anything). 
+ Return(&cloudwatchlogs.CreateLogStreamOutput{}, &cloudwatchlogs.ResourceNotFoundException{}).Once() + mockService.On("CreateLogGroup", mock.Anything).Return(&cloudwatchlogs.CreateLogGroupOutput{}, &cloudwatchlogs.ResourceAlreadyExistsException{}).Once() + mockService.On("CreateLogStream", mock.Anything).Return(&cloudwatchlogs.CreateLogStreamOutput{}, nil).Once() + + manager := NewTargetManager(logger, mockService) + err := manager.InitTarget(target) + + assert.NoError(t, err) + mockService.AssertExpectations(t) + assertCacheLen(t, manager, 1) + }) + + t.Run("CreateLogGroupAndStream/RetryStreamFail", func(t *testing.T) { + target := Target{Group: "G1", Stream: "S1"} + + mockService := new(mockLogsService) + mockService.On("CreateLogStream", mock.Anything). + Return(&cloudwatchlogs.CreateLogStreamOutput{}, &cloudwatchlogs.ResourceNotFoundException{}).Once() + mockService.On("CreateLogGroup", mock.Anything).Return(&cloudwatchlogs.CreateLogGroupOutput{}, &cloudwatchlogs.ResourceAlreadyExistsException{}).Once() + mockService.On("CreateLogStream", mock.Anything).Return(&cloudwatchlogs.CreateLogStreamOutput{}, &cloudwatchlogs.AccessDeniedException{}).Once() + + manager := NewTargetManager(logger, mockService) + err := manager.InitTarget(target) + + assert.Error(t, err) + mockService.AssertExpectations(t) + assertCacheLen(t, manager, 0) + }) + + t.Run("CreateLogGroupAndStream/RetryStreamAlreadyExists", func(t *testing.T) { + target := Target{Group: "G1", Stream: "S1"} + + mockService := new(mockLogsService) + mockService.On("CreateLogStream", mock.Anything). 
+ Return(&cloudwatchlogs.CreateLogStreamOutput{}, &cloudwatchlogs.ResourceNotFoundException{}).Once() + mockService.On("CreateLogGroup", mock.Anything).Return(&cloudwatchlogs.CreateLogGroupOutput{}, nil).Once() + mockService.On("CreateLogStream", mock.Anything).Return(&cloudwatchlogs.CreateLogStreamOutput{}, &cloudwatchlogs.ResourceAlreadyExistsException{}).Once() + + manager := NewTargetManager(logger, mockService) + err := manager.InitTarget(target) + + assert.NoError(t, err) + mockService.AssertExpectations(t) + assertCacheLen(t, manager, 1) }) t.Run("CreateLogGroup/Error", func(t *testing.T) { @@ -63,6 +117,7 @@ func TestTargetManager(t *testing.T) { assert.Error(t, err) mockService.AssertExpectations(t) + assertCacheLen(t, manager, 0) }) t.Run("SetRetentionPolicy", func(t *testing.T) { @@ -70,43 +125,90 @@ func TestTargetManager(t *testing.T) { mockService := new(mockLogsService) mockService.On("CreateLogStream", mock.Anything).Return(&cloudwatchlogs.CreateLogStreamOutput{}, nil).Once() + mockService.On("DescribeLogGroups", mock.Anything).Return(&cloudwatchlogs.DescribeLogGroupsOutput{ + LogGroups: []*cloudwatchlogs.LogGroup{ + { + LogGroupName: aws.String(target.Group), + RetentionInDays: aws.Int64(0), + }, + }, + }, nil).Once() mockService.On("PutRetentionPolicy", mock.Anything).Return(&cloudwatchlogs.PutRetentionPolicyOutput{}, nil).Once() manager := NewTargetManager(logger, mockService) err := manager.InitTarget(target) + assert.NoError(t, err) + // Wait for async operations to complete + time.Sleep(100 * time.Millisecond) + mockService.AssertExpectations(t) + assertCacheLen(t, manager, 1) + }) + t.Run("SetRetentionPolicy/NoChange", func(t *testing.T) { + target := Target{Group: "G", Stream: "S", Retention: 7} + + mockService := new(mockLogsService) + mockService.On("CreateLogStream", mock.Anything).Return(&cloudwatchlogs.CreateLogStreamOutput{}, nil).Once() + mockService.On("DescribeLogGroups", 
mock.Anything).Return(&cloudwatchlogs.DescribeLogGroupsOutput{ + LogGroups: []*cloudwatchlogs.LogGroup{ + { + LogGroupName: aws.String(target.Group), + RetentionInDays: aws.Int64(7), + }, + }, + }, nil).Once() + + manager := NewTargetManager(logger, mockService) + err := manager.InitTarget(target) assert.NoError(t, err) + time.Sleep(100 * time.Millisecond) mockService.AssertExpectations(t) + mockService.AssertNotCalled(t, "PutRetentionPolicy") + assertCacheLen(t, manager, 1) }) t.Run("SetRetentionPolicy/LogGroupNotFound", func(t *testing.T) { + t.Parallel() target := Target{Group: "G", Stream: "S", Retention: 7} mockService := new(mockLogsService) mockService.On("CreateLogStream", mock.Anything).Return(&cloudwatchlogs.CreateLogStreamOutput{}, nil).Once() - mockService.On("PutRetentionPolicy", mock.Anything). - Return(&cloudwatchlogs.PutRetentionPolicyOutput{}, &cloudwatchlogs.ResourceNotFoundException{}).Once() + mockService.On("DescribeLogGroups", mock.Anything). + Return(&cloudwatchlogs.DescribeLogGroupsOutput{}, &cloudwatchlogs.ResourceNotFoundException{}).Times(numBackoffRetries) manager := NewTargetManager(logger, mockService) err := manager.InitTarget(target) - - assert.NoError(t, err) // The overall operation should still succeed even if setting retention policy fails + assert.NoError(t, err) + time.Sleep(30 * time.Second) mockService.AssertExpectations(t) + mockService.AssertNotCalled(t, "PutRetentionPolicy") + assertCacheLen(t, manager, 1) }) t.Run("SetRetentionPolicy/Error", func(t *testing.T) { + t.Parallel() target := Target{Group: "G", Stream: "S", Retention: 7} mockService := new(mockLogsService) mockService.On("CreateLogStream", mock.Anything).Return(&cloudwatchlogs.CreateLogStreamOutput{}, nil).Once() + mockService.On("DescribeLogGroups", mock.Anything).Return(&cloudwatchlogs.DescribeLogGroupsOutput{ + LogGroups: []*cloudwatchlogs.LogGroup{ + { + LogGroupName: aws.String(target.Group), + RetentionInDays: aws.Int64(0), + }, + }, + }, nil).Once() 
mockService.On("PutRetentionPolicy", mock.Anything). - Return(&cloudwatchlogs.PutRetentionPolicyOutput{}, awserr.New("SomeAWSError", "Failed to set retention policy", nil)).Once() + Return(&cloudwatchlogs.PutRetentionPolicyOutput{}, + awserr.New("SomeAWSError", "Failed to set retention policy", nil)).Times(numBackoffRetries) manager := NewTargetManager(logger, mockService) err := manager.InitTarget(target) - - assert.NoError(t, err) // The overall operation should still succeed even if setting retention policy fails + assert.NoError(t, err) + time.Sleep(30 * time.Second) mockService.AssertExpectations(t) + assertCacheLen(t, manager, 1) }) t.Run("SetRetentionPolicy/Negative", func(t *testing.T) { @@ -118,6 +220,7 @@ func TestTargetManager(t *testing.T) { manager.PutRetentionPolicy(target) mockService.AssertNotCalled(t, "PutRetentionPolicy", mock.Anything) + assertCacheLen(t, manager, 0) }) t.Run("ConcurrentInit", func(t *testing.T) { @@ -147,5 +250,86 @@ func TestTargetManager(t *testing.T) { wg.Wait() assert.EqualValues(t, len(targets), count.Load()) + assertCacheLen(t, manager, 2) + }) + + t.Run("InitTarget/ZeroRetention", func(t *testing.T) { + target := Target{Group: "G", Stream: "S", Retention: 0} + + mockService := new(mockLogsService) + mockService.On("CreateLogStream", mock.Anything).Return(&cloudwatchlogs.CreateLogStreamOutput{}, nil).Once() + + manager := NewTargetManager(logger, mockService) + err := manager.InitTarget(target) + assert.NoError(t, err) + + mockService.AssertExpectations(t) + mockService.AssertNotCalled(t, "DescribeLogGroups") + mockService.AssertNotCalled(t, "PutRetentionPolicy") + assertCacheLen(t, manager, 1) + }) + + t.Run("NewLogGroup/SetRetention", func(t *testing.T) { + target := Target{Group: "G", Stream: "S", Retention: 7} + + mockService := new(mockLogsService) + // fails with ResourceNotFound + mockService.On("CreateLogStream", mock.Anything).Return(&cloudwatchlogs.CreateLogStreamOutput{}, 
awserr.New(cloudwatchlogs.ErrCodeResourceNotFoundException, "Log group not found", nil)).Once() + mockService.On("CreateLogGroup", mock.Anything).Return(&cloudwatchlogs.CreateLogGroupOutput{}, nil).Once() + mockService.On("CreateLogStream", mock.Anything).Return(&cloudwatchlogs.CreateLogStreamOutput{}, nil).Once() + // should be called directly without DescribeLogGroups + mockService.On("PutRetentionPolicy", mock.MatchedBy(func(input *cloudwatchlogs.PutRetentionPolicyInput) bool { + return *input.LogGroupName == target.Group && *input.RetentionInDays == int64(target.Retention) + })).Return(&cloudwatchlogs.PutRetentionPolicyOutput{}, nil).Once() + + manager := NewTargetManager(logger, mockService) + err := manager.InitTarget(target) + assert.NoError(t, err) + + time.Sleep(100 * time.Millisecond) + mockService.AssertExpectations(t) + mockService.AssertNotCalled(t, "DescribeLogGroups") + assertCacheLen(t, manager, 1) }) + + t.Run("NewLogGroup/RetentionError", func(t *testing.T) { + t.Parallel() + target := Target{Group: "G", Stream: "S", Retention: 7} + + mockService := new(mockLogsService) + mockService.On("CreateLogStream", mock.Anything).Return(&cloudwatchlogs.CreateLogStreamOutput{}, awserr.New(cloudwatchlogs.ErrCodeResourceNotFoundException, "Log group not found", nil)).Once() + mockService.On("CreateLogGroup", mock.Anything).Return(&cloudwatchlogs.CreateLogGroupOutput{}, nil).Once() + mockService.On("CreateLogStream", mock.Anything).Return(&cloudwatchlogs.CreateLogStreamOutput{}, nil).Once() + // fails but should retry + mockService.On("PutRetentionPolicy", mock.Anything).Return(&cloudwatchlogs.PutRetentionPolicyOutput{}, awserr.New("InternalError", "Internal error", nil)).Times(numBackoffRetries) + + manager := NewTargetManager(logger, mockService) + err := manager.InitTarget(target) + assert.NoError(t, err) + + time.Sleep(30 * time.Second) + + mockService.AssertExpectations(t) + mockService.AssertNotCalled(t, "DescribeLogGroups") + assertCacheLen(t, manager, 
1) + }) +} + +func TestCalculateBackoff(t *testing.T) { + manager := &targetManager{} + // should never exceed 30sec of total wait time + totalDelay := time.Duration(0) + for i := 0; i < numBackoffRetries; i++ { + delay := manager.calculateBackoff(i) + totalDelay += delay + } + assert.True(t, totalDelay <= 30*time.Second, "Total delay across all attempts should not exceed 30 seconds, but was %v", totalDelay) +} + +func assertCacheLen(t *testing.T, manager TargetManager, count int) { + t.Helper() + tm := manager.(*targetManager) + tm.mu.Lock() + defer tm.mu.Unlock() + assert.Len(t, tm.cache, count) } diff --git a/plugins/processors/awsapplicationsignals/factory.go b/plugins/processors/awsapplicationsignals/factory.go index 18b5cb5a54..9388bf9c28 100644 --- a/plugins/processors/awsapplicationsignals/factory.go +++ b/plugins/processors/awsapplicationsignals/factory.go @@ -44,7 +44,7 @@ func createDefaultConfig() component.Config { func createTracesProcessor( ctx context.Context, - set processor.CreateSettings, + set processor.Settings, cfg component.Config, next consumer.Traces, ) (processor.Traces, error) { @@ -53,7 +53,7 @@ func createTracesProcessor( return nil, err } - return processorhelper.NewTracesProcessor( + return processorhelper.NewTraces( ctx, set, cfg, @@ -66,7 +66,7 @@ func createTracesProcessor( func createMetricsProcessor( ctx context.Context, - set processor.CreateSettings, + set processor.Settings, cfg component.Config, nextMetricsConsumer consumer.Metrics, ) (processor.Metrics, error) { @@ -75,7 +75,7 @@ func createMetricsProcessor( return nil, err } - return processorhelper.NewMetricsProcessor( + return processorhelper.NewMetrics( ctx, set, cfg, @@ -87,7 +87,7 @@ func createMetricsProcessor( } func createProcessor( - params processor.CreateSettings, + params processor.Settings, cfg component.Config, ) (*awsapplicationsignalsprocessor, error) { pCfg, ok := cfg.(*appsignalsconfig.Config) diff --git 
a/plugins/processors/awsapplicationsignals/internal/resolver/endpointslicewatcher.go b/plugins/processors/awsapplicationsignals/internal/resolver/endpointslicewatcher.go new file mode 100644 index 0000000000..8f04bd546c --- /dev/null +++ b/plugins/processors/awsapplicationsignals/internal/resolver/endpointslicewatcher.go @@ -0,0 +1,293 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: MIT + +package resolver + +import ( + "fmt" + "sync" + + "go.uber.org/zap" + discv1 "k8s.io/api/discovery/v1" + "k8s.io/client-go/informers" + "k8s.io/client-go/tools/cache" +) + +// endpointSliceWatcher watches EndpointSlices and builds: +// 1. ip/ip:port -> "workload@namespace" +// 2. service@namespace -> "workload@namespace" +type endpointSliceWatcher struct { + logger *zap.Logger + informer cache.SharedIndexInformer + ipToWorkload *sync.Map // key: "ip" or "ip:port", val: "workload@ns" + serviceToWorkload *sync.Map // key: "service@namespace", val: "workload@ns" + + // For bookkeeping, so we can remove old mappings upon EndpointSlice deletion + sliceToKeysMap sync.Map // map[sliceUID string] -> []string of keys we inserted, which are "ip", "ip:port", or "service@namespace" + deleter Deleter +} + +// kvPair holds one mapping from key -> value. The isService flag +// indicates whether this key is for a Service or for an IP/IP:port. +type kvPair struct { + key string // key: "ip" or "ip:port" or "service@namespace" + value string // value: "workload@namespace" + isService bool // true if key = "service@namespace" +} + +// newEndpointSliceWatcher creates an EndpointSlice watcher for the new approach (when USE_LIST_POD=false). 
+func newEndpointSliceWatcher( + logger *zap.Logger, + factory informers.SharedInformerFactory, + deleter Deleter, +) *endpointSliceWatcher { + + esInformer := factory.Discovery().V1().EndpointSlices().Informer() + err := esInformer.SetTransform(minimizeEndpointSlice) + if err != nil { + logger.Error("failed to minimize Service objects", zap.Error(err)) + } + + return &endpointSliceWatcher{ + logger: logger, + informer: esInformer, + ipToWorkload: &sync.Map{}, + serviceToWorkload: &sync.Map{}, + deleter: deleter, + } +} + +// run starts the endpointSliceWatcher. +func (w *endpointSliceWatcher) Run(stopCh chan struct{}) { + w.informer.AddEventHandler(cache.ResourceEventHandlerFuncs{ + AddFunc: func(obj interface{}) { + w.handleSliceAdd(obj) + }, + UpdateFunc: func(oldObj, newObj interface{}) { + w.handleSliceUpdate(newObj, oldObj) + }, + DeleteFunc: func(obj interface{}) { + w.handleSliceDelete(obj) + }, + }) + go w.informer.Run(stopCh) +} + +func (w *endpointSliceWatcher) waitForCacheSync(stopCh chan struct{}) { + if !cache.WaitForNamedCacheSync("endpointSliceWatcher", stopCh, w.informer.HasSynced) { + w.logger.Fatal("timed out waiting for endpointSliceWatcher cache to sync") + } + w.logger.Info("endpointSliceWatcher: Cache synced") +} + +// extractEndpointSliceKeyValuePairs computes the relevant mappings from an EndpointSlice. +// +// It returns a list of kvPair: +// - All IP and IP:port keys (isService=false) -> "workload@ns" +// - The Service name key (isService=true) -> first "workload@ns" found +// +// This function does NOT modify ipToWorkload or serviceToWorkload. It's purely for computing +// the pairs, so it can be reused by both add and update methods. 
+func (w *endpointSliceWatcher) extractEndpointSliceKeyValuePairs(slice *discv1.EndpointSlice) []kvPair { + var pairs []kvPair + + isFirstPod := true + svcName := slice.Labels["kubernetes.io/service-name"] + + for _, endpoint := range slice.Endpoints { + if endpoint.TargetRef != nil { + if endpoint.TargetRef.Kind != "Pod" { + continue + } + + podName := endpoint.TargetRef.Name + ns := endpoint.TargetRef.Namespace + + derivedWorkload := inferWorkloadName(podName, svcName) + if derivedWorkload == "" { + w.logger.Warn("failed to infer workload name from Pod name", zap.String("podName", podName)) + continue + } + fullWl := derivedWorkload + "@" + ns + + // Build IP and IP:port pairs + for _, addr := range endpoint.Addresses { + // "ip" -> "workload@namespace" + pairs = append(pairs, kvPair{ + key: addr, + value: fullWl, + isService: false, + }) + + // "ip:port" -> "workload@namespace" for each port + for _, portDef := range slice.Ports { + if portDef.Port != nil { + ipPort := fmt.Sprintf("%s:%d", addr, *portDef.Port) + pairs = append(pairs, kvPair{ + key: ipPort, + value: fullWl, + isService: false, + }) + } + } + } + + // Build service name -> "workload@namespace" pair from the first pod + if isFirstPod { + isFirstPod = false + if svcName != "" { + pairs = append(pairs, kvPair{ + key: svcName + "@" + ns, + value: fullWl, + isService: true, + }) + } + } + } + + } + + return pairs +} + +// handleSliceAdd handles a new EndpointSlice that wasn't seen before. +// It computes all keys and directly stores them. Then it records those keys +// in sliceToKeysMap so that we can remove them later upon deletion. +func (w *endpointSliceWatcher) handleSliceAdd(obj interface{}) { + newSlice := obj.(*discv1.EndpointSlice) + sliceUID := string(newSlice.UID) + + // Compute all key-value pairs for this new slice + pairs := w.extractEndpointSliceKeyValuePairs(newSlice) + + // Insert them into our ipToWorkload / serviceToWorkload, and track the keys. 
+ keys := make([]string, 0, len(pairs)) + for _, kv := range pairs { + if kv.isService { + w.serviceToWorkload.Store(kv.key, kv.value) + } else { + w.ipToWorkload.Store(kv.key, kv.value) + } + keys = append(keys, kv.key) + } + + // Save these keys so we can remove them on delete + w.sliceToKeysMap.Store(sliceUID, keys) +} + +// handleSliceUpdate handles an update from oldSlice -> newSlice. +// Instead of blindly removing all old keys and adding new ones, it diffs them: +// - remove only keys that no longer exist, +// - add only new keys that didn't exist before, +// - keep those that haven't changed. +func (w *endpointSliceWatcher) handleSliceUpdate(oldObj, newObj interface{}) { + oldSlice := oldObj.(*discv1.EndpointSlice) + newSlice := newObj.(*discv1.EndpointSlice) + + oldUID := string(oldSlice.UID) + newUID := string(newSlice.UID) + + // 1) Fetch old keys from sliceToKeysMap (if present). + var oldKeys []string + if val, ok := w.sliceToKeysMap.Load(oldUID); ok { + oldKeys = val.([]string) + } + + // 2) Compute fresh pairs (and thus keys) from the new slice. + newPairs := w.extractEndpointSliceKeyValuePairs(newSlice) + var newKeys []string + for _, kv := range newPairs { + newKeys = append(newKeys, kv.key) + } + + // Convert oldKeys/newKeys to sets for easy diff + oldKeysSet := make(map[string]struct{}, len(oldKeys)) + for _, k := range oldKeys { + oldKeysSet[k] = struct{}{} + } + newKeysSet := make(map[string]struct{}, len(newKeys)) + for _, k := range newKeys { + newKeysSet[k] = struct{}{} + } + + // 3) For each key in oldKeys that doesn't exist in newKeys, remove it + for k := range oldKeysSet { + if _, stillPresent := newKeysSet[k]; !stillPresent { + w.deleter.DeleteWithDelay(w.ipToWorkload, k) + w.deleter.DeleteWithDelay(w.serviceToWorkload, k) + } + } + + // 4) For each key in newKeys that wasn't in oldKeys, we need to store it + // in the appropriate sync.Map. We'll look up the value from newPairs. 
+ for _, kv := range newPairs { + if _, alreadyHad := oldKeysSet[kv.key]; !alreadyHad { + if kv.isService { + w.serviceToWorkload.Store(kv.key, kv.value) + } else { + w.ipToWorkload.Store(kv.key, kv.value) + } + } + } + + // 5) Update sliceToKeysMap for the new slice UID + // (Often the UID doesn't change across updates, but we'll handle it properly.) + w.sliceToKeysMap.Delete(oldUID) + w.sliceToKeysMap.Store(newUID, newKeys) +} + +// handleSliceDelete removes any IP->workload or service->workload keys that were created by this slice. +func (w *endpointSliceWatcher) handleSliceDelete(obj interface{}) { + slice := obj.(*discv1.EndpointSlice) + w.removeSliceKeys(slice) +} + +func (w *endpointSliceWatcher) removeSliceKeys(slice *discv1.EndpointSlice) { + sliceUID := string(slice.UID) + val, ok := w.sliceToKeysMap.Load(sliceUID) + if !ok { + return + } + + keys := val.([]string) + for _, k := range keys { + w.deleter.DeleteWithDelay(w.ipToWorkload, k) + w.deleter.DeleteWithDelay(w.serviceToWorkload, k) + } + w.sliceToKeysMap.Delete(sliceUID) +} + +// minimizeEndpointSlice removes fields that are not required by our mapping logic, +// retaining only the minimal set of fields needed (ObjectMeta.Name, Namespace, UID, Labels, +// Endpoints (with their Addresses and TargetRef) and Ports). +func minimizeEndpointSlice(obj interface{}) (interface{}, error) { + eps, ok := obj.(*discv1.EndpointSlice) + if !ok { + return obj, fmt.Errorf("object is not an EndpointSlice") + } + + // Minimize metadata: we only really need Name, Namespace, UID and Labels. + eps.Annotations = nil + eps.ManagedFields = nil + eps.Finalizers = nil + + // The watcher only uses: + // - eps.Labels["kubernetes.io/service-name"] + // - eps.Namespace (from metadata) + // - eps.UID (from metadata) + // - eps.Endpoints: for each endpoint, its Addresses and TargetRef. + // - eps.Ports: each port's Port (and optionally Name/Protocol) + // + // For each endpoint, clear fields that we don’t use. 
+ for i := range eps.Endpoints { + // We only need Addresses and TargetRef. Hostname, NodeName, and Zone are not used. + eps.Endpoints[i].Hostname = nil + eps.Endpoints[i].NodeName = nil + eps.Endpoints[i].Zone = nil + eps.Endpoints[i].DeprecatedTopology = nil + eps.Endpoints[i].Hints = nil + } + + // No transformation is needed for eps.Ports because we use them directly. + return eps, nil +} diff --git a/plugins/processors/awsapplicationsignals/internal/resolver/endpointslicewatcher_test.go b/plugins/processors/awsapplicationsignals/internal/resolver/endpointslicewatcher_test.go new file mode 100644 index 0000000000..57bb9dc583 --- /dev/null +++ b/plugins/processors/awsapplicationsignals/internal/resolver/endpointslicewatcher_test.go @@ -0,0 +1,296 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: MIT + +package resolver + +import ( + "fmt" + "reflect" + "sort" + "sync" + "testing" + + "github.com/stretchr/testify/assert" + "go.uber.org/zap" + v1 "k8s.io/api/core/v1" + discv1 "k8s.io/api/discovery/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" +) + +func newEndpointSliceWatcherForTest() *endpointSliceWatcher { + return &endpointSliceWatcher{ + logger: zap.NewNop(), + ipToWorkload: &sync.Map{}, + serviceToWorkload: &sync.Map{}, + deleter: mockDeleter, + } +} + +// createTestEndpointSlice is a helper to build a minimal EndpointSlice. +// The slice will have one Endpoint (with its TargetRef) and a list of Ports. +// svcName is stored in the Labels (key "kubernetes.io/service-name") if non-empty. +func createTestEndpointSlice(uid, namespace, svcName, podName string, addresses []string, portNumbers []int32) *discv1.EndpointSlice { + // Build the port list. 
+ var ports []discv1.EndpointPort + for i, p := range portNumbers { + portVal := p // need a pointer + name := fmt.Sprintf("port-%d", i) + protocol := v1.ProtocolTCP + ports = append(ports, discv1.EndpointPort{ + Name: &name, + Protocol: &protocol, + Port: &portVal, + }) + } + + // Build a single endpoint with the given addresses and a TargetRef. + endpoint := discv1.Endpoint{ + Addresses: addresses, + TargetRef: &v1.ObjectReference{ + Kind: "Pod", + Name: podName, + Namespace: namespace, + }, + } + + labels := map[string]string{} + if svcName != "" { + labels["kubernetes.io/service-name"] = svcName + } + + return &discv1.EndpointSlice{ + ObjectMeta: metav1.ObjectMeta{ + UID: types.UID(uid), + Namespace: namespace, + Labels: labels, + }, + Endpoints: []discv1.Endpoint{endpoint}, + Ports: ports, + } +} + +// --- Tests --- + +// TestEndpointSliceAddition verifies that when a new EndpointSlice is added, +// the appropriate keys are inserted into the maps. +func TestEndpointSliceAddition(t *testing.T) { + watcher := newEndpointSliceWatcherForTest() + + // Create a test EndpointSlice: + // - UID: "uid-1", Namespace: "testns" + // - Labels: "kubernetes.io/service-name" = "mysvc" + // - One Endpoint with TargetRef.Kind "Pod", Name "workload-69dww", Namespace "testns" + // - Endpoint.Addresses: ["1.2.3.4"] + // - One Port with value 80. + slice := createTestEndpointSlice("uid-1", "testns", "mysvc", "workload-69dww", []string{"1.2.3.4"}, []int32{80}) + + // Call the add handler. + watcher.handleSliceAdd(slice) + + // The dummy inferWorkloadName returns "workload", so full workload becomes "workload@testns" + expectedVal := "workload@testns" + + // We expect the following keys: + // - For the endpoint: "1.2.3.4" and "1.2.3.4:80" + // - From the service label: "mysvc@testns" + var expectedIPKeys = []string{"1.2.3.4", "1.2.3.4:80"} + var expectedSvcKeys = []string{"mysvc@testns"} + + // Verify ipToWorkload. 
+ for _, key := range expectedIPKeys { + val, ok := watcher.ipToWorkload.Load(key) + assert.True(t, ok, "expected ipToWorkload key %s", key) + assert.Equal(t, expectedVal, val, "ipToWorkload[%s] mismatch", key) + } + + // Verify serviceToWorkload. + for _, key := range expectedSvcKeys { + val, ok := watcher.serviceToWorkload.Load(key) + assert.True(t, ok, "expected serviceToWorkload key %s", key) + assert.Equal(t, expectedVal, val, "serviceToWorkload[%s] mismatch", key) + } + + // Verify that sliceToKeysMap recorded all keys. + val, ok := watcher.sliceToKeysMap.Load(string(slice.UID)) + assert.True(t, ok, "expected sliceToKeysMap to contain UID %s", slice.UID) + keysIface := val.([]string) + // Sort for comparison. + sort.Strings(keysIface) + allExpected := append(expectedIPKeys, expectedSvcKeys...) + sort.Strings(allExpected) + assert.Equal(t, allExpected, keysIface, "sliceToKeysMap keys mismatch") +} + +// TestEndpointSliceDeletion verifies that when an EndpointSlice is deleted, +// all keys that were added are removed. +func TestEndpointSliceDeletion(t *testing.T) { + watcher := newEndpointSliceWatcherForTest() + + // Create a test EndpointSlice (same as addition test). + slice := createTestEndpointSlice("uid-1", "testns", "mysvc", "workload-76977669dc-lwx64", []string{"1.2.3.4"}, []int32{80}) + watcher.handleSliceAdd(slice) + + // Now call deletion. + watcher.handleSliceDelete(slice) + + // Verify that the keys are removed from ipToWorkload. + removedKeys := []string{"1.2.3.4", "1.2.3.4:80", "mysvc@testns"} + for _, key := range removedKeys { + _, ok := watcher.ipToWorkload.Load(key) + _, okSvc := watcher.serviceToWorkload.Load(key) + assert.False(t, ok, "expected ipToWorkload key %s to be deleted", key) + assert.False(t, okSvc, "expected serviceToWorkload key %s to be deleted", key) + } + + // Also verify that sliceToKeysMap no longer contains an entry. 
+ _, ok := watcher.sliceToKeysMap.Load(string(slice.UID)) + assert.False(t, ok, "expected sliceToKeysMap entry for UID %s to be deleted", slice.UID) +} + +// TestEndpointSliceUpdate verifies that on updates, keys are added and/or removed as appropriate. +func TestEndpointSliceUpdate(t *testing.T) { + // --- Subtest: Complete change (no overlap) --- + t.Run("complete change", func(t *testing.T) { + watcher := newEndpointSliceWatcherForTest() + + // Old slice: + // UID "uid-2", Namespace "testns", svc label "mysvc", + // One endpoint with TargetRef Name "workload-75d9d5968d-fx8px", Addresses ["1.2.3.4"], Port 80. + oldSlice := createTestEndpointSlice("uid-2", "testns", "mysvc", "workload-75d9d5968d-fx8px", []string{"1.2.3.4"}, []int32{80}) + watcher.handleSliceAdd(oldSlice) + + // New slice: same UID, but svc label changed to "othersvc" + // and a different endpoint: TargetRef Name "workload-6d9b7f8597-wbvxn", Addresses ["1.2.3.5"], Port 443. + newSlice := createTestEndpointSlice("uid-2", "testns", "othersvc", "workload-6d9b7f8597-wbvxn", []string{"1.2.3.5"}, []int32{443}) + + // Call update handler. + watcher.handleSliceUpdate(oldSlice, newSlice) + + expectedVal := "workload@testns" + + // Old keys that should be removed: + // "1.2.3.4" and "1.2.3.4:80" and service key "mysvc@testns" + removedKeys := []string{"1.2.3.4", "1.2.3.4:80", "mysvc@testns"} + for _, key := range removedKeys { + _, ok := watcher.ipToWorkload.Load(key) + _, okSvc := watcher.serviceToWorkload.Load(key) + assert.False(t, ok, "expected ipToWorkload key %s to be removed", key) + assert.False(t, okSvc, "expected serviceToWorkload key %s to be removed", key) + } + + // New keys that should be added: + // "1.2.3.5", "1.2.3.5:443", and service key "othersvc@testns" + addedKeys := []string{"1.2.3.5", "1.2.3.5:443", "othersvc@testns"} + for _, key := range addedKeys { + var val interface{} + var ok bool + // For service key, check serviceToWorkload; for others, check ipToWorkload. 
+ if key == "othersvc@testns" { + val, ok = watcher.serviceToWorkload.Load(key) + } else { + val, ok = watcher.ipToWorkload.Load(key) + } + assert.True(t, ok, "expected key %s to be added", key) + assert.Equal(t, expectedVal, val, "value for key %s mismatch", key) + } + + // Check that sliceToKeysMap now contains exactly the new keys. + val, ok := watcher.sliceToKeysMap.Load(string(newSlice.UID)) + assert.True(t, ok, "expected sliceToKeysMap entry for UID %s", newSlice.UID) + gotKeys := val.([]string) + sort.Strings(gotKeys) + expectedKeys := []string{"1.2.3.5", "1.2.3.5:443", "othersvc@testns"} + sort.Strings(expectedKeys) + assert.True(t, reflect.DeepEqual(expectedKeys, gotKeys), "sliceToKeysMap keys mismatch, got: %v, want: %v", gotKeys, expectedKeys) + }) + + // --- Subtest: Partial overlap --- + t.Run("partial overlap", func(t *testing.T) { + watcher := newEndpointSliceWatcherForTest() + + // Old slice: UID "uid-3", Namespace "testns", svc label "mysvc", + // with one endpoint: TargetRef "workload-6d9b7f8597-b5l2j", Addresses ["1.2.3.4"], Port 80. + oldSlice := createTestEndpointSlice("uid-3", "testns", "mysvc", "workload-6d9b7f8597-b5l2j", []string{"1.2.3.4"}, []int32{80}) + watcher.handleSliceAdd(oldSlice) + + // New slice: same UID, same svc label ("mysvc") but now two endpoints. + // First endpoint: same as before: Addresses ["1.2.3.4"], Port 80. + // Second endpoint: Addresses ["1.2.3.5"], Port 80. + // (Since svc label remains, the service key "mysvc@testns" remains the same.) + // We expect the new keys to be the union of: + // From first endpoint: "1.2.3.4", "1.2.3.4:80" + // From second endpoint: "1.2.3.5", "1.2.3.5:80" + // And the service key "mysvc@testns". + name := "port-0" + protocol := v1.ProtocolTCP + newSlice := &discv1.EndpointSlice{ + ObjectMeta: metav1.ObjectMeta{ + UID: "uid-3", // same UID + Namespace: "testns", + Labels: map[string]string{ + "kubernetes.io/service-name": "mysvc", + }, + }, + // Two endpoints. 
+ Endpoints: []discv1.Endpoint{ + { + Addresses: []string{"1.2.3.4"}, + TargetRef: &v1.ObjectReference{ + Kind: "Pod", + Name: "workload-6d9b7f8597-b5l2j", + Namespace: "testns", + }, + }, + { + Addresses: []string{"1.2.3.5"}, + TargetRef: &v1.ObjectReference{ + Kind: "Pod", + Name: "workload-6d9b7f8597-fx8px", + Namespace: "testns", + }, + }, + }, + // Single port: 80. + Ports: []discv1.EndpointPort{ + { + Name: &name, + Protocol: &protocol, + Port: func() *int32 { p := int32(80); return &p }(), + }, + }, + } + + // Call update handler. + watcher.handleSliceUpdate(oldSlice, newSlice) + + expectedVal := "workload@testns" + // Expected keys now: + // From endpoint 1: "1.2.3.4", "1.2.3.4:80" + // From endpoint 2: "1.2.3.5", "1.2.3.5:80" + // And service key: "mysvc@testns" + expectedKeysIP := []string{"1.2.3.4", "1.2.3.4:80", "1.2.3.5", "1.2.3.5:80"} + expectedKeysSvc := []string{"mysvc@testns"} + + // Verify that all expected keys are present. + for _, key := range expectedKeysIP { + val, ok := watcher.ipToWorkload.Load(key) + assert.True(t, ok, "expected ipToWorkload key %s", key) + assert.Equal(t, expectedVal, val, "ipToWorkload[%s] mismatch", key) + } + for _, key := range expectedKeysSvc { + val, ok := watcher.serviceToWorkload.Load(key) + assert.True(t, ok, "expected serviceToWorkload key %s", key) + assert.Equal(t, expectedVal, val, "serviceToWorkload[%s] mismatch", key) + } + + // And check that sliceToKeysMap contains the union of the keys. + val, ok := watcher.sliceToKeysMap.Load("uid-3") + assert.True(t, ok, "expected sliceToKeysMap to contain uid-3") + gotKeys := val.([]string) + allExpected := append(expectedKeysIP, expectedKeysSvc...) 
+ sort.Strings(gotKeys) + sort.Strings(allExpected) + assert.True(t, reflect.DeepEqual(allExpected, gotKeys), "sliceToKeysMap keys mismatch, got: %v, want: %v", gotKeys, allExpected) + }) +} diff --git a/plugins/processors/awsapplicationsignals/internal/resolver/kubernetes.go b/plugins/processors/awsapplicationsignals/internal/resolver/kubernetes.go index 753af4e60e..a6510de4e5 100644 --- a/plugins/processors/awsapplicationsignals/internal/resolver/kubernetes.go +++ b/plugins/processors/awsapplicationsignals/internal/resolver/kubernetes.go @@ -8,17 +8,15 @@ import ( "errors" "fmt" "math/rand" + "os" "sync" "time" - mapset "github.com/deckarep/golang-set/v2" "go.opentelemetry.io/collector/pdata/pcommon" semconv "go.opentelemetry.io/collector/semconv/v1.22.0" "go.uber.org/zap" - corev1 "k8s.io/api/core/v1" "k8s.io/client-go/informers" "k8s.io/client-go/kubernetes" - "k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/clientcmd" "github.com/aws/amazon-cloudwatch-agent/plugins/processors/awsapplicationsignals/common" @@ -39,39 +37,38 @@ const ( deletionDelay = 2 * time.Minute jitterKubernetesAPISeconds = 10 + + // this is an environmental variable that might deprecate in future + // when it's "true", we will use list pods API to get ip to workload mapping + // otherwise, we will use list endpoint slices API instead + appSignalsUseListPod = "APP_SIGNALS_USE_LIST_POD" ) type kubernetesResolver struct { - logger *zap.Logger - clientset kubernetes.Interface - clusterName string - platformCode string - ipToPod *sync.Map - podToWorkloadAndNamespace *sync.Map - ipToServiceAndNamespace *sync.Map - serviceAndNamespaceToSelectors *sync.Map - workloadAndNamespaceToLabels *sync.Map - serviceToWorkload *sync.Map // computed from serviceAndNamespaceToSelectors and workloadAndNamespaceToLabels every 1 min - workloadPodCount map[string]int - safeStopCh *safeChannel // trace and metric processors share the same kubernetesResolver and might close the same channel separately -} + 
logger *zap.Logger + clientset kubernetes.Interface + clusterName string + platformCode string + // if ListPod api is used, the following maps are needed + ipToPod *sync.Map + podToWorkloadAndNamespace *sync.Map + workloadAndNamespaceToLabels *sync.Map + workloadPodCount map[string]int -// a safe channel which can be closed multiple times -type safeChannel struct { - sync.Mutex + // if ListEndpointSlice api is used, the following maps are needed + ipToWorkloadAndNamespace *sync.Map - ch chan struct{} - closed bool -} + // if ListService api is used, the following maps are needed + ipToServiceAndNamespace *sync.Map + serviceAndNamespaceToSelectors *sync.Map -func (sc *safeChannel) Close() { - sc.Lock() - defer sc.Unlock() + // if ListPod and ListService apis are used, the serviceToWorkload map is computed by ServiceToWorkloadMapper + // from serviceAndNamespaceToSelectors and workloadAndNamespaceToLabels every 1 min + // if ListEndpointSlice is used, we can get serviceToWorkload directly from endpointSlice watcher + serviceToWorkload *sync.Map // - if !sc.closed { - close(sc.ch) - sc.closed = true - } + safeStopCh *safeChannel // trace and metric processors share the same kubernetesResolver and might close the same channel separately + useListPod bool } var ( @@ -84,378 +81,6 @@ func jitterSleep(seconds int) { time.Sleep(jitter) } -// Deleter represents a type that can delete a key from a map after a certain delay. -type Deleter interface { - DeleteWithDelay(m *sync.Map, key interface{}) -} - -// TimedDeleter deletes a key after a specified delay. 
-type TimedDeleter struct { - Delay time.Duration -} - -func (td *TimedDeleter) DeleteWithDelay(m *sync.Map, key interface{}) { - go func() { - time.Sleep(td.Delay) - m.Delete(key) - }() -} - -func (s *serviceWatcher) onAddOrUpdateService(service *corev1.Service) { - // service can also have an external IP (or ingress IP) that could be accessed - // this field can be either an IP address (in some edge case) or a hostname (see "EXTERNAL-IP" column in "k get svc" output) - // [ec2-user@ip-172-31-11-104 one-step]$ k get svc -A - // NAMESPACE NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE - // default pet-clinic-frontend ClusterIP 10.100.216.182 8080/TCP 108m - // default vets-service ClusterIP 10.100.62.167 8083/TCP 108m - // default visits-service ClusterIP 10.100.96.5 8082/TCP 108m - // ingress-nginx default-http-backend ClusterIP 10.100.11.231 80/TCP 108m - // ingress-nginx ingress-nginx LoadBalancer 10.100.154.5 aex7997ece08c435dbd2b912fd5aa5bd-5372117830.xxxxx.elb.amazonaws.com 80:32080/TCP,443:32081/TCP,9113:30410/TCP 108m - // kube-system kube-dns ClusterIP 10.100.0.10 - // - // we ignore such case for now and may need to consider it in the future - if service.Spec.ClusterIP != "" && service.Spec.ClusterIP != corev1.ClusterIPNone { - s.ipToServiceAndNamespace.Store(service.Spec.ClusterIP, getServiceAndNamespace(service)) - } - labelSet := mapset.NewSet[string]() - for key, value := range service.Spec.Selector { - labelSet.Add(key + "=" + value) - } - if labelSet.Cardinality() > 0 { - s.serviceAndNamespaceToSelectors.Store(getServiceAndNamespace(service), labelSet) - } -} - -func (s *serviceWatcher) onDeleteService(service *corev1.Service, deleter Deleter) { - if service.Spec.ClusterIP != "" && service.Spec.ClusterIP != corev1.ClusterIPNone { - deleter.DeleteWithDelay(s.ipToServiceAndNamespace, service.Spec.ClusterIP) - } - deleter.DeleteWithDelay(s.serviceAndNamespaceToSelectors, getServiceAndNamespace(service)) -} - -func (p *podWatcher) 
removeHostNetworkRecords(pod *corev1.Pod) { - for _, port := range getHostNetworkPorts(pod) { - p.deleter.DeleteWithDelay(p.ipToPod, pod.Status.HostIP+":"+port) - } -} - -func (p *podWatcher) handlePodAdd(pod *corev1.Pod) { - if pod.Spec.HostNetwork && pod.Status.HostIP != "" { - for _, port := range getHostNetworkPorts(pod) { - p.ipToPod.Store(pod.Status.HostIP+":"+port, pod.Name) - } - } - if pod.Status.PodIP != "" { - p.ipToPod.Store(pod.Status.PodIP, pod.Name) - } -} - -func (p *podWatcher) handlePodUpdate(newPod *corev1.Pod, oldPod *corev1.Pod) { - // HostNetwork is an immutable field - if newPod.Spec.HostNetwork && oldPod.Status.HostIP != newPod.Status.HostIP { - if oldPod.Status.HostIP != "" { - p.logger.Debug("deleting host ip from cache", zap.String("hostNetwork", oldPod.Status.HostIP)) - p.removeHostNetworkRecords(oldPod) - } - if newPod.Status.HostIP != "" { - for _, port := range getHostNetworkPorts(newPod) { - p.ipToPod.Store(newPod.Status.HostIP+":"+port, newPod.Name) - } - } - } - if oldPod.Status.PodIP != newPod.Status.PodIP { - if oldPod.Status.PodIP != "" { - p.logger.Debug("deleting pod ip from cache", zap.String("podNetwork", oldPod.Status.PodIP)) - p.deleter.DeleteWithDelay(p.ipToPod, oldPod.Status.PodIP) - } - if newPod.Status.PodIP != "" { - p.ipToPod.Store(newPod.Status.PodIP, newPod.Name) - } - } -} - -func (p *podWatcher) onAddOrUpdatePod(pod, oldPod *corev1.Pod) { - if oldPod == nil { - p.handlePodAdd(pod) - } else { - p.handlePodUpdate(pod, oldPod) - } - - workloadAndNamespace := getWorkloadAndNamespace(pod) - - if workloadAndNamespace != "" { - p.podToWorkloadAndNamespace.Store(pod.Name, workloadAndNamespace) - podLabels := mapset.NewSet[string]() - for key, value := range pod.ObjectMeta.Labels { - podLabels.Add(key + "=" + value) - } - if podLabels.Cardinality() > 0 { - p.workloadAndNamespaceToLabels.Store(workloadAndNamespace, podLabels) - } - if oldPod == nil { - p.workloadPodCount[workloadAndNamespace]++ - p.logger.Debug("Added 
pod", zap.String("pod", pod.Name), zap.String("workload", workloadAndNamespace), zap.Int("count", p.workloadPodCount[workloadAndNamespace])) - } - } -} - -func (p *podWatcher) onDeletePod(obj interface{}) { - pod := obj.(*corev1.Pod) - if pod.Spec.HostNetwork && pod.Status.HostIP != "" { - p.logger.Debug("deleting host ip from cache", zap.String("hostNetwork", pod.Status.HostIP)) - p.removeHostNetworkRecords(pod) - } - if pod.Status.PodIP != "" { - p.logger.Debug("deleting pod ip from cache", zap.String("podNetwork", pod.Status.PodIP)) - p.deleter.DeleteWithDelay(p.ipToPod, pod.Status.PodIP) - } - - if workloadKey, ok := p.podToWorkloadAndNamespace.Load(pod.Name); ok { - workloadAndNamespace := workloadKey.(string) - p.workloadPodCount[workloadAndNamespace]-- - p.logger.Debug("decrementing pod count", zap.String("workload", workloadAndNamespace), zap.Int("podCount", p.workloadPodCount[workloadAndNamespace])) - if p.workloadPodCount[workloadAndNamespace] == 0 { - p.deleter.DeleteWithDelay(p.workloadAndNamespaceToLabels, workloadAndNamespace) - } - } else { - p.logger.Error("failed to load pod workloadKey", zap.String("pod", pod.Name)) - } - p.deleter.DeleteWithDelay(p.podToWorkloadAndNamespace, pod.Name) -} - -type podWatcher struct { - ipToPod *sync.Map - podToWorkloadAndNamespace *sync.Map - workloadAndNamespaceToLabels *sync.Map - workloadPodCount map[string]int - logger *zap.Logger - informer cache.SharedIndexInformer - deleter Deleter -} - -func newPodWatcher(logger *zap.Logger, informer cache.SharedIndexInformer, deleter Deleter) *podWatcher { - return &podWatcher{ - ipToPod: &sync.Map{}, - podToWorkloadAndNamespace: &sync.Map{}, - workloadAndNamespaceToLabels: &sync.Map{}, - workloadPodCount: make(map[string]int), - logger: logger, - informer: informer, - deleter: deleter, - } -} - -func (p *podWatcher) run(stopCh chan struct{}) { - p.informer.AddEventHandler(cache.ResourceEventHandlerFuncs{ - AddFunc: func(obj interface{}) { - pod := obj.(*corev1.Pod) - 
p.logger.Debug("list and watch for pod: ADD " + pod.Name) - p.onAddOrUpdatePod(pod, nil) - }, - UpdateFunc: func(oldObj, newObj interface{}) { - pod := newObj.(*corev1.Pod) - oldPod := oldObj.(*corev1.Pod) - p.logger.Debug("list and watch for pods: UPDATE " + pod.Name) - p.onAddOrUpdatePod(pod, oldPod) - }, - DeleteFunc: func(obj interface{}) { - pod := obj.(*corev1.Pod) - p.logger.Debug("list and watch for pods: DELETE " + pod.Name) - p.onDeletePod(obj) - }, - }) - - go p.informer.Run(stopCh) - -} - -func (p *podWatcher) waitForCacheSync(stopCh chan struct{}) { - if !cache.WaitForNamedCacheSync("podWatcher", stopCh, p.informer.HasSynced) { - p.logger.Fatal("timed out waiting for kubernetes pod watcher caches to sync") - } - - p.logger.Info("podWatcher: Cache synced") -} - -type serviceWatcher struct { - ipToServiceAndNamespace *sync.Map - serviceAndNamespaceToSelectors *sync.Map - logger *zap.Logger - informer cache.SharedIndexInformer - deleter Deleter -} - -func newServiceWatcher(logger *zap.Logger, informer cache.SharedIndexInformer, deleter Deleter) *serviceWatcher { - return &serviceWatcher{ - ipToServiceAndNamespace: &sync.Map{}, - serviceAndNamespaceToSelectors: &sync.Map{}, - logger: logger, - informer: informer, - deleter: deleter, - } -} - -func (s *serviceWatcher) Run(stopCh chan struct{}) { - s.informer.AddEventHandler(cache.ResourceEventHandlerFuncs{ - AddFunc: func(obj interface{}) { - service := obj.(*corev1.Service) - s.logger.Debug("list and watch for services: ADD " + service.Name) - s.onAddOrUpdateService(service) - }, - UpdateFunc: func(oldObj, newObj interface{}) { - service := newObj.(*corev1.Service) - s.logger.Debug("list and watch for services: UPDATE " + service.Name) - s.onAddOrUpdateService(service) - }, - DeleteFunc: func(obj interface{}) { - service := obj.(*corev1.Service) - s.logger.Debug("list and watch for services: DELETE " + service.Name) - s.onDeleteService(service, s.deleter) - }, - }) - go s.informer.Run(stopCh) -} - -func (s 
*serviceWatcher) waitForCacheSync(stopCh chan struct{}) { - if !cache.WaitForNamedCacheSync("serviceWatcher", stopCh, s.informer.HasSynced) { - s.logger.Fatal("timed out waiting for kubernetes service watcher caches to sync") - } - - s.logger.Info("serviceWatcher: Cache synced") -} - -type serviceToWorkloadMapper struct { - serviceAndNamespaceToSelectors *sync.Map - workloadAndNamespaceToLabels *sync.Map - serviceToWorkload *sync.Map - logger *zap.Logger - deleter Deleter -} - -func newServiceToWorkloadMapper(serviceAndNamespaceToSelectors, workloadAndNamespaceToLabels, serviceToWorkload *sync.Map, logger *zap.Logger, deleter Deleter) *serviceToWorkloadMapper { - return &serviceToWorkloadMapper{ - serviceAndNamespaceToSelectors: serviceAndNamespaceToSelectors, - workloadAndNamespaceToLabels: workloadAndNamespaceToLabels, - serviceToWorkload: serviceToWorkload, - logger: logger, - deleter: deleter, - } -} - -func (m *serviceToWorkloadMapper) mapServiceToWorkload() { - m.logger.Debug("Map service to workload at:", zap.Time("time", time.Now())) - - m.serviceAndNamespaceToSelectors.Range(func(key, value interface{}) bool { - var workloads []string - serviceAndNamespace := key.(string) - _, serviceNamespace := extractResourceAndNamespace(serviceAndNamespace) - serviceLabels := value.(mapset.Set[string]) - - m.workloadAndNamespaceToLabels.Range(func(workloadKey, labelsValue interface{}) bool { - labels := labelsValue.(mapset.Set[string]) - workloadAndNamespace := workloadKey.(string) - _, workloadNamespace := extractResourceAndNamespace(workloadAndNamespace) - if workloadNamespace == serviceNamespace && workloadNamespace != "" && serviceLabels.IsSubset(labels) { - m.logger.Debug("Found workload for service", zap.String("service", serviceAndNamespace), zap.String("workload", workloadAndNamespace)) - workloads = append(workloads, workloadAndNamespace) - } - - return true - }) - - if len(workloads) > 1 { - m.logger.Info("Multiple workloads found for service. 
You will get unexpected results.", zap.String("service", serviceAndNamespace), zap.Strings("workloads", workloads)) - } else if len(workloads) == 1 { - m.serviceToWorkload.Store(serviceAndNamespace, workloads[0]) - } else { - m.logger.Debug("No workload found for service", zap.String("service", serviceAndNamespace)) - m.deleter.DeleteWithDelay(m.serviceToWorkload, serviceAndNamespace) - } - return true - }) -} - -func (m *serviceToWorkloadMapper) Start(stopCh chan struct{}) { - // do the first mapping immediately - m.mapServiceToWorkload() - m.logger.Debug("First-time map service to workload at:", zap.Time("time", time.Now())) - - go func() { - for { - select { - case <-stopCh: - return - case <-time.After(time.Minute + 30*time.Second): - m.mapServiceToWorkload() - m.logger.Debug("Map service to workload at:", zap.Time("time", time.Now())) - } - } - }() -} - -// minimizePod removes fields that could contain large objects, and retain essential -// fields needed for IP/name translation. The following fields must be kept: -// - ObjectMeta: Namespace, Name, Labels, OwnerReference -// - Spec: HostNetwork, ContainerPorts -// - Status: PodIP/s, HostIP/s -func minimizePod(obj interface{}) (interface{}, error) { - if pod, ok := obj.(*corev1.Pod); ok { - pod.Annotations = nil - pod.Finalizers = nil - pod.ManagedFields = nil - - pod.Spec.Volumes = nil - pod.Spec.InitContainers = nil - pod.Spec.EphemeralContainers = nil - pod.Spec.ImagePullSecrets = nil - pod.Spec.HostAliases = nil - pod.Spec.SchedulingGates = nil - pod.Spec.ResourceClaims = nil - pod.Spec.Tolerations = nil - pod.Spec.Affinity = nil - - pod.Status.InitContainerStatuses = nil - pod.Status.ContainerStatuses = nil - pod.Status.EphemeralContainerStatuses = nil - - for i := 0; i < len(pod.Spec.Containers); i++ { - c := &pod.Spec.Containers[i] - c.Image = "" - c.Command = nil - c.Args = nil - c.EnvFrom = nil - c.Env = nil - c.Resources = corev1.ResourceRequirements{} - c.VolumeMounts = nil - c.VolumeDevices = nil - 
c.SecurityContext = nil - } - } - return obj, nil -} - -// minimizeService removes fields that could contain large objects, and retain essential -// fields needed for IP/name translation. The following fields must be kept: -// - ObjectMeta: Namespace, Name -// - Spec: Selectors, ClusterIP -func minimizeService(obj interface{}) (interface{}, error) { - if svc, ok := obj.(*corev1.Service); ok { - svc.Annotations = nil - svc.Finalizers = nil - svc.ManagedFields = nil - - svc.Spec.LoadBalancerSourceRanges = nil - svc.Spec.SessionAffinityConfig = nil - svc.Spec.IPFamilies = nil - svc.Spec.IPFamilyPolicy = nil - svc.Spec.InternalTrafficPolicy = nil - svc.Spec.InternalTrafficPolicy = nil - - svc.Status.Conditions = nil - } - return obj, nil -} - func getKubernetesResolver(platformCode, clusterName string, logger *zap.Logger) subResolver { once.Do(func() { config, err := clientcmd.BuildConfigFromFlags("", "") @@ -471,47 +96,74 @@ func getKubernetesResolver(platformCode, clusterName string, logger *zap.Logger) // jitter calls to the kubernetes api jitterSleep(jitterKubernetesAPISeconds) - sharedInformerFactory := informers.NewSharedInformerFactory(clientset, 0) - podInformer := sharedInformerFactory.Core().V1().Pods().Informer() - err = podInformer.SetTransform(minimizePod) - if err != nil { - logger.Error("failed to minimize Pod objects", zap.Error(err)) - } - serviceInformer := sharedInformerFactory.Core().V1().Services().Informer() - err = serviceInformer.SetTransform(minimizeService) - if err != nil { - logger.Error("failed to minimize Service objects", zap.Error(err)) - } + useListPod := (os.Getenv(appSignalsUseListPod) == "true") + + if useListPod { + sharedInformerFactory := informers.NewSharedInformerFactory(clientset, 0) + timedDeleter := &TimedDeleter{Delay: deletionDelay} + + poWatcher := newPodWatcher(logger, sharedInformerFactory, timedDeleter) + svcWatcher := newServiceWatcher(logger, sharedInformerFactory, timedDeleter) + + safeStopCh := &safeChannel{ch: 
make(chan struct{}), closed: false} + // initialize the pod and service watchers for the cluster + poWatcher.run(safeStopCh.ch) + svcWatcher.Run(safeStopCh.ch) + // wait for caches to sync (for once) so that clients knows about the pods and services in the cluster + poWatcher.waitForCacheSync(safeStopCh.ch) + svcWatcher.waitForCacheSync(safeStopCh.ch) + + serviceToWorkload := &sync.Map{} + svcToWorkloadMapper := newServiceToWorkloadMapper(svcWatcher.serviceAndNamespaceToSelectors, poWatcher.workloadAndNamespaceToLabels, serviceToWorkload, logger, timedDeleter) + svcToWorkloadMapper.Start(safeStopCh.ch) + + instance = &kubernetesResolver{ + logger: logger, + clientset: clientset, + clusterName: clusterName, + platformCode: platformCode, + ipToServiceAndNamespace: svcWatcher.ipToServiceAndNamespace, + serviceAndNamespaceToSelectors: svcWatcher.serviceAndNamespaceToSelectors, + ipToPod: poWatcher.ipToPod, + podToWorkloadAndNamespace: poWatcher.podToWorkloadAndNamespace, + workloadAndNamespaceToLabels: poWatcher.workloadAndNamespaceToLabels, + serviceToWorkload: serviceToWorkload, + workloadPodCount: poWatcher.workloadPodCount, + ipToWorkloadAndNamespace: nil, + safeStopCh: safeStopCh, + useListPod: useListPod, + } + } else { + sharedInformerFactory := informers.NewSharedInformerFactory(clientset, 0) + timedDeleter := &TimedDeleter{Delay: deletionDelay} + + svcWatcher := newServiceWatcher(logger, sharedInformerFactory, timedDeleter) + endptSliceWatcher := newEndpointSliceWatcher(logger, sharedInformerFactory, timedDeleter) + + safeStopCh := &safeChannel{ch: make(chan struct{}), closed: false} + // initialize the pod and service watchers for the cluster + svcWatcher.Run(safeStopCh.ch) + endptSliceWatcher.Run(safeStopCh.ch) + // wait for caches to sync (for once) so that clients knows about the pods and services in the cluster + svcWatcher.waitForCacheSync(safeStopCh.ch) + endptSliceWatcher.waitForCacheSync(safeStopCh.ch) + + instance = &kubernetesResolver{ + logger: 
logger, + clientset: clientset, + clusterName: clusterName, + platformCode: platformCode, + ipToWorkloadAndNamespace: endptSliceWatcher.ipToWorkload, // endpointSlice provides pod IP → workload mapping + ipToPod: nil, + podToWorkloadAndNamespace: nil, + workloadAndNamespaceToLabels: nil, + workloadPodCount: nil, + ipToServiceAndNamespace: svcWatcher.ipToServiceAndNamespace, + serviceToWorkload: endptSliceWatcher.serviceToWorkload, // endpointSlice also provides service → workload mapping + safeStopCh: safeStopCh, + useListPod: useListPod, + } - timedDeleter := &TimedDeleter{Delay: deletionDelay} - poWatcher := newPodWatcher(logger, podInformer, timedDeleter) - svcWatcher := newServiceWatcher(logger, serviceInformer, timedDeleter) - - safeStopCh := &safeChannel{ch: make(chan struct{}), closed: false} - // initialize the pod and service watchers for the cluster - poWatcher.run(safeStopCh.ch) - svcWatcher.Run(safeStopCh.ch) - // wait for caches to sync (for once) so that clients knows about the pods and services in the cluster - poWatcher.waitForCacheSync(safeStopCh.ch) - svcWatcher.waitForCacheSync(safeStopCh.ch) - - serviceToWorkload := &sync.Map{} - svcToWorkloadMapper := newServiceToWorkloadMapper(svcWatcher.serviceAndNamespaceToSelectors, poWatcher.workloadAndNamespaceToLabels, serviceToWorkload, logger, timedDeleter) - svcToWorkloadMapper.Start(safeStopCh.ch) - - instance = &kubernetesResolver{ - logger: logger, - clientset: clientset, - clusterName: clusterName, - platformCode: platformCode, - ipToServiceAndNamespace: svcWatcher.ipToServiceAndNamespace, - serviceAndNamespaceToSelectors: svcWatcher.serviceAndNamespaceToSelectors, - ipToPod: poWatcher.ipToPod, - podToWorkloadAndNamespace: poWatcher.podToWorkloadAndNamespace, - workloadAndNamespaceToLabels: poWatcher.workloadAndNamespaceToLabels, - serviceToWorkload: serviceToWorkload, - workloadPodCount: poWatcher.workloadPodCount, - safeStopCh: safeStopCh, } }) @@ -526,9 +178,19 @@ func (e *kubernetesResolver) 
Stop(_ context.Context) error { // add a method to kubernetesResolver func (e *kubernetesResolver) getWorkloadAndNamespaceByIP(ip string) (string, string, error) { var workload, namespace string - if podKey, ok := e.ipToPod.Load(ip); ok { - pod := podKey.(string) - if workloadKey, ok := e.podToWorkloadAndNamespace.Load(pod); ok { + + if e.useListPod { + // use results from pod watcher + if podKey, ok := e.ipToPod.Load(ip); ok { + pod := podKey.(string) + if workloadKey, ok := e.podToWorkloadAndNamespace.Load(pod); ok { + workload, namespace = extractResourceAndNamespace(workloadKey.(string)) + return workload, namespace, nil + } + } + } else { + // use results from endpoint slice watcher + if workloadKey, ok := e.ipToWorkloadAndNamespace.Load(ip); ok { workload, namespace = extractResourceAndNamespace(workloadKey.(string)) return workload, namespace, nil } @@ -541,7 +203,6 @@ func (e *kubernetesResolver) getWorkloadAndNamespaceByIP(ip string) (string, str return workload, namespace, nil } } - return "", "", errors.New("no kubernetes workload found for ip: " + ip) } diff --git a/plugins/processors/awsapplicationsignals/internal/resolver/kubernetes_test.go b/plugins/processors/awsapplicationsignals/internal/resolver/kubernetes_test.go index 254a02c14c..a0ce5b2225 100644 --- a/plugins/processors/awsapplicationsignals/internal/resolver/kubernetes_test.go +++ b/plugins/processors/awsapplicationsignals/internal/resolver/kubernetes_test.go @@ -9,15 +9,11 @@ import ( "strings" "sync" "testing" - "time" - mapset "github.com/deckarep/golang-set/v2" "github.com/stretchr/testify/assert" "go.opentelemetry.io/collector/pdata/pcommon" semconv "go.opentelemetry.io/collector/semconv/v1.22.0" "go.uber.org/zap" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "github.com/aws/amazon-cloudwatch-agent/plugins/processors/awsapplicationsignals/common" "github.com/aws/amazon-cloudwatch-agent/plugins/processors/awsapplicationsignals/config" @@ -34,677 +30,6 @@ 
func (md *MockDeleter) DeleteWithDelay(m *sync.Map, key interface{}) { var mockDeleter = &MockDeleter{} -// TestAttachNamespace function -func TestAttachNamespace(t *testing.T) { - result := attachNamespace("testResource", "testNamespace") - if result != "testResource@testNamespace" { - t.Errorf("attachNamespace was incorrect, got: %s, want: %s.", result, "testResource@testNamespace") - } -} - -// TestGetServiceAndNamespace function -func TestGetServiceAndNamespace(t *testing.T) { - service := &corev1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Name: "testService", - Namespace: "testNamespace", - }, - } - result := getServiceAndNamespace(service) - if result != "testService@testNamespace" { - t.Errorf("getServiceAndNamespace was incorrect, got: %s, want: %s.", result, "testService@testNamespace") - } -} - -// TestExtractResourceAndNamespace function -func TestExtractResourceAndNamespace(t *testing.T) { - // Test normal case - name, namespace := extractResourceAndNamespace("testService@testNamespace") - if name != "testService" || namespace != "testNamespace" { - t.Errorf("extractResourceAndNamespace was incorrect, got: %s and %s, want: %s and %s.", name, namespace, "testService", "testNamespace") - } - - // Test invalid case - name, namespace = extractResourceAndNamespace("invalid") - if name != "" || namespace != "" { - t.Errorf("extractResourceAndNamespace was incorrect, got: %s and %s, want: %s and %s.", name, namespace, "", "") - } -} - -func TestExtractWorkloadNameFromRS(t *testing.T) { - testCases := []struct { - name string - replicaSetName string - want string - shouldErr bool - }{ - { - name: "Valid ReplicaSet Name", - replicaSetName: "my-deployment-5859ffc7ff", - want: "my-deployment", - shouldErr: false, - }, - { - name: "Invalid ReplicaSet Name - No Hyphen", - replicaSetName: "mydeployment5859ffc7ff", - want: "", - shouldErr: true, - }, - { - name: "Invalid ReplicaSet Name - Less Than 10 Suffix Characters", - replicaSetName: "my-deployment-bc2", - want: 
"", - shouldErr: true, - }, - { - name: "Invalid ReplicaSet Name - More Than 10 Suffix Characters", - replicaSetName: "my-deployment-5859ffc7ffx", - want: "", - shouldErr: true, - }, - { - name: "Invalid ReplicaSet Name - Invalid Characters in Suffix", - replicaSetName: "my-deployment-aeiou12345", - want: "", - shouldErr: true, - }, - { - name: "Invalid ReplicaSet Name - Empty String", - replicaSetName: "", - want: "", - shouldErr: true, - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - got, err := extractWorkloadNameFromRS(tc.replicaSetName) - - if (err != nil) != tc.shouldErr { - t.Errorf("extractWorkloadNameFromRS() error = %v, wantErr %v", err, tc.shouldErr) - return - } - - if got != tc.want { - t.Errorf("extractWorkloadNameFromRS() = %v, want %v", got, tc.want) - } - }) - } -} - -func TestExtractWorkloadNameFromPodName(t *testing.T) { - testCases := []struct { - name string - podName string - want string - shouldErr bool - }{ - { - name: "Valid Pod Name", - podName: "my-replicaset-bc24f", - want: "my-replicaset", - shouldErr: false, - }, - { - name: "Invalid Pod Name - No Hyphen", - podName: "myreplicasetbc24f", - want: "", - shouldErr: true, - }, - { - name: "Invalid Pod Name - Less Than 5 Suffix Characters", - podName: "my-replicaset-bc2", - want: "", - shouldErr: true, - }, - { - name: "Invalid Pod Name - More Than 5 Suffix Characters", - podName: "my-replicaset-bc24f5", - want: "", - shouldErr: true, - }, - { - name: "Invalid Pod Name - Empty String", - podName: "", - want: "", - shouldErr: true, - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - got, err := extractWorkloadNameFromPodName(tc.podName) - - if (err != nil) != tc.shouldErr { - t.Errorf("extractWorkloadNameFromPodName() error = %v, wantErr %v", err, tc.shouldErr) - return - } - - if got != tc.want { - t.Errorf("extractWorkloadNameFromPodName() = %v, want %v", got, tc.want) - } - }) - } -} - -// TestGetWorkloadAndNamespace 
function -func TestGetWorkloadAndNamespace(t *testing.T) { - // Test ReplicaSet case - pod := &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "testPod", - Namespace: "testNamespace", - OwnerReferences: []metav1.OwnerReference{ - { - Kind: "ReplicaSet", - Name: "testDeployment-5d68bc5f49", - }, - }, - }, - } - result := getWorkloadAndNamespace(pod) - if result != "testDeployment@testNamespace" { - t.Errorf("getDeploymentAndNamespace was incorrect, got: %s, want: %s.", result, "testDeployment@testNamespace") - } - - // Test StatefulSet case - pod.ObjectMeta.OwnerReferences[0].Kind = "StatefulSet" - pod.ObjectMeta.OwnerReferences[0].Name = "testStatefulSet" - result = getWorkloadAndNamespace(pod) - if result != "testStatefulSet@testNamespace" { - t.Errorf("getWorkloadAndNamespace was incorrect, got: %s, want: %s.", result, "testStatefulSet@testNamespace") - } - - // Test Other case - pod.ObjectMeta.OwnerReferences[0].Kind = "Other" - pod.ObjectMeta.OwnerReferences[0].Name = "testOther" - result = getWorkloadAndNamespace(pod) - if result != "" { - t.Errorf("getWorkloadAndNamespace was incorrect, got: %s, want: %s.", result, "") - } - - // Test no OwnerReferences case - pod.ObjectMeta.OwnerReferences = nil - result = getWorkloadAndNamespace(pod) - if result != "" { - t.Errorf("getWorkloadAndNamespace was incorrect, got: %s, want: %s.", result, "") - } -} - -func TestServiceToWorkloadMapper_MapServiceToWorkload(t *testing.T) { - logger, _ := zap.NewDevelopment() - - serviceAndNamespaceToSelectors := &sync.Map{} - workloadAndNamespaceToLabels := &sync.Map{} - serviceToWorkload := &sync.Map{} - - serviceAndNamespaceToSelectors.Store("service1@namespace1", mapset.NewSet("label1=value1", "label2=value2")) - workloadAndNamespaceToLabels.Store("deployment1@namespace1", mapset.NewSet("label1=value1", "label2=value2", "label3=value3")) - - mapper := newServiceToWorkloadMapper(serviceAndNamespaceToSelectors, workloadAndNamespaceToLabels, serviceToWorkload, logger, 
mockDeleter) - mapper.mapServiceToWorkload() - - if _, ok := serviceToWorkload.Load("service1@namespace1"); !ok { - t.Errorf("Expected service1@namespace1 to be mapped to a workload, but it was not") - } -} - -func TestServiceToWorkloadMapper_MapServiceToWorkload_NoWorkload(t *testing.T) { - logger, _ := zap.NewDevelopment() - - serviceAndNamespaceToSelectors := &sync.Map{} - workloadAndNamespaceToLabels := &sync.Map{} - serviceToWorkload := &sync.Map{} - - // Add a service with no matching workload - serviceAndNamespace := "service@namespace" - serviceAndNamespaceToSelectors.Store(serviceAndNamespace, mapset.NewSet("label1=value1")) - serviceToWorkload.Store(serviceAndNamespace, "workload@namespace") - - mapper := newServiceToWorkloadMapper(serviceAndNamespaceToSelectors, workloadAndNamespaceToLabels, serviceToWorkload, logger, mockDeleter) - mapper.mapServiceToWorkload() - - // Check that the service was deleted from serviceToWorkload - if _, ok := serviceToWorkload.Load(serviceAndNamespace); ok { - t.Errorf("Service was not deleted from serviceToWorkload") - } -} - -func TestServiceToWorkloadMapper_MapServiceToWorkload_MultipleWorkloads(t *testing.T) { - logger, _ := zap.NewDevelopment() - - serviceAndNamespaceToSelectors := &sync.Map{} - workloadAndNamespaceToLabels := &sync.Map{} - serviceToWorkload := &sync.Map{} - - serviceAndNamespace := "service@namespace" - serviceAndNamespaceToSelectors.Store(serviceAndNamespace, mapset.NewSet("label1=value1", "label2=value2")) - - // Add two workloads with matching labels to the service - workloadAndNamespaceToLabels.Store("workload1@namespace", mapset.NewSet("label1=value1", "label2=value2", "label3=value3")) - workloadAndNamespaceToLabels.Store("workload2@namespace", mapset.NewSet("label1=value1", "label2=value2", "label4=value4")) - - mapper := newServiceToWorkloadMapper(serviceAndNamespaceToSelectors, workloadAndNamespaceToLabels, serviceToWorkload, logger, mockDeleter) - mapper.mapServiceToWorkload() - - // Check 
that the service does not map to any workload - if _, ok := serviceToWorkload.Load(serviceAndNamespace); ok { - t.Errorf("Unexpected mapping of service to multiple workloads") - } -} - -func TestMapServiceToWorkload_StopsWhenSignaled(t *testing.T) { - logger, _ := zap.NewDevelopment() - - serviceAndNamespaceToSelectors := &sync.Map{} - workloadAndNamespaceToLabels := &sync.Map{} - serviceToWorkload := &sync.Map{} - - stopchan := make(chan struct{}) - - // Signal the stopchan to stop after 100 milliseconds - time.AfterFunc(100*time.Millisecond, func() { - close(stopchan) - }) - - mapper := newServiceToWorkloadMapper(serviceAndNamespaceToSelectors, workloadAndNamespaceToLabels, serviceToWorkload, logger, mockDeleter) - - start := time.Now() - mapper.Start(stopchan) - duration := time.Since(start) - - // Check that the function stopped in a reasonable time after the stop signal - if duration > 200*time.Millisecond { - t.Errorf("mapServiceToWorkload did not stop in a reasonable time after the stop signal, duration: %v", duration) - } -} - -func TestOnAddOrUpdateService(t *testing.T) { - // Create a fake service - service := &corev1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Name: "myservice", - Namespace: "mynamespace", - }, - Spec: corev1.ServiceSpec{ - ClusterIP: "1.2.3.4", - Selector: map[string]string{ - "app": "myapp", - }, - }, - } - - // Create the maps - ipToServiceAndNamespace := &sync.Map{} - serviceAndNamespaceToSelectors := &sync.Map{} - - // Call the function - svcWatcher := newServiceWatcherForTesting(ipToServiceAndNamespace, serviceAndNamespaceToSelectors) - svcWatcher.onAddOrUpdateService(service) - - // Check that the maps contain the expected entries - if _, ok := ipToServiceAndNamespace.Load("1.2.3.4"); !ok { - t.Errorf("ipToServiceAndNamespace does not contain the service IP") - } - if _, ok := serviceAndNamespaceToSelectors.Load("myservice@mynamespace"); !ok { - t.Errorf("serviceAndNamespaceToSelectors does not contain the service") - } -} - -func 
TestOnDeleteService(t *testing.T) { - // Create a fake service - service := &corev1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Name: "myservice", - Namespace: "mynamespace", - }, - Spec: corev1.ServiceSpec{ - ClusterIP: "1.2.3.4", - Selector: map[string]string{ - "app": "myapp", - }, - }, - } - - // Create the maps and add the service to them - ipToServiceAndNamespace := &sync.Map{} - ipToServiceAndNamespace.Store("1.2.3.4", "myservice@mynamespace") - serviceAndNamespaceToSelectors := &sync.Map{} - serviceAndNamespaceToSelectors.Store("myservice@mynamespace", mapset.NewSet("app=myapp")) - - // Call the function - svcWatcher := newServiceWatcherForTesting(ipToServiceAndNamespace, serviceAndNamespaceToSelectors) - svcWatcher.onDeleteService(service, mockDeleter) - - // Check that the maps do not contain the service - if _, ok := ipToServiceAndNamespace.Load("1.2.3.4"); ok { - t.Errorf("ipToServiceAndNamespace still contains the service IP") - } - if _, ok := serviceAndNamespaceToSelectors.Load("myservice@mynamespace"); ok { - t.Errorf("serviceAndNamespaceToSelectors still contains the service") - } -} - -func TestOnAddOrUpdatePod(t *testing.T) { - t.Run("pod with both PodIP and HostIP", func(t *testing.T) { - ipToPod := &sync.Map{} - podToWorkloadAndNamespace := &sync.Map{} - workloadAndNamespaceToLabels := &sync.Map{} - workloadPodCount := map[string]int{} - - pod := &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "testPod", - Namespace: "testNamespace", - OwnerReferences: []metav1.OwnerReference{ - { - Kind: "ReplicaSet", - Name: "testDeployment-598b89cd8d", - }, - }, - }, - Status: corev1.PodStatus{ - PodIP: "1.2.3.4", - HostIP: "5.6.7.8", - }, - } - - poWatcher := newPodWatcherForTesting(ipToPod, podToWorkloadAndNamespace, workloadAndNamespaceToLabels, workloadPodCount) - poWatcher.onAddOrUpdatePod(pod, nil) - - // Test the mappings in ipToPod - if podName, _ := ipToPod.Load("1.2.3.4"); podName != "testPod" { - t.Errorf("ipToPod was incorrect, got: %s, 
want: %s.", podName, "testPod") - } - - // Test the mapping in podToWorkloadAndNamespace - if depAndNamespace, _ := podToWorkloadAndNamespace.Load("testPod"); depAndNamespace != "testDeployment@testNamespace" { - t.Errorf("podToWorkloadAndNamespace was incorrect, got: %s, want: %s.", depAndNamespace, "testDeployment@testNamespace") - } - - // Test the count in workloadPodCount - if count := workloadPodCount["testDeployment@testNamespace"]; count != 1 { - t.Errorf("workloadPodCount was incorrect, got: %d, want: %d.", count, 1) - } - }) - - t.Run("pod with only HostIP", func(t *testing.T) { - ipToPod := &sync.Map{} - podToWorkloadAndNamespace := &sync.Map{} - workloadAndNamespaceToLabels := &sync.Map{} - workloadPodCount := map[string]int{} - - pod := &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "testPod", - Namespace: "testNamespace", - OwnerReferences: []metav1.OwnerReference{ - { - Kind: "ReplicaSet", - Name: "testDeployment-7b74958fb8", - }, - }, - }, - Status: corev1.PodStatus{ - HostIP: "5.6.7.8", - }, - Spec: corev1.PodSpec{ - HostNetwork: true, - Containers: []corev1.Container{ - { - Ports: []corev1.ContainerPort{ - { - HostPort: int32(8080), - }, - }, - }, - }, - }, - } - - poWatcher := newPodWatcherForTesting(ipToPod, podToWorkloadAndNamespace, workloadAndNamespaceToLabels, workloadPodCount) - poWatcher.onAddOrUpdatePod(pod, nil) - - // Test the mappings in ipToPod - if podName, _ := ipToPod.Load("5.6.7.8:8080"); podName != "testPod" { - t.Errorf("ipToPod was incorrect, got: %s, want: %s.", podName, "testPod") - } - - // Test the mapping in podToWorkloadAndNamespace - if depAndNamespace, _ := podToWorkloadAndNamespace.Load("testPod"); depAndNamespace != "testDeployment@testNamespace" { - t.Errorf("podToWorkloadAndNamespace was incorrect, got: %s, want: %s.", depAndNamespace, "testDeployment@testNamespace") - } - - // Test the count in workloadPodCount - if count := workloadPodCount["testDeployment@testNamespace"]; count != 1 { - 
t.Errorf("workloadPodCount was incorrect, got: %d, want: %d.", count, 1) - } - }) - - t.Run("pod updated with different set of labels", func(t *testing.T) { - ipToPod := &sync.Map{} - podToWorkloadAndNamespace := &sync.Map{} - workloadAndNamespaceToLabels := &sync.Map{} - - pod := &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "testPod", - Namespace: "testNamespace", - Labels: map[string]string{ - "label1": "value1", - "label2": "value2", - }, - OwnerReferences: []metav1.OwnerReference{ - { - Kind: "ReplicaSet", - Name: "testDeployment-5d68bc5f49", - }, - }, - }, - Status: corev1.PodStatus{ - HostIP: "5.6.7.8", - }, - Spec: corev1.PodSpec{ - HostNetwork: true, - Containers: []corev1.Container{ - { - Ports: []corev1.ContainerPort{ - {HostPort: 8080}, - }, - }, - }, - }, - } - - // add the pod - poWatcher := newPodWatcherForTesting(ipToPod, podToWorkloadAndNamespace, workloadAndNamespaceToLabels, map[string]int{}) - poWatcher.onAddOrUpdatePod(pod, nil) - - // Test the mappings in ipToPod - if podName, ok := ipToPod.Load("5.6.7.8:8080"); !ok && podName != "testPod" { - t.Errorf("ipToPod[%s] was incorrect, got: %s, want: %s.", "5.6.7.8:8080", podName, "testPod") - } - - // Test the mapping in workloadAndNamespaceToLabels - labels, _ := workloadAndNamespaceToLabels.Load("testDeployment@testNamespace") - expectedLabels := []string{"label1=value1", "label2=value2"} - for _, label := range expectedLabels { - if !labels.(mapset.Set[string]).Contains(label) { - t.Errorf("deploymentAndNamespaceToLabels was incorrect, got: %v, want: %s.", labels, label) - } - } - - pod2 := &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "testPod", - Namespace: "testNamespace", - Labels: map[string]string{ - "label1": "value1", - "label2": "value2", - "label3": "value3", - }, - OwnerReferences: []metav1.OwnerReference{ - { - Kind: "ReplicaSet", - Name: "testDeployment-5d68bc5f49", - }, - }, - }, - Status: corev1.PodStatus{ - PodIP: "1.2.3.4", - HostIP: "5.6.7.8", - }, - Spec: 
corev1.PodSpec{ - HostNetwork: true, - Containers: []corev1.Container{ - { - Ports: []corev1.ContainerPort{ - {HostPort: 8080}, - }, - }, - }, - }, - } - - // add the pod - poWatcher.onAddOrUpdatePod(pod2, pod) - - // Test the mappings in ipToPod - if podName, ok := ipToPod.Load("5.6.7.8:8080"); !ok && podName != "testPod" { - t.Errorf("ipToPod[%s] was incorrect, got: %s, want: %s.", "5.6.7.8:8080", podName, "testPod") - } - - if podName, ok := ipToPod.Load("1.2.3.4"); !ok && podName != "testPod" { - t.Errorf("ipToPod[%s] was incorrect, got: %s, want: %s.", "1.2.3.4", podName, "testPod") - } - // Test the mapping in workloadAndNamespaceToLabels - labels, _ = workloadAndNamespaceToLabels.Load("testDeployment@testNamespace") - expectedLabels = []string{"label1=value1", "label2=value2", "label3=value3"} - for _, label := range expectedLabels { - if !labels.(mapset.Set[string]).Contains(label) { - t.Errorf("workloadAndNamespaceToLabels was incorrect, got: %v, want: %s.", labels, label) - } - } - }) -} - -func TestOnDeletePod(t *testing.T) { - t.Run("pod with both PodIP and HostIP", func(t *testing.T) { - ipToPod := &sync.Map{} - podToWorkloadAndNamespace := &sync.Map{} - workloadAndNamespaceToLabels := &sync.Map{} - workloadPodCount := map[string]int{} - - pod := &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "testPod", - Namespace: "testNamespace", - OwnerReferences: []metav1.OwnerReference{ - { - Kind: "ReplicaSet", - Name: "testDeployment-xyz", - }, - }, - }, - Status: corev1.PodStatus{ - PodIP: "1.2.3.4", - HostIP: "5.6.7.8", - }, - } - - // Assume the pod has already been added - ipToPod.Store(pod.Status.PodIP, pod.Name) - ipToPod.Store(pod.Status.HostIP, pod.Name) - podToWorkloadAndNamespace.Store(pod.Name, "testDeployment@testNamespace") - workloadAndNamespaceToLabels.Store("testDeployment@testNamespace", "testLabels") - workloadPodCount["testDeployment@testNamespace"] = 1 - - poWatcher := newPodWatcherForTesting(ipToPod, podToWorkloadAndNamespace, 
workloadAndNamespaceToLabels, workloadPodCount) - poWatcher.onDeletePod(pod) - - // Test if the entries in ipToPod and podToWorkloadAndNamespace have been deleted - if _, ok := ipToPod.Load("1.2.3.4"); ok { - t.Errorf("ipToPod deletion was incorrect, key: %s still exists", "1.2.3.4") - } - - if _, ok := podToWorkloadAndNamespace.Load("testPod"); ok { - t.Errorf("podToWorkloadAndNamespace deletion was incorrect, key: %s still exists", "testPod") - } - - // Test if the count in workloadPodCount has been decremented and the entry in workloadAndNamespaceToLabels has been deleted - if count := workloadPodCount["testDeployment@testNamespace"]; count != 0 { - t.Errorf("workloadPodCount was incorrect, got: %d, want: %d.", count, 0) - } - - if _, ok := workloadAndNamespaceToLabels.Load("testDeployment@testNamespace"); ok { - t.Errorf("workloadAndNamespaceToLabels deletion was incorrect, key: %s still exists", "testDeployment@testNamespace") - } - }) - - t.Run("pod with only HostIP and some network ports", func(t *testing.T) { - ipToPod := &sync.Map{} - podToWorkloadAndNamespace := &sync.Map{} - workloadAndNamespaceToLabels := &sync.Map{} - workloadPodCount := map[string]int{} - - pod := &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "testPod", - Namespace: "testNamespace", - OwnerReferences: []metav1.OwnerReference{ - { - Kind: "ReplicaSet", - Name: "testDeployment-xyz", - }, - }, - }, - Status: corev1.PodStatus{ - HostIP: "5.6.7.8", - }, - Spec: corev1.PodSpec{ - HostNetwork: true, - Containers: []corev1.Container{ - { - Ports: []corev1.ContainerPort{ - { - HostPort: int32(8080), - }, - }, - }, - }, - }, - } - - // Assume the pod has already been added - ipToPod.Store(pod.Status.HostIP, pod.Name) - ipToPod.Store(pod.Status.HostIP+":8080", pod.Name) - podToWorkloadAndNamespace.Store(pod.Name, "testDeployment@testNamespace") - workloadAndNamespaceToLabels.Store("testDeployment@testNamespace", "testLabels") - workloadPodCount["testDeployment@testNamespace"] = 1 - - 
poWatcher := newPodWatcherForTesting(ipToPod, podToWorkloadAndNamespace, workloadAndNamespaceToLabels, workloadPodCount) - poWatcher.onDeletePod(pod) - - // Test if the entries in ipToPod and podToWorkloadAndNamespace have been deleted - if _, ok := ipToPod.Load("5.6.7.8:8080"); ok { - t.Errorf("ipToPod deletion was incorrect, key: %s still exists", "5.6.7.8:8080") - } - - if _, ok := podToWorkloadAndNamespace.Load("testPod"); ok { - t.Errorf("podToDeploymentAndNamespace deletion was incorrect, key: %s still exists", "testPod") - } - - // Test if the count in workloadPodCount has been decremented and the entry in workloadAndNamespaceToLabels has been deleted - if count := workloadPodCount["testDeployment@testNamespace"]; count != 0 { - t.Errorf("workloadPodCount was incorrect, got: %d, want: %d.", count, 0) - } - - if _, ok := workloadAndNamespaceToLabels.Load("testDeployment@testNamespace"); ok { - t.Errorf("workloadAndNamespaceToLabels deletion was incorrect, key: %s still exists", "testDeployment@testNamespace") - } - }) -} - func TestEksResolver(t *testing.T) { logger, _ := zap.NewProduction() ctx := context.Background() @@ -717,6 +42,7 @@ func TestEksResolver(t *testing.T) { podToWorkloadAndNamespace: &sync.Map{}, ipToServiceAndNamespace: &sync.Map{}, serviceToWorkload: &sync.Map{}, + useListPod: true, } ip := "1.2.3.4" @@ -772,7 +98,7 @@ func TestEksResolver(t *testing.T) { } }) - t.Run("Test Process", func(t *testing.T) { + t.Run("Test Process when useListPod is true", func(t *testing.T) { // helper function to get string values from the attributes getStrAttr := func(attributes pcommon.Map, key string, t *testing.T) string { if value, ok := attributes.Get(key); ok { @@ -792,6 +118,7 @@ func TestEksResolver(t *testing.T) { podToWorkloadAndNamespace: &sync.Map{}, ipToServiceAndNamespace: &sync.Map{}, serviceToWorkload: &sync.Map{}, + useListPod: true, } // Test case 1: "aws.remote.service" contains IP:Port @@ -832,6 +159,67 @@ func TestEksResolver(t 
*testing.T) { assert.NoError(t, err) assert.Equal(t, "192.168.1.2", getStrAttr(attributes, attr.AWSRemoteService, t)) }) + + t.Run("Test Process when useListPod is false", func(t *testing.T) { + // helper function to get string values from the attributes + getStrAttr := func(attributes pcommon.Map, key string, t *testing.T) string { + if value, ok := attributes.Get(key); ok { + return value.AsString() + } + t.Errorf("Failed to get value for key: %s", key) + return "" + } + + logger, _ := zap.NewProduction() + resolver := &kubernetesResolver{ + logger: logger, + clusterName: "test", + platformCode: config.PlatformEKS, + ipToWorkloadAndNamespace: &sync.Map{}, + ipToServiceAndNamespace: &sync.Map{}, + serviceToWorkload: &sync.Map{}, + useListPod: false, + } + + // Test case 1: "aws.remote.service" contains IP:Port + attributes := pcommon.NewMap() + attributes.PutStr(attr.AWSRemoteService, "192.0.2.1:8080") + resourceAttributes := pcommon.NewMap() + resolver.ipToWorkloadAndNamespace.Store("192.0.2.1:8080", "test-deployment@test-namespace") + err := resolver.Process(attributes, resourceAttributes) + assert.NoError(t, err) + assert.Equal(t, "test-deployment", getStrAttr(attributes, attr.AWSRemoteService, t)) + assert.Equal(t, "eks:test/test-namespace", getStrAttr(attributes, attr.AWSRemoteEnvironment, t)) + + // Test case 2: "aws.remote.service" contains only IP + attributes = pcommon.NewMap() + attributes.PutStr(attr.AWSRemoteService, "192.0.2.2") + resourceAttributes = pcommon.NewMap() + resolver.ipToWorkloadAndNamespace.Store("192.0.2.2", "test-deployment-2@test-namespace-2") + err = resolver.Process(attributes, resourceAttributes) + assert.NoError(t, err) + assert.Equal(t, "test-deployment-2", getStrAttr(attributes, attr.AWSRemoteService, t)) + assert.Equal(t, "eks:test/test-namespace-2", getStrAttr(attributes, attr.AWSRemoteEnvironment, t)) + + // Test case 3: "aws.remote.service" contains non-ip string + attributes = pcommon.NewMap() + 
attributes.PutStr(attr.AWSRemoteService, "not-an-ip") + resourceAttributes = pcommon.NewMap() + err = resolver.Process(attributes, resourceAttributes) + assert.NoError(t, err) + assert.Equal(t, "not-an-ip", getStrAttr(attributes, attr.AWSRemoteService, t)) + + // Test case 4: Process with cluster ip + attributes = pcommon.NewMap() + attributes.PutStr(attr.AWSRemoteService, "192.168.1.2") + resourceAttributes = pcommon.NewMap() + resolver.ipToServiceAndNamespace.Store("192.168.1.2", "service1@test-namespace-3") + resolver.serviceToWorkload.Store("service1@test-namespace-3", "service1-deployment@test-namespace-3") + err = resolver.Process(attributes, resourceAttributes) + assert.NoError(t, err) + assert.Equal(t, "service1-deployment", getStrAttr(attributes, attr.AWSRemoteService, t)) + assert.Equal(t, "eks:test/test-namespace-3", getStrAttr(attributes, attr.AWSRemoteEnvironment, t)) + }) } func TestK8sResourceAttributesResolverOnEKS(t *testing.T) { @@ -841,10 +229,9 @@ func TestK8sResourceAttributesResolverOnEKS(t *testing.T) { getStrAttr := func(attributes pcommon.Map, key string, t *testing.T) string { if value, ok := attributes.Get(key); ok { return value.AsString() - } else { - t.Errorf("Failed to get value for key: %s", key) - return "" } + t.Errorf("Failed to get value for key: %s", key) + return "" } resolver := newKubernetesResourceAttributesResolver(config.PlatformEKS, "test-cluster") @@ -1064,230 +451,3 @@ func TestK8sResourceAttributesResolverOnK8SOnPrem(t *testing.T) { }) } } - -func TestExtractIPPort(t *testing.T) { - // Test valid IP:Port - ip, port, ok := extractIPPort("192.0.2.0:8080") - assert.Equal(t, "192.0.2.0", ip) - assert.Equal(t, "8080", port) - assert.True(t, ok) - - // Test invalid IP:Port - ip, port, ok = extractIPPort("192.0.2:8080") - assert.Equal(t, "", ip) - assert.Equal(t, "", port) - assert.False(t, ok) - - // Test IP only - ip, port, ok = extractIPPort("192.0.2.0") - assert.Equal(t, "", ip) - assert.Equal(t, "", port) - 
assert.False(t, ok) -} - -func TestFilterPodIPFields(t *testing.T) { - meta := metav1.ObjectMeta{ - Name: "test", - Namespace: "default", - Labels: map[string]string{ - "name": "app", - }, - } - pod := &corev1.Pod{ - ObjectMeta: meta, - Spec: corev1.PodSpec{ - HostNetwork: true, - Containers: []corev1.Container{ - {}, - }, - }, - Status: corev1.PodStatus{}, - } - newPod, err := minimizePod(pod) - assert.Nil(t, err) - assert.Empty(t, getHostNetworkPorts(newPod.(*corev1.Pod))) - - podStatus := corev1.PodStatus{ - PodIP: "192.168.0.12", - HostIPs: []corev1.HostIP{ - { - IP: "132.168.3.12", - }, - }, - } - pod = &corev1.Pod{ - ObjectMeta: meta, - Spec: corev1.PodSpec{ - HostNetwork: true, - Containers: []corev1.Container{ - { - Ports: []corev1.ContainerPort{ - {HostPort: 8080}, - }, - }, - }, - }, - Status: podStatus, - } - newPod, err = minimizePod(pod) - assert.Nil(t, err) - assert.Equal(t, "app", newPod.(*corev1.Pod).Labels["name"]) - assert.Equal(t, []string{"8080"}, getHostNetworkPorts(newPod.(*corev1.Pod))) - assert.Equal(t, podStatus, newPod.(*corev1.Pod).Status) - - pod = &corev1.Pod{ - Spec: corev1.PodSpec{ - HostNetwork: true, - Containers: []corev1.Container{ - { - Ports: []corev1.ContainerPort{ - {HostPort: 8080}, - {HostPort: 8081}, - }, - }, - }, - }, - Status: podStatus, - } - newPod, err = minimizePod(pod) - assert.Nil(t, err) - assert.Equal(t, []string{"8080", "8081"}, getHostNetworkPorts(newPod.(*corev1.Pod))) - assert.Equal(t, podStatus, newPod.(*corev1.Pod).Status) -} - -func TestFilterServiceIPFields(t *testing.T) { - meta := metav1.ObjectMeta{ - Name: "test", - Namespace: "default", - } - svc := &corev1.Service{ - ObjectMeta: meta, - Spec: corev1.ServiceSpec{ - Selector: map[string]string{ - "name": "app", - }, - ClusterIP: "10.0.12.4", - }, - } - newSvc, err := minimizeService(svc) - assert.Nil(t, err) - assert.Equal(t, "10.0.12.4", newSvc.(*corev1.Service).Spec.ClusterIP) - assert.Equal(t, "app", newSvc.(*corev1.Service).Spec.Selector["name"]) 
-} - -func TestHandlePodUpdate(t *testing.T) { - testCases := []struct { - name string - oldPod *corev1.Pod - newPod *corev1.Pod - initialIPToPod map[string]string - expectedIPToPod map[string]string - }{ - { - name: "Old and New Pod Do Not Use Host Network, Different Pod IPs", - oldPod: &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "mypod", - }, - Status: corev1.PodStatus{ - PodIP: "10.0.0.3", - }, - Spec: corev1.PodSpec{ - HostNetwork: false, - }, - }, - newPod: &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "mypod", - }, - Status: corev1.PodStatus{ - PodIP: "10.0.0.4", - }, - Spec: corev1.PodSpec{ - HostNetwork: false, - }, - }, - initialIPToPod: map[string]string{ - "10.0.0.3": "mypod", - }, - expectedIPToPod: map[string]string{ - "10.0.0.4": "mypod", - }, - }, - { - name: "Old Pod Has Empty PodIP, New Pod Does Not Use Host Network, Non-Empty Pod IP", - oldPod: &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "mypod", - }, - Status: corev1.PodStatus{ - PodIP: "", - }, - Spec: corev1.PodSpec{ - HostNetwork: false, - }, - }, - newPod: &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "mypod", - }, - Status: corev1.PodStatus{ - PodIP: "10.0.0.5", - }, - Spec: corev1.PodSpec{ - HostNetwork: false, - }, - }, - initialIPToPod: map[string]string{}, - expectedIPToPod: map[string]string{ - "10.0.0.5": "mypod", - }, - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - ipToPod := &sync.Map{} - // Initialize ipToPod map - for k, v := range tc.initialIPToPod { - ipToPod.Store(k, v) - } - poWatcher := newPodWatcherForTesting(ipToPod, nil, nil, map[string]int{}) - poWatcher.handlePodUpdate(tc.newPod, tc.oldPod) - - // Now validate that ipToPod map has been updated correctly - for key, expectedValue := range tc.expectedIPToPod { - val, ok := ipToPod.Load(key) - if !ok || val.(string) != expectedValue { - t.Errorf("Expected record for %v to be %v, got %v", key, expectedValue, val) - } - } - // Validate that old keys have 
been removed - for key := range tc.initialIPToPod { - if _, ok := tc.expectedIPToPod[key]; !ok { - if _, ok := ipToPod.Load(key); ok { - t.Errorf("Expected record for %v to be removed, but it was not", key) - } - } - } - }) - } -} - -func newServiceWatcherForTesting(ipToServiceAndNamespace, serviceAndNamespaceToSelectors *sync.Map) *serviceWatcher { - logger, _ := zap.NewDevelopment() - return &serviceWatcher{ipToServiceAndNamespace, serviceAndNamespaceToSelectors, logger, nil, nil} -} - -func newPodWatcherForTesting(ipToPod, podToWorkloadAndNamespace, workloadAndNamespaceToLabels *sync.Map, workloadPodCount map[string]int) *podWatcher { - logger, _ := zap.NewDevelopment() - return &podWatcher{ - ipToPod: ipToPod, - podToWorkloadAndNamespace: podToWorkloadAndNamespace, - workloadAndNamespaceToLabels: workloadAndNamespaceToLabels, - workloadPodCount: workloadPodCount, - logger: logger, - informer: nil, - deleter: mockDeleter, - } -} diff --git a/plugins/processors/awsapplicationsignals/internal/resolver/kubernetes_utils.go b/plugins/processors/awsapplicationsignals/internal/resolver/kubernetes_utils.go index 32befd4c79..e7cb48616b 100644 --- a/plugins/processors/awsapplicationsignals/internal/resolver/kubernetes_utils.go +++ b/plugins/processors/awsapplicationsignals/internal/resolver/kubernetes_utils.go @@ -10,6 +10,8 @@ import ( "regexp" "strconv" "strings" + "sync" + "time" corev1 "k8s.io/api/core/v1" ) @@ -30,10 +32,17 @@ var ( // Alphanumeric Mapping: https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/util/rand/rand.go#L121) replicaSetWithDeploymentNamePattern = fmt.Sprintf(`^(.+)-[%s]{6,10}$`, kubeAllowedStringAlphaNums) deploymentFromReplicaSetPattern = regexp.MustCompile(replicaSetWithDeploymentNamePattern) - // if a pod is launched directly by a replicaSet (with a given name by users), its name has the following pattern: + // if a pod is launched directly by a replicaSet or daemonSet (with a given name by users), its 
name has the following pattern:
 	// Pod name = ReplicaSet name + 5 alphanumeric characters long string
-	podWithReplicaSetNamePattern = fmt.Sprintf(`^(.+)-[%s]{5}$`, kubeAllowedStringAlphaNums)
-	replicaSetFromPodPattern     = regexp.MustCompile(podWithReplicaSetNamePattern)
+	// some code reference for daemon set:
+	// 1. daemonset uses the strategy to create pods: https://github.com/kubernetes/kubernetes/blob/82e3a671e79d1740ab9a3b3fac8a3bb7d065a6fb/pkg/registry/apps/daemonset/strategy.go#L46
+	// 2. the strategy uses SimpleNameGenerator to create names: https://github.com/kubernetes/kubernetes/blob/82e3a671e79d1740ab9a3b3fac8a3bb7d065a6fb/staging/src/k8s.io/apiserver/pkg/storage/names/generate.go#L53
+	// 3. the random name generator only use non vowels char + numbers: https://github.com/kubernetes/kubernetes/blob/82e3a671e79d1740ab9a3b3fac8a3bb7d065a6fb/staging/src/k8s.io/apimachinery/pkg/util/rand/rand.go#L83
+	podWithSuffixPattern                = fmt.Sprintf(`^(.+)-[%s]{5}$`, kubeAllowedStringAlphaNums)
+	replicaSetOrDaemonSetFromPodPattern = regexp.MustCompile(podWithSuffixPattern)
+
+	// Pattern for StatefulSet: <statefulset-name>-<ordinal>
+	reStatefulSet = regexp.MustCompile(`^(.+)-(\d+)$`)
 )
 
 func attachNamespace(resourceName, namespace string) string {
@@ -64,7 +73,7 @@ func extractWorkloadNameFromRS(replicaSetName string) (string, error) {
 }
 
 func extractWorkloadNameFromPodName(podName string) (string, error) {
-	match := replicaSetFromPodPattern.FindStringSubmatch(podName)
+	match := replicaSetOrDaemonSetFromPodPattern.FindStringSubmatch(podName)
 	if match != nil {
 		return match[1], nil
 	}
@@ -99,6 +108,51 @@ func getWorkloadAndNamespace(pod *corev1.Pod) string {
 	return workloadAndNamespace
 }
 
+// inferWorkloadName tries to parse the given podName to find the top-level workload name.
+//
+// 1) If it matches <statefulset-name>-<ordinal>, return <statefulset-name>.
+// 2) If it matches <parent-name>-<5charSuffix>:
+//   - If <parent-name> is <deployment-name>-<6–10charSuffix>, return <deployment-name>.
+//   - Else return <parent-name> (likely a bare ReplicaSet or DaemonSet).
+// +// 3) If no pattern matches, return the original podName. +// +// Caveat: You can't reliably distinguish DaemonSet vs. bare ReplicaSet by name alone. +// In some edge cases when the deployment name is longer than 47 characters, The regex pattern is +// not reliable. See reference: +// - https://pauldally.medium.com/why-you-try-to-keep-your-deployment-names-to-47-characters-or-less-1f93a848d34c +// - https://github.com/kubernetes/kubernetes/issues/116447#issuecomment-1530652258 +// +// For that, we fall back to use service name as last defense. +func inferWorkloadName(podName, fallbackServiceName string) string { + // 1) Check if it's a StatefulSet pod: - + if matches := reStatefulSet.FindStringSubmatch(podName); matches != nil { + return matches[1] // e.g. "mysql-0" => "mysql" + } + + // 2) Check if it's a Pod with a 5-char random suffix: -<5Chars> + if matches := replicaSetOrDaemonSetFromPodPattern.FindStringSubmatch(podName); matches != nil { + parentName := matches[1] + + // If parentName ends with 6–10 random chars, that parent is a Deployment-based ReplicaSet. + // So the top-level workload is the first part before that suffix. + if rsMatches := deploymentFromReplicaSetPattern.FindStringSubmatch(parentName); rsMatches != nil { + return rsMatches[1] // e.g. "nginx-a2b3c4" => "nginx" + } + + // Otherwise, it's a "bare" ReplicaSet or DaemonSet—just return parentName. 
+ return parentName + } + + // 3) If none of the patterns matched, return the service name as fallback + if fallbackServiceName != "" { + return fallbackServiceName + } + + // 4) Finally return the full pod name (I don't think this will happen) + return podName +} + const IP_PORT_PATTERN = `^(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}):(\d+)$` var ipPortRegex = regexp.MustCompile(IP_PORT_PATTERN) @@ -140,3 +194,38 @@ func isIP(ipString string) bool { ip := net.ParseIP(ipString) return ip != nil } + +// a safe channel which can be closed multiple times +type safeChannel struct { + sync.Mutex + + ch chan struct{} + closed bool +} + +func (sc *safeChannel) Close() { + sc.Lock() + defer sc.Unlock() + + if !sc.closed { + close(sc.ch) + sc.closed = true + } +} + +// Deleter represents a type that can delete a key from a map after a certain delay. +type Deleter interface { + DeleteWithDelay(m *sync.Map, key interface{}) +} + +// TimedDeleter deletes a key after a specified delay. +type TimedDeleter struct { + Delay time.Duration +} + +func (td *TimedDeleter) DeleteWithDelay(m *sync.Map, key interface{}) { + go func() { + time.Sleep(td.Delay) + m.Delete(key) + }() +} diff --git a/plugins/processors/awsapplicationsignals/internal/resolver/kubernetes_utils_test.go b/plugins/processors/awsapplicationsignals/internal/resolver/kubernetes_utils_test.go new file mode 100644 index 0000000000..3c861acb5c --- /dev/null +++ b/plugins/processors/awsapplicationsignals/internal/resolver/kubernetes_utils_test.go @@ -0,0 +1,258 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: MIT + +package resolver + +import ( + "testing" + + "github.com/stretchr/testify/assert" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// TestAttachNamespace function +func TestAttachNamespace(t *testing.T) { + result := attachNamespace("testResource", "testNamespace") + if result != "testResource@testNamespace" { + t.Errorf("attachNamespace was incorrect, got: %s, want: %s.", result, "testResource@testNamespace") + } +} + +// TestGetServiceAndNamespace function +func TestGetServiceAndNamespace(t *testing.T) { + service := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "testService", + Namespace: "testNamespace", + }, + } + result := getServiceAndNamespace(service) + if result != "testService@testNamespace" { + t.Errorf("getServiceAndNamespace was incorrect, got: %s, want: %s.", result, "testService@testNamespace") + } +} + +// TestExtractResourceAndNamespace function +func TestExtractResourceAndNamespace(t *testing.T) { + // Test normal case + name, namespace := extractResourceAndNamespace("testService@testNamespace") + if name != "testService" || namespace != "testNamespace" { + t.Errorf("extractResourceAndNamespace was incorrect, got: %s and %s, want: %s and %s.", name, namespace, "testService", "testNamespace") + } + + // Test invalid case + name, namespace = extractResourceAndNamespace("invalid") + if name != "" || namespace != "" { + t.Errorf("extractResourceAndNamespace was incorrect, got: %s and %s, want: %s and %s.", name, namespace, "", "") + } +} + +func TestExtractWorkloadNameFromRS(t *testing.T) { + testCases := []struct { + name string + replicaSetName string + want string + shouldErr bool + }{ + { + name: "Valid ReplicaSet Name", + replicaSetName: "my-deployment-5859ffc7ff", + want: "my-deployment", + shouldErr: false, + }, + { + name: "Invalid ReplicaSet Name - No Hyphen", + replicaSetName: "mydeployment5859ffc7ff", + want: "", + shouldErr: true, + }, + { + name: "Invalid 
ReplicaSet Name - Less Than 10 Suffix Characters", + replicaSetName: "my-deployment-bc2", + want: "", + shouldErr: true, + }, + { + name: "Invalid ReplicaSet Name - More Than 10 Suffix Characters", + replicaSetName: "my-deployment-5859ffc7ffx", + want: "", + shouldErr: true, + }, + { + name: "Invalid ReplicaSet Name - Invalid Characters in Suffix", + replicaSetName: "my-deployment-aeiou12345", + want: "", + shouldErr: true, + }, + { + name: "Invalid ReplicaSet Name - Empty String", + replicaSetName: "", + want: "", + shouldErr: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + got, err := extractWorkloadNameFromRS(tc.replicaSetName) + + if (err != nil) != tc.shouldErr { + t.Errorf("extractWorkloadNameFromRS() error = %v, wantErr %v", err, tc.shouldErr) + return + } + + if got != tc.want { + t.Errorf("extractWorkloadNameFromRS() = %v, want %v", got, tc.want) + } + }) + } +} + +func TestExtractWorkloadNameFromPodName(t *testing.T) { + testCases := []struct { + name string + podName string + want string + shouldErr bool + }{ + { + name: "Valid Pod Name", + podName: "my-replicaset-bc24f", + want: "my-replicaset", + shouldErr: false, + }, + { + name: "Invalid Pod Name - No Hyphen", + podName: "myreplicasetbc24f", + want: "", + shouldErr: true, + }, + { + name: "Invalid Pod Name - Less Than 5 Suffix Characters", + podName: "my-replicaset-bc2", + want: "", + shouldErr: true, + }, + { + name: "Invalid Pod Name - More Than 5 Suffix Characters", + podName: "my-replicaset-bc24f5", + want: "", + shouldErr: true, + }, + { + name: "Invalid Pod Name - Empty String", + podName: "", + want: "", + shouldErr: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + got, err := extractWorkloadNameFromPodName(tc.podName) + + if (err != nil) != tc.shouldErr { + t.Errorf("extractWorkloadNameFromPodName() error = %v, wantErr %v", err, tc.shouldErr) + return + } + + if got != tc.want { + 
t.Errorf("extractWorkloadNameFromPodName() = %v, want %v", got, tc.want) + } + }) + } +} + +// TestGetWorkloadAndNamespace function +func TestGetWorkloadAndNamespace(t *testing.T) { + // Test ReplicaSet case + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "testPod", + Namespace: "testNamespace", + OwnerReferences: []metav1.OwnerReference{ + { + Kind: "ReplicaSet", + Name: "testDeployment-5d68bc5f49", + }, + }, + }, + } + result := getWorkloadAndNamespace(pod) + if result != "testDeployment@testNamespace" { + t.Errorf("getDeploymentAndNamespace was incorrect, got: %s, want: %s.", result, "testDeployment@testNamespace") + } + + // Test StatefulSet case + pod.ObjectMeta.OwnerReferences[0].Kind = "StatefulSet" + pod.ObjectMeta.OwnerReferences[0].Name = "testStatefulSet" + result = getWorkloadAndNamespace(pod) + if result != "testStatefulSet@testNamespace" { + t.Errorf("getWorkloadAndNamespace was incorrect, got: %s, want: %s.", result, "testStatefulSet@testNamespace") + } + + // Test Other case + pod.ObjectMeta.OwnerReferences[0].Kind = "Other" + pod.ObjectMeta.OwnerReferences[0].Name = "testOther" + result = getWorkloadAndNamespace(pod) + if result != "" { + t.Errorf("getWorkloadAndNamespace was incorrect, got: %s, want: %s.", result, "") + } + + // Test no OwnerReferences case + pod.ObjectMeta.OwnerReferences = nil + result = getWorkloadAndNamespace(pod) + if result != "" { + t.Errorf("getWorkloadAndNamespace was incorrect, got: %s, want: %s.", result, "") + } +} + +func TestExtractIPPort(t *testing.T) { + // Test valid IP:Port + ip, port, ok := extractIPPort("192.0.2.0:8080") + assert.Equal(t, "192.0.2.0", ip) + assert.Equal(t, "8080", port) + assert.True(t, ok) + + // Test invalid IP:Port + ip, port, ok = extractIPPort("192.0.2:8080") + assert.Equal(t, "", ip) + assert.Equal(t, "", port) + assert.False(t, ok) + + // Test IP only + ip, port, ok = extractIPPort("192.0.2.0") + assert.Equal(t, "", ip) + assert.Equal(t, "", port) + assert.False(t, ok) +} 
+ +func TestInferWorkloadName(t *testing.T) { + testCases := []struct { + name string + input string + service string + expected string + }{ + {"StatefulSet single digit", "mysql-0", "service", "mysql"}, + {"StatefulSet multiple digits", "mysql-10", "service", "mysql"}, + {"ReplicaSet bare pod", "nginx-b2dfg", "service", "nginx"}, + {"Deployment-based ReplicaSet pod", "nginx-76977669dc-lwx64", "service", "nginx"}, + {"Non matching", "simplepod", "service", "service"}, + {"ReplicaSet name with number suffix", "nginx-123-d9stt", "service", "nginx-123"}, + {"Some confusing case with a replicaSet/daemonset name matching the pattern", "nginx-245678-d9stt", "nginx-service", "nginx"}, + // when the regex pattern doesn't matter, we just fall back to service name to handle all the edge cases + {"Some confusing case with a replicaSet/daemonset name not matching the pattern", "nginx-123456-d9stt", "nginx-service", "nginx-123456"}, + {"Empty", "", "service", "service"}, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + got := inferWorkloadName(tc.input, tc.service) + if got != tc.expected { + t.Errorf("inferWorkloadName(%q) = %q; expected %q", tc.input, got, tc.expected) + } + }) + } +} diff --git a/plugins/processors/awsapplicationsignals/internal/resolver/podwatcher.go b/plugins/processors/awsapplicationsignals/internal/resolver/podwatcher.go new file mode 100644 index 0000000000..1187f1f8bb --- /dev/null +++ b/plugins/processors/awsapplicationsignals/internal/resolver/podwatcher.go @@ -0,0 +1,205 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: MIT + +package resolver + +import ( + "sync" + + mapset "github.com/deckarep/golang-set/v2" + "go.uber.org/zap" + corev1 "k8s.io/api/core/v1" + "k8s.io/client-go/informers" + "k8s.io/client-go/tools/cache" +) + +func (p *podWatcher) removeHostNetworkRecords(pod *corev1.Pod) { + for _, port := range getHostNetworkPorts(pod) { + p.deleter.DeleteWithDelay(p.ipToPod, pod.Status.HostIP+":"+port) + } +} + +func (p *podWatcher) handlePodAdd(pod *corev1.Pod) { + if pod.Spec.HostNetwork && pod.Status.HostIP != "" { + for _, port := range getHostNetworkPorts(pod) { + p.ipToPod.Store(pod.Status.HostIP+":"+port, pod.Name) + } + } + if pod.Status.PodIP != "" { + p.ipToPod.Store(pod.Status.PodIP, pod.Name) + } +} + +func (p *podWatcher) handlePodUpdate(newPod *corev1.Pod, oldPod *corev1.Pod) { + // HostNetwork is an immutable field + if newPod.Spec.HostNetwork && oldPod.Status.HostIP != newPod.Status.HostIP { + if oldPod.Status.HostIP != "" { + p.logger.Debug("deleting host ip from cache", zap.String("hostNetwork", oldPod.Status.HostIP)) + p.removeHostNetworkRecords(oldPod) + } + if newPod.Status.HostIP != "" { + for _, port := range getHostNetworkPorts(newPod) { + p.ipToPod.Store(newPod.Status.HostIP+":"+port, newPod.Name) + } + } + } + if oldPod.Status.PodIP != newPod.Status.PodIP { + if oldPod.Status.PodIP != "" { + p.logger.Debug("deleting pod ip from cache", zap.String("podNetwork", oldPod.Status.PodIP)) + p.deleter.DeleteWithDelay(p.ipToPod, oldPod.Status.PodIP) + } + if newPod.Status.PodIP != "" { + p.ipToPod.Store(newPod.Status.PodIP, newPod.Name) + } + } +} + +func (p *podWatcher) onAddOrUpdatePod(pod, oldPod *corev1.Pod) { + if oldPod == nil { + p.handlePodAdd(pod) + } else { + p.handlePodUpdate(pod, oldPod) + } + + workloadAndNamespace := getWorkloadAndNamespace(pod) + + if workloadAndNamespace != "" { + p.podToWorkloadAndNamespace.Store(pod.Name, workloadAndNamespace) + podLabels := mapset.NewSet[string]() + for key, value := range 
pod.ObjectMeta.Labels { + podLabels.Add(key + "=" + value) + } + if podLabels.Cardinality() > 0 { + p.workloadAndNamespaceToLabels.Store(workloadAndNamespace, podLabels) + } + if oldPod == nil { + p.workloadPodCount[workloadAndNamespace]++ + p.logger.Debug("Added pod", zap.String("pod", pod.Name), zap.String("workload", workloadAndNamespace), zap.Int("count", p.workloadPodCount[workloadAndNamespace])) + } + } +} + +func (p *podWatcher) onDeletePod(obj interface{}) { + pod := obj.(*corev1.Pod) + if pod.Spec.HostNetwork && pod.Status.HostIP != "" { + p.logger.Debug("deleting host ip from cache", zap.String("hostNetwork", pod.Status.HostIP)) + p.removeHostNetworkRecords(pod) + } + if pod.Status.PodIP != "" { + p.logger.Debug("deleting pod ip from cache", zap.String("podNetwork", pod.Status.PodIP)) + p.deleter.DeleteWithDelay(p.ipToPod, pod.Status.PodIP) + } + + if workloadKey, ok := p.podToWorkloadAndNamespace.Load(pod.Name); ok { + workloadAndNamespace := workloadKey.(string) + p.workloadPodCount[workloadAndNamespace]-- + p.logger.Debug("decrementing pod count", zap.String("workload", workloadAndNamespace), zap.Int("podCount", p.workloadPodCount[workloadAndNamespace])) + if p.workloadPodCount[workloadAndNamespace] == 0 { + p.deleter.DeleteWithDelay(p.workloadAndNamespaceToLabels, workloadAndNamespace) + } + } else { + p.logger.Error("failed to load pod workloadKey", zap.String("pod", pod.Name)) + } + p.deleter.DeleteWithDelay(p.podToWorkloadAndNamespace, pod.Name) +} + +type podWatcher struct { + ipToPod *sync.Map + podToWorkloadAndNamespace *sync.Map + workloadAndNamespaceToLabels *sync.Map + workloadPodCount map[string]int + logger *zap.Logger + informer cache.SharedIndexInformer + deleter Deleter +} + +func newPodWatcher(logger *zap.Logger, sharedInformerFactory informers.SharedInformerFactory, deleter Deleter) *podWatcher { + podInformer := sharedInformerFactory.Core().V1().Pods().Informer() + err := podInformer.SetTransform(minimizePod) + if err != nil { + 
logger.Error("failed to minimize Pod objects", zap.Error(err)) + } + + return &podWatcher{ + ipToPod: &sync.Map{}, + podToWorkloadAndNamespace: &sync.Map{}, + workloadAndNamespaceToLabels: &sync.Map{}, + workloadPodCount: make(map[string]int), + logger: logger, + informer: podInformer, + deleter: deleter, + } +} + +func (p *podWatcher) run(stopCh chan struct{}) { + p.informer.AddEventHandler(cache.ResourceEventHandlerFuncs{ + AddFunc: func(obj interface{}) { + pod := obj.(*corev1.Pod) + p.logger.Debug("list and watch for pod: ADD " + pod.Name) + p.onAddOrUpdatePod(pod, nil) + }, + UpdateFunc: func(oldObj, newObj interface{}) { + pod := newObj.(*corev1.Pod) + oldPod := oldObj.(*corev1.Pod) + p.logger.Debug("list and watch for pods: UPDATE " + pod.Name) + p.onAddOrUpdatePod(pod, oldPod) + }, + DeleteFunc: func(obj interface{}) { + pod := obj.(*corev1.Pod) + p.logger.Debug("list and watch for pods: DELETE " + pod.Name) + p.onDeletePod(obj) + }, + }) + + go p.informer.Run(stopCh) + +} + +func (p *podWatcher) waitForCacheSync(stopCh chan struct{}) { + if !cache.WaitForNamedCacheSync("podWatcher", stopCh, p.informer.HasSynced) { + p.logger.Fatal("timed out waiting for kubernetes pod watcher caches to sync") + } + + p.logger.Info("podWatcher: Cache synced") +} + +// minimizePod removes fields that could contain large objects, and retain essential +// fields needed for IP/name translation. 
The following fields must be kept: +// - ObjectMeta: Namespace, Name, Labels, OwnerReference +// - Spec: HostNetwork, ContainerPorts +// - Status: PodIP/s, HostIP/s +func minimizePod(obj interface{}) (interface{}, error) { + if pod, ok := obj.(*corev1.Pod); ok { + pod.Annotations = nil + pod.Finalizers = nil + pod.ManagedFields = nil + + pod.Spec.Volumes = nil + pod.Spec.InitContainers = nil + pod.Spec.EphemeralContainers = nil + pod.Spec.ImagePullSecrets = nil + pod.Spec.HostAliases = nil + pod.Spec.SchedulingGates = nil + pod.Spec.ResourceClaims = nil + pod.Spec.Tolerations = nil + pod.Spec.Affinity = nil + + pod.Status.InitContainerStatuses = nil + pod.Status.ContainerStatuses = nil + pod.Status.EphemeralContainerStatuses = nil + + for i := 0; i < len(pod.Spec.Containers); i++ { + c := &pod.Spec.Containers[i] + c.Image = "" + c.Command = nil + c.Args = nil + c.EnvFrom = nil + c.Env = nil + c.Resources = corev1.ResourceRequirements{} + c.VolumeMounts = nil + c.VolumeDevices = nil + c.SecurityContext = nil + } + } + return obj, nil +} diff --git a/plugins/processors/awsapplicationsignals/internal/resolver/podwatcher_test.go b/plugins/processors/awsapplicationsignals/internal/resolver/podwatcher_test.go new file mode 100644 index 0000000000..02b45a1f42 --- /dev/null +++ b/plugins/processors/awsapplicationsignals/internal/resolver/podwatcher_test.go @@ -0,0 +1,517 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: MIT + +package resolver + +import ( + "sync" + "testing" + + mapset "github.com/deckarep/golang-set/v2" + "github.com/stretchr/testify/assert" + "go.uber.org/zap" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func newPodWatcherForTesting(ipToPod, podToWorkloadAndNamespace, workloadAndNamespaceToLabels *sync.Map, workloadPodCount map[string]int) *podWatcher { + logger, _ := zap.NewDevelopment() + return &podWatcher{ + ipToPod: ipToPod, + podToWorkloadAndNamespace: podToWorkloadAndNamespace, + workloadAndNamespaceToLabels: workloadAndNamespaceToLabels, + workloadPodCount: workloadPodCount, + logger: logger, + informer: nil, + deleter: mockDeleter, + } +} + +func TestOnAddOrUpdatePod(t *testing.T) { + t.Run("pod with both PodIP and HostIP", func(t *testing.T) { + ipToPod := &sync.Map{} + podToWorkloadAndNamespace := &sync.Map{} + workloadAndNamespaceToLabels := &sync.Map{} + workloadPodCount := map[string]int{} + + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "testPod", + Namespace: "testNamespace", + OwnerReferences: []metav1.OwnerReference{ + { + Kind: "ReplicaSet", + Name: "testDeployment-598b89cd8d", + }, + }, + }, + Status: corev1.PodStatus{ + PodIP: "1.2.3.4", + HostIP: "5.6.7.8", + }, + } + + poWatcher := newPodWatcherForTesting(ipToPod, podToWorkloadAndNamespace, workloadAndNamespaceToLabels, workloadPodCount) + poWatcher.onAddOrUpdatePod(pod, nil) + + // Test the mappings in ipToPod + if podName, _ := ipToPod.Load("1.2.3.4"); podName != "testPod" { + t.Errorf("ipToPod was incorrect, got: %s, want: %s.", podName, "testPod") + } + + // Test the mapping in podToWorkloadAndNamespace + if depAndNamespace, _ := podToWorkloadAndNamespace.Load("testPod"); depAndNamespace != "testDeployment@testNamespace" { + t.Errorf("podToWorkloadAndNamespace was incorrect, got: %s, want: %s.", depAndNamespace, "testDeployment@testNamespace") + } + + // Test the count in workloadPodCount + if count := 
workloadPodCount["testDeployment@testNamespace"]; count != 1 { + t.Errorf("workloadPodCount was incorrect, got: %d, want: %d.", count, 1) + } + }) + + t.Run("pod with only HostIP", func(t *testing.T) { + ipToPod := &sync.Map{} + podToWorkloadAndNamespace := &sync.Map{} + workloadAndNamespaceToLabels := &sync.Map{} + workloadPodCount := map[string]int{} + + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "testPod", + Namespace: "testNamespace", + OwnerReferences: []metav1.OwnerReference{ + { + Kind: "ReplicaSet", + Name: "testDeployment-7b74958fb8", + }, + }, + }, + Status: corev1.PodStatus{ + HostIP: "5.6.7.8", + }, + Spec: corev1.PodSpec{ + HostNetwork: true, + Containers: []corev1.Container{ + { + Ports: []corev1.ContainerPort{ + { + HostPort: int32(8080), + }, + }, + }, + }, + }, + } + + poWatcher := newPodWatcherForTesting(ipToPod, podToWorkloadAndNamespace, workloadAndNamespaceToLabels, workloadPodCount) + poWatcher.onAddOrUpdatePod(pod, nil) + + // Test the mappings in ipToPod + if podName, _ := ipToPod.Load("5.6.7.8:8080"); podName != "testPod" { + t.Errorf("ipToPod was incorrect, got: %s, want: %s.", podName, "testPod") + } + + // Test the mapping in podToWorkloadAndNamespace + if depAndNamespace, _ := podToWorkloadAndNamespace.Load("testPod"); depAndNamespace != "testDeployment@testNamespace" { + t.Errorf("podToWorkloadAndNamespace was incorrect, got: %s, want: %s.", depAndNamespace, "testDeployment@testNamespace") + } + + // Test the count in workloadPodCount + if count := workloadPodCount["testDeployment@testNamespace"]; count != 1 { + t.Errorf("workloadPodCount was incorrect, got: %d, want: %d.", count, 1) + } + }) + + t.Run("pod updated with different set of labels", func(t *testing.T) { + ipToPod := &sync.Map{} + podToWorkloadAndNamespace := &sync.Map{} + workloadAndNamespaceToLabels := &sync.Map{} + + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "testPod", + Namespace: "testNamespace", + Labels: map[string]string{ + 
"label1": "value1", + "label2": "value2", + }, + OwnerReferences: []metav1.OwnerReference{ + { + Kind: "ReplicaSet", + Name: "testDeployment-5d68bc5f49", + }, + }, + }, + Status: corev1.PodStatus{ + HostIP: "5.6.7.8", + }, + Spec: corev1.PodSpec{ + HostNetwork: true, + Containers: []corev1.Container{ + { + Ports: []corev1.ContainerPort{ + {HostPort: 8080}, + }, + }, + }, + }, + } + + // add the pod + poWatcher := newPodWatcherForTesting(ipToPod, podToWorkloadAndNamespace, workloadAndNamespaceToLabels, map[string]int{}) + poWatcher.onAddOrUpdatePod(pod, nil) + + // Test the mappings in ipToPod + if podName, ok := ipToPod.Load("5.6.7.8:8080"); !ok && podName != "testPod" { + t.Errorf("ipToPod[%s] was incorrect, got: %s, want: %s.", "5.6.7.8:8080", podName, "testPod") + } + + // Test the mapping in workloadAndNamespaceToLabels + labels, _ := workloadAndNamespaceToLabels.Load("testDeployment@testNamespace") + expectedLabels := []string{"label1=value1", "label2=value2"} + for _, label := range expectedLabels { + if !labels.(mapset.Set[string]).Contains(label) { + t.Errorf("deploymentAndNamespaceToLabels was incorrect, got: %v, want: %s.", labels, label) + } + } + + pod2 := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "testPod", + Namespace: "testNamespace", + Labels: map[string]string{ + "label1": "value1", + "label2": "value2", + "label3": "value3", + }, + OwnerReferences: []metav1.OwnerReference{ + { + Kind: "ReplicaSet", + Name: "testDeployment-5d68bc5f49", + }, + }, + }, + Status: corev1.PodStatus{ + PodIP: "1.2.3.4", + HostIP: "5.6.7.8", + }, + Spec: corev1.PodSpec{ + HostNetwork: true, + Containers: []corev1.Container{ + { + Ports: []corev1.ContainerPort{ + {HostPort: 8080}, + }, + }, + }, + }, + } + + // add the pod + poWatcher.onAddOrUpdatePod(pod2, pod) + + // Test the mappings in ipToPod + if podName, ok := ipToPod.Load("5.6.7.8:8080"); !ok && podName != "testPod" { + t.Errorf("ipToPod[%s] was incorrect, got: %s, want: %s.", "5.6.7.8:8080", podName, 
"testPod") + } + + if podName, ok := ipToPod.Load("1.2.3.4"); !ok && podName != "testPod" { + t.Errorf("ipToPod[%s] was incorrect, got: %s, want: %s.", "1.2.3.4", podName, "testPod") + } + // Test the mapping in workloadAndNamespaceToLabels + labels, _ = workloadAndNamespaceToLabels.Load("testDeployment@testNamespace") + expectedLabels = []string{"label1=value1", "label2=value2", "label3=value3"} + for _, label := range expectedLabels { + if !labels.(mapset.Set[string]).Contains(label) { + t.Errorf("workloadAndNamespaceToLabels was incorrect, got: %v, want: %s.", labels, label) + } + } + }) +} + +func TestOnDeletePod(t *testing.T) { + t.Run("pod with both PodIP and HostIP", func(t *testing.T) { + ipToPod := &sync.Map{} + podToWorkloadAndNamespace := &sync.Map{} + workloadAndNamespaceToLabels := &sync.Map{} + workloadPodCount := map[string]int{} + + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "testPod", + Namespace: "testNamespace", + OwnerReferences: []metav1.OwnerReference{ + { + Kind: "ReplicaSet", + Name: "testDeployment-xyz", + }, + }, + }, + Status: corev1.PodStatus{ + PodIP: "1.2.3.4", + HostIP: "5.6.7.8", + }, + } + + // Assume the pod has already been added + ipToPod.Store(pod.Status.PodIP, pod.Name) + ipToPod.Store(pod.Status.HostIP, pod.Name) + podToWorkloadAndNamespace.Store(pod.Name, "testDeployment@testNamespace") + workloadAndNamespaceToLabels.Store("testDeployment@testNamespace", "testLabels") + workloadPodCount["testDeployment@testNamespace"] = 1 + + poWatcher := newPodWatcherForTesting(ipToPod, podToWorkloadAndNamespace, workloadAndNamespaceToLabels, workloadPodCount) + poWatcher.onDeletePod(pod) + + // Test if the entries in ipToPod and podToWorkloadAndNamespace have been deleted + if _, ok := ipToPod.Load("1.2.3.4"); ok { + t.Errorf("ipToPod deletion was incorrect, key: %s still exists", "1.2.3.4") + } + + if _, ok := podToWorkloadAndNamespace.Load("testPod"); ok { + t.Errorf("podToWorkloadAndNamespace deletion was incorrect, 
key: %s still exists", "testPod") + } + + // Test if the count in workloadPodCount has been decremented and the entry in workloadAndNamespaceToLabels has been deleted + if count := workloadPodCount["testDeployment@testNamespace"]; count != 0 { + t.Errorf("workloadPodCount was incorrect, got: %d, want: %d.", count, 0) + } + + if _, ok := workloadAndNamespaceToLabels.Load("testDeployment@testNamespace"); ok { + t.Errorf("workloadAndNamespaceToLabels deletion was incorrect, key: %s still exists", "testDeployment@testNamespace") + } + }) + + t.Run("pod with only HostIP and some network ports", func(t *testing.T) { + ipToPod := &sync.Map{} + podToWorkloadAndNamespace := &sync.Map{} + workloadAndNamespaceToLabels := &sync.Map{} + workloadPodCount := map[string]int{} + + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "testPod", + Namespace: "testNamespace", + OwnerReferences: []metav1.OwnerReference{ + { + Kind: "ReplicaSet", + Name: "testDeployment-xyz", + }, + }, + }, + Status: corev1.PodStatus{ + HostIP: "5.6.7.8", + }, + Spec: corev1.PodSpec{ + HostNetwork: true, + Containers: []corev1.Container{ + { + Ports: []corev1.ContainerPort{ + { + HostPort: int32(8080), + }, + }, + }, + }, + }, + } + + // Assume the pod has already been added + ipToPod.Store(pod.Status.HostIP, pod.Name) + ipToPod.Store(pod.Status.HostIP+":8080", pod.Name) + podToWorkloadAndNamespace.Store(pod.Name, "testDeployment@testNamespace") + workloadAndNamespaceToLabels.Store("testDeployment@testNamespace", "testLabels") + workloadPodCount["testDeployment@testNamespace"] = 1 + + poWatcher := newPodWatcherForTesting(ipToPod, podToWorkloadAndNamespace, workloadAndNamespaceToLabels, workloadPodCount) + poWatcher.onDeletePod(pod) + + // Test if the entries in ipToPod and podToWorkloadAndNamespace have been deleted + if _, ok := ipToPod.Load("5.6.7.8:8080"); ok { + t.Errorf("ipToPod deletion was incorrect, key: %s still exists", "5.6.7.8:8080") + } + + if _, ok := 
podToWorkloadAndNamespace.Load("testPod"); ok { + t.Errorf("podToDeploymentAndNamespace deletion was incorrect, key: %s still exists", "testPod") + } + + // Test if the count in workloadPodCount has been decremented and the entry in workloadAndNamespaceToLabels has been deleted + if count := workloadPodCount["testDeployment@testNamespace"]; count != 0 { + t.Errorf("workloadPodCount was incorrect, got: %d, want: %d.", count, 0) + } + + if _, ok := workloadAndNamespaceToLabels.Load("testDeployment@testNamespace"); ok { + t.Errorf("workloadAndNamespaceToLabels deletion was incorrect, key: %s still exists", "testDeployment@testNamespace") + } + }) +} + +func TestHandlePodUpdate(t *testing.T) { + testCases := []struct { + name string + oldPod *corev1.Pod + newPod *corev1.Pod + initialIPToPod map[string]string + expectedIPToPod map[string]string + }{ + { + name: "Old and New Pod Do Not Use Host Network, Different Pod IPs", + oldPod: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "mypod", + }, + Status: corev1.PodStatus{ + PodIP: "10.0.0.3", + }, + Spec: corev1.PodSpec{ + HostNetwork: false, + }, + }, + newPod: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "mypod", + }, + Status: corev1.PodStatus{ + PodIP: "10.0.0.4", + }, + Spec: corev1.PodSpec{ + HostNetwork: false, + }, + }, + initialIPToPod: map[string]string{ + "10.0.0.3": "mypod", + }, + expectedIPToPod: map[string]string{ + "10.0.0.4": "mypod", + }, + }, + { + name: "Old Pod Has Empty PodIP, New Pod Does Not Use Host Network, Non-Empty Pod IP", + oldPod: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "mypod", + }, + Status: corev1.PodStatus{ + PodIP: "", + }, + Spec: corev1.PodSpec{ + HostNetwork: false, + }, + }, + newPod: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "mypod", + }, + Status: corev1.PodStatus{ + PodIP: "10.0.0.5", + }, + Spec: corev1.PodSpec{ + HostNetwork: false, + }, + }, + initialIPToPod: map[string]string{}, + expectedIPToPod: map[string]string{ + "10.0.0.5": 
"mypod", + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + ipToPod := &sync.Map{} + // Initialize ipToPod map + for k, v := range tc.initialIPToPod { + ipToPod.Store(k, v) + } + poWatcher := newPodWatcherForTesting(ipToPod, nil, nil, map[string]int{}) + poWatcher.handlePodUpdate(tc.newPod, tc.oldPod) + + // Now validate that ipToPod map has been updated correctly + for key, expectedValue := range tc.expectedIPToPod { + val, ok := ipToPod.Load(key) + if !ok || val.(string) != expectedValue { + t.Errorf("Expected record for %v to be %v, got %v", key, expectedValue, val) + } + } + // Validate that old keys have been removed + for key := range tc.initialIPToPod { + if _, ok := tc.expectedIPToPod[key]; !ok { + if _, ok := ipToPod.Load(key); ok { + t.Errorf("Expected record for %v to be removed, but it was not", key) + } + } + } + }) + } +} + +func TestFilterPodIPFields(t *testing.T) { + meta := metav1.ObjectMeta{ + Name: "test", + Namespace: "default", + Labels: map[string]string{ + "name": "app", + }, + } + pod := &corev1.Pod{ + ObjectMeta: meta, + Spec: corev1.PodSpec{ + HostNetwork: true, + Containers: []corev1.Container{ + {}, + }, + }, + Status: corev1.PodStatus{}, + } + newPod, err := minimizePod(pod) + assert.Nil(t, err) + assert.Empty(t, getHostNetworkPorts(newPod.(*corev1.Pod))) + + podStatus := corev1.PodStatus{ + PodIP: "192.168.0.12", + HostIPs: []corev1.HostIP{ + { + IP: "132.168.3.12", + }, + }, + } + pod = &corev1.Pod{ + ObjectMeta: meta, + Spec: corev1.PodSpec{ + HostNetwork: true, + Containers: []corev1.Container{ + { + Ports: []corev1.ContainerPort{ + {HostPort: 8080}, + }, + }, + }, + }, + Status: podStatus, + } + newPod, err = minimizePod(pod) + assert.Nil(t, err) + assert.Equal(t, "app", newPod.(*corev1.Pod).Labels["name"]) + assert.Equal(t, []string{"8080"}, getHostNetworkPorts(newPod.(*corev1.Pod))) + assert.Equal(t, podStatus, newPod.(*corev1.Pod).Status) + + pod = &corev1.Pod{ + Spec: corev1.PodSpec{ + 
HostNetwork: true, + Containers: []corev1.Container{ + { + Ports: []corev1.ContainerPort{ + {HostPort: 8080}, + {HostPort: 8081}, + }, + }, + }, + }, + Status: podStatus, + } + newPod, err = minimizePod(pod) + assert.Nil(t, err) + assert.Equal(t, []string{"8080", "8081"}, getHostNetworkPorts(newPod.(*corev1.Pod))) + assert.Equal(t, podStatus, newPod.(*corev1.Pod).Status) +} diff --git a/plugins/processors/awsapplicationsignals/internal/resolver/servicetoworkload.go b/plugins/processors/awsapplicationsignals/internal/resolver/servicetoworkload.go new file mode 100644 index 0000000000..6cdc69f69e --- /dev/null +++ b/plugins/processors/awsapplicationsignals/internal/resolver/servicetoworkload.go @@ -0,0 +1,81 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: MIT + +package resolver + +import ( + "sync" + "time" + + mapset "github.com/deckarep/golang-set/v2" + "go.uber.org/zap" +) + +type serviceToWorkloadMapper struct { + serviceAndNamespaceToSelectors *sync.Map + workloadAndNamespaceToLabels *sync.Map + serviceToWorkload *sync.Map + logger *zap.Logger + deleter Deleter +} + +func newServiceToWorkloadMapper(serviceAndNamespaceToSelectors, workloadAndNamespaceToLabels, serviceToWorkload *sync.Map, logger *zap.Logger, deleter Deleter) *serviceToWorkloadMapper { + return &serviceToWorkloadMapper{ + serviceAndNamespaceToSelectors: serviceAndNamespaceToSelectors, + workloadAndNamespaceToLabels: workloadAndNamespaceToLabels, + serviceToWorkload: serviceToWorkload, + logger: logger, + deleter: deleter, + } +} + +func (m *serviceToWorkloadMapper) mapServiceToWorkload() { + m.logger.Debug("Map service to workload at:", zap.Time("time", time.Now())) + + m.serviceAndNamespaceToSelectors.Range(func(key, value interface{}) bool { + var workloads []string + serviceAndNamespace := key.(string) + _, serviceNamespace := extractResourceAndNamespace(serviceAndNamespace) + serviceLabels := value.(mapset.Set[string]) + + 
m.workloadAndNamespaceToLabels.Range(func(workloadKey, labelsValue interface{}) bool { + labels := labelsValue.(mapset.Set[string]) + workloadAndNamespace := workloadKey.(string) + _, workloadNamespace := extractResourceAndNamespace(workloadAndNamespace) + if workloadNamespace == serviceNamespace && workloadNamespace != "" && serviceLabels.IsSubset(labels) { + m.logger.Debug("Found workload for service", zap.String("service", serviceAndNamespace), zap.String("workload", workloadAndNamespace)) + workloads = append(workloads, workloadAndNamespace) + } + + return true + }) + + if len(workloads) > 1 { + m.logger.Info("Multiple workloads found for service. You will get unexpected results.", zap.String("service", serviceAndNamespace), zap.Strings("workloads", workloads)) + } else if len(workloads) == 1 { + m.serviceToWorkload.Store(serviceAndNamespace, workloads[0]) + } else { + m.logger.Debug("No workload found for service", zap.String("service", serviceAndNamespace)) + m.deleter.DeleteWithDelay(m.serviceToWorkload, serviceAndNamespace) + } + return true + }) +} + +func (m *serviceToWorkloadMapper) Start(stopCh chan struct{}) { + // do the first mapping immediately + m.mapServiceToWorkload() + m.logger.Debug("First-time map service to workload at:", zap.Time("time", time.Now())) + + go func() { + for { + select { + case <-stopCh: + return + case <-time.After(time.Minute + 30*time.Second): + m.mapServiceToWorkload() + m.logger.Debug("Map service to workload at:", zap.Time("time", time.Now())) + } + } + }() +} diff --git a/plugins/processors/awsapplicationsignals/internal/resolver/servicetoworkload_test.go b/plugins/processors/awsapplicationsignals/internal/resolver/servicetoworkload_test.go new file mode 100644 index 0000000000..b2589542e1 --- /dev/null +++ b/plugins/processors/awsapplicationsignals/internal/resolver/servicetoworkload_test.go @@ -0,0 +1,101 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: MIT + +package resolver + +import ( + "sync" + "testing" + "time" + + mapset "github.com/deckarep/golang-set/v2" + "go.uber.org/zap" +) + +func TestMapServiceToWorkload(t *testing.T) { + logger, _ := zap.NewDevelopment() + + serviceAndNamespaceToSelectors := &sync.Map{} + workloadAndNamespaceToLabels := &sync.Map{} + serviceToWorkload := &sync.Map{} + + serviceAndNamespaceToSelectors.Store("service1@namespace1", mapset.NewSet("label1=value1", "label2=value2")) + workloadAndNamespaceToLabels.Store("deployment1@namespace1", mapset.NewSet("label1=value1", "label2=value2", "label3=value3")) + + mapper := newServiceToWorkloadMapper(serviceAndNamespaceToSelectors, workloadAndNamespaceToLabels, serviceToWorkload, logger, mockDeleter) + mapper.mapServiceToWorkload() + + if _, ok := serviceToWorkload.Load("service1@namespace1"); !ok { + t.Errorf("Expected service1@namespace1 to be mapped to a workload, but it was not") + } +} + +func TestMapServiceToWorkload_NoWorkload(t *testing.T) { + logger, _ := zap.NewDevelopment() + + serviceAndNamespaceToSelectors := &sync.Map{} + workloadAndNamespaceToLabels := &sync.Map{} + serviceToWorkload := &sync.Map{} + + // Add a service with no matching workload + serviceAndNamespace := "service@namespace" + serviceAndNamespaceToSelectors.Store(serviceAndNamespace, mapset.NewSet("label1=value1")) + serviceToWorkload.Store(serviceAndNamespace, "workload@namespace") + + mapper := newServiceToWorkloadMapper(serviceAndNamespaceToSelectors, workloadAndNamespaceToLabels, serviceToWorkload, logger, mockDeleter) + mapper.mapServiceToWorkload() + + // Check that the service was deleted from serviceToWorkload + if _, ok := serviceToWorkload.Load(serviceAndNamespace); ok { + t.Errorf("Service was not deleted from serviceToWorkload") + } +} + +func TestMapServiceToWorkload_MultipleWorkloads(t *testing.T) { + logger, _ := zap.NewDevelopment() + + serviceAndNamespaceToSelectors := &sync.Map{} + workloadAndNamespaceToLabels := 
&sync.Map{} + serviceToWorkload := &sync.Map{} + + serviceAndNamespace := "service@namespace" + serviceAndNamespaceToSelectors.Store(serviceAndNamespace, mapset.NewSet("label1=value1", "label2=value2")) + + // Add two workloads with matching labels to the service + workloadAndNamespaceToLabels.Store("workload1@namespace", mapset.NewSet("label1=value1", "label2=value2", "label3=value3")) + workloadAndNamespaceToLabels.Store("workload2@namespace", mapset.NewSet("label1=value1", "label2=value2", "label4=value4")) + + mapper := newServiceToWorkloadMapper(serviceAndNamespaceToSelectors, workloadAndNamespaceToLabels, serviceToWorkload, logger, mockDeleter) + mapper.mapServiceToWorkload() + + // Check that the service does not map to any workload + if _, ok := serviceToWorkload.Load(serviceAndNamespace); ok { + t.Errorf("Unexpected mapping of service to multiple workloads") + } +} + +func TestStopsWhenSignaled(t *testing.T) { + logger, _ := zap.NewDevelopment() + + serviceAndNamespaceToSelectors := &sync.Map{} + workloadAndNamespaceToLabels := &sync.Map{} + serviceToWorkload := &sync.Map{} + + stopchan := make(chan struct{}) + + // Signal the stopchan to stop after 100 milliseconds + time.AfterFunc(100*time.Millisecond, func() { + close(stopchan) + }) + + mapper := newServiceToWorkloadMapper(serviceAndNamespaceToSelectors, workloadAndNamespaceToLabels, serviceToWorkload, logger, mockDeleter) + + start := time.Now() + mapper.Start(stopchan) + duration := time.Since(start) + + // Check that the function stopped in a reasonable time after the stop signal + if duration > 200*time.Millisecond { + t.Errorf("mapServiceToWorkload did not stop in a reasonable time after the stop signal, duration: %v", duration) + } +} diff --git a/plugins/processors/awsapplicationsignals/internal/resolver/servicewatcher.go b/plugins/processors/awsapplicationsignals/internal/resolver/servicewatcher.go new file mode 100644 index 0000000000..54207119cc --- /dev/null +++ 
b/plugins/processors/awsapplicationsignals/internal/resolver/servicewatcher.go @@ -0,0 +1,121 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: MIT + +package resolver + +import ( + "sync" + + mapset "github.com/deckarep/golang-set/v2" + "go.uber.org/zap" + corev1 "k8s.io/api/core/v1" + "k8s.io/client-go/informers" + "k8s.io/client-go/tools/cache" +) + +type serviceWatcher struct { + ipToServiceAndNamespace *sync.Map + serviceAndNamespaceToSelectors *sync.Map + logger *zap.Logger + informer cache.SharedIndexInformer + deleter Deleter +} + +func newServiceWatcher(logger *zap.Logger, sharedInformerFactory informers.SharedInformerFactory, deleter Deleter) *serviceWatcher { + serviceInformer := sharedInformerFactory.Core().V1().Services().Informer() + err := serviceInformer.SetTransform(minimizeService) + if err != nil { + logger.Error("failed to minimize Service objects", zap.Error(err)) + } + + return &serviceWatcher{ + ipToServiceAndNamespace: &sync.Map{}, + serviceAndNamespaceToSelectors: &sync.Map{}, + logger: logger, + informer: serviceInformer, + deleter: deleter, + } +} + +func (s *serviceWatcher) Run(stopCh chan struct{}) { + s.informer.AddEventHandler(cache.ResourceEventHandlerFuncs{ + AddFunc: func(obj interface{}) { + service := obj.(*corev1.Service) + s.logger.Debug("list and watch for services: ADD " + service.Name) + s.onAddOrUpdateService(service) + }, + UpdateFunc: func(_, newObj interface{}) { + service := newObj.(*corev1.Service) + s.logger.Debug("list and watch for services: UPDATE " + service.Name) + s.onAddOrUpdateService(service) + }, + DeleteFunc: func(obj interface{}) { + service := obj.(*corev1.Service) + s.logger.Debug("list and watch for services: DELETE " + service.Name) + s.onDeleteService(service, s.deleter) + }, + }) + go s.informer.Run(stopCh) +} + +func (s *serviceWatcher) waitForCacheSync(stopCh chan struct{}) { + if !cache.WaitForNamedCacheSync("serviceWatcher", stopCh, 
s.informer.HasSynced) { + s.logger.Fatal("timed out waiting for kubernetes service watcher caches to sync") + } + + s.logger.Info("serviceWatcher: Cache synced") +} + +func (s *serviceWatcher) onAddOrUpdateService(service *corev1.Service) { + // service can also have an external IP (or ingress IP) that could be accessed + // this field can be either an IP address (in some edge case) or a hostname (see "EXTERNAL-IP" column in "k get svc" output) + // [ec2-user@ip-172-31-11-104 one-step]$ k get svc -A + // NAMESPACE NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE + // default pet-clinic-frontend ClusterIP 10.100.216.182 8080/TCP 108m + // default vets-service ClusterIP 10.100.62.167 8083/TCP 108m + // default visits-service ClusterIP 10.100.96.5 8082/TCP 108m + // ingress-nginx default-http-backend ClusterIP 10.100.11.231 80/TCP 108m + // ingress-nginx ingress-nginx LoadBalancer 10.100.154.5 aex7997ece08c435dbd2b912fd5aa5bd-5372117830.xxxxx.elb.amazonaws.com 80:32080/TCP,443:32081/TCP,9113:30410/TCP 108m + // kube-system kube-dns ClusterIP 10.100.0.10 + // + // we ignore such case for now and may need to consider it in the future + if service.Spec.ClusterIP != "" && service.Spec.ClusterIP != corev1.ClusterIPNone { + s.ipToServiceAndNamespace.Store(service.Spec.ClusterIP, getServiceAndNamespace(service)) + } + labelSet := mapset.NewSet[string]() + for key, value := range service.Spec.Selector { + labelSet.Add(key + "=" + value) + } + if labelSet.Cardinality() > 0 { + s.serviceAndNamespaceToSelectors.Store(getServiceAndNamespace(service), labelSet) + } +} + +func (s *serviceWatcher) onDeleteService(service *corev1.Service, deleter Deleter) { + if service.Spec.ClusterIP != "" && service.Spec.ClusterIP != corev1.ClusterIPNone { + deleter.DeleteWithDelay(s.ipToServiceAndNamespace, service.Spec.ClusterIP) + } + deleter.DeleteWithDelay(s.serviceAndNamespaceToSelectors, getServiceAndNamespace(service)) +} + +// minimizeService removes fields that could contain large objects, 
and retains essential +// fields needed for IP/name translation. The following fields must be kept: +// - ObjectMeta: Namespace, Name +// - Spec: Selectors, ClusterIP +func minimizeService(obj interface{}) (interface{}, error) { + if svc, ok := obj.(*corev1.Service); ok { + svc.Annotations = nil + svc.Finalizers = nil + svc.ManagedFields = nil + + svc.Spec.LoadBalancerSourceRanges = nil + svc.Spec.SessionAffinityConfig = nil + svc.Spec.IPFamilies = nil + svc.Spec.IPFamilyPolicy = nil + svc.Spec.InternalTrafficPolicy = nil + svc.Spec.ExternalIPs = nil + + svc.Status.Conditions = nil + } + return obj, nil +} diff --git a/plugins/processors/awsapplicationsignals/internal/resolver/servicewatcher_test.go b/plugins/processors/awsapplicationsignals/internal/resolver/servicewatcher_test.go new file mode 100644 index 0000000000..9e2bbdeeb0 --- /dev/null +++ b/plugins/processors/awsapplicationsignals/internal/resolver/servicewatcher_test.go @@ -0,0 +1,106 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+// SPDX-License-Identifier: MIT + +package resolver + +import ( + "sync" + "testing" + + mapset "github.com/deckarep/golang-set/v2" + "github.com/stretchr/testify/assert" + "go.uber.org/zap" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func newServiceWatcherForTesting(ipToServiceAndNamespace, serviceAndNamespaceToSelectors *sync.Map) *serviceWatcher { + logger, _ := zap.NewDevelopment() + return &serviceWatcher{ipToServiceAndNamespace, serviceAndNamespaceToSelectors, logger, nil, nil} +} + +func TestOnAddOrUpdateService(t *testing.T) { + // Create a fake service + service := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "myservice", + Namespace: "mynamespace", + }, + Spec: corev1.ServiceSpec{ + ClusterIP: "1.2.3.4", + Selector: map[string]string{ + "app": "myapp", + }, + }, + } + + // Create the maps + ipToServiceAndNamespace := &sync.Map{} + serviceAndNamespaceToSelectors := &sync.Map{} + + // Call the function + svcWatcher := newServiceWatcherForTesting(ipToServiceAndNamespace, serviceAndNamespaceToSelectors) + svcWatcher.onAddOrUpdateService(service) + + // Check that the maps contain the expected entries + if _, ok := ipToServiceAndNamespace.Load("1.2.3.4"); !ok { + t.Errorf("ipToServiceAndNamespace does not contain the service IP") + } + if _, ok := serviceAndNamespaceToSelectors.Load("myservice@mynamespace"); !ok { + t.Errorf("serviceAndNamespaceToSelectors does not contain the service") + } +} + +func TestOnDeleteService(t *testing.T) { + // Create a fake service + service := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "myservice", + Namespace: "mynamespace", + }, + Spec: corev1.ServiceSpec{ + ClusterIP: "1.2.3.4", + Selector: map[string]string{ + "app": "myapp", + }, + }, + } + + // Create the maps and add the service to them + ipToServiceAndNamespace := &sync.Map{} + ipToServiceAndNamespace.Store("1.2.3.4", "myservice@mynamespace") + serviceAndNamespaceToSelectors := &sync.Map{} + 
serviceAndNamespaceToSelectors.Store("myservice@mynamespace", mapset.NewSet("app=myapp")) + + // Call the function + svcWatcher := newServiceWatcherForTesting(ipToServiceAndNamespace, serviceAndNamespaceToSelectors) + svcWatcher.onDeleteService(service, mockDeleter) + + // Check that the maps do not contain the service + if _, ok := ipToServiceAndNamespace.Load("1.2.3.4"); ok { + t.Errorf("ipToServiceAndNamespace still contains the service IP") + } + if _, ok := serviceAndNamespaceToSelectors.Load("myservice@mynamespace"); ok { + t.Errorf("serviceAndNamespaceToSelectors still contains the service") + } +} + +func TestFilterServiceIPFields(t *testing.T) { + meta := metav1.ObjectMeta{ + Name: "test", + Namespace: "default", + } + svc := &corev1.Service{ + ObjectMeta: meta, + Spec: corev1.ServiceSpec{ + Selector: map[string]string{ + "name": "app", + }, + ClusterIP: "10.0.12.4", + }, + } + newSvc, err := minimizeService(svc) + assert.Nil(t, err) + assert.Equal(t, "10.0.12.4", newSvc.(*corev1.Service).Spec.ClusterIP) + assert.Equal(t, "app", newSvc.(*corev1.Service).Spec.Selector["name"]) +} diff --git a/plugins/processors/awsentity/factory.go b/plugins/processors/awsentity/factory.go index 32887ffa6a..d530e9d31b 100644 --- a/plugins/processors/awsentity/factory.go +++ b/plugins/processors/awsentity/factory.go @@ -35,7 +35,7 @@ func createDefaultConfig() component.Config { func createMetricsProcessor( ctx context.Context, - set processor.CreateSettings, + set processor.Settings, cfg component.Config, nextConsumer consumer.Metrics, ) (processor.Metrics, error) { @@ -45,7 +45,7 @@ func createMetricsProcessor( } metricsProcessor := newAwsEntityProcessor(processorConfig, set.Logger) - return processorhelper.NewMetricsProcessor( + return processorhelper.NewMetrics( ctx, set, cfg, diff --git a/plugins/processors/awsentity/factory_test.go b/plugins/processors/awsentity/factory_test.go index a00799bc55..9a2634ae01 100644 --- a/plugins/processors/awsentity/factory_test.go +++ 
b/plugins/processors/awsentity/factory_test.go @@ -9,9 +9,9 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/component/componenttest" "go.opentelemetry.io/collector/consumer/consumertest" + "go.opentelemetry.io/collector/pipeline" "go.opentelemetry.io/collector/processor/processortest" ) @@ -29,17 +29,17 @@ func TestCreateProcessor(t *testing.T) { require.NotNil(t, factory) cfg := factory.CreateDefaultConfig() - setting := processortest.NewNopCreateSettings() + setting := processortest.NewNopSettings() - tProcessor, err := factory.CreateTracesProcessor(context.Background(), setting, cfg, consumertest.NewNop()) - assert.Equal(t, err, component.ErrDataTypeIsNotSupported) + tProcessor, err := factory.CreateTraces(context.Background(), setting, cfg, consumertest.NewNop()) + assert.Equal(t, err, pipeline.ErrSignalNotSupported) assert.Nil(t, tProcessor) - mProcessor, err := factory.CreateMetricsProcessor(context.Background(), setting, cfg, consumertest.NewNop()) + mProcessor, err := factory.CreateMetrics(context.Background(), setting, cfg, consumertest.NewNop()) assert.NoError(t, err) assert.NotNil(t, mProcessor) - lProcessor, err := factory.CreateLogsProcessor(context.Background(), setting, cfg, consumertest.NewNop()) - assert.Equal(t, err, component.ErrDataTypeIsNotSupported) + lProcessor, err := factory.CreateLogs(context.Background(), setting, cfg, consumertest.NewNop()) + assert.Equal(t, err, pipeline.ErrSignalNotSupported) assert.Nil(t, lProcessor) } diff --git a/plugins/processors/awsentity/internal/k8sattributescraper/k8sattributescraper.go b/plugins/processors/awsentity/internal/k8sattributescraper/k8sattributescraper.go index a563f3da96..2a14f7fe28 100644 --- a/plugins/processors/awsentity/internal/k8sattributescraper/k8sattributescraper.go +++ b/plugins/processors/awsentity/internal/k8sattributescraper/k8sattributescraper.go @@ -6,6 +6,8 @@ package 
k8sattributescraper import ( "go.opentelemetry.io/collector/pdata/pcommon" semconv "go.opentelemetry.io/collector/semconv/v1.22.0" + + "github.com/aws/amazon-cloudwatch-agent/internal/k8sCommon/k8sclient" ) var ( @@ -44,14 +46,19 @@ func NewK8sAttributeScraper(clusterName string) *K8sAttributeScraper { } } -func (e *K8sAttributeScraper) Scrape(rm pcommon.Resource) { +func (e *K8sAttributeScraper) Scrape(rm pcommon.Resource, podMeta k8sclient.PodMetadata) { resourceAttrs := rm.Attributes() - e.scrapeNamespace(resourceAttrs) - e.scrapeWorkload(resourceAttrs) - e.scrapeNode(resourceAttrs) + e.scrapeNamespace(resourceAttrs, podMeta.Namespace) + e.scrapeWorkload(resourceAttrs, podMeta.Workload) + e.scrapeNode(resourceAttrs, podMeta.Node) } -func (e *K8sAttributeScraper) scrapeNamespace(p pcommon.Map) { +func (e *K8sAttributeScraper) scrapeNamespace(p pcommon.Map, ns string) { + if ns != "" { + e.Namespace = ns + return + } + for _, namespace := range namespaceAllowlist { if namespaceAttr, ok := p.Get(namespace); ok { e.Namespace = namespaceAttr.Str() @@ -60,7 +67,12 @@ func (e *K8sAttributeScraper) scrapeNamespace(p pcommon.Map) { } } -func (e *K8sAttributeScraper) scrapeWorkload(p pcommon.Map) { +func (e *K8sAttributeScraper) scrapeWorkload(p pcommon.Map, wl string) { + if wl != "" { + e.Workload = wl + return + } + for _, workload := range workloadAllowlist { if workloadAttr, ok := p.Get(workload); ok { e.Workload = workloadAttr.Str() @@ -70,7 +82,12 @@ func (e *K8sAttributeScraper) scrapeWorkload(p pcommon.Map) { } -func (e *K8sAttributeScraper) scrapeNode(p pcommon.Map) { +func (e *K8sAttributeScraper) scrapeNode(p pcommon.Map, nd string) { + if nd != "" { + e.Node = nd + return + } + for _, node := range nodeAllowlist { if nodeAttr, ok := p.Get(node); ok { e.Node = nodeAttr.Str() diff --git a/plugins/processors/awsentity/internal/k8sattributescraper/k8sattributescraper_test.go b/plugins/processors/awsentity/internal/k8sattributescraper/k8sattributescraper_test.go 
index 81e46b35b3..3261b34066 100644 --- a/plugins/processors/awsentity/internal/k8sattributescraper/k8sattributescraper_test.go +++ b/plugins/processors/awsentity/internal/k8sattributescraper/k8sattributescraper_test.go @@ -10,31 +10,38 @@ import ( "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/pmetric" semconv "go.opentelemetry.io/collector/semconv/v1.22.0" + + "github.com/aws/amazon-cloudwatch-agent/internal/k8sCommon/k8sclient" ) func TestNewK8sAttributeScraper(t *testing.T) { scraper := NewK8sAttributeScraper("test") assert.Equal(t, "test", scraper.Cluster) + assert.Empty(t, scraper.Namespace) + assert.Empty(t, scraper.Workload) + assert.Empty(t, scraper.Node) } func Test_k8sattributescraper_Scrape(t *testing.T) { - tests := []struct { name string clusterName string args pcommon.Resource + podMeta k8sclient.PodMetadata want *K8sAttributeScraper }{ { name: "Empty", clusterName: "", args: pcommon.NewResource(), + podMeta: k8sclient.PodMetadata{}, want: &K8sAttributeScraper{}, }, { name: "ClusterOnly", clusterName: "test-cluster", args: pcommon.NewResource(), + podMeta: k8sclient.PodMetadata{}, want: &K8sAttributeScraper{ Cluster: "test-cluster", }, @@ -42,7 +49,12 @@ func Test_k8sattributescraper_Scrape(t *testing.T) { { name: "AllAppSignalAttributes", clusterName: "test-cluster", - args: generateResourceMetrics(semconv.AttributeK8SNamespaceName, "test-namespace", semconv.AttributeK8SDeploymentName, "test-workload", semconv.AttributeK8SNodeName, "test-node"), + args: generateResourceMetrics( + semconv.AttributeK8SNamespaceName, "test-namespace", + semconv.AttributeK8SDeploymentName, "test-workload", + semconv.AttributeK8SNodeName, "test-node", + ), + podMeta: k8sclient.PodMetadata{}, want: &K8sAttributeScraper{ Cluster: "test-cluster", Namespace: "test-namespace", @@ -50,12 +62,67 @@ func Test_k8sattributescraper_Scrape(t *testing.T) { Node: "test-node", }, }, + { + name: "PodMetadataOnly", + clusterName: "my-cluster", + args: 
pcommon.NewResource(), + podMeta: k8sclient.PodMetadata{ + Namespace: "podmeta-namespace", + Workload: "podmeta-workload", + Node: "podmeta-node", + }, + want: &K8sAttributeScraper{ + Cluster: "my-cluster", + Namespace: "podmeta-namespace", + Workload: "podmeta-workload", + Node: "podmeta-node", + }, + }, + { + name: "MixedResourceAndPodMeta", + clusterName: "test-cluster", + args: generateResourceMetrics( + semconv.AttributeK8SNamespaceName, "resource-namespace", + semconv.AttributeK8SDeploymentName, "resource-workload", + semconv.AttributeK8SNodeName, "resource-node", + ), + podMeta: k8sclient.PodMetadata{ + Workload: "podmeta-workload", + }, + want: &K8sAttributeScraper{ + Cluster: "test-cluster", + Namespace: "resource-namespace", + Workload: "podmeta-workload", + Node: "resource-node", + }, + }, + { + name: "PodMetaOverridesAllResourceAttrs", + clusterName: "test-cluster", + args: generateResourceMetrics( + semconv.AttributeK8SNamespaceName, "resource-namespace", + semconv.AttributeK8SDeploymentName, "resource-workload", + semconv.AttributeK8SNodeName, "resource-node", + ), + podMeta: k8sclient.PodMetadata{ + Namespace: "override-namespace", + Workload: "override-workload", + Node: "override-node", + }, + want: &K8sAttributeScraper{ + Cluster: "test-cluster", + Namespace: "override-namespace", + Workload: "override-workload", + Node: "override-node", + }, + }, } + for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { e := NewK8sAttributeScraper(tt.clusterName) - e.Scrape(tt.args) - assert.Equal(t, e, tt.want) + e.Scrape(tt.args, tt.podMeta) + assert.Equal(t, tt.want, e) }) } } @@ -115,15 +182,22 @@ func Test_k8sattributescraper_reset(t *testing.T) { func Test_k8sattributescraper_scrapeNamespace(t *testing.T) { tests := []struct { - name string - args pcommon.Map - want string + name string + nsArg string + args pcommon.Map + want string }{ { name: "Empty", args: getAttributeMap(map[string]any{"": ""}), want: "", }, + { + name: "DirectOverride", + 
nsArg: "direct-namespace", + args: getAttributeMap(map[string]any{semconv.AttributeK8SNamespaceName: "namespace-name"}), + want: "direct-namespace", + }, { name: "AppSignalNodeExists", args: getAttributeMap(map[string]any{semconv.AttributeK8SNamespaceName: "namespace-name"}), @@ -138,7 +212,7 @@ func Test_k8sattributescraper_scrapeNamespace(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { e := &K8sAttributeScraper{} - e.scrapeNamespace(tt.args) + e.scrapeNamespace(tt.args, tt.nsArg) assert.Equal(t, tt.want, e.Namespace) }) } @@ -146,15 +220,22 @@ func Test_k8sattributescraper_scrapeNamespace(t *testing.T) { func Test_k8sattributescraper_scrapeNode(t *testing.T) { tests := []struct { - name string - args pcommon.Map - want string + name string + ndArg string + args pcommon.Map + want string }{ { name: "Empty", args: getAttributeMap(map[string]any{"": ""}), want: "", }, + { + name: "DirectOverride", + ndArg: "direct-node", + args: getAttributeMap(map[string]any{semconv.AttributeK8SNodeName: "resource-node"}), + want: "direct-node", + }, { name: "AppsignalNodeExists", args: getAttributeMap(map[string]any{semconv.AttributeK8SNodeName: "node-name"}), @@ -169,7 +250,7 @@ func Test_k8sattributescraper_scrapeNode(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { e := &K8sAttributeScraper{} - e.scrapeNode(tt.args) + e.scrapeNode(tt.args, tt.ndArg) assert.Equal(t, tt.want, e.Node) }) } @@ -177,9 +258,10 @@ func Test_k8sattributescraper_scrapeNode(t *testing.T) { func Test_k8sattributescraper_scrapeWorkload(t *testing.T) { tests := []struct { - name string - args pcommon.Map - want string + name string + wlArg string + args pcommon.Map + want string }{ { name: "Empty", @@ -211,18 +293,30 @@ func Test_k8sattributescraper_scrapeWorkload(t *testing.T) { args: getAttributeMap(map[string]any{semconv.AttributeK8SContainerName: "test-container"}), want: "test-container", }, + { + name: "DirectOverride", + wlArg: 
"direct-workload", + args: getAttributeMap(map[string]any{semconv.AttributeK8SDeploymentName: "resource-workload"}), + want: "direct-workload", + }, { name: "MultipleWorkloads", args: getAttributeMap(map[string]any{ semconv.AttributeK8SDeploymentName: "test-deployment", - semconv.AttributeK8SContainerName: "test-container"}), + semconv.AttributeK8SContainerName: "test-container", + }), want: "test-deployment", }, + { + name: "NoArgNoResource", + args: getAttributeMap(map[string]any{"foo": "bar"}), + want: "", + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { e := &K8sAttributeScraper{} - e.scrapeWorkload(tt.args) + e.scrapeWorkload(tt.args, tt.wlArg) assert.Equal(t, tt.want, e.Workload) }) } diff --git a/plugins/processors/awsentity/processor.go b/plugins/processors/awsentity/processor.go index e42b15f133..6886475414 100644 --- a/plugins/processors/awsentity/processor.go +++ b/plugins/processors/awsentity/processor.go @@ -14,6 +14,8 @@ import ( "go.uber.org/zap" "github.com/aws/amazon-cloudwatch-agent/extension/entitystore" + "github.com/aws/amazon-cloudwatch-agent/extension/k8smetadata" + "github.com/aws/amazon-cloudwatch-agent/internal/k8sCommon/k8sclient" "github.com/aws/amazon-cloudwatch-agent/plugins/processors/awsentity/entityattributes" "github.com/aws/amazon-cloudwatch-agent/plugins/processors/awsentity/internal/k8sattributescraper" "github.com/aws/amazon-cloudwatch-agent/plugins/processors/ec2tagger" @@ -31,7 +33,7 @@ const ( ) type scraper interface { - Scrape(rm pcommon.Resource) + Scrape(rm pcommon.Resource, podMeta k8sclient.PodMetadata) Reset() } @@ -104,6 +106,21 @@ var getServiceNameSource = func() (string, string) { return es.GetMetricServiceNameAndSource() } +var getPodMeta = func(ctx context.Context) k8sclient.PodMetadata { + podMeta := k8sclient.PodMetadata{} + k8sMetadata := k8smetadata.GetKubernetesMetadata() + + if k8sMetadata != nil { + podIP := "" + + // Get the pod IP from the context + + podMeta = 
k8sMetadata.GetPodMetadata(podIP) + } + + return podMeta +} + // awsEntityProcessor looks for metrics that have the aws.log.group.names and either the service.name or // deployment.environment resource attributes set, then adds the association between the log group(s) and the // service/environment names to the entitystore extension. @@ -121,7 +138,7 @@ func newAwsEntityProcessor(config *Config, logger *zap.Logger) *awsEntityProcess } } -func (p *awsEntityProcessor) processMetrics(_ context.Context, md pmetric.Metrics) (pmetric.Metrics, error) { +func (p *awsEntityProcessor) processMetrics(ctx context.Context, md pmetric.Metrics) (pmetric.Metrics, error) { // Get the following metric attributes from the EntityStore: PlatformType, EC2.InstanceId, EC2.AutoScalingGroup rm := md.ResourceMetrics() @@ -176,7 +193,7 @@ func (p *awsEntityProcessor) processMetrics(_ context.Context, md pmetric.Metric } } if p.config.KubernetesMode != "" { - p.k8sscraper.Scrape(rm.At(i).Resource()) + p.k8sscraper.Scrape(rm.At(i).Resource(), getPodMeta(ctx)) if p.config.Platform == config.ModeEC2 { ec2Info = getEC2InfoFromEntityStore() } diff --git a/plugins/processors/ec2tagger/README.md b/plugins/processors/ec2tagger/README.md index 32777b2df5..2dbecd9a87 100644 --- a/plugins/processors/ec2tagger/README.md +++ b/plugins/processors/ec2tagger/README.md @@ -30,7 +30,8 @@ The IAM User or Role making the calls must have permissions to call the EC2 Desc The following receiver configuration parameters are supported. 
| Name | Description | Supported Value | Default | |--------------------------| ---------------------------------------------------------------------------------------------------------------| -----------------------------------------| --------| -|`refresh_interval_seconds`| is the frequency for the plugin to refresh the EC2 Instance Tags and ebs Volumes associated with this Instance.| "0s" | "0s" | +|`refresh_tags_interval` | is the frequency for the plugin to refresh the EC2 Instance Tags associated with this Instance. | "0s" | "0s" | +|`refresh_volumes_interval`| is the frequency for the plugin to refresh the EBS Volumes associated with this Instance. | "0s" | "0s" | |`ec2_metadata_tags` | is the option to specify which tags to be scraped from IMDS and add to datapoint attributes | ["InstanceId", "ImageId", "InstanceType"]| [] | |`ec2_instance_tag_keys` | is the option to specific which EC2 Instance tags to be scraped associated with this instance. | ["aws:autoscaling:groupName", "Name"] | [] | |`disk_device_tag_key` | is the option to Specify which tags to use to get the specified disk device name from input metric | [] | [] | diff --git a/plugins/processors/ec2tagger/config.go b/plugins/processors/ec2tagger/config.go index 9119139567..733c8320ba 100644 --- a/plugins/processors/ec2tagger/config.go +++ b/plugins/processors/ec2tagger/config.go @@ -22,7 +22,8 @@ const ( ) type Config struct { - RefreshIntervalSeconds time.Duration `mapstructure:"refresh_interval_seconds"` + RefreshTagsInterval time.Duration `mapstructure:"refresh_tags_interval"` + RefreshVolumesInterval time.Duration `mapstructure:"refresh_volumes_interval"` EC2MetadataTags []string `mapstructure:"ec2_metadata_tags"` EC2InstanceTagKeys []string `mapstructure:"ec2_instance_tag_keys"` EBSDeviceKeys []string `mapstructure:"ebs_device_keys,omitempty"` diff --git a/plugins/processors/ec2tagger/ec2tagger.go b/plugins/processors/ec2tagger/ec2tagger.go index 839ea21079..568ca1dc13 100644 --- 
a/plugins/processors/ec2tagger/ec2tagger.go +++ b/plugins/processors/ec2tagger/ec2tagger.go @@ -197,29 +197,24 @@ func (t *Tagger) Shutdown(context.Context) error { return nil } -// refreshLoop handles the refresh ticks and also responds to shutdown signal -func (t *Tagger) refreshLoop(refreshInterval time.Duration, stopAfterFirstSuccess bool) { +// refreshLoopTags handles the refresh ticks for describe tags and also responds to shutdown signal +func (t *Tagger) refreshLoopTags(refreshInterval time.Duration, stopAfterFirstSuccess bool) { refreshTicker := time.NewTicker(refreshInterval) defer refreshTicker.Stop() for { select { case <-refreshTicker.C: - t.logger.Debug("ec2tagger refreshing") + t.logger.Debug("ec2tagger refreshing tags") allTagsRetrieved := t.ec2TagsRetrieved() - allVolumesRetrieved := t.ebsVolumesRetrieved() t.logger.Debug("Retrieve status", - zap.Bool("Ec2AllTagsRetrieved", allTagsRetrieved), - zap.Bool("EbsAllVolumesRetrieved", allVolumesRetrieved)) + zap.Bool("Ec2AllTagsRetrieved", allTagsRetrieved)) refreshTags := len(t.EC2InstanceTagKeys) > 0 - refreshVolumes := len(t.EBSDeviceKeys) > 0 if stopAfterFirstSuccess { // need refresh tags when it is configured and not all ec2 tags are retrieved refreshTags = refreshTags && !allTagsRetrieved - // need refresh volumes when it is configured and not all volumes are retrieved - refreshVolumes = refreshVolumes && !allVolumesRetrieved - if !refreshTags && !refreshVolumes { - t.logger.Info("ec2tagger: Refresh is no longer needed, stop refreshTicker.") + if !refreshTags { + t.logger.Info("ec2tagger: Refresh for tags is no longer needed, stop refreshTicker.") return } } @@ -230,6 +225,34 @@ func (t *Tagger) refreshLoop(refreshInterval time.Duration, stopAfterFirstSucces } } + case <-t.shutdownC: + return + } + } +} + +// refreshLoopVolumes handles the refresh ticks for describe volumes and also responds to shutdown signal +func (t *Tagger) refreshLoopVolumes(refreshInterval time.Duration, 
stopAfterFirstSuccess bool) { + refreshTicker := time.NewTicker(refreshInterval) + defer refreshTicker.Stop() + for { + select { + case <-refreshTicker.C: + t.logger.Debug("ec2tagger refreshing volumes") + allVolumesRetrieved := t.ebsVolumesRetrieved() + t.logger.Debug("Retrieve status", + zap.Bool("EbsAllVolumesRetrieved", allVolumesRetrieved)) + refreshVolumes := len(t.EBSDeviceKeys) > 0 + + if stopAfterFirstSuccess { + // need refresh volumes when it is configured and not all volumes are retrieved + refreshVolumes = refreshVolumes && !allVolumesRetrieved + if !refreshVolumes { + t.logger.Info("ec2tagger: Refresh for volumes is no longer needed, stop refreshTicker.") + return + } + } + if refreshVolumes { if err := t.updateVolumes(); err != nil { t.logger.Warn("ec2tagger: Error refreshing EBS volumes, keeping old values", zap.Error(err)) @@ -333,7 +356,8 @@ func (t *Tagger) Start(ctx context.Context, host component.Host) error { go func() { //Async start of initial retrieval to prevent block of agent start t.initialRetrievalOfTagsAndVolumes() - t.refreshLoopToUpdateTagsAndVolumes() + t.refreshLoopToUpdateTags() + t.refreshLoopToUpdateVolumes() }() t.logger.Info("ec2tagger: EC2 tagger has started initialization.") @@ -343,24 +367,49 @@ func (t *Tagger) Start(ctx context.Context, host component.Host) error { return nil } -func (t *Tagger) refreshLoopToUpdateTagsAndVolumes() { +func (t *Tagger) refreshLoopToUpdateTags() { needRefresh := false stopAfterFirstSuccess := false - refreshInterval := t.RefreshIntervalSeconds - if t.RefreshIntervalSeconds.Seconds() == 0 { + refreshInterval := t.RefreshTagsInterval + if refreshInterval.Seconds() == 0 { //when the refresh interval is 0, this means that customer don't want to - //update tags/volumes values once they are retrieved successfully. In this case, + //update tags values once they are retrieved successfully. 
In this case, //we still want to do refresh to make sure all the specified keys for tags/volumes //are fetched successfully because initial retrieval might not get all of them. //When the specified key is "*", there is no way for us to check if all - //tags/volumes are fetched. So there is no need to do refresh in this case. - needRefresh = !(len(t.EC2InstanceTagKeys) == 1 && t.EC2InstanceTagKeys[0] == "*") || - !(len(t.EBSDeviceKeys) == 1 && t.EBSDeviceKeys[0] == "*") + //tags are fetched. So there is no need to do refresh in this case. + needRefresh = !(len(t.EC2InstanceTagKeys) == 1 && t.EC2InstanceTagKeys[0] == "*") + + stopAfterFirstSuccess = true + refreshInterval = defaultRefreshInterval + } else if refreshInterval.Seconds() > 0 { + //customer wants to update the tags with the given refresh interval + needRefresh = true + } + + if needRefresh { + go func() { + // randomly stagger the time of the first refresh to mitigate throttling if a whole fleet is + // restarted at the same time + sleepUntilHostJitter(refreshInterval) + t.refreshLoopTags(refreshInterval, stopAfterFirstSuccess) + }() + } +} + +func (t *Tagger) refreshLoopToUpdateVolumes() { + needRefresh := false + stopAfterFirstSuccess := false + + refreshInterval := t.RefreshVolumesInterval + if refreshInterval.Seconds() == 0 { + needRefresh = !(len(t.EBSDeviceKeys) == 1 && t.EBSDeviceKeys[0] == "*") + stopAfterFirstSuccess = true refreshInterval = defaultRefreshInterval - } else if t.RefreshIntervalSeconds.Seconds() > 0 { - //customer wants to update the tags/volumes with the given refresh interval + } else if refreshInterval.Seconds() > 0 { + //customer wants to update the volumes with the given refresh interval needRefresh = true } @@ -369,7 +418,7 @@ func (t *Tagger) refreshLoopToUpdateTagsAndVolumes() { // randomly stagger the time of the first refresh to mitigate throttling if a whole fleet is // restarted at the same time sleepUntilHostJitter(refreshInterval) - t.refreshLoop(refreshInterval, 
stopAfterFirstSuccess) + t.refreshLoopVolumes(refreshInterval, stopAfterFirstSuccess) }() } } diff --git a/plugins/processors/ec2tagger/ec2tagger_test.go b/plugins/processors/ec2tagger/ec2tagger_test.go index 93642e3c6e..0abf7a23de 100644 --- a/plugins/processors/ec2tagger/ec2tagger_test.go +++ b/plugins/processors/ec2tagger/ec2tagger_test.go @@ -268,7 +268,7 @@ func TestStartFailWithNoMetadata(t *testing.T) { _, cancel := context.WithCancel(context.Background()) tagger := &Tagger{ Config: cfg, - logger: processortest.NewNopCreateSettings().Logger, + logger: processortest.NewNopSettings().Logger, cancelFunc: cancel, metadataProvider: &mockMetadataProvider{InstanceIdentityDocument: nil}, volumeSerialCache: &mockVolumeCache{cache: make(map[string]string)}, @@ -282,7 +282,8 @@ func TestStartFailWithNoMetadata(t *testing.T) { // run Start() and check all tags/volumes are retrieved and saved func TestStartSuccessWithNoTagsVolumesUpdate(t *testing.T) { cfg := createDefaultConfig().(*Config) - cfg.RefreshIntervalSeconds = 0 * time.Second + cfg.RefreshTagsInterval = 0 * time.Second + cfg.RefreshVolumesInterval = 0 * time.Second cfg.EC2MetadataTags = []string{mdKeyInstanceId, mdKeyImageId, mdKeyInstanceType} cfg.EC2InstanceTagKeys = []string{tagKey1, tagKey2, "AutoScalingGroupName"} cfg.EBSDeviceKeys = []string{device1, device2} @@ -302,7 +303,7 @@ func TestStartSuccessWithNoTagsVolumesUpdate(t *testing.T) { defaultRefreshInterval = 50 * time.Millisecond tagger := &Tagger{ Config: cfg, - logger: processortest.NewNopCreateSettings().Logger, + logger: processortest.NewNopSettings().Logger, cancelFunc: cancel, metadataProvider: &mockMetadataProvider{InstanceIdentityDocument: mockedInstanceIdentityDoc}, ec2Provider: ec2Provider, @@ -326,7 +327,8 @@ func TestStartSuccessWithNoTagsVolumesUpdate(t *testing.T) { func TestStartSuccessWithTagsVolumesUpdate(t *testing.T) { cfg := createDefaultConfig().(*Config) //use millisecond rather than second to speed up test execution - 
cfg.RefreshIntervalSeconds = 20 * time.Millisecond + cfg.RefreshTagsInterval = 20 * time.Millisecond + cfg.RefreshVolumesInterval = 20 * time.Millisecond cfg.EC2MetadataTags = []string{mdKeyInstanceId, mdKeyImageId, mdKeyInstanceType} cfg.EC2InstanceTagKeys = []string{tagKey1, tagKey2, "AutoScalingGroupName"} cfg.EBSDeviceKeys = []string{device1, device2} @@ -346,7 +348,7 @@ func TestStartSuccessWithTagsVolumesUpdate(t *testing.T) { tagger := &Tagger{ Config: cfg, - logger: processortest.NewNopCreateSettings().Logger, + logger: processortest.NewNopSettings().Logger, cancelFunc: cancel, metadataProvider: &mockMetadataProvider{InstanceIdentityDocument: mockedInstanceIdentityDoc}, ec2Provider: ec2Provider, @@ -382,7 +384,8 @@ func TestStartSuccessWithTagsVolumesUpdate(t *testing.T) { // check there is no attempt to fetch all tags/volumes func TestStartSuccessWithWildcardTagVolumeKey(t *testing.T) { cfg := createDefaultConfig().(*Config) - cfg.RefreshIntervalSeconds = 0 * time.Second + cfg.RefreshTagsInterval = 0 * time.Second + cfg.RefreshVolumesInterval = 0 * time.Second cfg.EC2MetadataTags = []string{mdKeyInstanceId, mdKeyImageId, mdKeyInstanceType} cfg.EC2InstanceTagKeys = []string{"*"} cfg.EBSDeviceKeys = []string{"*"} @@ -401,7 +404,7 @@ func TestStartSuccessWithWildcardTagVolumeKey(t *testing.T) { defaultRefreshInterval = 50 * time.Millisecond tagger := &Tagger{ Config: cfg, - logger: processortest.NewNopCreateSettings().Logger, + logger: processortest.NewNopSettings().Logger, cancelFunc: cancel, metadataProvider: &mockMetadataProvider{InstanceIdentityDocument: mockedInstanceIdentityDoc}, ec2Provider: ec2Provider, @@ -426,7 +429,8 @@ func TestStartSuccessWithWildcardTagVolumeKey(t *testing.T) { func TestApplyWithTagsVolumesUpdate(t *testing.T) { cfg := createDefaultConfig().(*Config) //use millisecond rather than second to speed up test execution - cfg.RefreshIntervalSeconds = 20 * time.Millisecond + cfg.RefreshTagsInterval = 20 * time.Millisecond + 
cfg.RefreshVolumesInterval = 20 * time.Millisecond cfg.EC2MetadataTags = []string{mdKeyInstanceId, mdKeyImageId, mdKeyInstanceType} cfg.EC2InstanceTagKeys = []string{tagKey1, tagKey2, "AutoScalingGroupName"} cfg.EBSDeviceKeys = []string{device1, device2} @@ -446,7 +450,7 @@ func TestApplyWithTagsVolumesUpdate(t *testing.T) { defaultRefreshInterval = 50 * time.Millisecond tagger := &Tagger{ Config: cfg, - logger: processortest.NewNopCreateSettings().Logger, + logger: processortest.NewNopSettings().Logger, cancelFunc: cancel, metadataProvider: &mockMetadataProvider{InstanceIdentityDocument: mockedInstanceIdentityDoc}, ec2Provider: ec2Provider, @@ -520,7 +524,8 @@ func TestApplyWithTagsVolumesUpdate(t *testing.T) { // Test metrics are dropped before the initial retrieval is done func TestMetricsDroppedBeforeStarted(t *testing.T) { cfg := createDefaultConfig().(*Config) - cfg.RefreshIntervalSeconds = 0 * time.Millisecond + cfg.RefreshTagsInterval = 0 * time.Millisecond + cfg.RefreshVolumesInterval = 0 * time.Millisecond cfg.EC2MetadataTags = []string{mdKeyInstanceId, mdKeyImageId, mdKeyInstanceType} cfg.EC2InstanceTagKeys = []string{"*"} cfg.EBSDeviceKeys = []string{"*"} @@ -539,7 +544,7 @@ func TestMetricsDroppedBeforeStarted(t *testing.T) { defaultRefreshInterval = 50 * time.Millisecond tagger := &Tagger{ Config: cfg, - logger: processortest.NewNopCreateSettings().Logger, + logger: processortest.NewNopSettings().Logger, cancelFunc: cancel, metadataProvider: &mockMetadataProvider{InstanceIdentityDocument: mockedInstanceIdentityDoc}, ec2Provider: ec2Provider, @@ -585,7 +590,8 @@ func TestMetricsDroppedBeforeStarted(t *testing.T) { // Test ec2tagger Start does not block for a long time func TestTaggerStartDoesNotBlock(t *testing.T) { cfg := createDefaultConfig().(*Config) - cfg.RefreshIntervalSeconds = 0 * time.Second + cfg.RefreshTagsInterval = 0 * time.Second + cfg.RefreshVolumesInterval = 0 * time.Second cfg.EC2MetadataTags = []string{mdKeyInstanceId, mdKeyImageId, 
mdKeyInstanceType} cfg.EC2InstanceTagKeys = []string{"*"} cfg.EBSDeviceKeys = []string{"*"} @@ -603,7 +609,7 @@ func TestTaggerStartDoesNotBlock(t *testing.T) { defaultRefreshInterval = 180 * time.Second tagger := &Tagger{ Config: cfg, - logger: processortest.NewNopCreateSettings().Logger, + logger: processortest.NewNopSettings().Logger, cancelFunc: cancel, metadataProvider: &mockMetadataProvider{InstanceIdentityDocument: mockedInstanceIdentityDoc}, ec2Provider: ec2Provider, @@ -628,13 +634,14 @@ func TestTaggerStartDoesNotBlock(t *testing.T) { // Test ec2tagger Start does not block for a long time func TestTaggerStartsWithoutTagOrVolume(t *testing.T) { cfg := createDefaultConfig().(*Config) - cfg.RefreshIntervalSeconds = 0 * time.Second + cfg.RefreshTagsInterval = 0 * time.Second + cfg.RefreshVolumesInterval = 0 * time.Second cfg.EC2MetadataTags = []string{mdKeyInstanceId, mdKeyImageId, mdKeyInstanceType} _, cancel := context.WithCancel(context.Background()) tagger := &Tagger{ Config: cfg, - logger: processortest.NewNopCreateSettings().Logger, + logger: processortest.NewNopSettings().Logger, cancelFunc: cancel, metadataProvider: &mockMetadataProvider{InstanceIdentityDocument: mockedInstanceIdentityDoc}, volumeSerialCache: &mockVolumeCache{cache: make(map[string]string)}, diff --git a/plugins/processors/ec2tagger/factory.go b/plugins/processors/ec2tagger/factory.go index 72b5576b9d..6b20fcf8f3 100644 --- a/plugins/processors/ec2tagger/factory.go +++ b/plugins/processors/ec2tagger/factory.go @@ -35,7 +35,7 @@ func NewFactory() processor.Factory { func createMetricsProcessor( ctx context.Context, - set processor.CreateSettings, + set processor.Settings, cfg component.Config, nextConsumer consumer.Metrics, ) (processor.Metrics, error) { @@ -46,7 +46,7 @@ func createMetricsProcessor( metricsProcessor := newTagger(processorConfig, set.Logger) - return processorhelper.NewMetricsProcessor(ctx, set, cfg, nextConsumer, + return processorhelper.NewMetrics(ctx, set, cfg, 
nextConsumer, metricsProcessor.processMetrics, processorhelper.WithCapabilities(processorCapabilities), processorhelper.WithStart(metricsProcessor.Start), diff --git a/plugins/processors/ec2tagger/factory_test.go b/plugins/processors/ec2tagger/factory_test.go index e837577cb3..388f914c1d 100644 --- a/plugins/processors/ec2tagger/factory_test.go +++ b/plugins/processors/ec2tagger/factory_test.go @@ -9,9 +9,9 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/component/componenttest" "go.opentelemetry.io/collector/consumer/consumertest" + "go.opentelemetry.io/collector/pipeline" "go.opentelemetry.io/collector/processor/processortest" ) @@ -29,17 +29,17 @@ func TestCreateProcessor(t *testing.T) { require.NotNil(t, factory) cfg := factory.CreateDefaultConfig() - setting := processortest.NewNopCreateSettings() + setting := processortest.NewNopSettings() - tProcessor, err := factory.CreateTracesProcessor(context.Background(), setting, cfg, consumertest.NewNop()) - assert.Equal(t, err, component.ErrDataTypeIsNotSupported) + tProcessor, err := factory.CreateTraces(context.Background(), setting, cfg, consumertest.NewNop()) + assert.Equal(t, err, pipeline.ErrSignalNotSupported) assert.Nil(t, tProcessor) - mProcessor, err := factory.CreateMetricsProcessor(context.Background(), setting, cfg, consumertest.NewNop()) + mProcessor, err := factory.CreateMetrics(context.Background(), setting, cfg, consumertest.NewNop()) assert.NoError(t, err) assert.NotNil(t, mProcessor) - lProcessor, err := factory.CreateLogsProcessor(context.Background(), setting, cfg, consumertest.NewNop()) - assert.Equal(t, err, component.ErrDataTypeIsNotSupported) + lProcessor, err := factory.CreateLogs(context.Background(), setting, cfg, consumertest.NewNop()) + assert.Equal(t, err, pipeline.ErrSignalNotSupported) assert.Nil(t, lProcessor) } diff --git a/plugins/processors/gpuattributes/factory.go 
b/plugins/processors/gpuattributes/factory.go index fcbc4f3950..df7876584d 100644 --- a/plugins/processors/gpuattributes/factory.go +++ b/plugins/processors/gpuattributes/factory.go @@ -35,7 +35,7 @@ func createDefaultConfig() component.Config { func createMetricsProcessor( ctx context.Context, - set processor.CreateSettings, + set processor.Settings, cfg component.Config, nextConsumer consumer.Metrics, ) (processor.Metrics, error) { @@ -46,7 +46,7 @@ func createMetricsProcessor( metricsProcessor := newGpuAttributesProcessor(processorConfig, set.Logger) - return processorhelper.NewMetricsProcessor( + return processorhelper.NewMetrics( ctx, set, cfg, diff --git a/plugins/processors/gpuattributes/factory_test.go b/plugins/processors/gpuattributes/factory_test.go index 7fd46aca74..a933b17af9 100644 --- a/plugins/processors/gpuattributes/factory_test.go +++ b/plugins/processors/gpuattributes/factory_test.go @@ -9,9 +9,9 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/component/componenttest" "go.opentelemetry.io/collector/consumer/consumertest" + "go.opentelemetry.io/collector/pipeline" "go.opentelemetry.io/collector/processor/processortest" ) @@ -29,17 +29,17 @@ func TestCreateProcessor(t *testing.T) { require.NotNil(t, factory) cfg := factory.CreateDefaultConfig() - setting := processortest.NewNopCreateSettings() + setting := processortest.NewNopSettings() - tProcessor, err := factory.CreateTracesProcessor(context.Background(), setting, cfg, consumertest.NewNop()) - assert.Equal(t, err, component.ErrDataTypeIsNotSupported) + tProcessor, err := factory.CreateTraces(context.Background(), setting, cfg, consumertest.NewNop()) + assert.Equal(t, err, pipeline.ErrSignalNotSupported) assert.Nil(t, tProcessor) - mProcessor, err := factory.CreateMetricsProcessor(context.Background(), setting, cfg, consumertest.NewNop()) + mProcessor, err := 
factory.CreateMetrics(context.Background(), setting, cfg, consumertest.NewNop()) assert.NoError(t, err) assert.NotNil(t, mProcessor) - lProcessor, err := factory.CreateLogsProcessor(context.Background(), setting, cfg, consumertest.NewNop()) - assert.Equal(t, err, component.ErrDataTypeIsNotSupported) + lProcessor, err := factory.CreateLogs(context.Background(), setting, cfg, consumertest.NewNop()) + assert.Equal(t, err, pipeline.ErrSignalNotSupported) assert.Nil(t, lProcessor) } diff --git a/plugins/processors/kueueattributes/factory.go b/plugins/processors/kueueattributes/factory.go index 511b525e28..5fb51507eb 100644 --- a/plugins/processors/kueueattributes/factory.go +++ b/plugins/processors/kueueattributes/factory.go @@ -35,7 +35,7 @@ func createDefaultConfig() component.Config { func createMetricsProcessor( ctx context.Context, - set processor.CreateSettings, + set processor.Settings, cfg component.Config, nextConsumer consumer.Metrics, ) (processor.Metrics, error) { @@ -46,7 +46,7 @@ func createMetricsProcessor( metricsProcessor := newKueueAttributesProcessor(processorConfig, set.Logger) - return processorhelper.NewMetricsProcessor( + return processorhelper.NewMetrics( ctx, set, cfg, diff --git a/plugins/processors/kueueattributes/factory_test.go b/plugins/processors/kueueattributes/factory_test.go index bc636e35f1..4624111c0f 100644 --- a/plugins/processors/kueueattributes/factory_test.go +++ b/plugins/processors/kueueattributes/factory_test.go @@ -9,9 +9,9 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/component/componenttest" "go.opentelemetry.io/collector/consumer/consumertest" + "go.opentelemetry.io/collector/pipeline" "go.opentelemetry.io/collector/processor/processortest" ) @@ -29,17 +29,17 @@ func TestCreateProcessor(t *testing.T) { require.NotNil(t, factory) cfg := factory.CreateDefaultConfig() - setting := 
processortest.NewNopCreateSettings() + setting := processortest.NewNopSettings() - tProcessor, err := factory.CreateTracesProcessor(context.Background(), setting, cfg, consumertest.NewNop()) - assert.Equal(t, err, component.ErrDataTypeIsNotSupported) + tProcessor, err := factory.CreateTraces(context.Background(), setting, cfg, consumertest.NewNop()) + assert.Equal(t, err, pipeline.ErrSignalNotSupported) assert.Nil(t, tProcessor) - mProcessor, err := factory.CreateMetricsProcessor(context.Background(), setting, cfg, consumertest.NewNop()) + mProcessor, err := factory.CreateMetrics(context.Background(), setting, cfg, consumertest.NewNop()) assert.NoError(t, err) assert.NotNil(t, mProcessor) - lProcessor, err := factory.CreateLogsProcessor(context.Background(), setting, cfg, consumertest.NewNop()) - assert.Equal(t, err, component.ErrDataTypeIsNotSupported) + lProcessor, err := factory.CreateLogs(context.Background(), setting, cfg, consumertest.NewNop()) + assert.Equal(t, err, pipeline.ErrSignalNotSupported) assert.Nil(t, lProcessor) } diff --git a/processor/rollupprocessor/factory.go b/processor/rollupprocessor/factory.go index a2d8841773..b988dbc86f 100644 --- a/processor/rollupprocessor/factory.go +++ b/processor/rollupprocessor/factory.go @@ -38,7 +38,7 @@ func createDefaultConfig() component.Config { func createMetricsProcessor( ctx context.Context, - set processor.CreateSettings, + set processor.Settings, cfg component.Config, nextConsumer consumer.Metrics, ) (processor.Metrics, error) { @@ -47,7 +47,7 @@ func createMetricsProcessor( return nil, fmt.Errorf("invalid configuration type: %T", cfg) } metricsProcessor := newProcessor(pCfg) - return processorhelper.NewMetricsProcessor( + return processorhelper.NewMetrics( ctx, set, cfg, diff --git a/processor/rollupprocessor/factory_test.go b/processor/rollupprocessor/factory_test.go index 652efb00a3..cf191c2fe2 100644 --- a/processor/rollupprocessor/factory_test.go +++ b/processor/rollupprocessor/factory_test.go @@ 
-28,12 +28,12 @@ func TestCreateDefaultConfig(t *testing.T) { func TestCreateProcessor(t *testing.T) { factory := NewFactory() - mp, err := factory.CreateMetricsProcessor(context.Background(), processortest.NewNopCreateSettings(), nil, consumertest.NewNop()) + mp, err := factory.CreateMetrics(context.Background(), processortest.NewNopSettings(), nil, consumertest.NewNop()) assert.Error(t, err) assert.Nil(t, mp) cfg := factory.CreateDefaultConfig().(*Config) - mp, err = factory.CreateMetricsProcessor(context.Background(), processortest.NewNopCreateSettings(), cfg, consumertest.NewNop()) + mp, err = factory.CreateMetrics(context.Background(), processortest.NewNopSettings(), cfg, consumertest.NewNop()) assert.NoError(t, err) assert.NotNil(t, mp) diff --git a/receiver/adapter/factory.go b/receiver/adapter/factory.go index 7840135d65..39410837af 100644 --- a/receiver/adapter/factory.go +++ b/receiver/adapter/factory.go @@ -14,6 +14,7 @@ import ( "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/receiver" "go.opentelemetry.io/collector/receiver/scraperhelper" + otelscraper "go.opentelemetry.io/collector/scraper" ) const ( @@ -52,7 +53,7 @@ func (a Adapter) NewReceiverFactory(telegrafInputName string) receiver.Factory { receiver.WithMetrics(a.createMetricsReceiver, component.StabilityLevelStable)) } -func (a Adapter) createMetricsReceiver(ctx context.Context, settings receiver.CreateSettings, config component.Config, consumer consumer.Metrics) (receiver.Metrics, error) { +func (a Adapter) createMetricsReceiver(ctx context.Context, settings receiver.Settings, config component.Config, consumer consumer.Metrics) (receiver.Metrics, error) { cfg := config.(*Config) input, err := a.initializeInput(settings.ID.Type().String(), settings.ID.Name()) @@ -62,11 +63,10 @@ func (a Adapter) createMetricsReceiver(ctx context.Context, settings receiver.Cr rcvr := newAdaptedReceiver(input, ctx, consumer, settings.Logger) - scraper, err := scraperhelper.NewScraper( - 
settings.ID.Type().String(), + scraper, err := otelscraper.NewMetrics( rcvr.scrape, - scraperhelper.WithStart(rcvr.start), - scraperhelper.WithShutdown(rcvr.shutdown), + otelscraper.WithStart(rcvr.start), + otelscraper.WithShutdown(rcvr.shutdown), ) if err != nil { @@ -75,7 +75,7 @@ func (a Adapter) createMetricsReceiver(ctx context.Context, settings receiver.Cr return scraperhelper.NewScraperControllerReceiver( &cfg.ControllerConfig, settings, consumer, - scraperhelper.AddScraper(scraper), + scraperhelper.AddScraper(settings.ID.Type(), scraper), ) } diff --git a/receiver/adapter/factory_test.go b/receiver/adapter/factory_test.go index 7510d316d7..c1de00c00a 100644 --- a/receiver/adapter/factory_test.go +++ b/receiver/adapter/factory_test.go @@ -61,10 +61,10 @@ func Test_CreateMetricsReceiver(t *testing.T) { adapter := NewAdapter(c) factory := adapter.NewReceiverFactory("cpu") - set := receivertest.NewNopCreateSettings() + set := receivertest.NewNopSettings() set.ID = component.NewIDWithName(factory.Type(), "cpu") - metricsReceiver, err := factory.CreateMetricsReceiver( + metricsReceiver, err := factory.CreateMetrics( context.Background(), set, &Config{ @@ -90,9 +90,9 @@ func Test_CreateInvalidMetricsReceiver(t *testing.T) { adapter := NewAdapter(c) factory := adapter.NewReceiverFactory("mem") - metricsReceiver, err := factory.CreateMetricsReceiver( + metricsReceiver, err := factory.CreateMetrics( context.Background(), - receivertest.NewNopCreateSettings(), + receivertest.NewNopSettings(), &Config{ ControllerConfig: scraperhelper.ControllerConfig{ CollectionInterval: time.Minute, diff --git a/service/defaultcomponents/components.go b/service/defaultcomponents/components.go index 64725b01ed..812c0a888b 100644 --- a/service/defaultcomponents/components.go +++ b/service/defaultcomponents/components.go @@ -45,7 +45,6 @@ import ( "go.opentelemetry.io/collector/exporter/debugexporter" "go.opentelemetry.io/collector/exporter/nopexporter" 
"go.opentelemetry.io/collector/extension" - "go.opentelemetry.io/collector/extension/ballastextension" "go.opentelemetry.io/collector/extension/zpagesextension" "go.opentelemetry.io/collector/otelcol" "go.opentelemetry.io/collector/processor" @@ -57,6 +56,7 @@ import ( "github.com/aws/amazon-cloudwatch-agent/extension/agenthealth" "github.com/aws/amazon-cloudwatch-agent/extension/entitystore" + "github.com/aws/amazon-cloudwatch-agent/extension/k8smetadata" "github.com/aws/amazon-cloudwatch-agent/extension/server" "github.com/aws/amazon-cloudwatch-agent/plugins/outputs/cloudwatch" "github.com/aws/amazon-cloudwatch-agent/plugins/processors/awsapplicationsignals" @@ -134,8 +134,8 @@ func Factories() (otelcol.Factories, error) { agenthealth.NewFactory(), awsproxy.NewFactory(), entitystore.NewFactory(), + k8smetadata.NewFactory(), server.NewFactory(), - ballastextension.NewFactory(), ecsobserver.NewFactory(), filestorage.NewFactory(), healthcheckextension.NewFactory(), diff --git a/service/defaultcomponents/components_test.go b/service/defaultcomponents/components_test.go index 9ef627aff3..f0bb6c344a 100644 --- a/service/defaultcomponents/components_test.go +++ b/service/defaultcomponents/components_test.go @@ -47,7 +47,7 @@ func TestComponents(t *testing.T) { "cumulativetodelta", "deltatorate", "ec2tagger", - "experimental_metricsgeneration", + "metricsgeneration", "filter", "gpuattributes", "kueueattributes", @@ -89,9 +89,9 @@ func TestComponents(t *testing.T) { "awsproxy", "ecs_observer", "entitystore", + "k8smetadata", "file_storage", "health_check", - "memory_ballast", "pprof", "server", "sigv4auth", diff --git a/tool/clean/clean_ami/clean_ami.go b/tool/clean/clean_ami/clean_ami.go index fb254ba1d6..465a41af84 100644 --- a/tool/clean/clean_ami/clean_ami.go +++ b/tool/clean/clean_ami/clean_ami.go @@ -30,16 +30,13 @@ var imagePrefixes = []string{ "cloudwatch-agent-integration-test-alma-linux-8", "cloudwatch-agent-integration-test-alma-linux-9", 
"cloudwatch-agent-integration-test-arm64-al2", - "cloudwatch-agent-integration-test-debian-11-arm64", "cloudwatch-agent-integration-test-debian-12-arm64", "cloudwatch-agent-integration-test-nvidia-gpu-al2", - "cloudwatch-agent-integration-test-ol7", "cloudwatch-agent-integration-test-ol8", "cloudwatch-agent-integration-test-ol9", "cloudwatch-agent-integration-test-rocky-linux-8", "cloudwatch-agent-integration-test-rocky-linux-9", "cloudwatch-agent-integration-test-sles-15", - "cloudwatch-agent-integration-test-ubuntu-23", "cloudwatch-agent-integration-test-ubuntu-24", "cloudwatch-agent-integration-test-ubuntu", "cloudwatch-agent-integration-test-ubuntu-LTS-22", diff --git a/tool/clean/clean_log_group/clean_log_group.go b/tool/clean/clean_log_group/clean_log_group.go new file mode 100644 index 0000000000..dccbabda45 --- /dev/null +++ b/tool/clean/clean_log_group/clean_log_group.go @@ -0,0 +1,284 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: MIT + +package main + +import ( + "context" + "flag" + "fmt" + "log" + "strings" + "sync" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/config" + "github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs" + "github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs/types" + + "github.com/aws/amazon-cloudwatch-agent/tool/clean" +) + +type cloudwatchlogsClient interface { + DeleteLogGroup(ctx context.Context, params *cloudwatchlogs.DeleteLogGroupInput, optFns ...func(*cloudwatchlogs.Options)) (*cloudwatchlogs.DeleteLogGroupOutput, error) + DescribeLogGroups(ctx context.Context, params *cloudwatchlogs.DescribeLogGroupsInput, optFns ...func(*cloudwatchlogs.Options)) (*cloudwatchlogs.DescribeLogGroupsOutput, error) + DescribeLogStreams(ctx context.Context, params *cloudwatchlogs.DescribeLogStreamsInput, optFns ...func(*cloudwatchlogs.Options)) (*cloudwatchlogs.DescribeLogStreamsOutput, error) +} + +const ( + LogGroupProcessChanSize = 500 +) + +// Config 
holds the application configuration +type Config struct { + creationThreshold time.Duration + inactiveThreshold time.Duration + numWorkers int + deleteBatchCap int + exceptionList []string + dryRun bool +} + +// Global configuration +var ( + cfg Config +) + +func init() { + // Set default configuration + cfg = Config{ + creationThreshold: 3 * clean.KeepDurationOneDay, + inactiveThreshold: 1 * clean.KeepDurationOneDay, + numWorkers: 15, + exceptionList: []string{"lambda"}, + dryRun: true, + } + +} + +func main() { + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Minute) + defer cancel() + // Parse command line flags + flag.BoolVar(&cfg.dryRun, "dry-run", false, "Enable dry-run mode (no actual deletion)") + flag.Parse() + // Load AWS configuration + awsCfg, err := loadAWSConfig(ctx) + if err != nil { + log.Fatalf("Error loading AWS config: %v", err) + } + + // Create CloudWatch Logs client + client := cloudwatchlogs.NewFromConfig(awsCfg) + + // Compute cutoff times + cutoffTimes := calculateCutoffTimes() + + log.Printf("🔍 Searching for CloudWatch Log Groups older than %d days AND inactive for %d days in %s region\n", + cfg.creationThreshold, cfg.inactiveThreshold, awsCfg.Region) + + // Delete old log groups + deletedGroups := deleteOldLogGroups(ctx, client, cutoffTimes) + log.Printf("Total log groups deleted: %d", len(deletedGroups)) +} + +type cutoffTimes struct { + creation int64 + inactive int64 +} + +func calculateCutoffTimes() cutoffTimes { + return cutoffTimes{ + creation: time.Now().Add(cfg.creationThreshold).UnixMilli(), + inactive: time.Now().Add(cfg.inactiveThreshold).UnixMilli(), + } +} + +func loadAWSConfig(ctx context.Context) (aws.Config, error) { + cfg, err := config.LoadDefaultConfig(ctx) + if err != nil { + return aws.Config{}, fmt.Errorf("loading AWS config: %w", err) + } + cfg.RetryMode = aws.RetryModeAdaptive + return cfg, nil +} + +func deleteOldLogGroups(ctx context.Context, client cloudwatchlogsClient, times cutoffTimes) 
[]string {
+	var (
+		wg                      sync.WaitGroup
+		deletedLogGroup         []string
+		foundLogGroupChan       = make(chan *types.LogGroup, LogGroupProcessChanSize)
+		deletedLogGroupNameChan = make(chan string, LogGroupProcessChanSize)
+		handlerWg               sync.WaitGroup
+	)
+
+	// Start worker pool
+	log.Printf("👷 Creating %d workers\n", cfg.numWorkers)
+	for i := 0; i < cfg.numWorkers; i++ {
+		wg.Add(1)
+		w := worker{
+			id:                   i,
+			wg:                   &wg,
+			incomingLogGroupChan: foundLogGroupChan,
+			deletedLogGroupChan:  deletedLogGroupNameChan,
+			times:                times,
+		}
+		go w.processLogGroup(ctx, client)
+	}
+
+	// Start handler with its own WaitGroup
+	handlerWg.Add(1)
+	go func() {
+		handleDeletedLogGroups(&deletedLogGroup, deletedLogGroupNameChan)
+		handlerWg.Done()
+	}()
+
+	// Process log groups in batches
+	if err := fetchAndProcessLogGroups(ctx, client, foundLogGroupChan); err != nil {
+		log.Printf("Error processing log groups: %v", err)
+	}
+
+	close(foundLogGroupChan)
+	wg.Wait()
+	close(deletedLogGroupNameChan)
+	handlerWg.Wait()
+
+	return deletedLogGroup
+}
+
+// handleDeletedLogGroups accumulates deleted log group names sent by the
+// workers. It runs until deletedLogGroupNameChan is closed; callers must not
+// read the slice until this function has returned.
+func handleDeletedLogGroups(deletedLogGroups *[]string, deletedLogGroupNameChan chan string) {
+	for logGroupName := range deletedLogGroupNameChan {
+		*deletedLogGroups = append(*deletedLogGroups, logGroupName)
+		log.Printf("🔍 Processed %d log groups so far\n", len(*deletedLogGroups))
+	}
+}
+
+// worker consumes log groups from incomingLogGroupChan and reports the names
+// of groups it deleted (or would delete in dry-run mode) on deletedLogGroupChan.
+type worker struct {
+	id                   int
+	wg                   *sync.WaitGroup
+	incomingLogGroupChan <-chan *types.LogGroup
+	deletedLogGroupChan  chan<- string
+	times                cutoffTimes
+}
+
+// processLogGroup drains the incoming channel until it is closed, handling
+// each log group independently so one failure does not stop the worker.
+func (w *worker) processLogGroup(ctx context.Context, client cloudwatchlogsClient) {
+	defer w.wg.Done()
+
+	for logGroup := range w.incomingLogGroupChan {
+		if err := w.handleLogGroup(ctx, client, logGroup); err != nil {
+			log.Printf("Worker %d: Error processing log group: %v", w.id, err)
+		}
+	}
+}
+
+// handleLogGroup deletes the given log group when it is both older than the
+// creation cutoff and has had no log events since the inactivity cutoff.
+// In dry-run mode the group is reported on deletedLogGroupChan but not deleted.
+func (w *worker) handleLogGroup(ctx context.Context, client cloudwatchlogsClient, logGroup *types.LogGroup) error {
+	if logGroup.CreationTime == nil {
+		return fmt.Errorf("log group has no creation time: %v", logGroup)
+	}
+
+	logGroupName := *logGroup.LogGroupName
+	creationTime := *logGroup.CreationTime
+
+	if creationTime >= w.times.creation {
+		return nil
+	}
+
+	// 0 means "no streams or lookup failed" — treated as not eligible.
+	lastLogTime := getLastLogEventTime(ctx, client, logGroupName)
+	if lastLogTime == 0 {
+		return nil
+	}
+
+	if lastLogTime < w.times.inactive {
+		log.Printf("🚨 Worker: %d| Old & Inactive Log Group: %s (Created: %v, Last Event: %v)\n",
+			w.id, logGroupName, time.Unix(creationTime, 0), time.Unix(lastLogTime, 0))
+
+		w.deletedLogGroupChan <- logGroupName
+
+		if cfg.dryRun {
+			log.Printf("🛑 Dry-Run: Would delete log group: %s", logGroupName)
+			return nil
+		}
+
+		return deleteLogGroup(ctx, client, logGroupName)
+	}
+
+	return nil
+}
+
+// deleteLogGroup issues the DeleteLogGroup call, wrapping any API error with
+// the log group name for context.
+func deleteLogGroup(ctx context.Context, client cloudwatchlogsClient, logGroupName string) error {
+	_, err := client.DeleteLogGroup(ctx, &cloudwatchlogs.DeleteLogGroupInput{
+		LogGroupName: aws.String(logGroupName),
+	})
+	if err != nil {
+		return fmt.Errorf("deleting log group %s: %w", logGroupName, err)
+	}
+	log.Printf("✅ Deleted log group: %s", logGroupName)
+	return nil
+}
+
+// fetchAndProcessLogGroups pages through DescribeLogGroups and feeds every
+// log group that is not on the exception list into logGroupChan.
+func fetchAndProcessLogGroups(ctx context.Context, client cloudwatchlogsClient,
+	logGroupChan chan<- *types.LogGroup) error {
+
+	var nextToken *string
+	describeCount := 0
+
+	for {
+		output, err := client.DescribeLogGroups(ctx, &cloudwatchlogs.DescribeLogGroupsInput{
+			NextToken: nextToken,
+		})
+		if err != nil {
+			return fmt.Errorf("describing log groups: %w", err)
+		}
+		describeCount++ // count this page up front so the log below is accurate
+
+		log.Printf("🔍 Described %d times | Found %d log groups\n", describeCount, len(output.LogGroups))
+
+		for _, logGroup := range output.LogGroups {
+			if isLogGroupException(*logGroup.LogGroupName) {
+				log.Printf("⏭️ Skipping Log Group: %s (in exception list)\n", *logGroup.LogGroupName)
+				continue
+			}
+			lg := logGroup // copy: the pointer sent on the channel outlives this iteration
+			logGroupChan <- &lg
+		}
+
+		if output.NextToken == nil {
+			break
+		}
+		nextToken = output.NextToken
+	}
+
+	return nil
+}
+
+// getLastLogEventTime returns the newest LastEventTimestamp across all
+// streams of the log group, following NextToken across pages. It returns 0
+// when the group has no streams or the lookup fails, which callers treat as
+// "not eligible for deletion".
+func getLastLogEventTime(ctx context.Context, client cloudwatchlogsClient, logGroupName string) int64 {
+	var latestTimestamp int64
+	var nextToken *string
+	for {
+		output, err := client.DescribeLogStreams(ctx, &cloudwatchlogs.DescribeLogStreamsInput{
+			LogGroupName: aws.String(logGroupName),
+			OrderBy:      types.OrderByLastEventTime,
+			Descending:   aws.Bool(true),
+			NextToken:    nextToken,
+		})
+		if err != nil {
+			log.Printf("⚠️ Warning: Failed to retrieve log streams for %s: %v\n", logGroupName, err)
+			return 0
+		}
+		// Scan every stream on the page; indexing LogStreams[0]
+		// unconditionally would panic on a group with no streams.
+		for _, stream := range output.LogStreams {
+			if stream.LastEventTimestamp != nil && *stream.LastEventTimestamp > latestTimestamp {
+				latestTimestamp = *stream.LastEventTimestamp
+			}
+		}
+		if output.NextToken == nil {
+			break
+		}
+		nextToken = output.NextToken
+	}
+	return latestTimestamp
+}
+
+// isLogGroupException reports whether the name contains any configured
+// exception substring; such groups are never deleted.
+func isLogGroupException(logGroupName string) bool {
+	for _, exception := range cfg.exceptionList {
+		if strings.Contains(logGroupName, exception) {
+			return true
+		}
+	}
+	return false
+}
diff --git a/tool/clean/clean_log_group/clean_log_group_test.go b/tool/clean/clean_log_group/clean_log_group_test.go
new file mode 100644
index 0000000000..0b0c3ed990
--- /dev/null
+++ b/tool/clean/clean_log_group/clean_log_group_test.go
@@ -0,0 +1,286 @@
+// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+// SPDX-License-Identifier: MIT + +// main_test.go +package main + +import ( + "context" + "sync" + "testing" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs" + "github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs/types" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + + "github.com/aws/amazon-cloudwatch-agent/tool/clean" +) + +// MockCloudWatchLogsClient is a stub for cloudwatchlogs.Client +type MockCloudWatchLogsClient struct { + mock.Mock +} + +var _ cloudwatchlogsClient = (*MockCloudWatchLogsClient)(nil) + +func (m *MockCloudWatchLogsClient) DescribeLogGroups(ctx context.Context, input *cloudwatchlogs.DescribeLogGroupsInput, optFns ...func(*cloudwatchlogs.Options)) (*cloudwatchlogs.DescribeLogGroupsOutput, error) { + args := m.Called(ctx, input, optFns) + return args.Get(0).(*cloudwatchlogs.DescribeLogGroupsOutput), args.Error(1) +} + +func (m *MockCloudWatchLogsClient) DeleteLogGroup(ctx context.Context, input *cloudwatchlogs.DeleteLogGroupInput, optFns ...func(*cloudwatchlogs.Options)) (*cloudwatchlogs.DeleteLogGroupOutput, error) { + args := m.Called(ctx, input, optFns) + return args.Get(0).(*cloudwatchlogs.DeleteLogGroupOutput), args.Error(1) +} + +func (m *MockCloudWatchLogsClient) DescribeLogStreams(ctx context.Context, input *cloudwatchlogs.DescribeLogStreamsInput, optFns ...func(*cloudwatchlogs.Options)) (*cloudwatchlogs.DescribeLogStreamsOutput, error) { + args := m.Called(ctx, input, optFns) + return args.Get(0).(*cloudwatchlogs.DescribeLogStreamsOutput), args.Error(1) +} + +// Test getLastLogEventTime simulating multiple pages of log streams. 
+func TestGetLastLogEventTime(t *testing.T) { + mockClient := new(MockCloudWatchLogsClient) + + // Create two pages of responses + firstPage := &cloudwatchlogs.DescribeLogStreamsOutput{ + LogStreams: []types.LogStream{ + {LastEventTimestamp: aws.Int64(1000)}, + {LastEventTimestamp: aws.Int64(1500)}, + }, + NextToken: aws.String("token1"), + } + secondPage := &cloudwatchlogs.DescribeLogStreamsOutput{ + LogStreams: []types.LogStream{ + {LastEventTimestamp: aws.Int64(2000)}, + {LastEventTimestamp: aws.Int64(1800)}, + }, + NextToken: nil, + } + + // Set up expectations for the two API calls + mockClient.On("DescribeLogStreams", + mock.Anything, + &cloudwatchlogs.DescribeLogStreamsInput{ + LogGroupName: aws.String("dummy-log-group"), + OrderBy: types.OrderByLastEventTime, + Descending: aws.Bool(true), + NextToken: nil, + }, + mock.Anything).Return(firstPage, nil).Once() + + mockClient.On("DescribeLogStreams", + mock.Anything, + &cloudwatchlogs.DescribeLogStreamsInput{ + LogGroupName: aws.String("dummy-log-group"), + OrderBy: types.OrderByLastEventTime, + Descending: aws.Bool(true), + NextToken: aws.String("token1"), + }, + mock.Anything).Return(secondPage, nil).Once() + + lastEventTime := getLastLogEventTime(context.Background(), mockClient, "dummy-log-group") + + assert.Equal(t, int64(2000), lastEventTime) + mockClient.AssertExpectations(t) +} +func testHandleLogGroup(cfg Config, logGroupName string, logCreationDate, logStreamCreationDate int) ([]string, error) { + cfg.dryRun = true // Prevent actual deletion. + + // Calculate cutoffs relative to now. + now := time.Now() + times := cutoffTimes{ + creation: now.Add(cfg.creationThreshold).UnixMilli(), + inactive: now.Add(cfg.inactiveThreshold).UnixMilli(), + } + // Create a dummy log group. 
+ creationTime := now.AddDate(0, 0, -logCreationDate).UnixMilli() + logGroup := &types.LogGroup{ + LogGroupName: aws.String(logGroupName), + CreationTime: aws.Int64(creationTime), + } + + mockClient := new(MockCloudWatchLogsClient) + + // Set up expectation for DescribeLogStreams + mockClient.On("DescribeLogStreams", + mock.Anything, + &cloudwatchlogs.DescribeLogStreamsInput{ + LogGroupName: aws.String(logGroupName), + OrderBy: types.OrderByLastEventTime, + Descending: aws.Bool(true), + NextToken: nil, + }, + mock.Anything).Return(&cloudwatchlogs.DescribeLogStreamsOutput{ + LogStreams: []types.LogStream{ + {LastEventTimestamp: aws.Int64(now.AddDate(0, 0, -logStreamCreationDate).UnixMilli())}, + }, + NextToken: nil, + }, nil).Once() + + var ( + deletedLogGroup []string + wg sync.WaitGroup + foundLogGroupChan = make(chan *types.LogGroup, 500) + deletedLogGroupNameChan = make(chan string, 500) + ) + + w := worker{ + id: 1, + wg: &wg, + incomingLogGroupChan: foundLogGroupChan, + deletedLogGroupChan: deletedLogGroupNameChan, + times: times, + } + go handleDeletedLogGroups(&deletedLogGroup, deletedLogGroupNameChan) + // Call handleLogGroup in dry-run mode (so no deletion call is made). + err := w.handleLogGroup(context.Background(), mockClient, logGroup) + time.Sleep(1 * time.Second) // give time for deleted group to be handled + return deletedLogGroup, err + +} + +// Test handleLogGroup to simulate deletion when a log group is old and inactive. 
+func TestHandleLogGroup(t *testing.T) {
+	// NOTE(review): assumes clean.KeepDurationOneDay is a negative duration so
+	// now.Add(3 * KeepDurationOneDay) lands 3 days in the past — confirm in tool/clean.
+	cfg := Config{
+		creationThreshold: 3 * clean.KeepDurationOneDay,
+		inactiveThreshold: 2 * clean.KeepDurationOneDay,
+		numWorkers:        0,
+		deleteBatchCap:    0,
+		exceptionList:     []string{"EXCEPTION"},
+		dryRun:            true,
+	}
+	testCases := []struct {
+		name                  string
+		logGroupName          string
+		logCreationDate       int // days ago the group was created
+		logStreamCreationDate int // days ago the newest stream event occurred
+		expected              []string
+	}{
+		{
+			"Expired log group",
+			"expired-test-log-group",
+			7,
+			7,
+			[]string{"expired-test-log-group"},
+		},
+		{
+			"Fresh log group",
+			"fresh-test-log-group",
+			1,
+			7,
+			[]string{},
+		},
+		{
+			"Old but still used log group",
+			"old-test-log-group",
+			7,
+			1,
+			[]string{},
+		},
+	}
+	for _, tc := range testCases {
+		t.Run(tc.logGroupName, func(t *testing.T) {
+			deletedLogGroup, err := testHandleLogGroup(cfg, tc.logGroupName, tc.logCreationDate, tc.logStreamCreationDate)
+			assert.NoError(t, err)
+			assert.Len(t, deletedLogGroup, len(tc.expected))
+			assert.ElementsMatch(t, deletedLogGroup, tc.expected)
+		})
+	}
+
+}
+
+// testDeleteLogGroup exercises the full deleteOldLogGroups pipeline against a
+// single mocked log group and returns the names reported as deleted.
+func testDeleteLogGroup(cfg Config, logGroupName string, logCreationDate, logStreamCreationDate int) []string {
+	cfg.dryRun = true // Prevent actual deletion.
+	now := time.Now()
+
+	mockClient := new(MockCloudWatchLogsClient)
+
+	// Set up expectation for DescribeLogGroups
+	mockClient.On("DescribeLogGroups",
+		mock.Anything,
+		&cloudwatchlogs.DescribeLogGroupsInput{
+			NextToken: nil,
+		},
+		mock.Anything).Return(&cloudwatchlogs.DescribeLogGroupsOutput{
+		LogGroups: []types.LogGroup{
+			{LogGroupName: aws.String(logGroupName),
+				CreationTime: aws.Int64(now.AddDate(0, 0, -logCreationDate).UnixMilli())},
+		},
+		NextToken: nil,
+	}, nil).Once()
+	mockClient.On("DescribeLogStreams",
+		mock.Anything,
+		&cloudwatchlogs.DescribeLogStreamsInput{
+			LogGroupName: aws.String(logGroupName),
+			OrderBy:      types.OrderByLastEventTime,
+			Descending:   aws.Bool(true),
+			NextToken:    nil,
+		},
+		mock.Anything).Return(&cloudwatchlogs.DescribeLogStreamsOutput{
+		LogStreams: []types.LogStream{
+			{LastEventTimestamp: aws.Int64(now.AddDate(0, 0, -logStreamCreationDate).UnixMilli())},
+		},
+		NextToken: nil,
+	}, nil).Once()
+
+	// Run the full pipeline against the mocked client (dry-run: no DeleteLogGroup
+	// expectation is registered, so a real delete attempt would fail the mock).
+	// NOTE(review): the cfg parameter is only mutated locally — deleteOldLogGroups,
+	// calculateCutoffTimes and isLogGroupException all read the package-level cfg,
+	// so this test actually depends on the global flag defaults (numWorkers,
+	// dryRun, exceptionList). Confirm those defaults, or wire cfg through explicitly.
+	return deleteOldLogGroups(context.Background(), mockClient, calculateCutoffTimes())
+
+}
+
+// TestDeleteLogGroups is a table test over the end-to-end pipeline.
+// NOTE(review): the cfg built here (including exceptionList "except") is never
+// applied to the package-level cfg — see note in testDeleteLogGroup.
+func TestDeleteLogGroups(t *testing.T) {
+	cfg := Config{
+		creationThreshold: 3,
+		inactiveThreshold: 2,
+		numWorkers:        0,
+		deleteBatchCap:    0,
+		exceptionList:     []string{"except"},
+		dryRun:            true,
+	}
+	testCases := []struct {
+		name                  string
+		logGroupName          string
+		logCreationDate       int
+		logStreamCreationDate int
+		expected              []string
+	}{
+		{
+			"Expired log group",
+			"expired-test-log-group",
+			7,
+			7,
+			[]string{"expired-test-log-group"},
+		},
+		{
+			"Fresh log group",
+			"fresh-test-log-group",
+			1,
+			7,
+			[]string{},
+		},
+		{
+			"Old but still used log group",
+			"old-test-log-group",
+			7,
+			1,
+			[]string{},
+		},
+		{
+			"Exception log group",
+			"exceptional-test-log-group",
+			7,
+			1,
+			[]string{},
+		},
+	}
+	for _, tc := range testCases {
+		t.Run(tc.logGroupName, func(t *testing.T) {
+			deletedLogGroup := testDeleteLogGroup(cfg, tc.logGroupName, tc.logCreationDate, tc.logStreamCreationDate)
+			assert.Len(t, deletedLogGroup, len(tc.expected))
+			assert.ElementsMatch(t, deletedLogGroup, tc.expected)
+		})
+	}
+
+}
diff --git a/tool/clean/go.mod b/tool/clean/go.mod
index 9497610f40..e2209f0071 100644
--- a/tool/clean/go.mod
+++ b/tool/clean/go.mod
@@ -1,26 +1,30 @@
 module github.com/aws/amazon-cloudwatch-agent/tool/clean
 
-go 1.20
+go 1.22
+
+toolchain go1.23.6
 
 require (
 	github.com/aws/aws-sdk-go v1.48.14
-	github.com/aws/aws-sdk-go-v2 v1.23.5
+	github.com/aws/aws-sdk-go-v2 v1.36.2
 	github.com/aws/aws-sdk-go-v2/config v1.25.12
 	github.com/aws/aws-sdk-go-v2/service/autoscaling v1.36.3
+	github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs v1.45.14
 	github.com/aws/aws-sdk-go-v2/service/ec2 v1.140.0
 	github.com/aws/aws-sdk-go-v2/service/ecs v1.35.3
 	github.com/aws/aws-sdk-go-v2/service/efs v1.26.3
 	github.com/aws/aws-sdk-go-v2/service/eks v1.35.3
 	github.com/aws/aws-sdk-go-v2/service/iam v1.28.3
-	github.com/aws/smithy-go v1.18.1
+	github.com/aws/smithy-go v1.22.2
 	github.com/stretchr/testify v1.8.4
) require ( + github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.10 // indirect github.com/aws/aws-sdk-go-v2/credentials v1.16.10 // indirect github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.9 // indirect - github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.8 // indirect - github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.8 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.33 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.33 // indirect github.com/aws/aws-sdk-go-v2/internal/ini v1.7.1 // indirect github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.3 // indirect github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.8 // indirect diff --git a/tool/clean/go.sum b/tool/clean/go.sum index 42f745d7d7..ec10d06adb 100644 --- a/tool/clean/go.sum +++ b/tool/clean/go.sum @@ -1,21 +1,25 @@ github.com/aws/aws-sdk-go v1.48.14 h1:nVLrp+F84SG+xGiFMfe1TE6ZV6smF+42tuuNgYGV30s= github.com/aws/aws-sdk-go v1.48.14/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= -github.com/aws/aws-sdk-go-v2 v1.23.5 h1:xK6C4udTyDMd82RFvNkDQxtAd00xlzFUtX4fF2nMZyg= -github.com/aws/aws-sdk-go-v2 v1.23.5/go.mod h1:t3szzKfP0NeRU27uBFczDivYJjsmSnqI8kIvKyWb9ds= +github.com/aws/aws-sdk-go-v2 v1.36.2 h1:Ub6I4lq/71+tPb/atswvToaLGVMxKZvjYDVOWEExOcU= +github.com/aws/aws-sdk-go-v2 v1.36.2/go.mod h1:LLXuLpgzEbD766Z5ECcRmi8AzSwfZItDtmABVkRLGzg= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.10 h1:zAybnyUQXIZ5mok5Jqwlf58/TFE7uvd3IAsa1aF9cXs= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.10/go.mod h1:qqvMj6gHLR/EXWZw4ZbqlPbQUyenf4h82UQUlKc+l14= github.com/aws/aws-sdk-go-v2/config v1.25.12 h1:mF4cMuNh/2G+d19nWnm1vJ/ak0qK6SbqF0KtSX9pxu0= github.com/aws/aws-sdk-go-v2/config v1.25.12/go.mod h1:lOvvqtZP9p29GIjOTuA/76HiVk0c/s8qRcFRq2+E2uc= github.com/aws/aws-sdk-go-v2/credentials v1.16.10 h1:VmRkuoKaGl2ZDNGkkRQgw80Hxj1Bb9a+bsT5shqlCwo= github.com/aws/aws-sdk-go-v2/credentials v1.16.10/go.mod 
h1:WEn22lpd50buTs/TDqywytW5xQ2zPOMbYipIlqI6xXg= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.9 h1:FZVFahMyZle6WcogZCOxo6D/lkDA2lqKIn4/ueUmVXw= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.9/go.mod h1:kjq7REMIkxdtcEC9/4BVXjOsNY5isz6jQbEgk6osRTU= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.8 h1:8GVZIR0y6JRIUNSYI1xAMF4HDfV8H/bOsZ/8AD/uY5Q= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.8/go.mod h1:rwBfu0SoUkBUZndVgPZKAD9Y2JigaZtRP68unRiYToQ= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.8 h1:ZE2ds/qeBkhk3yqYvS3CDCFNvd9ir5hMjlVStLZWrvM= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.8/go.mod h1:/lAPPymDYL023+TS6DJmjuL42nxix2AvEvfjqOBRODk= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.33 h1:knLyPMw3r3JsU8MFHWctE4/e2qWbPaxDYLlohPvnY8c= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.33/go.mod h1:EBp2HQ3f+XCB+5J+IoEbGhoV7CpJbnrsd4asNXmTL0A= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.33 h1:K0+Ne08zqti8J9jwENxZ5NoUyBnaFDTu3apwQJWrwwA= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.33/go.mod h1:K97stwwzaWzmqxO8yLGHhClbVW1tC6VT1pDLk1pGrq4= github.com/aws/aws-sdk-go-v2/internal/ini v1.7.1 h1:uR9lXYjdPX0xY+NhvaJ4dD8rpSRz5VY81ccIIoNG+lw= github.com/aws/aws-sdk-go-v2/internal/ini v1.7.1/go.mod h1:6fQQgfuGmw8Al/3M2IgIllycxV7ZW7WCdVSqfBeUiCY= github.com/aws/aws-sdk-go-v2/service/autoscaling v1.36.3 h1:16TRfDZhx5aX90VsvG0yJ5XNlDNHMVDj2DBpVMwDxzc= github.com/aws/aws-sdk-go-v2/service/autoscaling v1.36.3/go.mod h1:dJD5FZKnDClUVIcCwfu676Y72h4GytmMoZHzf1nTW8Q= +github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs v1.45.14 h1:Xc90sglbEnAC1X4d4ui422Ppw0HWjyNoqGAE1Dq+Rcg= +github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs v1.45.14/go.mod h1:IbPFVuHnR+Klb3rrZHai890N1dnMCJZ0GeRfG0fj+ys= github.com/aws/aws-sdk-go-v2/service/ec2 v1.140.0 h1:joMAX3jOjpbgIYzXgyMLAYly0kzbTJ7DrfAB3PNwobA= github.com/aws/aws-sdk-go-v2/service/ec2 v1.140.0/go.mod h1:d1hAqgLDOPaSO1Piy/0bBmj6oAplFwv6p0cquHntNHM= 
github.com/aws/aws-sdk-go-v2/service/ecs v1.35.3 h1:5P4kia3F4RC5/nBQDBlH0WijReF9YIZ9JwvwoVBRZxY= @@ -36,12 +40,13 @@ github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.3 h1:CxAHBS0BWSUqI7qzXHc2ZpTe github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.3/go.mod h1:7Lt5mjQ8x5rVdKqg+sKKDeuwoszDJIIPmkd8BVsEdS0= github.com/aws/aws-sdk-go-v2/service/sts v1.26.3 h1:KfREzajmHCSYjCaMRtdLr9boUMA7KPpoPApitPlbNeo= github.com/aws/aws-sdk-go-v2/service/sts v1.26.3/go.mod h1:7Ld9eTqocTvJqqJ5K/orbSDwmGcpRdlDiLjz2DO+SL8= -github.com/aws/smithy-go v1.18.1 h1:pOdBTUfXNazOlxLrgeYalVnuTpKreACHtc62xLwIB3c= -github.com/aws/smithy-go v1.18.1/go.mod h1:NukqUGpCZIILqqiV0NIjeFh24kd/FAa4beRb6nbIUPE= +github.com/aws/smithy-go v1.22.2 h1:6D9hW43xKFrRx/tXXfAlIZc4JI+yQe6snnWcQyxSyLQ= +github.com/aws/smithy-go v1.22.2/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg= +github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= diff --git a/tool/testutil/testutil.go b/tool/testutil/testutil.go index f26e838048..3cc3200913 100644 --- a/tool/testutil/testutil.go +++ b/tool/testutil/testutil.go @@ -5,8 +5,12 @@ package testutil import ( "fmt" + "strings" + "sync" "testing" + "github.com/influxdata/telegraf" + "github.com/aws/amazon-cloudwatch-agent/tool/stdin" ) @@ -34,3 +38,109 @@ func Type(inputChan chan<- string, inputString ...string) { } }() } + +type 
LogSink struct {
+	mu    sync.Mutex // guards lines
+	lines []string   // captured output, one entry per logging call, prefixed E!/D!/W!/I!
+}
+
+var _ telegraf.Logger = (*LogSink)(nil)
+
+// NewLogSink returns an empty, concurrency-safe telegraf.Logger that records
+// every logged line so tests can assert on emitted output.
+func NewLogSink() *LogSink {
+	return &LogSink{
+		lines: make([]string, 0),
+	}
+}
+
+// Each method below appends one rendered line under the mutex, tagged with
+// the telegraf level prefix for its severity.
+func (l *LogSink) Errorf(format string, args ...any) {
+	l.mu.Lock()
+	defer l.mu.Unlock()
+	l.lines = append(l.lines, "E! "+fmt.Sprintf(format, args...))
+}
+
+func (l *LogSink) Error(args ...any) {
+	l.mu.Lock()
+	defer l.mu.Unlock()
+	l.lines = append(l.lines, "E! "+fmt.Sprint(args...))
+}
+
+func (l *LogSink) Debugf(format string, args ...any) {
+	l.mu.Lock()
+	defer l.mu.Unlock()
+	l.lines = append(l.lines, "D! "+fmt.Sprintf(format, args...))
+}
+
+func (l *LogSink) Debug(args ...any) {
+	l.mu.Lock()
+	defer l.mu.Unlock()
+	l.lines = append(l.lines, "D! "+fmt.Sprint(args...))
+}
+
+func (l *LogSink) Warnf(format string, args ...any) {
+	l.mu.Lock()
+	defer l.mu.Unlock()
+	l.lines = append(l.lines, "W! "+fmt.Sprintf(format, args...))
+}
+
+func (l *LogSink) Warn(args ...any) {
+	l.mu.Lock()
+	defer l.mu.Unlock()
+	l.lines = append(l.lines, "W! "+fmt.Sprint(args...))
+}
+
+func (l *LogSink) Infof(format string, args ...any) {
+	l.mu.Lock()
+	defer l.mu.Unlock()
+	l.lines = append(l.lines, "I! "+fmt.Sprintf(format, args...))
+}
+
+func (l *LogSink) Info(args ...any) {
+	l.mu.Lock()
+	defer l.mu.Unlock()
+	l.lines = append(l.lines, "I! 
"+fmt.Sprint(args...)) +} + +func (l *LogSink) Lines() []string { + l.mu.Lock() + defer l.mu.Unlock() + lines := make([]string, len(l.lines)) + copy(lines, l.lines) + return lines +} + +func (l *LogSink) String() string { + return strings.Join(l.Lines(), "\n") +} + +type NopLogger struct { +} + +var _ telegraf.Logger = (*NopLogger)(nil) + +func NewNopLogger() telegraf.Logger { + return &NopLogger{} +} + +func (n NopLogger) Errorf(string, ...interface{}) { +} + +func (n NopLogger) Error(...interface{}) { +} + +func (n NopLogger) Debugf(string, ...interface{}) { +} + +func (n NopLogger) Debug(...interface{}) { +} + +func (n NopLogger) Warnf(string, ...interface{}) { +} + +func (n NopLogger) Warn(...interface{}) { +} + +func (n NopLogger) Infof(string, ...interface{}) { +} + +func (n NopLogger) Info(...interface{}) { +} diff --git a/translator/config/schema.json b/translator/config/schema.json index 4938baf571..83f51b8f3b 100644 --- a/translator/config/schema.json +++ b/translator/config/schema.json @@ -959,6 +959,10 @@ "UTC" ] }, + "trim_timestamp" : { + "type": "boolean", + "description": "Whether to trim the timestamp in the log message" + }, "encoding": { "type": "string", "minLength": 1, diff --git a/translator/tocwconfig/sampleConfig/advanced_config_darwin.yaml b/translator/tocwconfig/sampleConfig/advanced_config_darwin.yaml index bd1d153b8d..afdebc3fae 100644 --- a/translator/tocwconfig/sampleConfig/advanced_config_darwin.yaml +++ b/translator/tocwconfig/sampleConfig/advanced_config_darwin.yaml @@ -51,7 +51,8 @@ processors: - InstanceType imds_retries: 1 middleware: agenthealth/statuscode - refresh_interval_seconds: 0s + refresh_tags_interval: 0s + refresh_volumes_interval: 0s receivers: telegraf_cpu: collection_interval: 1m0s @@ -90,11 +91,11 @@ service: - ec2tagger - awsentity/resource receivers: + - telegraf_disk + - telegraf_mem - telegraf_netstat - telegraf_swap - telegraf_cpu - - telegraf_disk - - telegraf_mem metrics/hostDeltaMetrics: exporters: - 
awscloudwatch @@ -121,4 +122,5 @@ service: metrics: address: "" level: None - traces: {} + traces: + level: None diff --git a/translator/tocwconfig/sampleConfig/advanced_config_linux.yaml b/translator/tocwconfig/sampleConfig/advanced_config_linux.yaml index 792b6d5073..353b746af6 100644 --- a/translator/tocwconfig/sampleConfig/advanced_config_linux.yaml +++ b/translator/tocwconfig/sampleConfig/advanced_config_linux.yaml @@ -51,7 +51,8 @@ processors: - InstanceType imds_retries: 1 middleware: agenthealth/statuscode - refresh_interval_seconds: 0s + refresh_tags_interval: 0s + refresh_volumes_interval: 0s receivers: telegraf_cpu: collection_interval: 1m0s @@ -98,13 +99,13 @@ service: - ec2tagger - awsentity/resource receivers: - - telegraf_ethtool - - telegraf_nvidia_smi - - telegraf_cpu - telegraf_disk - telegraf_mem - telegraf_netstat - telegraf_swap + - telegraf_ethtool + - telegraf_nvidia_smi + - telegraf_cpu metrics/hostDeltaMetrics: exporters: - awscloudwatch @@ -131,4 +132,5 @@ service: metrics: address: "" level: None - traces: {} + traces: + level: None diff --git a/translator/tocwconfig/sampleConfig/advanced_config_windows.yaml b/translator/tocwconfig/sampleConfig/advanced_config_windows.yaml index 4ce5959952..df47e58553 100644 --- a/translator/tocwconfig/sampleConfig/advanced_config_windows.yaml +++ b/translator/tocwconfig/sampleConfig/advanced_config_windows.yaml @@ -36,12 +36,13 @@ processors: ec2_instance_tag_keys: - AutoScalingGroupName ec2_metadata_tags: - - InstanceType - ImageId - InstanceId + - InstanceType imds_retries: 1 middleware: agenthealth/statuscode - refresh_interval_seconds: 0s + refresh_tags_interval: 0s + refresh_volumes_interval: 0s receivers: telegraf_win_perf_counters/1492679118: alias_name: Memory @@ -91,13 +92,13 @@ service: - ec2tagger - awsentity/resource receivers: + - telegraf_win_perf_counters/3762679655 - telegraf_win_perf_counters/2073218482 - telegraf_win_perf_counters/2039663244 - telegraf_win_perf_counters/4283769065 - 
telegraf_win_perf_counters/1492679118 - telegraf_win_perf_counters/3610923661 - telegraf_win_perf_counters/3446270237 - - telegraf_win_perf_counters/3762679655 telemetry: logs: development: false @@ -115,4 +116,5 @@ service: metrics: address: "" level: None - traces: {} + traces: + level: None diff --git a/translator/tocwconfig/sampleConfig/amp_config_linux.yaml b/translator/tocwconfig/sampleConfig/amp_config_linux.yaml index df5dc3fc5c..b47c7f0c42 100644 --- a/translator/tocwconfig/sampleConfig/amp_config_linux.yaml +++ b/translator/tocwconfig/sampleConfig/amp_config_linux.yaml @@ -28,7 +28,11 @@ exporters: enabled: false http2_ping_timeout: 0s http2_read_idle_timeout: 0s + idle_conn_timeout: 1m30s max_batch_size_bytes: 3000000 + max_conns_per_host: 0 + max_idle_conns: 100 + max_idle_conns_per_host: 0 namespace: "" proxy_url: "" read_buffer_size: 0 @@ -99,12 +103,13 @@ processors: ec2_instance_tag_keys: - AutoScalingGroupName ec2_metadata_tags: - - ImageId - InstanceId - InstanceType + - ImageId imds_retries: 1 middleware: agenthealth/statuscode - refresh_interval_seconds: 0s + refresh_tags_interval: 0s + refresh_volumes_interval: 0s rollup: attribute_groups: - - ImageId @@ -175,4 +180,5 @@ service: metrics: address: "" level: None - traces: {} + traces: + level: None diff --git a/translator/tocwconfig/sampleConfig/appsignals_and_ecs_config.yaml b/translator/tocwconfig/sampleConfig/appsignals_and_ecs_config.yaml index ac1ea7b86e..6bd1354948 100644 --- a/translator/tocwconfig/sampleConfig/appsignals_and_ecs_config.yaml +++ b/translator/tocwconfig/sampleConfig/appsignals_and_ecs_config.yaml @@ -1256,7 +1256,9 @@ processors: enabled: true faas.version: enabled: true + max_conns_per_host: 0 max_idle_conns: 100 + max_idle_conns_per_host: 0 middleware: agenthealth/statuscode openshift: address: "" @@ -1350,10 +1352,13 @@ receivers: write_buffer_size: 0 http: endpoint: 0.0.0.0:4316 + idle_timeout: 0s include_metadata: false logs_url_path: /v1/logs max_request_body_size: 
0 metrics_url_path: /v1/metrics + read_header_timeout: 0s + read_timeout: 0s tls: ca_file: "" cert_file: path/to/cert.crt @@ -1365,6 +1370,7 @@ receivers: min_version: "" reload_interval: 0s traces_url_path: /v1/traces + write_timeout: 0s service: extensions: - awsproxy/application_signals @@ -1404,4 +1410,5 @@ service: metrics: address: "" level: None - traces: {} + traces: + level: None diff --git a/translator/tocwconfig/sampleConfig/appsignals_and_eks_config.yaml b/translator/tocwconfig/sampleConfig/appsignals_and_eks_config.yaml index 26ee65ff2b..fe7efba5c2 100644 --- a/translator/tocwconfig/sampleConfig/appsignals_and_eks_config.yaml +++ b/translator/tocwconfig/sampleConfig/appsignals_and_eks_config.yaml @@ -332,6 +332,20 @@ processors: send_batch_max_size: 0 send_batch_size: 8192 timeout: 5s + filter/containerinsights: + error_mode: propagate + logs: {} + metrics: + exclude: + match_type: strict + metric_names: + - up + - scrape_duration_seconds + - scrape_samples_scraped + - scrape_series_added + - scrape_samples_post_metric_relabeling + spans: {} + traces: {} metricstransform/application_signals: transforms: - action: update @@ -1385,7 +1399,9 @@ processors: enabled: true faas.version: enabled: true + max_conns_per_host: 0 max_idle_conns: 100 + max_idle_conns_per_host: 0 middleware: agenthealth/statuscode openshift: address: "" @@ -1508,10 +1524,13 @@ receivers: write_buffer_size: 0 http: endpoint: 0.0.0.0:4316 + idle_timeout: 0s include_metadata: false logs_url_path: /v1/logs max_request_body_size: 0 metrics_url_path: /v1/metrics + read_header_timeout: 0s + read_timeout: 0s tls: ca_file: "" cert_file: path/to/cert.crt @@ -1523,6 +1542,7 @@ receivers: min_version: "" reload_interval: 0s traces_url_path: /v1/traces + write_timeout: 0s service: extensions: - agenthealth/logs @@ -1547,6 +1567,7 @@ service: - awsemf/containerinsights processors: - batch/containerinsights + - filter/containerinsights receivers: - awscontainerinsightreceiver 
traces/application_signals: @@ -1572,4 +1593,5 @@ service: metrics: address: "" level: None - traces: {} + traces: + level: None diff --git a/translator/tocwconfig/sampleConfig/appsignals_and_k8s_config.yaml b/translator/tocwconfig/sampleConfig/appsignals_and_k8s_config.yaml index bdb8069db5..28105f35ae 100644 --- a/translator/tocwconfig/sampleConfig/appsignals_and_k8s_config.yaml +++ b/translator/tocwconfig/sampleConfig/appsignals_and_k8s_config.yaml @@ -333,6 +333,20 @@ processors: send_batch_max_size: 0 send_batch_size: 8192 timeout: 5s + filter/containerinsights: + error_mode: propagate + logs: {} + metrics: + exclude: + match_type: strict + metric_names: + - up + - scrape_duration_seconds + - scrape_samples_scraped + - scrape_series_added + - scrape_samples_post_metric_relabeling + spans: {} + traces: {} metricstransform/application_signals: transforms: - action: update @@ -1386,7 +1400,9 @@ processors: enabled: true faas.version: enabled: true + max_conns_per_host: 0 max_idle_conns: 100 + max_idle_conns_per_host: 0 middleware: agenthealth/statuscode openshift: address: "" @@ -1499,11 +1515,15 @@ receivers: write_buffer_size: 0 http: endpoint: 0.0.0.0:4316 + idle_timeout: 0s include_metadata: false logs_url_path: /v1/logs max_request_body_size: 0 metrics_url_path: /v1/metrics + read_header_timeout: 0s + read_timeout: 0s traces_url_path: /v1/traces + write_timeout: 0s service: extensions: - agenthealth/logs @@ -1528,6 +1548,7 @@ service: - awsemf/containerinsights processors: - batch/containerinsights + - filter/containerinsights receivers: - awscontainerinsightreceiver traces/application_signals: @@ -1553,4 +1574,5 @@ service: metrics: address: "" level: None - traces: {} + traces: + level: None diff --git a/translator/tocwconfig/sampleConfig/appsignals_fallback_and_eks_config.yaml b/translator/tocwconfig/sampleConfig/appsignals_fallback_and_eks_config.yaml index 26ee65ff2b..fe7efba5c2 100644 --- 
a/translator/tocwconfig/sampleConfig/appsignals_fallback_and_eks_config.yaml +++ b/translator/tocwconfig/sampleConfig/appsignals_fallback_and_eks_config.yaml @@ -332,6 +332,20 @@ processors: send_batch_max_size: 0 send_batch_size: 8192 timeout: 5s + filter/containerinsights: + error_mode: propagate + logs: {} + metrics: + exclude: + match_type: strict + metric_names: + - up + - scrape_duration_seconds + - scrape_samples_scraped + - scrape_series_added + - scrape_samples_post_metric_relabeling + spans: {} + traces: {} metricstransform/application_signals: transforms: - action: update @@ -1385,7 +1399,9 @@ processors: enabled: true faas.version: enabled: true + max_conns_per_host: 0 max_idle_conns: 100 + max_idle_conns_per_host: 0 middleware: agenthealth/statuscode openshift: address: "" @@ -1508,10 +1524,13 @@ receivers: write_buffer_size: 0 http: endpoint: 0.0.0.0:4316 + idle_timeout: 0s include_metadata: false logs_url_path: /v1/logs max_request_body_size: 0 metrics_url_path: /v1/metrics + read_header_timeout: 0s + read_timeout: 0s tls: ca_file: "" cert_file: path/to/cert.crt @@ -1523,6 +1542,7 @@ receivers: min_version: "" reload_interval: 0s traces_url_path: /v1/traces + write_timeout: 0s service: extensions: - agenthealth/logs @@ -1547,6 +1567,7 @@ service: - awsemf/containerinsights processors: - batch/containerinsights + - filter/containerinsights receivers: - awscontainerinsightreceiver traces/application_signals: @@ -1572,4 +1593,5 @@ service: metrics: address: "" level: None - traces: {} + traces: + level: None diff --git a/translator/tocwconfig/sampleConfig/appsignals_over_fallback_config.yaml b/translator/tocwconfig/sampleConfig/appsignals_over_fallback_config.yaml index 26ee65ff2b..fe7efba5c2 100644 --- a/translator/tocwconfig/sampleConfig/appsignals_over_fallback_config.yaml +++ b/translator/tocwconfig/sampleConfig/appsignals_over_fallback_config.yaml @@ -332,6 +332,20 @@ processors: send_batch_max_size: 0 send_batch_size: 8192 timeout: 5s + 
filter/containerinsights: + error_mode: propagate + logs: {} + metrics: + exclude: + match_type: strict + metric_names: + - up + - scrape_duration_seconds + - scrape_samples_scraped + - scrape_series_added + - scrape_samples_post_metric_relabeling + spans: {} + traces: {} metricstransform/application_signals: transforms: - action: update @@ -1385,7 +1399,9 @@ processors: enabled: true faas.version: enabled: true + max_conns_per_host: 0 max_idle_conns: 100 + max_idle_conns_per_host: 0 middleware: agenthealth/statuscode openshift: address: "" @@ -1508,10 +1524,13 @@ receivers: write_buffer_size: 0 http: endpoint: 0.0.0.0:4316 + idle_timeout: 0s include_metadata: false logs_url_path: /v1/logs max_request_body_size: 0 metrics_url_path: /v1/metrics + read_header_timeout: 0s + read_timeout: 0s tls: ca_file: "" cert_file: path/to/cert.crt @@ -1523,6 +1542,7 @@ receivers: min_version: "" reload_interval: 0s traces_url_path: /v1/traces + write_timeout: 0s service: extensions: - agenthealth/logs @@ -1547,6 +1567,7 @@ service: - awsemf/containerinsights processors: - batch/containerinsights + - filter/containerinsights receivers: - awscontainerinsightreceiver traces/application_signals: @@ -1572,4 +1593,5 @@ service: metrics: address: "" level: None - traces: {} + traces: + level: None diff --git a/translator/tocwconfig/sampleConfig/base_appsignals_config.yaml b/translator/tocwconfig/sampleConfig/base_appsignals_config.yaml index ce4711920e..759d089a1d 100644 --- a/translator/tocwconfig/sampleConfig/base_appsignals_config.yaml +++ b/translator/tocwconfig/sampleConfig/base_appsignals_config.yaml @@ -163,6 +163,7 @@ exporters: debug/application_signals: sampling_initial: 2 sampling_thereafter: 500 + use_internal_logger: true verbosity: Detailed extensions: agenthealth/logs: @@ -1269,7 +1270,9 @@ processors: enabled: true faas.version: enabled: true + max_conns_per_host: 0 max_idle_conns: 100 + max_idle_conns_per_host: 0 middleware: agenthealth/statuscode openshift: address: "" 
@@ -1353,11 +1356,15 @@ receivers: write_buffer_size: 0 http: endpoint: 0.0.0.0:4316 + idle_timeout: 0s include_metadata: false logs_url_path: /v1/logs max_request_body_size: 0 metrics_url_path: /v1/metrics + read_header_timeout: 0s + read_timeout: 0s traces_url_path: /v1/traces + write_timeout: 0s service: extensions: - awsproxy/application_signals @@ -1403,4 +1410,5 @@ service: metrics: address: "" level: None - traces: {} + traces: + level: None diff --git a/translator/tocwconfig/sampleConfig/base_appsignals_fallback_config.yaml b/translator/tocwconfig/sampleConfig/base_appsignals_fallback_config.yaml index 9f050852d0..9c377b2529 100644 --- a/translator/tocwconfig/sampleConfig/base_appsignals_fallback_config.yaml +++ b/translator/tocwconfig/sampleConfig/base_appsignals_fallback_config.yaml @@ -1265,7 +1265,9 @@ processors: enabled: true faas.version: enabled: true + max_conns_per_host: 0 max_idle_conns: 100 + max_idle_conns_per_host: 0 middleware: agenthealth/statuscode openshift: address: "" @@ -1349,11 +1351,15 @@ receivers: write_buffer_size: 0 http: endpoint: 0.0.0.0:4316 + idle_timeout: 0s include_metadata: false logs_url_path: /v1/logs max_request_body_size: 0 metrics_url_path: /v1/metrics + read_header_timeout: 0s + read_timeout: 0s traces_url_path: /v1/traces + write_timeout: 0s service: extensions: - awsproxy/application_signals @@ -1397,4 +1403,5 @@ service: metrics: address: "" level: None - traces: {} + traces: + level: None diff --git a/translator/tocwconfig/sampleConfig/base_container_insights_config.yaml b/translator/tocwconfig/sampleConfig/base_container_insights_config.yaml index 4df5992b20..9c8e12b228 100644 --- a/translator/tocwconfig/sampleConfig/base_container_insights_config.yaml +++ b/translator/tocwconfig/sampleConfig/base_container_insights_config.yaml @@ -169,6 +169,20 @@ processors: send_batch_max_size: 0 send_batch_size: 8192 timeout: 5s + filter/containerinsights: + error_mode: propagate + logs: {} + metrics: + exclude: + match_type: 
strict + metric_names: + - up + - scrape_duration_seconds + - scrape_samples_scraped + - scrape_series_added + - scrape_samples_post_metric_relabeling + spans: {} + traces: {} receivers: awscontainerinsightreceiver: accelerated_compute_metrics: false @@ -244,6 +258,7 @@ service: - awsemf/containerinsights processors: - batch/containerinsights + - filter/containerinsights receivers: - awscontainerinsightreceiver telemetry: @@ -261,4 +276,5 @@ service: metrics: address: "" level: None - traces: {} + traces: + level: None diff --git a/translator/tocwconfig/sampleConfig/basic_config_linux.yaml b/translator/tocwconfig/sampleConfig/basic_config_linux.yaml index 8c7671dc35..c8fad9f1ab 100644 --- a/translator/tocwconfig/sampleConfig/basic_config_linux.yaml +++ b/translator/tocwconfig/sampleConfig/basic_config_linux.yaml @@ -36,12 +36,13 @@ processors: ec2_instance_tag_keys: - AutoScalingGroupName ec2_metadata_tags: + - InstanceType - ImageId - InstanceId - - InstanceType imds_retries: 1 middleware: agenthealth/statuscode - refresh_interval_seconds: 0s + refresh_tags_interval: 0s + refresh_volumes_interval: 0s receivers: telegraf_disk: collection_interval: 1m0s @@ -83,4 +84,5 @@ service: metrics: address: "" level: None - traces: {} + traces: + level: None diff --git a/translator/tocwconfig/sampleConfig/basic_config_windows.yaml b/translator/tocwconfig/sampleConfig/basic_config_windows.yaml index 335680faca..f88e998108 100644 --- a/translator/tocwconfig/sampleConfig/basic_config_windows.yaml +++ b/translator/tocwconfig/sampleConfig/basic_config_windows.yaml @@ -41,7 +41,8 @@ processors: - InstanceType imds_retries: 1 middleware: agenthealth/statuscode - refresh_interval_seconds: 0s + refresh_tags_interval: 0s + refresh_volumes_interval: 0s receivers: telegraf_win_perf_counters/1492679118: alias_name: Memory @@ -85,4 +86,5 @@ service: metrics: address: "" level: None - traces: {} + traces: + level: None diff --git 
a/translator/tocwconfig/sampleConfig/collectd_config_linux.yaml b/translator/tocwconfig/sampleConfig/collectd_config_linux.yaml index 2d409b2753..fd0cd69ea8 100644 --- a/translator/tocwconfig/sampleConfig/collectd_config_linux.yaml +++ b/translator/tocwconfig/sampleConfig/collectd_config_linux.yaml @@ -67,4 +67,5 @@ service: metrics: address: "" level: None - traces: {} + traces: + level: None diff --git a/translator/tocwconfig/sampleConfig/compass_linux_config.yaml b/translator/tocwconfig/sampleConfig/compass_linux_config.yaml index 09c4ff3916..aa7462cc31 100644 --- a/translator/tocwconfig/sampleConfig/compass_linux_config.yaml +++ b/translator/tocwconfig/sampleConfig/compass_linux_config.yaml @@ -47,12 +47,13 @@ processors: ec2_instance_tag_keys: - AutoScalingGroupName ec2_metadata_tags: + - InstanceType - ImageId - InstanceId - - InstanceType imds_retries: 1 middleware: agenthealth/statuscode - refresh_interval_seconds: 0s + refresh_tags_interval: 0s + refresh_volumes_interval: 0s receivers: telegraf_socket_listener: collection_interval: 10s @@ -72,11 +73,11 @@ service: exporters: - awscloudwatch processors: - - awsentity/service/telegraf - ec2tagger + - awsentity/service/telegraf receivers: - - telegraf_socket_listener - telegraf_statsd + - telegraf_socket_listener telemetry: logs: development: false @@ -94,4 +95,5 @@ service: metrics: address: "" level: None - traces: {} + traces: + level: None diff --git a/translator/tocwconfig/sampleConfig/complete_darwin_config.yaml b/translator/tocwconfig/sampleConfig/complete_darwin_config.yaml index 8053e8754c..e30fe0e162 100644 --- a/translator/tocwconfig/sampleConfig/complete_darwin_config.yaml +++ b/translator/tocwconfig/sampleConfig/complete_darwin_config.yaml @@ -134,12 +134,13 @@ processors: ec2_instance_tag_keys: - AutoScalingGroupName ec2_metadata_tags: + - ImageId - InstanceId - InstanceType - - ImageId imds_retries: 1 middleware: agenthealth/statuscode - refresh_interval_seconds: 0s + refresh_tags_interval: 0s 
+ refresh_volumes_interval: 0s transform: error_mode: propagate flatten_data: false @@ -197,11 +198,15 @@ receivers: write_buffer_size: 0 http: endpoint: 0.0.0.0:2222 + idle_timeout: 0s include_metadata: false logs_url_path: /v1/logs max_request_body_size: 0 metrics_url_path: /v1/metrics + read_header_timeout: 0s + read_timeout: 0s traces_url_path: /v1/traces + write_timeout: 0s telegraf_cpu: collection_interval: 10s initial_delay: 1s @@ -285,23 +290,23 @@ service: - transform - awsentity/resource receivers: + - telegraf_netstat + - telegraf_processes - telegraf_disk + - telegraf_cpu - telegraf_swap - telegraf_mem - - telegraf_cpu - - telegraf_processes - - telegraf_netstat - telegraf_procstat/1917393364 metrics/hostCustomMetrics: exporters: - awscloudwatch processors: - - awsentity/service/telegraf - ec2tagger - transform + - awsentity/service/telegraf receivers: - - telegraf_socket_listener - telegraf_statsd + - telegraf_socket_listener metrics/hostDeltaMetrics: exporters: - awscloudwatch @@ -338,4 +343,5 @@ service: metrics: address: "" level: None - traces: {} + traces: + level: None diff --git a/translator/tocwconfig/sampleConfig/complete_linux_config.conf b/translator/tocwconfig/sampleConfig/complete_linux_config.conf index aef5f442db..12730d434c 100644 --- a/translator/tocwconfig/sampleConfig/complete_linux_config.conf +++ b/translator/tocwconfig/sampleConfig/complete_linux_config.conf @@ -54,6 +54,7 @@ pipe = false retention_in_days = 5 timezone = "UTC" + trim_timestamp = true [[inputs.logfile.file_config]] auto_removal = true diff --git a/translator/tocwconfig/sampleConfig/complete_linux_config.json b/translator/tocwconfig/sampleConfig/complete_linux_config.json index c2fb70e151..48062c0e20 100755 --- a/translator/tocwconfig/sampleConfig/complete_linux_config.json +++ b/translator/tocwconfig/sampleConfig/complete_linux_config.json @@ -258,6 +258,7 @@ "log_group_name": "amazon-cloudwatch-agent.log", "log_stream_name": "amazon-cloudwatch-agent.log", 
"timezone": "UTC", + "trim_timestamp": true, "retention_in_days": 5 }, { diff --git a/translator/tocwconfig/sampleConfig/complete_linux_config.yaml b/translator/tocwconfig/sampleConfig/complete_linux_config.yaml index 3cc541ead6..7fc6bbb1a2 100644 --- a/translator/tocwconfig/sampleConfig/complete_linux_config.yaml +++ b/translator/tocwconfig/sampleConfig/complete_linux_config.yaml @@ -152,7 +152,8 @@ processors: - InstanceType imds_retries: 1 middleware: agenthealth/statuscode - refresh_interval_seconds: 0s + refresh_tags_interval: 0s + refresh_volumes_interval: 0s filter/jmx/0: error_mode: propagate logs: {} @@ -199,11 +200,11 @@ processors: metric_statements: - context: metric statements: - - set(unit, "unit") where name == "disk_free" - - set(name, "DISK_FREE") where name == "disk_free" - set(unit, "unit") where name == "cpu_usage_idle" - set(name, "CPU_USAGE_IDLE") where name == "cpu_usage_idle" - set(unit, "unit") where name == "cpu_usage_nice" + - set(unit, "unit") where name == "disk_free" + - set(name, "DISK_FREE") where name == "disk_free" trace_statements: [] transform/jmx/0: error_mode: propagate @@ -212,9 +213,9 @@ processors: metric_statements: - context: metric statements: + - set(name, "kafka.fetch-rate") where name == "kafka.consumer.fetch-rate" - set(unit, "unit") where name == "jvm.memory.heap.used" - set(name, "JVM_MEM_HEAP_USED") where name == "jvm.memory.heap.used" - - set(name, "kafka.fetch-rate") where name == "kafka.consumer.fetch-rate" trace_statements: [] transform/jmx/1: error_mode: propagate @@ -304,11 +305,15 @@ receivers: write_buffer_size: 0 http: endpoint: 0.0.0.0:2222 + idle_timeout: 0s include_metadata: false logs_url_path: /v1/logs max_request_body_size: 0 metrics_url_path: /v1/metrics + read_header_timeout: 0s + read_timeout: 0s traces_url_path: /v1/traces + write_timeout: 0s telegraf_cpu: collection_interval: 10s initial_delay: 1s @@ -392,23 +397,23 @@ service: - transform - awsentity/resource receivers: - - telegraf_swap - - 
telegraf_cpu - telegraf_netstat - - telegraf_procstat/1917393364 - - telegraf_mem + - telegraf_swap - telegraf_disk + - telegraf_mem + - telegraf_cpu - telegraf_processes + - telegraf_procstat/1917393364 metrics/hostCustomMetrics/cloudwatch: exporters: - awscloudwatch processors: - - awsentity/service/telegraf - ec2tagger - transform + - awsentity/service/telegraf receivers: - - telegraf_statsd - telegraf_socket_listener + - telegraf_statsd metrics/hostDeltaMetrics/cloudwatch: exporters: - awscloudwatch @@ -418,8 +423,8 @@ service: - transform - awsentity/resource receivers: - - telegraf_net - telegraf_diskio + - telegraf_net metrics/jmx/cloudwatch/0: exporters: - awscloudwatch @@ -467,4 +472,5 @@ service: metrics: address: "" level: None - traces: {} + traces: + level: None diff --git a/translator/tocwconfig/sampleConfig/complete_windows_config.yaml b/translator/tocwconfig/sampleConfig/complete_windows_config.yaml index d0274a1117..12cb50e766 100644 --- a/translator/tocwconfig/sampleConfig/complete_windows_config.yaml +++ b/translator/tocwconfig/sampleConfig/complete_windows_config.yaml @@ -129,7 +129,8 @@ processors: - InstanceType imds_retries: 1 middleware: agenthealth/statuscode - refresh_interval_seconds: 0s + refresh_tags_interval: 0s + refresh_volumes_interval: 0s transform: error_mode: propagate flatten_data: false @@ -186,11 +187,15 @@ receivers: write_buffer_size: 0 http: endpoint: 0.0.0.0:2222 + idle_timeout: 0s include_metadata: false logs_url_path: /v1/logs max_request_body_size: 0 metrics_url_path: /v1/metrics + read_header_timeout: 0s + read_timeout: 0s traces_url_path: /v1/traces + write_timeout: 0s telegraf_nvidia_smi: collection_interval: 1m0s initial_delay: 1s @@ -314,4 +319,5 @@ service: metrics: address: "" level: None - traces: {} + traces: + level: None diff --git a/translator/tocwconfig/sampleConfig/config_with_env.yaml b/translator/tocwconfig/sampleConfig/config_with_env.yaml index 40307cc324..82858df377 100644 --- 
a/translator/tocwconfig/sampleConfig/config_with_env.yaml +++ b/translator/tocwconfig/sampleConfig/config_with_env.yaml @@ -113,4 +113,5 @@ service: metrics: address: "" level: None - traces: {} + traces: + level: None diff --git a/translator/tocwconfig/sampleConfig/container_insights_jmx.yaml b/translator/tocwconfig/sampleConfig/container_insights_jmx.yaml index 0fa09f48e6..4d90f27f01 100644 --- a/translator/tocwconfig/sampleConfig/container_insights_jmx.yaml +++ b/translator/tocwconfig/sampleConfig/container_insights_jmx.yaml @@ -205,6 +205,20 @@ processors: match_type: "" initial_value: 2 max_staleness: 0s + filter/containerinsights: + error_mode: propagate + logs: {} + metrics: + exclude: + match_type: strict + metric_names: + - up + - scrape_duration_seconds + - scrape_samples_scraped + - scrape_series_added + - scrape_samples_post_metric_relabeling + spans: {} + traces: {} filter/containerinsightsjmx: error_mode: propagate logs: {} @@ -517,11 +531,15 @@ receivers: protocols: http: endpoint: 0.0.0.0:4314 + idle_timeout: 0s include_metadata: false logs_url_path: /v1/logs max_request_body_size: 0 metrics_url_path: /v1/metrics + read_header_timeout: 0s + read_timeout: 0s traces_url_path: /v1/traces + write_timeout: 0s service: extensions: - agenthealth/logs @@ -533,6 +551,7 @@ service: - awsemf/containerinsights processors: - batch/containerinsights + - filter/containerinsights receivers: - awscontainerinsightreceiver metrics/containerinsightsjmx: @@ -561,4 +580,5 @@ service: metrics: address: "" level: None - traces: {} + traces: + level: None diff --git a/translator/tocwconfig/sampleConfig/delta_config_linux.yaml b/translator/tocwconfig/sampleConfig/delta_config_linux.yaml index d7125d06cd..9e7d5f23b4 100644 --- a/translator/tocwconfig/sampleConfig/delta_config_linux.yaml +++ b/translator/tocwconfig/sampleConfig/delta_config_linux.yaml @@ -51,7 +51,8 @@ processors: - InstanceType imds_retries: 1 middleware: agenthealth/statuscode - refresh_interval_seconds: 0s 
+ refresh_tags_interval: 0s + refresh_volumes_interval: 0s transform: error_mode: propagate flatten_data: false @@ -104,4 +105,5 @@ service: metrics: address: "" level: None - traces: {} + traces: + level: None diff --git a/translator/tocwconfig/sampleConfig/delta_net_config_linux.yaml b/translator/tocwconfig/sampleConfig/delta_net_config_linux.yaml index 93a44eb467..accedd3a4b 100644 --- a/translator/tocwconfig/sampleConfig/delta_net_config_linux.yaml +++ b/translator/tocwconfig/sampleConfig/delta_net_config_linux.yaml @@ -43,12 +43,13 @@ processors: ec2_instance_tag_keys: - AutoScalingGroupName ec2_metadata_tags: - - ImageId - InstanceId - InstanceType + - ImageId imds_retries: 1 middleware: agenthealth/statuscode - refresh_interval_seconds: 0s + refresh_tags_interval: 0s + refresh_volumes_interval: 0s receivers: telegraf_net: collection_interval: 1m0s @@ -86,4 +87,5 @@ service: metrics: address: "" level: None - traces: {} + traces: + level: None diff --git a/translator/tocwconfig/sampleConfig/drop_origin_linux.yaml b/translator/tocwconfig/sampleConfig/drop_origin_linux.yaml index 24d603d40b..41aa4c36a6 100644 --- a/translator/tocwconfig/sampleConfig/drop_origin_linux.yaml +++ b/translator/tocwconfig/sampleConfig/drop_origin_linux.yaml @@ -46,7 +46,8 @@ processors: - InstanceType imds_retries: 1 middleware: agenthealth/statuscode - refresh_interval_seconds: 0s + refresh_tags_interval: 0s + refresh_volumes_interval: 0s transform: error_mode: propagate flatten_data: false @@ -105,4 +106,5 @@ service: metrics: address: "" level: None - traces: {} + traces: + level: None diff --git a/translator/tocwconfig/sampleConfig/emf_and_kubernetes_config.yaml b/translator/tocwconfig/sampleConfig/emf_and_kubernetes_config.yaml index 8a0d8fc3e2..9735dd2efd 100644 --- a/translator/tocwconfig/sampleConfig/emf_and_kubernetes_config.yaml +++ b/translator/tocwconfig/sampleConfig/emf_and_kubernetes_config.yaml @@ -421,6 +421,20 @@ processors: send_batch_max_size: 0 send_batch_size: 
8192 timeout: 5s + filter/containerinsights: + error_mode: propagate + logs: {} + metrics: + exclude: + match_type: strict + metric_names: + - up + - scrape_duration_seconds + - scrape_samples_scraped + - scrape_series_added + - scrape_samples_post_metric_relabeling + spans: {} + traces: {} metricstransform/containerinsights: transforms: - action: insert @@ -508,6 +522,7 @@ service: - awsemf/containerinsights processors: - batch/containerinsights + - filter/containerinsights - metricstransform/containerinsights receivers: - awscontainerinsightreceiver @@ -526,4 +541,5 @@ service: metrics: address: "" level: None - traces: {} + traces: + level: None diff --git a/translator/tocwconfig/sampleConfig/emf_and_kubernetes_with_gpu_config.yaml b/translator/tocwconfig/sampleConfig/emf_and_kubernetes_with_gpu_config.yaml index d70f0580c3..16d79a9587 100644 --- a/translator/tocwconfig/sampleConfig/emf_and_kubernetes_with_gpu_config.yaml +++ b/translator/tocwconfig/sampleConfig/emf_and_kubernetes_with_gpu_config.yaml @@ -679,6 +679,20 @@ processors: send_batch_max_size: 0 send_batch_size: 8192 timeout: 5s + filter/containerinsights: + error_mode: propagate + logs: {} + metrics: + exclude: + match_type: strict + metric_names: + - up + - scrape_duration_seconds + - scrape_samples_scraped + - scrape_series_added + - scrape_samples_post_metric_relabeling + spans: {} + traces: {} gpuattributes/containerinsights: {} metricstransform/containerinsights: transforms: @@ -692,9 +706,9 @@ processors: submatch_case: "" - action: insert aggregation_type: "" - include: DCGM_FI_DEV_POWER_USAGE + include: DCGM_FI_DEV_FB_USED match_type: "" - new_name: container_gpu_power_draw + new_name: container_gpu_memory_used operations: - action: add_label aggregation_type: "" @@ -703,12 +717,19 @@ processors: label_value: "" new_label: Type new_value: ContainerGPU + - action: experimental_scale_value + aggregation_type: "" + experimental_scale: 1.048576e+06 + label: "" + label_value: "" + new_label: "" + 
new_value: "" submatch_case: "" - action: insert aggregation_type: "" - include: DCGM_FI_DEV_POWER_USAGE + include: DCGM_FI_DEV_FB_USED match_type: "" - new_name: pod_gpu_power_draw + new_name: pod_gpu_memory_used operations: - action: add_label aggregation_type: "" @@ -717,12 +738,19 @@ processors: label_value: "" new_label: Type new_value: PodGPU + - action: experimental_scale_value + aggregation_type: "" + experimental_scale: 1.048576e+06 + label: "" + label_value: "" + new_label: "" + new_value: "" submatch_case: "" - action: insert aggregation_type: "" - include: DCGM_FI_DEV_POWER_USAGE + include: DCGM_FI_DEV_FB_USED match_type: "" - new_name: node_gpu_power_draw + new_name: node_gpu_memory_used operations: - action: add_label aggregation_type: "" @@ -731,12 +759,19 @@ processors: label_value: "" new_label: Type new_value: NodeGPU + - action: experimental_scale_value + aggregation_type: "" + experimental_scale: 1.048576e+06 + label: "" + label_value: "" + new_label: "" + new_value: "" submatch_case: "" - action: insert aggregation_type: "" - include: DCGM_FI_DEV_GPU_UTIL + include: DCGM_FI_DEV_FB_TOTAL match_type: "" - new_name: container_gpu_utilization + new_name: container_gpu_memory_total operations: - action: add_label aggregation_type: "" @@ -745,12 +780,19 @@ processors: label_value: "" new_label: Type new_value: ContainerGPU + - action: experimental_scale_value + aggregation_type: "" + experimental_scale: 1.048576e+06 + label: "" + label_value: "" + new_label: "" + new_value: "" submatch_case: "" - action: insert aggregation_type: "" - include: DCGM_FI_DEV_GPU_UTIL + include: DCGM_FI_DEV_FB_TOTAL match_type: "" - new_name: pod_gpu_utilization + new_name: pod_gpu_memory_total operations: - action: add_label aggregation_type: "" @@ -759,12 +801,19 @@ processors: label_value: "" new_label: Type new_value: PodGPU + - action: experimental_scale_value + aggregation_type: "" + experimental_scale: 1.048576e+06 + label: "" + label_value: "" + new_label: "" + 
new_value: "" submatch_case: "" - action: insert aggregation_type: "" - include: DCGM_FI_DEV_GPU_UTIL + include: DCGM_FI_DEV_FB_TOTAL match_type: "" - new_name: node_gpu_utilization + new_name: node_gpu_memory_total operations: - action: add_label aggregation_type: "" @@ -773,12 +822,19 @@ processors: label_value: "" new_label: Type new_value: NodeGPU + - action: experimental_scale_value + aggregation_type: "" + experimental_scale: 1.048576e+06 + label: "" + label_value: "" + new_label: "" + new_value: "" submatch_case: "" - action: insert aggregation_type: "" - include: DCGM_FI_DEV_FB_USED_PERCENT + include: DCGM_FI_DEV_GPU_TEMP match_type: "" - new_name: container_gpu_memory_utilization + new_name: container_gpu_temperature operations: - action: add_label aggregation_type: "" @@ -787,19 +843,12 @@ processors: label_value: "" new_label: Type new_value: ContainerGPU - - action: experimental_scale_value - aggregation_type: "" - experimental_scale: 100 - label: "" - label_value: "" - new_label: "" - new_value: "" submatch_case: "" - action: insert aggregation_type: "" - include: DCGM_FI_DEV_FB_USED_PERCENT + include: DCGM_FI_DEV_GPU_TEMP match_type: "" - new_name: pod_gpu_memory_utilization + new_name: pod_gpu_temperature operations: - action: add_label aggregation_type: "" @@ -808,19 +857,12 @@ processors: label_value: "" new_label: Type new_value: PodGPU - - action: experimental_scale_value - aggregation_type: "" - experimental_scale: 100 - label: "" - label_value: "" - new_label: "" - new_value: "" submatch_case: "" - action: insert aggregation_type: "" - include: DCGM_FI_DEV_FB_USED_PERCENT + include: DCGM_FI_DEV_GPU_TEMP match_type: "" - new_name: node_gpu_memory_utilization + new_name: node_gpu_temperature operations: - action: add_label aggregation_type: "" @@ -829,19 +871,12 @@ processors: label_value: "" new_label: Type new_value: NodeGPU - - action: experimental_scale_value - aggregation_type: "" - experimental_scale: 100 - label: "" - label_value: "" - 
new_label: "" - new_value: "" submatch_case: "" - action: insert aggregation_type: "" - include: DCGM_FI_DEV_FB_USED + include: DCGM_FI_DEV_POWER_USAGE match_type: "" - new_name: container_gpu_memory_used + new_name: container_gpu_power_draw operations: - action: add_label aggregation_type: "" @@ -850,19 +885,12 @@ processors: label_value: "" new_label: Type new_value: ContainerGPU - - action: experimental_scale_value - aggregation_type: "" - experimental_scale: 1.048576e+06 - label: "" - label_value: "" - new_label: "" - new_value: "" submatch_case: "" - action: insert aggregation_type: "" - include: DCGM_FI_DEV_FB_USED + include: DCGM_FI_DEV_POWER_USAGE match_type: "" - new_name: pod_gpu_memory_used + new_name: pod_gpu_power_draw operations: - action: add_label aggregation_type: "" @@ -871,19 +899,12 @@ processors: label_value: "" new_label: Type new_value: PodGPU - - action: experimental_scale_value - aggregation_type: "" - experimental_scale: 1.048576e+06 - label: "" - label_value: "" - new_label: "" - new_value: "" submatch_case: "" - action: insert aggregation_type: "" - include: DCGM_FI_DEV_FB_USED + include: DCGM_FI_DEV_POWER_USAGE match_type: "" - new_name: node_gpu_memory_used + new_name: node_gpu_power_draw operations: - action: add_label aggregation_type: "" @@ -892,19 +913,12 @@ processors: label_value: "" new_label: Type new_value: NodeGPU - - action: experimental_scale_value - aggregation_type: "" - experimental_scale: 1.048576e+06 - label: "" - label_value: "" - new_label: "" - new_value: "" submatch_case: "" - action: insert aggregation_type: "" - include: DCGM_FI_DEV_FB_TOTAL + include: DCGM_FI_DEV_GPU_UTIL match_type: "" - new_name: container_gpu_memory_total + new_name: container_gpu_utilization operations: - action: add_label aggregation_type: "" @@ -913,19 +927,12 @@ processors: label_value: "" new_label: Type new_value: ContainerGPU - - action: experimental_scale_value - aggregation_type: "" - experimental_scale: 1.048576e+06 - label: "" - 
label_value: "" - new_label: "" - new_value: "" submatch_case: "" - action: insert aggregation_type: "" - include: DCGM_FI_DEV_FB_TOTAL + include: DCGM_FI_DEV_GPU_UTIL match_type: "" - new_name: pod_gpu_memory_total + new_name: pod_gpu_utilization operations: - action: add_label aggregation_type: "" @@ -934,19 +941,12 @@ processors: label_value: "" new_label: Type new_value: PodGPU - - action: experimental_scale_value - aggregation_type: "" - experimental_scale: 1.048576e+06 - label: "" - label_value: "" - new_label: "" - new_value: "" submatch_case: "" - action: insert aggregation_type: "" - include: DCGM_FI_DEV_FB_TOTAL + include: DCGM_FI_DEV_GPU_UTIL match_type: "" - new_name: node_gpu_memory_total + new_name: node_gpu_utilization operations: - action: add_label aggregation_type: "" @@ -955,19 +955,12 @@ processors: label_value: "" new_label: Type new_value: NodeGPU - - action: experimental_scale_value - aggregation_type: "" - experimental_scale: 1.048576e+06 - label: "" - label_value: "" - new_label: "" - new_value: "" submatch_case: "" - action: insert aggregation_type: "" - include: DCGM_FI_DEV_GPU_TEMP + include: DCGM_FI_DEV_FB_USED_PERCENT match_type: "" - new_name: container_gpu_temperature + new_name: container_gpu_memory_utilization operations: - action: add_label aggregation_type: "" @@ -976,12 +969,19 @@ processors: label_value: "" new_label: Type new_value: ContainerGPU + - action: experimental_scale_value + aggregation_type: "" + experimental_scale: 100 + label: "" + label_value: "" + new_label: "" + new_value: "" submatch_case: "" - action: insert aggregation_type: "" - include: DCGM_FI_DEV_GPU_TEMP + include: DCGM_FI_DEV_FB_USED_PERCENT match_type: "" - new_name: pod_gpu_temperature + new_name: pod_gpu_memory_utilization operations: - action: add_label aggregation_type: "" @@ -990,12 +990,19 @@ processors: label_value: "" new_label: Type new_value: PodGPU + - action: experimental_scale_value + aggregation_type: "" + experimental_scale: 100 + label: 
"" + label_value: "" + new_label: "" + new_value: "" submatch_case: "" - action: insert aggregation_type: "" - include: DCGM_FI_DEV_GPU_TEMP + include: DCGM_FI_DEV_FB_USED_PERCENT match_type: "" - new_name: node_gpu_temperature + new_name: node_gpu_memory_utilization operations: - action: add_label aggregation_type: "" @@ -1004,34 +1011,48 @@ processors: label_value: "" new_label: Type new_value: NodeGPU + - action: experimental_scale_value + aggregation_type: "" + experimental_scale: 100 + label: "" + label_value: "" + new_label: "" + new_value: "" submatch_case: "" - action: update aggregation_type: "" - include: execution_errors_total + include: neuroncore_memory_usage_model_shared_scratchpad match_type: "" - new_name: neuron_execution_errors + new_name: neuroncore_memory_usage_model_shared_scratchpad operations: [] submatch_case: "" - action: update aggregation_type: "" - include: neuroncore_memory_usage_model_code + include: neuroncore_memory_usage_runtime_memory match_type: "" - new_name: neuroncore_memory_usage_model_code + new_name: neuroncore_memory_usage_runtime_memory operations: [] submatch_case: "" - action: update aggregation_type: "" - include: neuroncore_memory_usage_runtime_memory + include: neuroncore_memory_usage_tensors match_type: "" - new_name: neuroncore_memory_usage_runtime_memory + new_name: neuroncore_memory_usage_tensors operations: [] submatch_case: "" - action: update aggregation_type: "" - include: instance_info + include: neuroncore_utilization_ratio match_type: "" - new_name: instance_info - operations: [] + new_name: neuroncore_utilization + operations: + - action: experimental_scale_value + aggregation_type: "" + experimental_scale: 100 + label: "" + label_value: "" + new_label: "" + new_value: "" submatch_case: "" - action: update aggregation_type: "" @@ -1042,9 +1063,16 @@ processors: submatch_case: "" - action: update aggregation_type: "" - include: hardware_ecc_events_total + include: execution_latency_seconds match_type: "" - 
new_name: neurondevice_hw_ecc_events + new_name: neuron_execution_latency + operations: [] + submatch_case: "" + - action: update + aggregation_type: "" + include: execution_errors_total + match_type: "" + new_name: neuron_execution_errors operations: [] submatch_case: "" - action: update @@ -1070,37 +1098,23 @@ processors: submatch_case: "" - action: update aggregation_type: "" - include: neuroncore_memory_usage_model_shared_scratchpad + include: neuroncore_memory_usage_model_code match_type: "" - new_name: neuroncore_memory_usage_model_shared_scratchpad + new_name: neuroncore_memory_usage_model_code operations: [] submatch_case: "" - action: update aggregation_type: "" - include: neuroncore_memory_usage_tensors + include: instance_info match_type: "" - new_name: neuroncore_memory_usage_tensors + new_name: instance_info operations: [] submatch_case: "" - action: update aggregation_type: "" - include: neuroncore_utilization_ratio - match_type: "" - new_name: neuroncore_utilization - operations: - - action: experimental_scale_value - aggregation_type: "" - experimental_scale: 100 - label: "" - label_value: "" - new_label: "" - new_value: "" - submatch_case: "" - - action: update - aggregation_type: "" - include: execution_latency_seconds + include: hardware_ecc_events_total match_type: "" - new_name: neuron_execution_latency + new_name: neurondevice_hw_ecc_events operations: [] submatch_case: "" receivers: @@ -1180,6 +1194,7 @@ service: - awsemf/containerinsights processors: - batch/containerinsights + - filter/containerinsights - metricstransform/containerinsights - gpuattributes/containerinsights receivers: @@ -1199,4 +1214,5 @@ service: metrics: address: "" level: None - traces: {} + traces: + level: None diff --git a/translator/tocwconfig/sampleConfig/emf_and_kubernetes_with_kueue_config.yaml b/translator/tocwconfig/sampleConfig/emf_and_kubernetes_with_kueue_config.yaml index f59e0a6639..19dfb38a63 100644 --- 
a/translator/tocwconfig/sampleConfig/emf_and_kubernetes_with_kueue_config.yaml +++ b/translator/tocwconfig/sampleConfig/emf_and_kubernetes_with_kueue_config.yaml @@ -506,6 +506,34 @@ processors: send_batch_max_size: 0 send_batch_size: 8192 timeout: 5s + filter/containerinsights: + error_mode: propagate + logs: {} + metrics: + exclude: + match_type: strict + metric_names: + - up + - scrape_duration_seconds + - scrape_samples_scraped + - scrape_series_added + - scrape_samples_post_metric_relabeling + spans: {} + traces: {} + filter/kueueContainerInsights: + error_mode: propagate + logs: {} + metrics: + exclude: + match_type: strict + metric_names: + - up + - scrape_duration_seconds + - scrape_samples_scraped + - scrape_series_added + - scrape_samples_post_metric_relabeling + spans: {} + traces: {} kueueattributes/kueueContainerInsights: {} metricstransform/containerinsights: transforms: @@ -597,6 +625,7 @@ service: - awsemf/containerinsights processors: - batch/containerinsights + - filter/containerinsights - metricstransform/containerinsights receivers: - awscontainerinsightreceiver @@ -605,6 +634,7 @@ service: - awsemf/kueueContainerInsights processors: - batch/kueueContainerInsights + - filter/kueueContainerInsights - kueueattributes/kueueContainerInsights receivers: - awscontainerinsightskueuereceiver @@ -623,4 +653,5 @@ service: metrics: address: "" level: None - traces: {} + traces: + level: None diff --git a/translator/tocwconfig/sampleConfig/ignore_append_dimensions.yaml b/translator/tocwconfig/sampleConfig/ignore_append_dimensions.yaml index 4ce3f21361..7f31f7919e 100644 --- a/translator/tocwconfig/sampleConfig/ignore_append_dimensions.yaml +++ b/translator/tocwconfig/sampleConfig/ignore_append_dimensions.yaml @@ -35,7 +35,8 @@ processors: ec2tagger: imds_retries: 1 middleware: agenthealth/statuscode - refresh_interval_seconds: 0s + refresh_tags_interval: 0s + refresh_volumes_interval: 0s receivers: telegraf_disk: collection_interval: 1m0s @@ -77,4 +78,5 @@ 
service: metrics: address: "" level: None - traces: {} + traces: + level: None diff --git a/translator/tocwconfig/sampleConfig/invalid_input_linux.yaml b/translator/tocwconfig/sampleConfig/invalid_input_linux.yaml index 4cb6979b8e..8ee36bea83 100644 --- a/translator/tocwconfig/sampleConfig/invalid_input_linux.yaml +++ b/translator/tocwconfig/sampleConfig/invalid_input_linux.yaml @@ -36,12 +36,13 @@ processors: ec2_instance_tag_keys: - AutoScalingGroupName ec2_metadata_tags: - - InstanceType - ImageId - InstanceId + - InstanceType imds_retries: 1 middleware: agenthealth/statuscode - refresh_interval_seconds: 0s + refresh_tags_interval: 0s + refresh_volumes_interval: 0s receivers: telegraf_disk: collection_interval: 1m0s @@ -83,4 +84,5 @@ service: metrics: address: "" level: None - traces: {} + traces: + level: None diff --git a/translator/tocwconfig/sampleConfig/jmx_config_linux.yaml b/translator/tocwconfig/sampleConfig/jmx_config_linux.yaml index 9198fadc61..d9cfe0785e 100644 --- a/translator/tocwconfig/sampleConfig/jmx_config_linux.yaml +++ b/translator/tocwconfig/sampleConfig/jmx_config_linux.yaml @@ -22,7 +22,11 @@ exporters: enabled: false http2_ping_timeout: 0s http2_read_idle_timeout: 0s + idle_conn_timeout: 1m30s max_batch_size_bytes: 3000000 + max_conns_per_host: 0 + max_idle_conns: 100 + max_idle_conns_per_host: 0 namespace: "" proxy_url: "" read_buffer_size: 0 @@ -149,9 +153,9 @@ processors: metric_statements: - context: metric statements: - - set(name, "kafka.fetch-rate") where name == "kafka.consumer.fetch-rate" - set(unit, "unit") where name == "jvm.memory.heap.used" - set(name, "JVM_MEM_HEAP_USED") where name == "jvm.memory.heap.used" + - set(name, "kafka.fetch-rate") where name == "kafka.consumer.fetch-rate" trace_statements: [] receivers: jmx: @@ -190,8 +194,8 @@ service: exporters: - awscloudwatch processors: - - awsentity/resource - transform + - awsentity/resource receivers: - telegraf_cpu - telegraf_disk @@ -232,4 +236,5 @@ service: metrics: 
address: "" level: None - traces: {} + traces: + level: None diff --git a/translator/tocwconfig/sampleConfig/jmx_eks_config_linux.yaml b/translator/tocwconfig/sampleConfig/jmx_eks_config_linux.yaml index 2d73ac7668..0fbf745955 100644 --- a/translator/tocwconfig/sampleConfig/jmx_eks_config_linux.yaml +++ b/translator/tocwconfig/sampleConfig/jmx_eks_config_linux.yaml @@ -19,7 +19,11 @@ exporters: enabled: false http2_ping_timeout: 0s http2_read_idle_timeout: 0s + idle_conn_timeout: 1m30s max_batch_size_bytes: 3000000 + max_conns_per_host: 0 + max_idle_conns: 100 + max_idle_conns_per_host: 0 namespace: "" proxy_url: "" read_buffer_size: 0 @@ -211,11 +215,15 @@ receivers: protocols: http: endpoint: 0.0.0.0:4314 + idle_timeout: 0s include_metadata: false logs_url_path: /v1/logs max_request_body_size: 0 metrics_url_path: /v1/metrics + read_header_timeout: 0s + read_timeout: 0s traces_url_path: /v1/traces + write_timeout: 0s service: extensions: - agenthealth/metrics @@ -285,4 +293,5 @@ service: metrics: address: "" level: None - traces: {} + traces: + level: None diff --git a/translator/tocwconfig/sampleConfig/kubernetes_on_prem_config.yaml b/translator/tocwconfig/sampleConfig/kubernetes_on_prem_config.yaml index d8d8dbb5c2..d7ca0d4ab7 100644 --- a/translator/tocwconfig/sampleConfig/kubernetes_on_prem_config.yaml +++ b/translator/tocwconfig/sampleConfig/kubernetes_on_prem_config.yaml @@ -383,6 +383,20 @@ processors: send_batch_max_size: 0 send_batch_size: 8192 timeout: 5s + filter/containerinsights: + error_mode: propagate + logs: {} + metrics: + exclude: + match_type: strict + metric_names: + - up + - scrape_duration_seconds + - scrape_samples_scraped + - scrape_series_added + - scrape_samples_post_metric_relabeling + spans: {} + traces: {} metricstransform/containerinsights: transforms: - action: insert @@ -436,6 +450,7 @@ service: - awsemf/containerinsights processors: - batch/containerinsights + - filter/containerinsights - metricstransform/containerinsights 
receivers: - awscontainerinsightreceiver @@ -454,4 +469,5 @@ service: metrics: address: "" level: None - traces: {} + traces: + level: None diff --git a/translator/tocwconfig/sampleConfig/kueue_container_insights_config.yaml b/translator/tocwconfig/sampleConfig/kueue_container_insights_config.yaml index afc2bc77ed..38144eea48 100644 --- a/translator/tocwconfig/sampleConfig/kueue_container_insights_config.yaml +++ b/translator/tocwconfig/sampleConfig/kueue_container_insights_config.yaml @@ -252,6 +252,34 @@ processors: send_batch_max_size: 0 send_batch_size: 8192 timeout: 5s + filter/containerinsights: + error_mode: propagate + logs: {} + metrics: + exclude: + match_type: strict + metric_names: + - up + - scrape_duration_seconds + - scrape_samples_scraped + - scrape_series_added + - scrape_samples_post_metric_relabeling + spans: {} + traces: {} + filter/kueueContainerInsights: + error_mode: propagate + logs: {} + metrics: + exclude: + match_type: strict + metric_names: + - up + - scrape_duration_seconds + - scrape_samples_scraped + - scrape_series_added + - scrape_samples_post_metric_relabeling + spans: {} + traces: {} kueueattributes/kueueContainerInsights: {} receivers: awscontainerinsightreceiver: @@ -331,6 +359,7 @@ service: - awsemf/containerinsights processors: - batch/containerinsights + - filter/containerinsights receivers: - awscontainerinsightreceiver metrics/kueueContainerInsights: @@ -338,6 +367,7 @@ service: - awsemf/kueueContainerInsights processors: - batch/kueueContainerInsights + - filter/kueueContainerInsights - kueueattributes/kueueContainerInsights receivers: - awscontainerinsightskueuereceiver @@ -356,4 +386,5 @@ service: metrics: address: "" level: None - traces: {} + traces: + level: None diff --git a/translator/tocwconfig/sampleConfig/log_ecs_metric_only.yaml b/translator/tocwconfig/sampleConfig/log_ecs_metric_only.yaml index 1b0e0ee4c3..2e03863be9 100644 --- a/translator/tocwconfig/sampleConfig/log_ecs_metric_only.yaml +++ 
b/translator/tocwconfig/sampleConfig/log_ecs_metric_only.yaml @@ -115,6 +115,20 @@ processors: send_batch_max_size: 0 send_batch_size: 8192 timeout: 5s + filter/containerinsights: + error_mode: propagate + logs: {} + metrics: + exclude: + match_type: strict + metric_names: + - up + - scrape_duration_seconds + - scrape_samples_scraped + - scrape_series_added + - scrape_samples_post_metric_relabeling + spans: {} + traces: {} receivers: awscontainerinsightreceiver: accelerated_compute_metrics: true @@ -189,6 +203,7 @@ service: - awsemf/containerinsights processors: - batch/containerinsights + - filter/containerinsights receivers: - awscontainerinsightreceiver telemetry: @@ -206,4 +221,5 @@ service: metrics: address: "" level: None - traces: {} + traces: + level: None diff --git a/translator/tocwconfig/sampleConfig/log_filter.yaml b/translator/tocwconfig/sampleConfig/log_filter.yaml index 6ca631da29..0991434ce2 100644 --- a/translator/tocwconfig/sampleConfig/log_filter.yaml +++ b/translator/tocwconfig/sampleConfig/log_filter.yaml @@ -33,4 +33,5 @@ service: metrics: address: "" level: None - traces: {} + traces: + level: None diff --git a/translator/tocwconfig/sampleConfig/log_only_config_windows.yaml b/translator/tocwconfig/sampleConfig/log_only_config_windows.yaml index 391143c01f..558843bdd0 100644 --- a/translator/tocwconfig/sampleConfig/log_only_config_windows.yaml +++ b/translator/tocwconfig/sampleConfig/log_only_config_windows.yaml @@ -33,4 +33,5 @@ service: metrics: address: "" level: None - traces: {} + traces: + level: None diff --git a/translator/tocwconfig/sampleConfig/logs_and_kubernetes_config.yaml b/translator/tocwconfig/sampleConfig/logs_and_kubernetes_config.yaml index 46216e4031..23c0654c26 100644 --- a/translator/tocwconfig/sampleConfig/logs_and_kubernetes_config.yaml +++ b/translator/tocwconfig/sampleConfig/logs_and_kubernetes_config.yaml @@ -415,6 +415,20 @@ processors: send_batch_max_size: 0 send_batch_size: 8192 timeout: 5s + 
filter/containerinsights: + error_mode: propagate + logs: {} + metrics: + exclude: + match_type: strict + metric_names: + - up + - scrape_duration_seconds + - scrape_samples_scraped + - scrape_series_added + - scrape_samples_post_metric_relabeling + spans: {} + traces: {} metricstransform/containerinsights: transforms: - action: insert @@ -500,6 +514,7 @@ service: - awsemf/containerinsights processors: - batch/containerinsights + - filter/containerinsights - metricstransform/containerinsights receivers: - awscontainerinsightreceiver @@ -518,4 +533,5 @@ service: metrics: address: "" level: None - traces: {} + traces: + level: None diff --git a/translator/tocwconfig/sampleConfig/no_skip_log_timestamp.yaml b/translator/tocwconfig/sampleConfig/no_skip_log_timestamp.yaml index 11a14c9ad5..9ba5b0ef7a 100644 --- a/translator/tocwconfig/sampleConfig/no_skip_log_timestamp.yaml +++ b/translator/tocwconfig/sampleConfig/no_skip_log_timestamp.yaml @@ -33,4 +33,5 @@ service: metrics: address: "" level: None - traces: {} + traces: + level: None diff --git a/translator/tocwconfig/sampleConfig/no_skip_log_timestamp_windows.yaml b/translator/tocwconfig/sampleConfig/no_skip_log_timestamp_windows.yaml index 391143c01f..558843bdd0 100644 --- a/translator/tocwconfig/sampleConfig/no_skip_log_timestamp_windows.yaml +++ b/translator/tocwconfig/sampleConfig/no_skip_log_timestamp_windows.yaml @@ -33,4 +33,5 @@ service: metrics: address: "" level: None - traces: {} + traces: + level: None diff --git a/translator/tocwconfig/sampleConfig/otlp_metrics_cloudwatchlogs_config.yaml b/translator/tocwconfig/sampleConfig/otlp_metrics_cloudwatchlogs_config.yaml index f9649de2e4..87d31a9658 100644 --- a/translator/tocwconfig/sampleConfig/otlp_metrics_cloudwatchlogs_config.yaml +++ b/translator/tocwconfig/sampleConfig/otlp_metrics_cloudwatchlogs_config.yaml @@ -85,10 +85,13 @@ receivers: write_buffer_size: 0 http: endpoint: 0.0.0.0:2345 + idle_timeout: 0s include_metadata: false logs_url_path: /v1/logs 
max_request_body_size: 0 metrics_url_path: /v1/metrics + read_header_timeout: 0s + read_timeout: 0s tls: ca_file: "" cert_file: /path/to/cert.pem @@ -100,6 +103,7 @@ receivers: min_version: "" reload_interval: 0s traces_url_path: /v1/traces + write_timeout: 0s service: extensions: - agenthealth/logs @@ -131,4 +135,5 @@ service: metrics: address: "" level: None - traces: {} + traces: + level: None diff --git a/translator/tocwconfig/sampleConfig/otlp_metrics_cloudwatchlogs_eks_config.yaml b/translator/tocwconfig/sampleConfig/otlp_metrics_cloudwatchlogs_eks_config.yaml index 5534b3815e..3910d11190 100644 --- a/translator/tocwconfig/sampleConfig/otlp_metrics_cloudwatchlogs_eks_config.yaml +++ b/translator/tocwconfig/sampleConfig/otlp_metrics_cloudwatchlogs_eks_config.yaml @@ -48,6 +48,7 @@ extensions: kubernetes_mode: K8sEC2 mode: ec2 region: us-west-2 + k8smetadata: {} server: listen_addr: :4311 tls_ca_path: /etc/amazon-cloudwatch-observability-agent-client-cert/tls-ca.crt @@ -95,10 +96,13 @@ receivers: write_buffer_size: 0 http: endpoint: 0.0.0.0:2345 + idle_timeout: 0s include_metadata: false logs_url_path: /v1/logs max_request_body_size: 0 metrics_url_path: /v1/metrics + read_header_timeout: 0s + read_timeout: 0s tls: ca_file: "" cert_file: /path/to/cert.pem @@ -110,8 +114,10 @@ receivers: min_version: "" reload_interval: 0s traces_url_path: /v1/traces + write_timeout: 0s service: extensions: + - k8smetadata - agenthealth/logs - agenthealth/statuscode - entitystore @@ -121,8 +127,8 @@ service: exporters: - awsemf processors: - - awsentity/service/otlp - cumulativetodelta/hostOtlpMetrics/cloudwatchlogs + - awsentity/service/otlp - batch/hostOtlpMetrics/cloudwatchlogs receivers: - otlp/metrics @@ -141,4 +147,5 @@ service: metrics: address: "" level: None - traces: {} + traces: + level: None diff --git a/translator/tocwconfig/sampleConfig/otlp_metrics_config.yaml b/translator/tocwconfig/sampleConfig/otlp_metrics_config.yaml index 4f5db5136a..aa38947620 100644 --- 
a/translator/tocwconfig/sampleConfig/otlp_metrics_config.yaml +++ b/translator/tocwconfig/sampleConfig/otlp_metrics_config.yaml @@ -44,7 +44,8 @@ processors: - InstanceType imds_retries: 1 middleware: agenthealth/statuscode - refresh_interval_seconds: 0s + refresh_tags_interval: 0s + refresh_volumes_interval: 0s receivers: otlp/metrics: protocols: @@ -70,10 +71,13 @@ receivers: write_buffer_size: 0 http: endpoint: 0.0.0.0:2345 + idle_timeout: 0s include_metadata: false logs_url_path: /v1/logs max_request_body_size: 0 metrics_url_path: /v1/metrics + read_header_timeout: 0s + read_timeout: 0s tls: ca_file: "" cert_file: /path/to/cert.pem @@ -85,6 +89,7 @@ receivers: min_version: "" reload_interval: 0s traces_url_path: /v1/traces + write_timeout: 0s service: extensions: - agenthealth/metrics @@ -116,4 +121,5 @@ service: metrics: address: "" level: None - traces: {} + traces: + level: None diff --git a/translator/tocwconfig/sampleConfig/otlp_metrics_eks_config.yaml b/translator/tocwconfig/sampleConfig/otlp_metrics_eks_config.yaml index a55f30cb6d..366ff4ada7 100644 --- a/translator/tocwconfig/sampleConfig/otlp_metrics_eks_config.yaml +++ b/translator/tocwconfig/sampleConfig/otlp_metrics_eks_config.yaml @@ -28,6 +28,7 @@ extensions: kubernetes_mode: K8sEC2 mode: ec2 region: us-west-2 + k8smetadata: {} server: listen_addr: :4311 tls_ca_path: /etc/amazon-cloudwatch-observability-agent-client-cert/tls-ca.crt @@ -54,7 +55,8 @@ processors: - InstanceType imds_retries: 1 middleware: agenthealth/statuscode - refresh_interval_seconds: 0s + refresh_tags_interval: 0s + refresh_volumes_interval: 0s receivers: otlp/metrics: protocols: @@ -80,10 +82,13 @@ receivers: write_buffer_size: 0 http: endpoint: 0.0.0.0:2345 + idle_timeout: 0s include_metadata: false logs_url_path: /v1/logs max_request_body_size: 0 metrics_url_path: /v1/metrics + read_header_timeout: 0s + read_timeout: 0s tls: ca_file: "" cert_file: /path/to/cert.pem @@ -95,8 +100,10 @@ receivers: min_version: "" 
reload_interval: 0s traces_url_path: /v1/traces + write_timeout: 0s service: extensions: + - k8smetadata - agenthealth/metrics - agenthealth/statuscode - entitystore @@ -106,9 +113,9 @@ service: exporters: - awscloudwatch processors: - - awsentity/service/otlp - cumulativetodelta/hostOtlpMetrics - ec2tagger + - awsentity/service/otlp receivers: - otlp/metrics telemetry: @@ -126,4 +133,5 @@ service: metrics: address: "" level: None - traces: {} + traces: + level: None diff --git a/translator/tocwconfig/sampleConfig/procstat_memory_swap_config.yaml b/translator/tocwconfig/sampleConfig/procstat_memory_swap_config.yaml index 4df9b2e012..79441f6e4e 100644 --- a/translator/tocwconfig/sampleConfig/procstat_memory_swap_config.yaml +++ b/translator/tocwconfig/sampleConfig/procstat_memory_swap_config.yaml @@ -66,4 +66,5 @@ service: metrics: address: "" level: None - traces: {} + traces: + level: None diff --git a/translator/tocwconfig/sampleConfig/prometheus_combined_config_linux.yaml b/translator/tocwconfig/sampleConfig/prometheus_combined_config_linux.yaml index 56d9630267..0e1772467e 100644 --- a/translator/tocwconfig/sampleConfig/prometheus_combined_config_linux.yaml +++ b/translator/tocwconfig/sampleConfig/prometheus_combined_config_linux.yaml @@ -79,7 +79,11 @@ exporters: enabled: false http2_ping_timeout: 0s http2_read_idle_timeout: 0s + idle_conn_timeout: 1m30s max_batch_size_bytes: 3000000 + max_conns_per_host: 0 + max_idle_conns: 100 + max_idle_conns_per_host: 0 namespace: "" proxy_url: "" read_buffer_size: 0 @@ -224,4 +228,5 @@ service: metrics: address: "" level: None - traces: {} + traces: + level: None diff --git a/translator/tocwconfig/sampleConfig/prometheus_config_linux.yaml b/translator/tocwconfig/sampleConfig/prometheus_config_linux.yaml index 2a6bd82212..a5dbfb6720 100644 --- a/translator/tocwconfig/sampleConfig/prometheus_config_linux.yaml +++ b/translator/tocwconfig/sampleConfig/prometheus_config_linux.yaml @@ -126,4 +126,5 @@ service: metrics: address: 
"" level: None - traces: {} + traces: + level: None diff --git a/translator/tocwconfig/sampleConfig/prometheus_config_windows.yaml b/translator/tocwconfig/sampleConfig/prometheus_config_windows.yaml index 45211718b3..5e1a000526 100644 --- a/translator/tocwconfig/sampleConfig/prometheus_config_windows.yaml +++ b/translator/tocwconfig/sampleConfig/prometheus_config_windows.yaml @@ -108,4 +108,5 @@ service: metrics: address: "" level: None - traces: {} + traces: + level: None diff --git a/translator/tocwconfig/sampleConfig/prometheus_otel_config_linux.yaml b/translator/tocwconfig/sampleConfig/prometheus_otel_config_linux.yaml index 4707009580..c39ceb9b77 100644 --- a/translator/tocwconfig/sampleConfig/prometheus_otel_config_linux.yaml +++ b/translator/tocwconfig/sampleConfig/prometheus_otel_config_linux.yaml @@ -10,7 +10,11 @@ exporters: enabled: false http2_ping_timeout: 0s http2_read_idle_timeout: 0s + idle_conn_timeout: 1m30s max_batch_size_bytes: 3000000 + max_conns_per_host: 0 + max_idle_conns: 100 + max_idle_conns_per_host: 0 namespace: "" proxy_url: "" read_buffer_size: 0 @@ -120,4 +124,5 @@ service: metrics: address: "" level: None - traces: {} + traces: + level: None diff --git a/translator/tocwconfig/sampleConfig/skip_log_timestamp.yaml b/translator/tocwconfig/sampleConfig/skip_log_timestamp.yaml index e417a5320a..dc09e627d0 100644 --- a/translator/tocwconfig/sampleConfig/skip_log_timestamp.yaml +++ b/translator/tocwconfig/sampleConfig/skip_log_timestamp.yaml @@ -33,4 +33,5 @@ service: metrics: address: "" level: None - traces: {} + traces: + level: None diff --git a/translator/tocwconfig/sampleConfig/skip_log_timestamp_default.yaml b/translator/tocwconfig/sampleConfig/skip_log_timestamp_default.yaml index 11a14c9ad5..9ba5b0ef7a 100644 --- a/translator/tocwconfig/sampleConfig/skip_log_timestamp_default.yaml +++ b/translator/tocwconfig/sampleConfig/skip_log_timestamp_default.yaml @@ -33,4 +33,5 @@ service: metrics: address: "" level: None - traces: {} + 
traces: + level: None diff --git a/translator/tocwconfig/sampleConfig/skip_log_timestamp_default_windows.yaml b/translator/tocwconfig/sampleConfig/skip_log_timestamp_default_windows.yaml index 391143c01f..558843bdd0 100644 --- a/translator/tocwconfig/sampleConfig/skip_log_timestamp_default_windows.yaml +++ b/translator/tocwconfig/sampleConfig/skip_log_timestamp_default_windows.yaml @@ -33,4 +33,5 @@ service: metrics: address: "" level: None - traces: {} + traces: + level: None diff --git a/translator/tocwconfig/sampleConfig/skip_log_timestamp_windows.yaml b/translator/tocwconfig/sampleConfig/skip_log_timestamp_windows.yaml index d29a89a498..9d3de0849a 100644 --- a/translator/tocwconfig/sampleConfig/skip_log_timestamp_windows.yaml +++ b/translator/tocwconfig/sampleConfig/skip_log_timestamp_windows.yaml @@ -33,4 +33,5 @@ service: metrics: address: "" level: None - traces: {} + traces: + level: None diff --git a/translator/tocwconfig/sampleConfig/standard_config_linux.yaml b/translator/tocwconfig/sampleConfig/standard_config_linux.yaml index cae3e6f3ed..aa33959532 100644 --- a/translator/tocwconfig/sampleConfig/standard_config_linux.yaml +++ b/translator/tocwconfig/sampleConfig/standard_config_linux.yaml @@ -50,7 +50,8 @@ processors: - InstanceId - InstanceType middleware: agenthealth/statuscode - refresh_interval_seconds: 0s + refresh_tags_interval: 0s + refresh_volumes_interval: 0s receivers: telegraf_cpu: collection_interval: 1m0s @@ -85,10 +86,10 @@ service: - ec2tagger - awsentity/resource receivers: - - telegraf_mem - telegraf_swap - telegraf_cpu - telegraf_disk + - telegraf_mem metrics/hostDeltaMetrics: exporters: - awscloudwatch @@ -115,4 +116,5 @@ service: metrics: address: "" level: None - traces: {} + traces: + level: None diff --git a/translator/tocwconfig/sampleConfig/standard_config_linux_with_common_config.yaml b/translator/tocwconfig/sampleConfig/standard_config_linux_with_common_config.yaml index 8da70feec4..d498996f4f 100644 --- 
a/translator/tocwconfig/sampleConfig/standard_config_linux_with_common_config.yaml +++ b/translator/tocwconfig/sampleConfig/standard_config_linux_with_common_config.yaml @@ -50,13 +50,14 @@ processors: ec2_instance_tag_keys: - AutoScalingGroupName ec2_metadata_tags: + - InstanceType - ImageId - InstanceId - - InstanceType imds_retries: 2 middleware: agenthealth/statuscode profile: AmazonCloudWatchAgent - refresh_interval_seconds: 0s + refresh_tags_interval: 0s + refresh_volumes_interval: 0s shared_credential_file: fake-path receivers: telegraf_cpu: @@ -92,10 +93,10 @@ service: - ec2tagger - awsentity/resource receivers: + - telegraf_swap - telegraf_cpu - telegraf_disk - telegraf_mem - - telegraf_swap metrics/hostDeltaMetrics: exporters: - awscloudwatch @@ -122,4 +123,5 @@ service: metrics: address: "" level: None - traces: {} + traces: + level: None diff --git a/translator/tocwconfig/sampleConfig/standard_config_windows.yaml b/translator/tocwconfig/sampleConfig/standard_config_windows.yaml index c065028b6d..96d6c2409c 100644 --- a/translator/tocwconfig/sampleConfig/standard_config_windows.yaml +++ b/translator/tocwconfig/sampleConfig/standard_config_windows.yaml @@ -40,7 +40,8 @@ processors: - InstanceId - InstanceType middleware: agenthealth/statuscode - refresh_interval_seconds: 0s + refresh_tags_interval: 0s + refresh_volumes_interval: 0s receivers: telegraf_win_perf_counters/1492679118: alias_name: Memory @@ -80,11 +81,11 @@ service: - ec2tagger - awsentity/resource receivers: + - telegraf_win_perf_counters/3610923661 + - telegraf_win_perf_counters/3446270237 - telegraf_win_perf_counters/3762679655 - telegraf_win_perf_counters/4283769065 - telegraf_win_perf_counters/1492679118 - - telegraf_win_perf_counters/3610923661 - - telegraf_win_perf_counters/3446270237 telemetry: logs: development: false @@ -102,4 +103,5 @@ service: metrics: address: "" level: None - traces: {} + traces: + level: None diff --git 
a/translator/tocwconfig/sampleConfig/standard_config_windows_with_common_config.yaml b/translator/tocwconfig/sampleConfig/standard_config_windows_with_common_config.yaml index 520722cc40..0417a1f8e6 100644 --- a/translator/tocwconfig/sampleConfig/standard_config_windows_with_common_config.yaml +++ b/translator/tocwconfig/sampleConfig/standard_config_windows_with_common_config.yaml @@ -40,13 +40,14 @@ processors: ec2_instance_tag_keys: - AutoScalingGroupName ec2_metadata_tags: - - InstanceType - ImageId - InstanceId + - InstanceType imds_retries: 2 middleware: agenthealth/statuscode profile: AmazonCloudWatchAgent - refresh_interval_seconds: 0s + refresh_tags_interval: 0s + refresh_volumes_interval: 0s shared_credential_file: fake-path receivers: telegraf_win_perf_counters/1492679118: @@ -87,11 +88,11 @@ service: - ec2tagger - awsentity/resource receivers: - - telegraf_win_perf_counters/3762679655 - telegraf_win_perf_counters/4283769065 - telegraf_win_perf_counters/1492679118 - telegraf_win_perf_counters/3610923661 - telegraf_win_perf_counters/3446270237 + - telegraf_win_perf_counters/3762679655 telemetry: logs: development: false @@ -109,4 +110,5 @@ service: metrics: address: "" level: None - traces: {} + traces: + level: None diff --git a/translator/tocwconfig/sampleConfig/statsd_config_linux.yaml b/translator/tocwconfig/sampleConfig/statsd_config_linux.yaml index 0457d568e3..c780960680 100644 --- a/translator/tocwconfig/sampleConfig/statsd_config_linux.yaml +++ b/translator/tocwconfig/sampleConfig/statsd_config_linux.yaml @@ -67,4 +67,5 @@ service: metrics: address: "" level: None - traces: {} + traces: + level: None diff --git a/translator/tocwconfig/sampleConfig/statsd_config_windows.yaml b/translator/tocwconfig/sampleConfig/statsd_config_windows.yaml index 1183351c3a..8013179877 100644 --- a/translator/tocwconfig/sampleConfig/statsd_config_windows.yaml +++ b/translator/tocwconfig/sampleConfig/statsd_config_windows.yaml @@ -67,4 +67,5 @@ service: metrics: 
address: "" level: None - traces: {} + traces: + level: None diff --git a/translator/tocwconfig/sampleConfig/statsd_ecs_config.yaml b/translator/tocwconfig/sampleConfig/statsd_ecs_config.yaml index f4efbd3b82..c46dbdf101 100644 --- a/translator/tocwconfig/sampleConfig/statsd_ecs_config.yaml +++ b/translator/tocwconfig/sampleConfig/statsd_ecs_config.yaml @@ -55,4 +55,5 @@ service: metrics: address: "" level: None - traces: {} + traces: + level: None diff --git a/translator/tocwconfig/sampleConfig/statsd_eks_config.yaml b/translator/tocwconfig/sampleConfig/statsd_eks_config.yaml index d8dc31630e..7c6ef6bb84 100644 --- a/translator/tocwconfig/sampleConfig/statsd_eks_config.yaml +++ b/translator/tocwconfig/sampleConfig/statsd_eks_config.yaml @@ -66,4 +66,5 @@ service: metrics: address: "" level: None - traces: {} + traces: + level: None diff --git a/translator/tocwconfig/sampleConfig/trace_config_linux.yaml b/translator/tocwconfig/sampleConfig/trace_config_linux.yaml index b90c7dbdda..3262c61186 100644 --- a/translator/tocwconfig/sampleConfig/trace_config_linux.yaml +++ b/translator/tocwconfig/sampleConfig/trace_config_linux.yaml @@ -83,11 +83,15 @@ receivers: write_buffer_size: 0 http: endpoint: 127.0.0.1:4318 + idle_timeout: 0s include_metadata: false logs_url_path: /v1/logs max_request_body_size: 0 metrics_url_path: /v1/metrics + read_header_timeout: 0s + read_timeout: 0s traces_url_path: /v1/traces + write_timeout: 0s service: extensions: - agenthealth/traces @@ -119,4 +123,5 @@ service: metrics: address: "" level: None - traces: {} + traces: + level: None diff --git a/translator/tocwconfig/sampleConfig/trace_config_windows.yaml b/translator/tocwconfig/sampleConfig/trace_config_windows.yaml index b3cac668b7..2748e10048 100644 --- a/translator/tocwconfig/sampleConfig/trace_config_windows.yaml +++ b/translator/tocwconfig/sampleConfig/trace_config_windows.yaml @@ -83,11 +83,15 @@ receivers: write_buffer_size: 0 http: endpoint: 127.0.0.1:4318 + idle_timeout: 0s 
include_metadata: false logs_url_path: /v1/logs max_request_body_size: 0 metrics_url_path: /v1/metrics + read_header_timeout: 0s + read_timeout: 0s traces_url_path: /v1/traces + write_timeout: 0s service: extensions: - agenthealth/traces @@ -119,4 +123,5 @@ service: metrics: address: "" level: None - traces: {} + traces: + level: None diff --git a/translator/tocwconfig/sampleConfig/windows_eventlog_only_config.yaml b/translator/tocwconfig/sampleConfig/windows_eventlog_only_config.yaml index 391143c01f..558843bdd0 100644 --- a/translator/tocwconfig/sampleConfig/windows_eventlog_only_config.yaml +++ b/translator/tocwconfig/sampleConfig/windows_eventlog_only_config.yaml @@ -33,4 +33,5 @@ service: metrics: address: "" level: None - traces: {} + traces: + level: None diff --git a/translator/tocwconfig/tocwconfig_test.go b/translator/tocwconfig/tocwconfig_test.go index 7b45433150..dc69be4ed4 100644 --- a/translator/tocwconfig/tocwconfig_test.go +++ b/translator/tocwconfig/tocwconfig_test.go @@ -27,6 +27,7 @@ import ( "github.com/aws/amazon-cloudwatch-agent/cfg/commonconfig" "github.com/aws/amazon-cloudwatch-agent/cfg/envconfig" "github.com/aws/amazon-cloudwatch-agent/internal/retryer" + "github.com/aws/amazon-cloudwatch-agent/tool/testutil" "github.com/aws/amazon-cloudwatch-agent/translator" "github.com/aws/amazon-cloudwatch-agent/translator/cmdutil" "github.com/aws/amazon-cloudwatch-agent/translator/config" @@ -426,52 +427,52 @@ func TestPrometheusConfigwithTargetAllocator(t *testing.T) { } -//func TestOtelPrometheusConfig(t *testing.T) { -// resetContext(t) -// context.CurrentContext().SetRunInContainer(true) -// context.CurrentContext().SetMode(config.ModeEC2) -// testutil.SetPrometheusRemoteWriteTestingEnv(t) -// t.Setenv(config.HOST_NAME, "host_name_from_env") -// temp := t.TempDir() -// prometheusConfigFileName := filepath.Join(temp, "prometheus.yaml") -// ecsSdFileName := filepath.Join(temp, "ecs_sd_results.yaml") -// expectedEnvVars := map[string]string{} -// 
tokenReplacements := map[string]string{ -// prometheusFileNameToken: strings.ReplaceAll(prometheusConfigFileName, "\\", "\\\\"), -// ecsSdFileNameToken: strings.ReplaceAll(ecsSdFileName, "\\", "\\\\"), -// } -// // Load prometheus config and replace ecs sd results file name token with temp file name -// testPrometheusConfig := strings.ReplaceAll(prometheusConfig, "{"+ecsSdFileNameToken+"}", ecsSdFileName) -// // Write the modified prometheus config to temp prometheus config file -// err := os.WriteFile(prometheusConfigFileName, []byte(testPrometheusConfig), os.ModePerm) -// require.NoError(t, err) -// // In the following checks, we first load the json and replace tokens with the temp files -// // Additionally, before comparing with actual, we again replace tokens with temp files in the expected toml & yaml -// checkTranslation(t, "prometheus_otel_config_linux", "linux", expectedEnvVars, "", tokenReplacements) -//} -// -//func TestCombinedPrometheusConfig(t *testing.T) { -// resetContext(t) -// context.CurrentContext().SetMode(config.ModeEC2) -// testutil.SetPrometheusRemoteWriteTestingEnv(t) -// t.Setenv(config.HOST_NAME, "host_name_from_env") -// temp := t.TempDir() -// prometheusConfigFileName := filepath.Join(temp, "prometheus.yaml") -// ecsSdFileName := filepath.Join(temp, "ecs_sd_results.yaml") -// expectedEnvVars := map[string]string{} -// tokenReplacements := map[string]string{ -// prometheusFileNameToken: strings.ReplaceAll(prometheusConfigFileName, "\\", "\\\\"), -// ecsSdFileNameToken: strings.ReplaceAll(ecsSdFileName, "\\", "\\\\"), -// } -// // Load prometheus config and replace ecs sd results file name token with temp file name -// testPrometheusConfig := strings.ReplaceAll(prometheusConfig, "{"+ecsSdFileNameToken+"}", ecsSdFileName) -// // Write the modified prometheus config to temp prometheus config file -// err := os.WriteFile(prometheusConfigFileName, []byte(testPrometheusConfig), os.ModePerm) -// require.NoError(t, err) -// // In the following 
checks, we first load the json and replace tokens with the temp files -// // Additionally, before comparing with actual, we again replace tokens with temp files in the expected toml & yaml -// checkTranslation(t, "prometheus_combined_config_linux", "linux", expectedEnvVars, "", tokenReplacements) -//} +func TestOtelPrometheusConfig(t *testing.T) { + resetContext(t) + context.CurrentContext().SetRunInContainer(true) + context.CurrentContext().SetMode(config.ModeEC2) + testutil.SetPrometheusRemoteWriteTestingEnv(t) + t.Setenv(config.HOST_NAME, "host_name_from_env") + temp := t.TempDir() + prometheusConfigFileName := filepath.Join(temp, "prometheus.yaml") + ecsSdFileName := filepath.Join(temp, "ecs_sd_results.yaml") + expectedEnvVars := map[string]string{} + tokenReplacements := map[string]string{ + prometheusFileNameToken: strings.ReplaceAll(prometheusConfigFileName, "\\", "\\\\"), + ecsSdFileNameToken: strings.ReplaceAll(ecsSdFileName, "\\", "\\\\"), + } + // Load prometheus config and replace ecs sd results file name token with temp file name + testPrometheusConfig := strings.ReplaceAll(prometheusConfig, "{"+ecsSdFileNameToken+"}", ecsSdFileName) + // Write the modified prometheus config to temp prometheus config file + err := os.WriteFile(prometheusConfigFileName, []byte(testPrometheusConfig), os.ModePerm) + require.NoError(t, err) + // In the following checks, we first load the json and replace tokens with the temp files + // Additionally, before comparing with actual, we again replace tokens with temp files in the expected toml & yaml + checkTranslation(t, "prometheus_otel_config_linux", "linux", expectedEnvVars, "", tokenReplacements) +} + +func TestCombinedPrometheusConfig(t *testing.T) { + resetContext(t) + context.CurrentContext().SetMode(config.ModeEC2) + testutil.SetPrometheusRemoteWriteTestingEnv(t) + t.Setenv(config.HOST_NAME, "host_name_from_env") + temp := t.TempDir() + prometheusConfigFileName := filepath.Join(temp, "prometheus.yaml") + ecsSdFileName 
:= filepath.Join(temp, "ecs_sd_results.yaml") + expectedEnvVars := map[string]string{} + tokenReplacements := map[string]string{ + prometheusFileNameToken: strings.ReplaceAll(prometheusConfigFileName, "\\", "\\\\"), + ecsSdFileNameToken: strings.ReplaceAll(ecsSdFileName, "\\", "\\\\"), + } + // Load prometheus config and replace ecs sd results file name token with temp file name + testPrometheusConfig := strings.ReplaceAll(prometheusConfig, "{"+ecsSdFileNameToken+"}", ecsSdFileName) + // Write the modified prometheus config to temp prometheus config file + err := os.WriteFile(prometheusConfigFileName, []byte(testPrometheusConfig), os.ModePerm) + require.NoError(t, err) + // In the following checks, we first load the json and replace tokens with the temp files + // Additionally, before comparing with actual, we again replace tokens with temp files in the expected toml & yaml + checkTranslation(t, "prometheus_combined_config_linux", "linux", expectedEnvVars, "", tokenReplacements) +} func TestBasicConfig(t *testing.T) { testCases := map[string]testCase{ diff --git a/translator/tocwconfig/totomlconfig/testdata/agentToml.conf b/translator/tocwconfig/totomlconfig/testdata/agentToml.conf index 4b9a68b829..4a75a90a5b 100644 --- a/translator/tocwconfig/totomlconfig/testdata/agentToml.conf +++ b/translator/tocwconfig/totomlconfig/testdata/agentToml.conf @@ -54,6 +54,7 @@ pipe = false retention_in_days = 5 timezone = "UTC" + trim_timestamp = true [[inputs.logfile.file_config]] auto_removal = true diff --git a/translator/tocwconfig/totomlconfig/testdata/agentToml.json b/translator/tocwconfig/totomlconfig/testdata/agentToml.json index bba1c17499..206d3e8fb0 100644 --- a/translator/tocwconfig/totomlconfig/testdata/agentToml.json +++ b/translator/tocwconfig/totomlconfig/testdata/agentToml.json @@ -182,7 +182,8 @@ "log_group_name": "amazon-cloudwatch-agent.log", "log_stream_name": "amazon-cloudwatch-agent.log", "timezone": "UTC", - "retention_in_days": 5 + "retention_in_days": 5, 
+ "trim_timestamp": true }, { "file_path": "/opt/aws/amazon-cloudwatch-agent/logs/test.log", diff --git a/translator/tocwconfig/totomlconfig/tomlConfigTemplate/tomlConfig.go b/translator/tocwconfig/totomlconfig/tomlConfigTemplate/tomlConfig.go index 2a26ad9bb2..b93f3832c8 100644 --- a/translator/tocwconfig/totomlconfig/tomlConfigTemplate/tomlConfig.go +++ b/translator/tocwconfig/totomlconfig/tomlConfigTemplate/tomlConfig.go @@ -126,6 +126,8 @@ type ( Pipe bool RetentionInDays int `toml:"retention_in_days"` Timezone string + //Customer specifies if the timestamp from the log message should be trimmed + TrimTimestamp bool `toml:"trim_timestamp"` //Customer specified service.name ServiceName string `toml:"service_name"` //Customer specified deployment.environment diff --git a/translator/translate/logs/logs_collected/files/collect_list/ruleTrimTimestamp.go b/translator/translate/logs/logs_collected/files/collect_list/ruleTrimTimestamp.go new file mode 100644 index 0000000000..6cd4ec8a4b --- /dev/null +++ b/translator/translate/logs/logs_collected/files/collect_list/ruleTrimTimestamp.go @@ -0,0 +1,33 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: MIT + +package collect_list //nolint:revive + +import ( + "github.com/aws/amazon-cloudwatch-agent/translator" +) + +const TrimTimestampSectionKey = "trim_timestamp" + +type TrimTimestamp struct { +} + +func (r *TrimTimestamp) ApplyRule(input interface{}) (string, interface{}) { + _, val := translator.DefaultCase(TrimTimestampSectionKey, "", input) + if val == "" { + return "", "" + } + + boolVal, ok := val.(bool) + if !ok { + return TrimTimestampSectionKey, false + } + + return TrimTimestampSectionKey, boolVal +} + +func init() { + l := new(TrimTimestamp) + r := []Rule{l} + RegisterRule(TrimTimestampSectionKey, r) +} diff --git a/translator/translate/otel/common/common.go b/translator/translate/otel/common/common.go index bd960faae1..9ab4a8c7df 100644 --- a/translator/translate/otel/common/common.go +++ b/translator/translate/otel/common/common.go @@ -14,6 +14,7 @@ import ( "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/confmap" + "go.opentelemetry.io/collector/pipeline" "gopkg.in/yaml.v3" "github.com/aws/amazon-cloudwatch-agent/translator/translate/logs/util" @@ -118,6 +119,7 @@ const ( PipelineNameContainerInsightsJmx = "containerinsightsjmx" PipelineNameEmfLogs = "emf_logs" PipelineNamePrometheus = "prometheus" + PipelineNameKueue = "kueueContainerInsights" AppSignals = "application_signals" AppSignalsFallback = "app_signals" AppSignalsRules = "rules" @@ -129,9 +131,9 @@ var ( AppSignalsTracesFallback = ConfigKey(TracesKey, TracesCollectedKey, AppSignalsFallback) AppSignalsMetricsFallback = ConfigKey(LogsKey, MetricsCollectedKey, AppSignalsFallback) - AppSignalsConfigKeys = map[component.DataType][]string{ - component.DataTypeTraces: {AppSignalsTraces, AppSignalsTracesFallback}, - component.DataTypeMetrics: {AppSignalsMetrics, AppSignalsMetricsFallback}, + AppSignalsConfigKeys = map[pipeline.Signal][]string{ + pipeline.SignalTraces: {AppSignalsTraces, AppSignalsTracesFallback}, + pipeline.SignalMetrics: 
{AppSignalsMetrics, AppSignalsMetricsFallback}, } JmxConfigKey = ConfigKey(MetricsKey, MetricsCollectedKey, JmxKey) ContainerInsightsConfigKey = ConfigKey(LogsKey, MetricsCollectedKey, KubernetesKey) @@ -142,38 +144,44 @@ var ( MetricsAggregationDimensionsKey = ConfigKey(MetricsKey, AggregationDimensionsKey) ) +type TranslatorID interface { + component.ID | pipeline.ID + + Name() string +} + // Translator is used to translate the JSON config into an // OTEL config. -type Translator[C any] interface { +type Translator[C any, ID TranslatorID] interface { Translate(*confmap.Conf) (C, error) - ID() component.ID + ID() ID } // TranslatorMap is a set of translators by their types. -type TranslatorMap[C any] interface { +type TranslatorMap[C any, ID TranslatorID] interface { // Set a translator to the map. If the ID is already present, replaces the translator. // Otherwise, adds it to the end of the list. - Set(Translator[C]) + Set(Translator[C, ID]) // Get the translator for the component.ID. - Get(component.ID) (Translator[C], bool) + Get(ID) (Translator[C, ID], bool) // Merge another translator map in. - Merge(TranslatorMap[C]) + Merge(TranslatorMap[C, ID]) // Keys is the ordered component.IDs. - Keys() []component.ID + Keys() []ID // Range iterates over each translator in order and calls the callback function on each. - Range(func(Translator[C])) + Range(func(Translator[C, ID])) // Len is the number of translators in the map. Len() int } -type translatorMap[C any] struct { +type translatorMap[C any, ID TranslatorID] struct { // list stores the ordered translators. list *list.List // lookup stores the list.Elements containing the translators by ID. 
- lookup map[component.ID]*list.Element + lookup map[ID]*list.Element } -func (t translatorMap[C]) Set(translator Translator[C]) { +func (t translatorMap[C, ID]) Set(translator Translator[C, ID]) { if element, ok := t.lookup[translator.ID()]; ok { element.Value = translator } else { @@ -182,43 +190,43 @@ func (t translatorMap[C]) Set(translator Translator[C]) { } } -func (t translatorMap[C]) Get(id component.ID) (Translator[C], bool) { +func (t translatorMap[C, ID]) Get(id ID) (Translator[C, ID], bool) { element, ok := t.lookup[id] if !ok { return nil, ok } - return element.Value.(Translator[C]), ok + return element.Value.(Translator[C, ID]), ok } -func (t translatorMap[C]) Merge(other TranslatorMap[C]) { +func (t translatorMap[C, ID]) Merge(other TranslatorMap[C, ID]) { if other != nil { other.Range(t.Set) } } -func (t translatorMap[C]) Keys() []component.ID { - keys := make([]component.ID, 0, t.Len()) - t.Range(func(translator Translator[C]) { +func (t translatorMap[C, ID]) Keys() []ID { + keys := make([]ID, 0, t.Len()) + t.Range(func(translator Translator[C, ID]) { keys = append(keys, translator.ID()) }) return keys } -func (t translatorMap[C]) Range(callback func(translator Translator[C])) { +func (t translatorMap[C, ID]) Range(callback func(translator Translator[C, ID])) { for element := t.list.Front(); element != nil; element = element.Next() { - callback(element.Value.(Translator[C])) + callback(element.Value.(Translator[C, ID])) } } -func (t translatorMap[C]) Len() int { +func (t translatorMap[C, ID]) Len() int { return t.list.Len() } // NewTranslatorMap creates a TranslatorMap from the translators. 
-func NewTranslatorMap[C any](translators ...Translator[C]) TranslatorMap[C] { - t := translatorMap[C]{ +func NewTranslatorMap[C any, ID TranslatorID](translators ...Translator[C, ID]) TranslatorMap[C, ID] { + t := translatorMap[C, ID]{ list: list.New(), - lookup: make(map[component.ID]*list.Element, len(translators)), + lookup: make(map[ID]*list.Element, len(translators)), } for _, translator := range translators { t.Set(translator) @@ -226,11 +234,15 @@ func NewTranslatorMap[C any](translators ...Translator[C]) TranslatorMap[C] { return t } +type ID interface { + String() string +} + // A MissingKeyError occurs when a translator is used for a JSON // config that does not have a required key. This typically means // that the pipeline was configured incorrectly. type MissingKeyError struct { - ID component.ID + ID ID JsonKey string } @@ -238,14 +250,26 @@ func (e *MissingKeyError) Error() string { return fmt.Sprintf("%q missing key in JSON: %q", e.ID, e.JsonKey) } +// ComponentTranslator is a Translator that converts a JSON config into a component +type ComponentTranslator = Translator[component.Config, component.ID] + +// ComponentTranslatorMap is a map-like container which stores ComponentTranslators +type ComponentTranslatorMap = TranslatorMap[component.Config, component.ID] + // ComponentTranslators is a component ID and respective service pipeline. 
type ComponentTranslators struct { - Receivers TranslatorMap[component.Config] - Processors TranslatorMap[component.Config] - Exporters TranslatorMap[component.Config] - Extensions TranslatorMap[component.Config] + Receivers ComponentTranslatorMap + Processors ComponentTranslatorMap + Exporters ComponentTranslatorMap + Extensions ComponentTranslatorMap } +// PipelineTranslator is a Translator that converts a JSON config into a pipeline +type PipelineTranslator = Translator[*ComponentTranslators, pipeline.ID] + +// PipelineTranslatorMap is a map-like container which stores PipelineTranslators +type PipelineTranslatorMap = TranslatorMap[*ComponentTranslators, pipeline.ID] + // ConfigKey joins the keys separated by confmap.KeyDelimiter. // This helps translators navigate the confmap.Conf that the // JSON config is loaded into. diff --git a/translator/translate/otel/common/common_test.go b/translator/translate/otel/common/common_test.go index d4b9a24d30..24b940b13a 100644 --- a/translator/translate/otel/common/common_test.go +++ b/translator/translate/otel/common/common_test.go @@ -19,7 +19,7 @@ type testTranslator struct { result int } -var _ Translator[int] = (*testTranslator)(nil) +var _ Translator[int, component.ID] = (*testTranslator)(nil) func (t testTranslator) Translate(_ *confmap.Conf) (int, error) { return t.result, nil diff --git a/translator/translate/otel/exporter/awscloudwatch/translator.go b/translator/translate/otel/exporter/awscloudwatch/translator.go index 6ac2cd4e58..e14dd6c0c3 100644 --- a/translator/translate/otel/exporter/awscloudwatch/translator.go +++ b/translator/translate/otel/exporter/awscloudwatch/translator.go @@ -27,13 +27,13 @@ type translator struct { factory exporter.Factory } -var _ common.Translator[component.Config] = (*translator)(nil) +var _ common.ComponentTranslator = (*translator)(nil) -func NewTranslator() common.Translator[component.Config] { +func NewTranslator() common.ComponentTranslator { return NewTranslatorWithName("") } 
-func NewTranslatorWithName(name string) common.Translator[component.Config] { +func NewTranslatorWithName(name string) common.ComponentTranslator { return &translator{name, cloudwatch.NewFactory()} } diff --git a/translator/translate/otel/exporter/awscloudwatchlogs/translator.go b/translator/translate/otel/exporter/awscloudwatchlogs/translator.go index c24819d7f5..2e32dffc68 100644 --- a/translator/translate/otel/exporter/awscloudwatchlogs/translator.go +++ b/translator/translate/otel/exporter/awscloudwatchlogs/translator.go @@ -39,9 +39,9 @@ type translator struct { factory exporter.Factory } -var _ common.Translator[component.Config] = (*translator)(nil) +var _ common.ComponentTranslator = (*translator)(nil) -func NewTranslatorWithName(name string) common.Translator[component.Config] { +func NewTranslatorWithName(name string) common.ComponentTranslator { return &translator{name, awscloudwatchlogsexporter.NewFactory()} } diff --git a/translator/translate/otel/exporter/awscloudwatchlogs/translator_test.go b/translator/translate/otel/exporter/awscloudwatchlogs/translator_test.go index b2ddb6adf4..98214b5084 100644 --- a/translator/translate/otel/exporter/awscloudwatchlogs/translator_test.go +++ b/translator/translate/otel/exporter/awscloudwatchlogs/translator_test.go @@ -138,7 +138,7 @@ func TestTranslator(t *testing.T) { "emf_only": true, "endpoint": "https://cloudwatchlogs-endpoint", "imds_retries": 1, - "local_mode": "true", + "local_mode": true, "log_group_name": "emf/logs/default", "log_stream_name": "some_hostname/some_private_ip", "middleware": "agenthealth/logs", diff --git a/translator/translate/otel/exporter/awsemf/translator.go b/translator/translate/otel/exporter/awsemf/translator.go index 24ef1dcd82..76089b928c 100644 --- a/translator/translate/otel/exporter/awsemf/translator.go +++ b/translator/translate/otel/exporter/awsemf/translator.go @@ -24,10 +24,6 @@ import ( 
"github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/receiver/awscontainerinsight" ) -const ( - kueuePipelineName = "kueueContainerInsights" -) - //go:embed awsemf_default_generic.yaml var defaultGenericConfig string @@ -64,13 +60,13 @@ type translator struct { factory exporter.Factory } -var _ common.Translator[component.Config] = (*translator)(nil) +var _ common.ComponentTranslator = (*translator)(nil) -func NewTranslator() common.Translator[component.Config] { +func NewTranslator() common.ComponentTranslator { return NewTranslatorWithName("") } -func NewTranslatorWithName(name string) common.Translator[component.Config] { +func NewTranslatorWithName(name string) common.ComponentTranslator { return &translator{name, awsemfexporter.NewFactory()} } @@ -173,7 +169,7 @@ func isKubernetes(conf *confmap.Conf) bool { // `kueue_container_insights` is a child of `kubernetes` in config spec. func isKubernetesKueue(conf *confmap.Conf, pipelineName string) bool { - return isKubernetes(conf) && pipelineName == kueuePipelineName && common.GetOrDefaultBool(conf, kubernetesKueueBasePathKey, false) + return isKubernetes(conf) && pipelineName == common.PipelineNameKueue && common.GetOrDefaultBool(conf, kubernetesKueueBasePathKey, false) } func isPrometheus(conf *confmap.Conf) bool { diff --git a/translator/translate/otel/exporter/awsemf/translator_test.go b/translator/translate/otel/exporter/awsemf/translator_test.go index 5d9e6c101c..43c4e03295 100644 --- a/translator/translate/otel/exporter/awsemf/translator_test.go +++ b/translator/translate/otel/exporter/awsemf/translator_test.go @@ -929,7 +929,7 @@ func TestTranslateAppSignals(t *testing.T) { }, }}, want: testutil.GetConfWithOverrides(t, filepath.Join("awsemf_default_appsignals.yaml"), map[string]any{ - "local_mode": "false", + "local_mode": false, "region": "us-east-1", "role_arn": "global_arn", "certificate_file_path": "/ca/bundle", @@ -945,7 +945,7 @@ func TestTranslateAppSignals(t *testing.T) { }, }}, want: 
testutil.GetConfWithOverrides(t, filepath.Join("awsemf_default_appsignals.yaml"), map[string]any{ - "local_mode": "true", + "local_mode": true, "region": "us-east-1", "role_arn": "global_arn", "certificate_file_path": "/ca/bundle", @@ -961,7 +961,7 @@ func TestTranslateAppSignals(t *testing.T) { }, }}, want: testutil.GetConfWithOverrides(t, filepath.Join("awsemf_default_appsignals.yaml"), map[string]any{ - "local_mode": "true", + "local_mode": true, "region": "us-east-1", "role_arn": "global_arn", "certificate_file_path": "/ca/bundle", @@ -977,7 +977,7 @@ func TestTranslateAppSignals(t *testing.T) { }, }}, want: testutil.GetConfWithOverrides(t, filepath.Join("awsemf_default_appsignals.yaml"), map[string]any{ - "local_mode": "false", + "local_mode": false, "region": "us-east-1", "role_arn": "global_arn", "certificate_file_path": "/ca/bundle", @@ -993,7 +993,7 @@ func TestTranslateAppSignals(t *testing.T) { }, }}, want: testutil.GetConfWithOverrides(t, filepath.Join("awsemf_default_appsignals.yaml"), map[string]any{ - "local_mode": "false", + "local_mode": false, "region": "us-east-1", "role_arn": "global_arn", "certificate_file_path": "/ca/bundle", @@ -1009,7 +1009,7 @@ func TestTranslateAppSignals(t *testing.T) { }, }}, want: testutil.GetConfWithOverrides(t, filepath.Join("awsemf_default_appsignals.yaml"), map[string]any{ - "local_mode": "true", + "local_mode": true, "region": "us-east-1", "role_arn": "global_arn", "certificate_file_path": "/ca/bundle", @@ -1025,7 +1025,7 @@ func TestTranslateAppSignals(t *testing.T) { }, }}, want: testutil.GetConfWithOverrides(t, filepath.Join("awsemf_default_appsignals.yaml"), map[string]any{ - "local_mode": "true", + "local_mode": true, "region": "us-east-1", "role_arn": "global_arn", "certificate_file_path": "/ca/bundle", @@ -1041,7 +1041,7 @@ func TestTranslateAppSignals(t *testing.T) { }, }}, want: testutil.GetConfWithOverrides(t, filepath.Join("awsemf_default_appsignals.yaml"), map[string]any{ - "local_mode": "false", + 
"local_mode": false, "region": "us-east-1", "role_arn": "global_arn", "certificate_file_path": "/ca/bundle", diff --git a/translator/translate/otel/exporter/awsxray/translator.go b/translator/translate/otel/exporter/awsxray/translator.go index fb27334f5c..dbecebfa37 100644 --- a/translator/translate/otel/exporter/awsxray/translator.go +++ b/translator/translate/otel/exporter/awsxray/translator.go @@ -33,7 +33,7 @@ type translator struct { factory exporter.Factory } -var _ common.Translator[component.Config] = (*translator)(nil) +var _ common.ComponentTranslator = (*translator)(nil) var ( indexedAttributes = []string{ @@ -43,11 +43,11 @@ var ( } ) -func NewTranslator() common.Translator[component.Config] { +func NewTranslator() common.ComponentTranslator { return NewTranslatorWithName("") } -func NewTranslatorWithName(name string) common.Translator[component.Config] { +func NewTranslatorWithName(name string) common.ComponentTranslator { return &translator{name, awsxrayexporter.NewFactory()} } diff --git a/translator/translate/otel/exporter/awsxray/translator_test.go b/translator/translate/otel/exporter/awsxray/translator_test.go index c3609d6c26..18d5d58486 100644 --- a/translator/translate/otel/exporter/awsxray/translator_test.go +++ b/translator/translate/otel/exporter/awsxray/translator_test.go @@ -46,7 +46,7 @@ func TestTranslator(t *testing.T) { want: confmap.NewFromStringMap(map[string]any{ "certificate_file_path": "/ca/bundle", "region": "us-east-1", - "local_mode": "true", + "local_mode": true, "role_arn": "global_arn", "imds_retries": 1, "telemetry": map[string]any{ diff --git a/translator/translate/otel/exporter/debug/translator.go b/translator/translate/otel/exporter/debug/translator.go index 34a4831ba1..ecad1cd071 100644 --- a/translator/translate/otel/exporter/debug/translator.go +++ b/translator/translate/otel/exporter/debug/translator.go @@ -20,9 +20,9 @@ type translator struct { factory exporter.Factory } -var _ common.Translator[component.Config] = 
(*translator)(nil) +var _ common.ComponentTranslator = (*translator)(nil) -func NewTranslator(opts ...common.TranslatorOption) common.Translator[component.Config] { +func NewTranslator(opts ...common.TranslatorOption) common.ComponentTranslator { t := &translator{factory: debugexporter.NewFactory()} for _, opt := range opts { opt(t) diff --git a/translator/translate/otel/exporter/debug/translator_test.go b/translator/translate/otel/exporter/debug/translator_test.go index 535a0670d6..1d6515f06d 100644 --- a/translator/translate/otel/exporter/debug/translator_test.go +++ b/translator/translate/otel/exporter/debug/translator_test.go @@ -43,7 +43,12 @@ func TestTranslate(t *testing.T) { "debug": true, }, }, - want: &debugexporter.Config{Verbosity: configtelemetry.LevelDetailed, SamplingInitial: 2, SamplingThereafter: 500}, + want: &debugexporter.Config{ + Verbosity: configtelemetry.LevelDetailed, + SamplingInitial: 2, + SamplingThereafter: 500, + UseInternalLogger: true, + }, }, } for name, testCase := range testCases { diff --git a/translator/translate/otel/exporter/prometheusremotewrite/testdata/config.yaml b/translator/translate/otel/exporter/prometheusremotewrite/testdata/config.yaml index f914de8958..fe83ba81d2 100644 --- a/translator/translate/otel/exporter/prometheusremotewrite/testdata/config.yaml +++ b/translator/translate/otel/exporter/prometheusremotewrite/testdata/config.yaml @@ -15,13 +15,17 @@ remote_write_queue: enabled: true queue_size: 10000 num_consumers: 5 -external_labels: [] +external_labels: {} write_buffer_size: 524288 endpoint: "https://aps-workspaces.us-east-1.amazonaws.com/workspaces/ws-12345/api/v1/remote_write" -headers: [] +headers: {} target_info: enabled: true export_created_metric: enabled: false add_metric_suffixes: true -max_batch_size_bytes: 3000000 \ No newline at end of file +max_batch_size_bytes: 3000000 +max_conns_per_host: 0 +max_idle_conns: 100 +max_idle_conns_per_host: 0 +idle_conn_timeout: 90000000000 diff --git 
a/translator/translate/otel/exporter/prometheusremotewrite/translator.go b/translator/translate/otel/exporter/prometheusremotewrite/translator.go index b89878d838..9de548c49c 100644 --- a/translator/translate/otel/exporter/prometheusremotewrite/translator.go +++ b/translator/translate/otel/exporter/prometheusremotewrite/translator.go @@ -24,13 +24,13 @@ type translator struct { factory exporter.Factory } -var _ common.Translator[component.Config] = (*translator)(nil) +var _ common.ComponentTranslator = (*translator)(nil) -func NewTranslator() common.Translator[component.Config] { +func NewTranslator() common.ComponentTranslator { return NewTranslatorWithName("") } -func NewTranslatorWithName(name string) common.Translator[component.Config] { +func NewTranslatorWithName(name string) common.ComponentTranslator { return &translator{name, prometheusremotewriteexporter.NewFactory()} } diff --git a/translator/translate/otel/exporter/translator.go b/translator/translate/otel/exporter/translator.go index d8c2acbc37..5fbc12fe59 100644 --- a/translator/translate/otel/exporter/translator.go +++ b/translator/translate/otel/exporter/translator.go @@ -16,11 +16,11 @@ type translator struct { factory exporter.Factory } -func NewDefaultTranslator(factory exporter.Factory) common.Translator[component.Config] { +func NewDefaultTranslator(factory exporter.Factory) common.ComponentTranslator { return NewDefaultTranslatorWithName("", factory) } -func NewDefaultTranslatorWithName(name string, factory exporter.Factory) common.Translator[component.Config] { +func NewDefaultTranslatorWithName(name string, factory exporter.Factory) common.ComponentTranslator { return &translator{name, factory} } diff --git a/translator/translate/otel/extension/agenthealth/translator.go b/translator/translate/otel/extension/agenthealth/translator.go index 506f3d2e9e..ea167260f5 100644 --- a/translator/translate/otel/extension/agenthealth/translator.go +++ 
b/translator/translate/otel/extension/agenthealth/translator.go @@ -7,6 +7,7 @@ import ( "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/confmap" "go.opentelemetry.io/collector/extension" + "go.opentelemetry.io/collector/pipeline" "github.com/aws/amazon-cloudwatch-agent/cfg/envconfig" "github.com/aws/amazon-cloudwatch-agent/extension/agenthealth" @@ -25,12 +26,21 @@ const ( ) var ( - MetricsID = component.NewIDWithName(agenthealth.TypeStr, component.DataTypeMetrics.String()) - LogsID = component.NewIDWithName(agenthealth.TypeStr, component.DataTypeLogs.String()) - TracesID = component.NewIDWithName(agenthealth.TypeStr, component.DataTypeTraces.String()) + MetricsID = component.NewIDWithName(agenthealth.TypeStr, pipeline.SignalMetrics.String()) + LogsID = component.NewIDWithName(agenthealth.TypeStr, pipeline.SignalLogs.String()) + TracesID = component.NewIDWithName(agenthealth.TypeStr, pipeline.SignalTraces.String()) StatusCodeID = component.NewIDWithName(agenthealth.TypeStr, "statuscode") ) +type Name string + +var ( + MetricsName = Name(pipeline.SignalMetrics.String()) + LogsName = Name(pipeline.SignalLogs.String()) + TracesName = Name(pipeline.SignalTraces.String()) + StatusCodeName = Name("statuscode") +) + type translator struct { name string operations []string @@ -39,11 +49,11 @@ type translator struct { isStatusCodeEnabled bool } -var _ common.Translator[component.Config] = (*translator)(nil) +var _ common.ComponentTranslator = (*translator)(nil) -func NewTranslatorWithStatusCode(name component.DataType, operations []string, isStatusCodeEnabled bool) common.Translator[component.Config] { +func NewTranslatorWithStatusCode(name Name, operations []string, isStatusCodeEnabled bool) common.ComponentTranslator { return &translator{ - name: name.String(), + name: string(name), operations: operations, factory: agenthealth.NewFactory(), isUsageDataEnabled: envconfig.IsUsageDataEnabled(), @@ -51,9 +61,9 @@ func NewTranslatorWithStatusCode(name 
component.DataType, operations []string, i } } -func NewTranslator(name component.DataType, operations []string) common.Translator[component.Config] { +func NewTranslator(name Name, operations []string) common.ComponentTranslator { return &translator{ - name: name.String(), + name: string(name), operations: operations, factory: agenthealth.NewFactory(), isUsageDataEnabled: envconfig.IsUsageDataEnabled(), diff --git a/translator/translate/otel/extension/agenthealth/translator_test.go b/translator/translate/otel/extension/agenthealth/translator_test.go index 41501ab0bf..b085015202 100644 --- a/translator/translate/otel/extension/agenthealth/translator_test.go +++ b/translator/translate/otel/extension/agenthealth/translator_test.go @@ -7,7 +7,6 @@ import ( "testing" "github.com/stretchr/testify/assert" - "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/confmap" "github.com/aws/amazon-cloudwatch-agent/extension/agenthealth" @@ -77,9 +76,8 @@ func TestTranslate(t *testing.T) { } for name, testCase := range testCases { t.Run(name, func(t *testing.T) { - testType, _ := component.NewType("test") - tt := NewTranslator(testType, operations).(*translator) - assert.Equal(t, "agenthealth/test", tt.ID().String()) + tt := NewTranslator(LogsName, operations).(*translator) + assert.Equal(t, "agenthealth/logs", tt.ID().String()) tt.isUsageDataEnabled = testCase.isEnvUsageData conf := confmap.NewFromStringMap(testCase.input) got, err := tt.Translate(conf) diff --git a/translator/translate/otel/extension/awsproxy/translator.go b/translator/translate/otel/extension/awsproxy/translator.go index 46e16ea037..b00790d360 100644 --- a/translator/translate/otel/extension/awsproxy/translator.go +++ b/translator/translate/otel/extension/awsproxy/translator.go @@ -20,6 +20,8 @@ import ( "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/common" ) +const defaultEndpoint = "0.0.0.0:2000" + var ( endpointOverrideKey = common.ConfigKey(common.TracesKey, 
common.EndpointOverrideKey) localModeKey = common.ConfigKey(common.TracesKey, common.LocalModeKey) @@ -30,13 +32,13 @@ type translator struct { factory extension.Factory } -var _ common.Translator[component.Config] = (*translator)(nil) +var _ common.ComponentTranslator = (*translator)(nil) -func NewTranslator() common.Translator[component.Config] { +func NewTranslator() common.ComponentTranslator { return NewTranslatorWithName("") } -func NewTranslatorWithName(name string) common.Translator[component.Config] { +func NewTranslatorWithName(name string) common.ComponentTranslator { return &translator{name, awsproxy.NewFactory()} } @@ -49,6 +51,7 @@ func (t *translator) Translate(conf *confmap.Conf) (component.Config, error) { return nil, &common.MissingKeyError{ID: t.ID(), JsonKey: common.TracesKey} } cfg := t.factory.CreateDefaultConfig().(*awsproxy.Config) + cfg.ProxyConfig.Endpoint = defaultEndpoint cfg.ProxyConfig.CertificateFilePath = os.Getenv(envconfig.AWS_CA_BUNDLE) if conf.IsSet(endpointOverrideKey) { cfg.ProxyConfig.AWSEndpoint, _ = common.GetString(conf, endpointOverrideKey) diff --git a/translator/translate/otel/extension/awsproxy/translator_test.go b/translator/translate/otel/extension/awsproxy/translator_test.go index ac2ac73345..bfc897ee85 100644 --- a/translator/translate/otel/extension/awsproxy/translator_test.go +++ b/translator/translate/otel/extension/awsproxy/translator_test.go @@ -22,6 +22,8 @@ func TestTranslate(t *testing.T) { require.True(t, ok) wantCfg := awsproxy.NewFactory().CreateDefaultConfig().(*awsproxy.Config) wantCfg.ProxyConfig.IMDSRetries = 1 + // Upstream defaults to localhost but we want to stick with 0.0.0.0 as the default for AppSignals + wantCfg.ProxyConfig.Endpoint = "0.0.0.0:2000" assert.Equal(t, wantCfg, gotCfg) } } diff --git a/translator/translate/otel/extension/entitystore/translator.go b/translator/translate/otel/extension/entitystore/translator.go index e98d4ee549..6d4bb16416 100644 --- 
a/translator/translate/otel/extension/entitystore/translator.go +++ b/translator/translate/otel/extension/entitystore/translator.go @@ -19,9 +19,9 @@ type translator struct { factory extension.Factory } -var _ common.Translator[component.Config] = (*translator)(nil) +var _ common.ComponentTranslator = (*translator)(nil) -func NewTranslator() common.Translator[component.Config] { +func NewTranslator() common.ComponentTranslator { return &translator{ factory: entitystore.NewFactory(), } diff --git a/translator/translate/otel/extension/k8smetadata/translator.go b/translator/translate/otel/extension/k8smetadata/translator.go new file mode 100644 index 0000000000..9b50147c4e --- /dev/null +++ b/translator/translate/otel/extension/k8smetadata/translator.go @@ -0,0 +1,36 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: MIT + +package k8smetadata + +import ( + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/confmap" + "go.opentelemetry.io/collector/extension" + + "github.com/aws/amazon-cloudwatch-agent/extension/k8smetadata" + "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/common" +) + +type translator struct { + name string + factory extension.Factory +} + +var _ common.ComponentTranslator = (*translator)(nil) + +func NewTranslator() common.ComponentTranslator { + return &translator{ + factory: k8smetadata.NewFactory(), + } +} + +func (t *translator) ID() component.ID { + return component.NewIDWithName(t.factory.Type(), t.name) +} + +// Translate creates an extension configuration. 
+func (t *translator) Translate(_ *confmap.Conf) (component.Config, error) { + cfg := t.factory.CreateDefaultConfig().(*k8smetadata.Config) + return cfg, nil +} diff --git a/translator/translate/otel/extension/server/translator.go b/translator/translate/otel/extension/server/translator.go index 1be1d814ee..918c25b142 100644 --- a/translator/translate/otel/extension/server/translator.go +++ b/translator/translate/otel/extension/server/translator.go @@ -24,9 +24,9 @@ type translator struct { factory extension.Factory } -var _ common.Translator[component.Config] = (*translator)(nil) +var _ common.ComponentTranslator = (*translator)(nil) -func NewTranslator() common.Translator[component.Config] { +func NewTranslator() common.ComponentTranslator { return &translator{ factory: server.NewFactory(), } diff --git a/translator/translate/otel/extension/sigv4auth/translator.go b/translator/translate/otel/extension/sigv4auth/translator.go index 31adc23054..3d78b9f4b8 100644 --- a/translator/translate/otel/extension/sigv4auth/translator.go +++ b/translator/translate/otel/extension/sigv4auth/translator.go @@ -18,13 +18,13 @@ type translator struct { factory extension.Factory } -var _ common.Translator[component.Config] = (*translator)(nil) +var _ common.ComponentTranslator = (*translator)(nil) -func NewTranslator() common.Translator[component.Config] { +func NewTranslator() common.ComponentTranslator { return NewTranslatorWithName("") } -func NewTranslatorWithName(name string) common.Translator[component.Config] { +func NewTranslatorWithName(name string) common.ComponentTranslator { return &translator{name, sigv4authextension.NewFactory()} } diff --git a/translator/translate/otel/pipeline/applicationsignals/translator.go b/translator/translate/otel/pipeline/applicationsignals/translator.go index 47706a1317..1627eb8a1d 100644 --- a/translator/translate/otel/pipeline/applicationsignals/translator.go +++ b/translator/translate/otel/pipeline/applicationsignals/translator.go @@ -8,6 
+8,7 @@ import ( "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/confmap" + "go.opentelemetry.io/collector/pipeline" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/common" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/exporter/awsemf" @@ -24,47 +25,47 @@ import ( ) type translator struct { - dataType component.DataType + signal pipeline.Signal } -var _ common.Translator[*common.ComponentTranslators] = (*translator)(nil) +var _ common.PipelineTranslator = (*translator)(nil) -func NewTranslator(dataType component.DataType) common.Translator[*common.ComponentTranslators] { +func NewTranslator(signal pipeline.Signal) common.PipelineTranslator { return &translator{ - dataType, + signal, } } -func (t *translator) ID() component.ID { - return component.NewIDWithName(t.dataType, common.AppSignals) +func (t *translator) ID() pipeline.ID { + return pipeline.NewIDWithName(t.signal, common.AppSignals) } func (t *translator) Translate(conf *confmap.Conf) (*common.ComponentTranslators, error) { - configKey, ok := common.AppSignalsConfigKeys[t.dataType] + configKey, ok := common.AppSignalsConfigKeys[t.signal] if !ok { - return nil, fmt.Errorf("no config key defined for data type: %s", t.dataType) + return nil, fmt.Errorf("no config key defined for signal: %s", t.signal) } if conf == nil || (!conf.IsSet(configKey[0]) && !conf.IsSet(configKey[1])) { return nil, &common.MissingKeyError{ID: t.ID(), JsonKey: configKey[0]} } translators := &common.ComponentTranslators{ - Receivers: common.NewTranslatorMap(otlp.NewTranslator(common.WithName(common.AppSignals), otlp.WithDataType(t.dataType))), - Processors: common.NewTranslatorMap[component.Config](), - Exporters: common.NewTranslatorMap[component.Config](), - Extensions: common.NewTranslatorMap[component.Config](), + Receivers: common.NewTranslatorMap(otlp.NewTranslator(common.WithName(common.AppSignals), otlp.WithSignal(t.signal))), + Processors: 
common.NewTranslatorMap[component.Config, component.ID](), + Exporters: common.NewTranslatorMap[component.Config, component.ID](), + Extensions: common.NewTranslatorMap[component.Config, component.ID](), } - if t.dataType == component.DataTypeMetrics { + if t.signal == pipeline.SignalMetrics { translators.Processors.Set(metricstransformprocessor.NewTranslatorWithName(common.AppSignals)) } - translators.Processors.Set(resourcedetection.NewTranslator(resourcedetection.WithDataType(t.dataType))) - translators.Processors.Set(awsapplicationsignals.NewTranslator(awsapplicationsignals.WithDataType(t.dataType))) + translators.Processors.Set(resourcedetection.NewTranslator(resourcedetection.WithSignal(t.signal))) + translators.Processors.Set(awsapplicationsignals.NewTranslator(awsapplicationsignals.WithSignal(t.signal))) // ECS is not in scope for entity association, so we only add the entity processor in non-ECS platforms isECS := ecsutil.GetECSUtilSingleton().IsECS() - if t.dataType == component.DataTypeMetrics && !isECS { + if t.signal == pipeline.SignalMetrics && !isECS { translators.Processors.Set(awsentity.NewTranslatorWithEntityType(awsentity.Service, common.AppSignals, false)) } @@ -72,16 +73,16 @@ func (t *translator) Translate(conf *confmap.Conf) (*common.ComponentTranslators translators.Exporters.Set(debug.NewTranslator(common.WithName(common.AppSignals))) } - if t.dataType == component.DataTypeTraces { + if t.signal == pipeline.SignalTraces { translators.Exporters.Set(awsxray.NewTranslatorWithName(common.AppSignals)) translators.Extensions.Set(awsproxy.NewTranslatorWithName(common.AppSignals)) - translators.Extensions.Set(agenthealth.NewTranslator(component.DataTypeTraces, []string{agenthealth.OperationPutTraceSegments})) - translators.Extensions.Set(agenthealth.NewTranslatorWithStatusCode(component.MustNewType("statuscode"), nil, true)) + translators.Extensions.Set(agenthealth.NewTranslator(agenthealth.TracesName, 
[]string{agenthealth.OperationPutTraceSegments})) + translators.Extensions.Set(agenthealth.NewTranslatorWithStatusCode(agenthealth.StatusCodeName, nil, true)) } else { translators.Exporters.Set(awsemf.NewTranslatorWithName(common.AppSignals)) - translators.Extensions.Set(agenthealth.NewTranslator(component.DataTypeLogs, []string{agenthealth.OperationPutLogEvents})) - translators.Extensions.Set(agenthealth.NewTranslatorWithStatusCode(component.MustNewType("statuscode"), nil, true)) + translators.Extensions.Set(agenthealth.NewTranslator(agenthealth.LogsName, []string{agenthealth.OperationPutLogEvents})) + translators.Extensions.Set(agenthealth.NewTranslatorWithStatusCode(agenthealth.StatusCodeName, nil, true)) } return translators, nil } diff --git a/translator/translate/otel/pipeline/applicationsignals/translator_test.go b/translator/translate/otel/pipeline/applicationsignals/translator_test.go index ae3c94ead3..5913ab13d0 100644 --- a/translator/translate/otel/pipeline/applicationsignals/translator_test.go +++ b/translator/translate/otel/pipeline/applicationsignals/translator_test.go @@ -11,6 +11,7 @@ import ( "github.com/stretchr/testify/require" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/confmap" + "go.opentelemetry.io/collector/pipeline" "github.com/aws/amazon-cloudwatch-agent/internal/util/collections" "github.com/aws/amazon-cloudwatch-agent/translator/config" @@ -27,7 +28,7 @@ func TestTranslatorTraces(t *testing.T) { exporters []string extensions []string } - tt := NewTranslator(component.DataTypeTraces) + tt := NewTranslator(pipeline.SignalTraces) assert.EqualValues(t, "traces/application_signals", tt.ID().String()) testCases := map[string]struct { input map[string]interface{} @@ -103,7 +104,7 @@ func TestTranslatorMetricsForKubernetes(t *testing.T) { exporters []string extensions []string } - tt := NewTranslator(component.DataTypeMetrics) + tt := NewTranslator(pipeline.SignalMetrics) assert.EqualValues(t, 
"metrics/application_signals", tt.ID().String()) testCases := map[string]struct { input map[string]interface{} @@ -204,7 +205,7 @@ func TestTranslatorMetricsForEC2(t *testing.T) { exporters []string extensions []string } - tt := NewTranslator(component.DataTypeMetrics) + tt := NewTranslator(pipeline.SignalMetrics) assert.EqualValues(t, "metrics/application_signals", tt.ID().String()) testCases := map[string]struct { input map[string]interface{} @@ -284,7 +285,7 @@ func TestTranslatorMetricsForECS(t *testing.T) { exporters []string extensions []string } - tt := NewTranslator(component.DataTypeMetrics) + tt := NewTranslator(pipeline.SignalMetrics) assert.EqualValues(t, "metrics/application_signals", tt.ID().String()) testCases := map[string]struct { input map[string]interface{} diff --git a/translator/translate/otel/pipeline/containerinsights/translator.go b/translator/translate/otel/pipeline/containerinsights/translator.go index 52a0907f41..5e37431ff1 100644 --- a/translator/translate/otel/pipeline/containerinsights/translator.go +++ b/translator/translate/otel/pipeline/containerinsights/translator.go @@ -8,11 +8,13 @@ import ( "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/confmap" + "go.opentelemetry.io/collector/pipeline" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/common" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/exporter/awsemf" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/extension/agenthealth" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/processor/batchprocessor" + "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/processor/filterprocessor" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/processor/gpu" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/processor/kueue" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/processor/metricstransformprocessor" @@ -21,8 +23,7 @@ import ( ) 
const ( - ciPipelineName = common.PipelineNameContainerInsights - kueuePipelineName = "kueueContainerInsights" + ciPipelineName = common.PipelineNameContainerInsights ) var ( @@ -35,18 +36,18 @@ type translator struct { pipelineName string } -var _ common.Translator[*common.ComponentTranslators] = (*translator)(nil) +var _ common.PipelineTranslator = (*translator)(nil) -func NewTranslator() common.Translator[*common.ComponentTranslators] { +func NewTranslator() common.PipelineTranslator { return NewTranslatorWithName(ciPipelineName) } -func NewTranslatorWithName(pipelineName string) common.Translator[*common.ComponentTranslators] { +func NewTranslatorWithName(pipelineName string) common.PipelineTranslator { return &translator{pipelineName: pipelineName} } -func (t *translator) ID() component.ID { - return component.NewIDWithName(component.DataTypeMetrics, t.pipelineName) +func (t *translator) ID() pipeline.ID { + return pipeline.NewIDWithName(pipeline.SignalMetrics, t.pipelineName) } // Translate creates a pipeline for container insights if the logs.metrics_collected.ecs or logs.metrics_collected.kubernetes @@ -56,17 +57,22 @@ func (t *translator) Translate(conf *confmap.Conf) (*common.ComponentTranslators return nil, &common.MissingKeyError{ID: t.ID(), JsonKey: fmt.Sprint(ecsKey, " or ", eksKey)} } - // create processor map with default batch processor based on pipeline name - processors := common.NewTranslatorMap(batchprocessor.NewTranslatorWithNameAndSection(t.pipelineName, common.LogsKey)) + // create processor map with + // - default batch processor + // - filter processor to drop prometheus metadata + processors := common.NewTranslatorMap( + batchprocessor.NewTranslatorWithNameAndSection(t.pipelineName, common.LogsKey), + filterprocessor.NewTranslator(common.WithName(t.pipelineName)), + ) // create exporter map with default emf exporter based on pipeline name exporters := common.NewTranslatorMap(awsemf.NewTranslatorWithName(t.pipelineName)) // create 
extensions map based on pipeline name - extensions := common.NewTranslatorMap(agenthealth.NewTranslator(component.DataTypeLogs, []string{agenthealth.OperationPutLogEvents}), - agenthealth.NewTranslatorWithStatusCode(component.MustNewType("statuscode"), nil, true), + extensions := common.NewTranslatorMap(agenthealth.NewTranslator(agenthealth.LogsName, []string{agenthealth.OperationPutLogEvents}), + agenthealth.NewTranslatorWithStatusCode(agenthealth.StatusCodeName, nil, true), ) // create variable for receivers, use switch block below to assign - var receivers common.TranslatorMap[component.Config] + var receivers common.TranslatorMap[component.Config, component.ID] switch t.pipelineName { case ciPipelineName: @@ -82,10 +88,11 @@ func (t *translator) Translate(conf *confmap.Conf) (*common.ComponentTranslators processors.Set(gpu.NewTranslatorWithName(t.pipelineName)) } } - case kueuePipelineName: + case common.PipelineNameKueue: // add prometheus receiver for kueue receivers = common.NewTranslatorMap((awscontainerinsightskueue.NewTranslator())) processors.Set(kueue.NewTranslatorWithName(t.pipelineName)) + default: return nil, fmt.Errorf("unknown container insights pipeline name: %s", t.pipelineName) } diff --git a/translator/translate/otel/pipeline/containerinsights/translator_test.go b/translator/translate/otel/pipeline/containerinsights/translator_test.go index 6eca5a51c7..07ea56df97 100644 --- a/translator/translate/otel/pipeline/containerinsights/translator_test.go +++ b/translator/translate/otel/pipeline/containerinsights/translator_test.go @@ -46,7 +46,7 @@ func TestTranslator(t *testing.T) { want: &want{ pipelineType: "metrics/containerinsights", receivers: []string{"awscontainerinsightreceiver"}, - processors: []string{"batch/containerinsights"}, + processors: []string{"batch/containerinsights", "filter/containerinsights"}, exporters: []string{"awsemf/containerinsights"}, extensions: []string{"agenthealth/logs", "agenthealth/statuscode"}, }, @@ -62,7 +62,7 @@ 
func TestTranslator(t *testing.T) { want: &want{ pipelineType: "metrics/containerinsights", receivers: []string{"awscontainerinsightreceiver"}, - processors: []string{"batch/containerinsights"}, + processors: []string{"batch/containerinsights", "filter/containerinsights"}, exporters: []string{"awsemf/containerinsights"}, extensions: []string{"agenthealth/logs", "agenthealth/statuscode"}, }, @@ -94,7 +94,7 @@ func TestKueueTranslator(t *testing.T) { exporters []string extensions []string } - cit := NewTranslatorWithName(kueuePipelineName) + cit := NewTranslatorWithName(common.PipelineNameKueue) require.EqualValues(t, "metrics/kueueContainerInsights", cit.ID().String()) testCases := map[string]struct { input map[string]interface{} @@ -117,6 +117,7 @@ func TestKueueTranslator(t *testing.T) { receivers: []string{"awscontainerinsightskueuereceiver"}, processors: []string{ "batch/kueueContainerInsights", + "filter/kueueContainerInsights", "kueueattributes/kueueContainerInsights", }, exporters: []string{"awsemf/kueueContainerInsights"}, diff --git a/translator/translate/otel/pipeline/containerinsights/translators.go b/translator/translate/otel/pipeline/containerinsights/translators.go index ae986cc10e..e01b449dfd 100644 --- a/translator/translate/otel/pipeline/containerinsights/translators.go +++ b/translator/translate/otel/pipeline/containerinsights/translators.go @@ -5,24 +5,25 @@ package containerinsights import ( "go.opentelemetry.io/collector/confmap" + "go.opentelemetry.io/collector/pipeline" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/common" - "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/pipeline" + pipelinetranslator "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/pipeline" ) var ( LogsKey = common.ConfigKey(common.LogsKey, common.MetricsCollectedKey) ) -func NewTranslators(conf *confmap.Conf) pipeline.TranslatorMap { - translators := common.NewTranslatorMap[*common.ComponentTranslators]() +func 
NewTranslators(conf *confmap.Conf) pipelinetranslator.TranslatorMap { + translators := common.NewTranslatorMap[*common.ComponentTranslators, pipeline.ID]() // create default container insights translator ciTranslator := NewTranslatorWithName(ciPipelineName) translators.Set(ciTranslator) // create kueue container insights translator KueueContainerInsightsEnabled := common.KueueContainerInsightsEnabled(conf) if KueueContainerInsightsEnabled { - kueueTranslator := NewTranslatorWithName(kueuePipelineName) + kueueTranslator := NewTranslatorWithName(common.PipelineNameKueue) translators.Set(kueueTranslator) } // return the translator map diff --git a/translator/translate/otel/pipeline/containerinsights/translators_test.go b/translator/translate/otel/pipeline/containerinsights/translators_test.go index 178a0159b3..78720fd415 100644 --- a/translator/translate/otel/pipeline/containerinsights/translators_test.go +++ b/translator/translate/otel/pipeline/containerinsights/translators_test.go @@ -92,7 +92,7 @@ func TestTranslators(t *testing.T) { } else { require.NotNil(t, got) assert.Equal(t, len(testCase.want), got.Len()) - got.Range(func(tr common.Translator[*common.ComponentTranslators]) { + got.Range(func(tr common.PipelineTranslator) { w, ok := testCase.want[tr.ID().String()] require.True(t, ok) g, err := tr.Translate(conf) diff --git a/translator/translate/otel/pipeline/containerinsightsjmx/translator.go b/translator/translate/otel/pipeline/containerinsightsjmx/translator.go index 6c4bda310c..0f05f12019 100644 --- a/translator/translate/otel/pipeline/containerinsightsjmx/translator.go +++ b/translator/translate/otel/pipeline/containerinsightsjmx/translator.go @@ -4,8 +4,8 @@ package containerinsightsjmx import ( - "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/confmap" + "go.opentelemetry.io/collector/pipeline" "github.com/aws/amazon-cloudwatch-agent/translator/context" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/common" @@ 
-28,14 +28,14 @@ var ( type translator struct { } -var _ common.Translator[*common.ComponentTranslators] = (*translator)(nil) +var _ common.PipelineTranslator = (*translator)(nil) -func NewTranslator() common.Translator[*common.ComponentTranslators] { +func NewTranslator() common.PipelineTranslator { return &translator{} } -func (t *translator) ID() component.ID { - return component.NewIDWithName(component.DataTypeMetrics, common.PipelineNameContainerInsightsJmx) +func (t *translator) ID() pipeline.ID { + return pipeline.NewIDWithName(pipeline.SignalMetrics, common.PipelineNameContainerInsightsJmx) } // Translate creates a pipeline for container insights jmx if the logs.metrics_collected.kubernetes @@ -69,8 +69,8 @@ func (t *translator) Translate(conf *confmap.Conf) (*common.ComponentTranslators awsemf.NewTranslatorWithName(common.PipelineNameContainerInsightsJmx), ), Extensions: common.NewTranslatorMap( - agenthealth.NewTranslator(component.DataTypeLogs, []string{agenthealth.OperationPutLogEvents}), - agenthealth.NewTranslatorWithStatusCode(component.MustNewType("statuscode"), nil, true), + agenthealth.NewTranslator(agenthealth.LogsName, []string{agenthealth.OperationPutLogEvents}), + agenthealth.NewTranslatorWithStatusCode(agenthealth.StatusCodeName, nil, true), ), } diff --git a/translator/translate/otel/pipeline/emf_logs/translator.go b/translator/translate/otel/pipeline/emf_logs/translator.go index c155b8176c..221b52fc9b 100644 --- a/translator/translate/otel/pipeline/emf_logs/translator.go +++ b/translator/translate/otel/pipeline/emf_logs/translator.go @@ -8,6 +8,7 @@ import ( "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/confmap" + "go.opentelemetry.io/collector/pipeline" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/common" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/exporter/awscloudwatchlogs" @@ -24,18 +25,16 @@ var ( serviceAddressStructuredLogKey = common.ConfigKey(structuredLogKey, 
common.ServiceAddress) ) -type translator struct { - id component.ID -} +type translator struct{} -var _ common.Translator[*common.ComponentTranslators] = (*translator)(nil) +var _ common.PipelineTranslator = (*translator)(nil) -func NewTranslator() common.Translator[*common.ComponentTranslators] { +func NewTranslator() common.PipelineTranslator { return &translator{} } -func (t *translator) ID() component.ID { - return component.NewIDWithName(component.DataTypeLogs, common.PipelineNameEmfLogs) +func (t *translator) ID() pipeline.ID { + return pipeline.NewIDWithName(pipeline.SignalLogs, common.PipelineNameEmfLogs) } // Translate creates a pipeline for emf if emf logs are collected @@ -47,11 +46,11 @@ func (t *translator) Translate(conf *confmap.Conf) (*common.ComponentTranslators return nil, &common.MissingKeyError{ID: t.ID(), JsonKey: emfKey} } translators := common.ComponentTranslators{ - Receivers: common.NewTranslatorMap[component.Config](), + Receivers: common.NewTranslatorMap[component.Config, component.ID](), Processors: common.NewTranslatorMap(batchprocessor.NewTranslatorWithNameAndSection(common.PipelineNameEmfLogs, common.LogsKey)), // EMF logs sit under metrics_collected in "logs" Exporters: common.NewTranslatorMap(awscloudwatchlogs.NewTranslatorWithName(common.PipelineNameEmfLogs)), - Extensions: common.NewTranslatorMap(agenthealth.NewTranslator(component.DataTypeLogs, []string{agenthealth.OperationPutLogEvents}), - agenthealth.NewTranslatorWithStatusCode(component.MustNewType("statuscode"), nil, true), + Extensions: common.NewTranslatorMap(agenthealth.NewTranslator(agenthealth.LogsName, []string{agenthealth.OperationPutLogEvents}), + agenthealth.NewTranslatorWithStatusCode(agenthealth.StatusCodeName, nil, true), ), } if serviceAddress, ok := common.GetString(conf, serviceAddressEMFKey); ok { diff --git a/translator/translate/otel/pipeline/host/translator.go b/translator/translate/otel/pipeline/host/translator.go index 2e8502b536..918887c755 100644 --- 
a/translator/translate/otel/pipeline/host/translator.go +++ b/translator/translate/otel/pipeline/host/translator.go @@ -11,6 +11,7 @@ import ( "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/confmap" + "go.opentelemetry.io/collector/pipeline" "github.com/aws/amazon-cloudwatch-agent/translator/config" "github.com/aws/amazon-cloudwatch-agent/translator/context" @@ -19,6 +20,7 @@ import ( "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/exporter/awsemf" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/exporter/prometheusremotewrite" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/extension/agenthealth" + "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/extension/k8smetadata" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/extension/sigv4auth" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/processor/awsentity" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/processor/batchprocessor" @@ -32,25 +34,25 @@ import ( type translator struct { name string common.DestinationProvider - receivers common.TranslatorMap[component.Config] + receivers common.ComponentTranslatorMap } +var _ common.PipelineTranslator = (*translator)(nil) + var supportedEntityProcessorDestinations = [...]string{ common.DefaultDestination, common.CloudWatchKey, common.CloudWatchLogsKey, } -var _ common.Translator[*common.ComponentTranslators] = (*translator)(nil) - // NewTranslator creates a new host pipeline translator. The receiver types // passed in are converted to config.ComponentIDs, sorted, and used directly // in the translated pipeline. 
func NewTranslator( name string, - receivers common.TranslatorMap[component.Config], + receivers common.ComponentTranslatorMap, opts ...common.TranslatorOption, -) common.Translator[*common.ComponentTranslators] { +) common.PipelineTranslator { t := &translator{name: name, receivers: receivers} for _, opt := range opts { opt(t) @@ -61,8 +63,8 @@ func NewTranslator( return t } -func (t translator) ID() component.ID { - return component.NewIDWithName(component.DataTypeMetrics, t.name) +func (t translator) ID() pipeline.ID { + return pipeline.NewIDWithName(pipeline.SignalMetrics, t.name) } // Translate creates a pipeline if metrics section exists. @@ -71,14 +73,14 @@ func (t translator) Translate(conf *confmap.Conf) (*common.ComponentTranslators, return nil, fmt.Errorf("no receivers configured in pipeline %s", t.name) } - var entityProcessor common.Translator[component.Config] + var entityProcessor common.ComponentTranslator var ec2TaggerEnabled bool translators := common.ComponentTranslators{ Receivers: t.receivers, - Processors: common.NewTranslatorMap[component.Config](), - Exporters: common.NewTranslatorMap[component.Config](), - Extensions: common.NewTranslatorMap[component.Config](), + Processors: common.NewTranslatorMap[component.Config, component.ID](), + Exporters: common.NewTranslatorMap[component.Config, component.ID](), + Extensions: common.NewTranslatorMap[component.Config, component.ID](), } if strings.HasPrefix(t.name, common.PipelineNameHostDeltaMetrics) || strings.HasPrefix(t.name, common.PipelineNameHostOtlpMetrics) { @@ -107,6 +109,7 @@ func (t translator) Translate(conf *confmap.Conf) (*common.ComponentTranslators, // TODO: For OTLP, the entity processor is only on K8S for now. 
Eventually this should be added to EC2 if currentContext.KubernetesMode() != "" { entityProcessor = awsentity.NewTranslatorWithEntityType(awsentity.Service, common.OtlpKey, false) + translators.Extensions.Set(k8smetadata.NewTranslator()) } case common.PipelineNameHostCustomMetrics: if !currentContext.RunInContainer() { @@ -128,8 +131,8 @@ func (t translator) Translate(conf *confmap.Conf) (*common.ComponentTranslators, switch t.Destination() { case common.DefaultDestination, common.CloudWatchKey: translators.Exporters.Set(awscloudwatch.NewTranslator()) - translators.Extensions.Set(agenthealth.NewTranslator(component.DataTypeMetrics, []string{agenthealth.OperationPutMetricData})) - translators.Extensions.Set(agenthealth.NewTranslatorWithStatusCode(component.MustNewType("statuscode"), nil, true)) + translators.Extensions.Set(agenthealth.NewTranslator(agenthealth.MetricsName, []string{agenthealth.OperationPutMetricData})) + translators.Extensions.Set(agenthealth.NewTranslatorWithStatusCode(agenthealth.StatusCodeName, nil, true)) case common.AMPKey: if conf.IsSet(common.MetricsAggregationDimensionsKey) { translators.Processors.Set(rollupprocessor.NewTranslator()) @@ -140,8 +143,8 @@ func (t translator) Translate(conf *confmap.Conf) (*common.ComponentTranslators, case common.CloudWatchLogsKey: translators.Processors.Set(batchprocessor.NewTranslatorWithNameAndSection(t.name, common.LogsKey)) translators.Exporters.Set(awsemf.NewTranslator()) - translators.Extensions.Set(agenthealth.NewTranslator(component.DataTypeLogs, []string{agenthealth.OperationPutLogEvents})) - translators.Extensions.Set(agenthealth.NewTranslatorWithStatusCode(component.MustNewType("statuscode"), nil, true)) + translators.Extensions.Set(agenthealth.NewTranslator(agenthealth.LogsName, []string{agenthealth.OperationPutLogEvents})) + translators.Extensions.Set(agenthealth.NewTranslatorWithStatusCode(agenthealth.StatusCodeName, nil, true)) default: return nil, fmt.Errorf("pipeline (%s) does not support 
destination (%s) in configuration", t.name, t.Destination()) } diff --git a/translator/translate/otel/pipeline/host/translator_test.go b/translator/translate/otel/pipeline/host/translator_test.go index 8a23380093..610b997059 100644 --- a/translator/translate/otel/pipeline/host/translator_test.go +++ b/translator/translate/otel/pipeline/host/translator_test.go @@ -22,7 +22,7 @@ type testTranslator struct { id component.ID } -var _ common.Translator[component.Config] = (*testTranslator)(nil) +var _ common.ComponentTranslator = (*testTranslator)(nil) func (t testTranslator) Translate(_ *confmap.Conf) (component.Config, error) { return nil, nil @@ -134,7 +134,7 @@ func TestTranslator(t *testing.T) { receivers: []string{"nop", "other"}, processors: []string{"cumulativetodelta/hostOtlpMetrics", "awsentity/service/otlp"}, exporters: []string{"awscloudwatch"}, - extensions: []string{"agenthealth/metrics", "agenthealth/statuscode"}, + extensions: []string{"k8smetadata", "agenthealth/metrics", "agenthealth/statuscode"}, }, }, "WithOtlpMetrics/CloudWatchLogsEC2": { @@ -192,7 +192,7 @@ func TestTranslator(t *testing.T) { receivers: []string{"nop", "other"}, processors: []string{"cumulativetodelta/hostOtlpMetrics/cloudwatchlogs", "awsentity/service/otlp", "batch/hostOtlpMetrics/cloudwatchlogs"}, exporters: []string{"awsemf"}, - extensions: []string{"agenthealth/logs", "agenthealth/statuscode"}, + extensions: []string{"k8smetadata", "agenthealth/logs", "agenthealth/statuscode"}, }, }, "WithMetricsKeyStatsD": { diff --git a/translator/translate/otel/pipeline/host/translators.go b/translator/translate/otel/pipeline/host/translators.go index ca9a6595aa..b6ba865d80 100644 --- a/translator/translate/otel/pipeline/host/translators.go +++ b/translator/translate/otel/pipeline/host/translators.go @@ -8,10 +8,10 @@ import ( "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/confmap" + "go.opentelemetry.io/collector/pipeline" 
"github.com/aws/amazon-cloudwatch-agent/receiver/adapter" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/common" - "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/pipeline" adaptertranslator "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/receiver/adapter" otlpreceiver "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/receiver/otlp" ) @@ -21,12 +21,12 @@ var ( LogsKey = common.ConfigKey(common.LogsKey, common.MetricsCollectedKey) ) -func NewTranslators(conf *confmap.Conf, configSection, os string) (pipeline.TranslatorMap, error) { - translators := common.NewTranslatorMap[*common.ComponentTranslators]() - hostReceivers := common.NewTranslatorMap[component.Config]() - hostCustomReceivers := common.NewTranslatorMap[component.Config]() - deltaReceivers := common.NewTranslatorMap[component.Config]() - otlpReceivers := common.NewTranslatorMap[component.Config]() +func NewTranslators(conf *confmap.Conf, configSection, os string) (common.TranslatorMap[*common.ComponentTranslators, pipeline.ID], error) { + translators := common.NewTranslatorMap[*common.ComponentTranslators, pipeline.ID]() + hostReceivers := common.NewTranslatorMap[component.Config, component.ID]() + hostCustomReceivers := common.NewTranslatorMap[component.Config, component.ID]() + deltaReceivers := common.NewTranslatorMap[component.Config, component.ID]() + otlpReceivers := common.NewTranslatorMap[component.Config, component.ID]() // Gather adapter receivers if configSection == MetricsKey { @@ -34,7 +34,7 @@ func NewTranslators(conf *confmap.Conf, configSection, os string) (pipeline.Tran if err != nil { return nil, fmt.Errorf("error finding receivers in config: %w", err) } - adapterReceivers.Range(func(translator common.Translator[component.Config]) { + adapterReceivers.Range(func(translator common.ComponentTranslator) { if translator.ID().Type() == adapter.Type(common.DiskIOKey) || translator.ID().Type() == adapter.Type(common.NetKey) { 
deltaReceivers.Set(translator) } else if translator.ID().Type() == adapter.Type(common.StatsDMetricKey) || translator.ID().Type() == adapter.Type(common.CollectDPluginKey) { @@ -50,14 +50,14 @@ func NewTranslators(conf *confmap.Conf, configSection, os string) (pipeline.Tran case []any: for index := range v { otlpReceivers.Set(otlpreceiver.NewTranslator( - otlpreceiver.WithDataType(component.DataTypeMetrics), + otlpreceiver.WithSignal(pipeline.SignalMetrics), otlpreceiver.WithConfigKey(common.ConfigKey(configSection, common.OtlpKey)), common.WithIndex(index), )) } case map[string]any: otlpReceivers.Set(otlpreceiver.NewTranslator( - otlpreceiver.WithDataType(component.DataTypeMetrics), + otlpreceiver.WithSignal(pipeline.SignalMetrics), otlpreceiver.WithConfigKey(common.ConfigKey(configSection, common.OtlpKey)), )) } @@ -79,7 +79,7 @@ func NewTranslators(conf *confmap.Conf, configSection, os string) (pipeline.Tran switch destination { case common.AMPKey: // PRW exporter does not need the delta conversion. 
- receivers := common.NewTranslatorMap[component.Config]() + receivers := common.NewTranslatorMap[component.Config, component.ID]() receivers.Merge(hostReceivers) receivers.Merge(deltaReceivers) receivers.Merge(otlpReceivers) diff --git a/translator/translate/otel/pipeline/host/translators_test.go b/translator/translate/otel/pipeline/host/translators_test.go index 8977b99fd2..ed02b8e6a2 100644 --- a/translator/translate/otel/pipeline/host/translators_test.go +++ b/translator/translate/otel/pipeline/host/translators_test.go @@ -10,6 +10,7 @@ import ( "github.com/stretchr/testify/require" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/confmap" + "go.opentelemetry.io/collector/pipeline" "github.com/aws/amazon-cloudwatch-agent/internal/util/collections" translatorcontext "github.com/aws/amazon-cloudwatch-agent/translator" @@ -181,7 +182,7 @@ func TestTranslators(t *testing.T) { } else { require.NotNil(t, got) assert.Equal(t, len(testCase.want), got.Len()) - got.Range(func(tr common.Translator[*common.ComponentTranslators]) { + got.Range(func(tr common.Translator[*common.ComponentTranslators, pipeline.ID]) { w, ok := testCase.want[tr.ID().String()] require.True(t, ok) g, err := tr.Translate(conf) diff --git a/translator/translate/otel/pipeline/jmx/translator.go b/translator/translate/otel/pipeline/jmx/translator.go index 5de029e533..8295d0af36 100644 --- a/translator/translate/otel/pipeline/jmx/translator.go +++ b/translator/translate/otel/pipeline/jmx/translator.go @@ -9,6 +9,7 @@ import ( "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/confmap" + "go.opentelemetry.io/collector/pipeline" "github.com/aws/amazon-cloudwatch-agent/translator/context" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/common" @@ -39,9 +40,9 @@ type translator struct { common.DestinationProvider } -var _ common.Translator[*common.ComponentTranslators] = (*translator)(nil) +var _ common.PipelineTranslator = (*translator)(nil) 
-func NewTranslator(opts ...common.TranslatorOption) common.Translator[*common.ComponentTranslators] { +func NewTranslator(opts ...common.TranslatorOption) common.PipelineTranslator { t := &translator{name: common.PipelineNameJmx} t.SetIndex(-1) for _, opt := range opts { @@ -56,8 +57,8 @@ func NewTranslator(opts ...common.TranslatorOption) common.Translator[*common.Co return t } -func (t *translator) ID() component.ID { - return component.NewIDWithName(component.DataTypeMetrics, t.name) +func (t *translator) ID() pipeline.ID { + return pipeline.NewIDWithName(pipeline.SignalMetrics, t.name) } // Translate creates a pipeline for jmx if jmx metrics are collected @@ -76,12 +77,12 @@ func (t *translator) Translate(conf *confmap.Conf) (*common.ComponentTranslators } translators := common.ComponentTranslators{ - Receivers: common.NewTranslatorMap[component.Config](), + Receivers: common.NewTranslatorMap[component.Config, component.ID](), Processors: common.NewTranslatorMap( filterprocessor.NewTranslator(common.WithName(common.PipelineNameJmx), common.WithIndex(t.Index())), ), - Exporters: common.NewTranslatorMap[component.Config](), - Extensions: common.NewTranslatorMap[component.Config](), + Exporters: common.NewTranslatorMap[component.Config, component.ID](), + Extensions: common.NewTranslatorMap[component.Config, component.ID](), } if context.CurrentContext().RunInContainer() { @@ -113,7 +114,7 @@ func (t *translator) Translate(conf *confmap.Conf) (*common.ComponentTranslators case common.DefaultDestination, common.CloudWatchKey: translators.Processors.Set(cumulativetodeltaprocessor.NewTranslator(common.WithName(common.PipelineNameJmx), cumulativetodeltaprocessor.WithConfigKeys(common.JmxConfigKey))) translators.Exporters.Set(awscloudwatch.NewTranslator()) - translators.Extensions.Set(agenthealth.NewTranslatorWithStatusCode(component.DataTypeMetrics, []string{agenthealth.OperationPutMetricData}, true)) + 
translators.Extensions.Set(agenthealth.NewTranslatorWithStatusCode(agenthealth.MetricsName, []string{agenthealth.OperationPutMetricData}, true)) case common.AMPKey: translators.Processors.Set(batchprocessor.NewTranslatorWithNameAndSection(t.name, common.MetricsKey)) if conf.IsSet(common.MetricsAggregationDimensionsKey) { diff --git a/translator/translate/otel/pipeline/jmx/translator_test.go b/translator/translate/otel/pipeline/jmx/translator_test.go index 61f2543c3d..5cb5b40a05 100644 --- a/translator/translate/otel/pipeline/jmx/translator_test.go +++ b/translator/translate/otel/pipeline/jmx/translator_test.go @@ -10,6 +10,7 @@ import ( "github.com/stretchr/testify/require" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/confmap" + "go.opentelemetry.io/collector/pipeline" "github.com/aws/amazon-cloudwatch-agent/internal/util/collections" "github.com/aws/amazon-cloudwatch-agent/translator/context" @@ -36,7 +37,7 @@ func TestTranslator(t *testing.T) { input: map[string]any{}, index: -1, wantErr: &common.MissingKeyError{ - ID: component.NewIDWithName(component.DataTypeMetrics, "jmx"), + ID: pipeline.NewIDWithName(pipeline.SignalMetrics, "jmx"), JsonKey: "metrics::metrics_collected::jmx", }, }, @@ -50,7 +51,7 @@ func TestTranslator(t *testing.T) { }, index: -1, wantErr: &common.MissingKeyError{ - ID: component.NewIDWithName(component.DataTypeMetrics, "jmx"), + ID: pipeline.NewIDWithName(pipeline.SignalMetrics, "jmx"), JsonKey: "metrics::metrics_collected::jmx::::measurement", }, }, @@ -66,7 +67,7 @@ func TestTranslator(t *testing.T) { }, index: -1, wantErr: &common.MissingKeyError{ - ID: component.NewIDWithName(component.DataTypeMetrics, "jmx"), + ID: pipeline.NewIDWithName(pipeline.SignalMetrics, "jmx"), JsonKey: "metrics::metrics_collected::jmx::::measurement", }, }, @@ -89,7 +90,7 @@ func TestTranslator(t *testing.T) { }, index: -1, wantErr: &common.MissingKeyError{ - ID: component.NewIDWithName(component.DataTypeMetrics, "jmx"), + ID: 
pipeline.NewIDWithName(pipeline.SignalMetrics, "jmx"), JsonKey: "metrics::metrics_collected::jmx::::measurement", }, }, @@ -103,7 +104,7 @@ func TestTranslator(t *testing.T) { }, index: 1, wantErr: &common.MissingKeyError{ - ID: component.NewIDWithName(component.DataTypeMetrics, "jmx/1"), + ID: pipeline.NewIDWithName(pipeline.SignalMetrics, "jmx/1"), JsonKey: "metrics::metrics_collected::jmx[1]::::measurement", }, }, diff --git a/translator/translate/otel/pipeline/jmx/translators.go b/translator/translate/otel/pipeline/jmx/translators.go index 3aefb3c8b4..02502b9e28 100644 --- a/translator/translate/otel/pipeline/jmx/translators.go +++ b/translator/translate/otel/pipeline/jmx/translators.go @@ -5,13 +5,13 @@ package jmx import ( "go.opentelemetry.io/collector/confmap" + "go.opentelemetry.io/collector/pipeline" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/common" - "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/pipeline" ) -func NewTranslators(conf *confmap.Conf) pipeline.TranslatorMap { - translators := common.NewTranslatorMap[*common.ComponentTranslators]() +func NewTranslators(conf *confmap.Conf) common.PipelineTranslatorMap { + translators := common.NewTranslatorMap[*common.ComponentTranslators, pipeline.ID]() destinations := common.GetMetricsDestinations(conf) switch v := conf.Get(common.JmxConfigKey).(type) { case []any: diff --git a/translator/translate/otel/pipeline/jmx/translators_test.go b/translator/translate/otel/pipeline/jmx/translators_test.go index 5f793b9496..6115ba86b2 100644 --- a/translator/translate/otel/pipeline/jmx/translators_test.go +++ b/translator/translate/otel/pipeline/jmx/translators_test.go @@ -8,8 +8,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/confmap" + "go.opentelemetry.io/collector/pipeline" _ "github.com/aws/amazon-cloudwatch-agent/translator/registerrules" ) @@ -17,11 
+17,11 @@ import ( func TestTranslators(t *testing.T) { testCases := map[string]struct { input map[string]any - want []component.ID + want []pipeline.ID }{ "WithEmpty": { input: map[string]any{}, - want: []component.ID{}, + want: []pipeline.ID{}, }, "WithSingle": { input: map[string]any{ @@ -31,8 +31,8 @@ func TestTranslators(t *testing.T) { }, }, }, - want: []component.ID{ - component.MustNewIDWithName("metrics", "jmx"), + want: []pipeline.ID{ + pipeline.MustNewIDWithName("metrics", "jmx"), }, }, "WithSingle/Destinations": { @@ -48,8 +48,8 @@ func TestTranslators(t *testing.T) { }, }, }, - want: []component.ID{ - component.MustNewIDWithName("metrics", "jmx/amp"), + want: []pipeline.ID{ + pipeline.MustNewIDWithName("metrics", "jmx/amp"), }, }, "WithMultiple": { @@ -63,9 +63,9 @@ func TestTranslators(t *testing.T) { }, }, }, - want: []component.ID{ - component.MustNewIDWithName("metrics", "jmx/0"), - component.MustNewIDWithName("metrics", "jmx/1"), + want: []pipeline.ID{ + pipeline.MustNewIDWithName("metrics", "jmx/0"), + pipeline.MustNewIDWithName("metrics", "jmx/1"), }, }, "WithMultiple/Destinations": { @@ -85,11 +85,11 @@ func TestTranslators(t *testing.T) { }, }, }, - want: []component.ID{ - component.MustNewIDWithName("metrics", "jmx/cloudwatch/0"), - component.MustNewIDWithName("metrics", "jmx/amp/0"), - component.MustNewIDWithName("metrics", "jmx/cloudwatch/1"), - component.MustNewIDWithName("metrics", "jmx/amp/1"), + want: []pipeline.ID{ + pipeline.MustNewIDWithName("metrics", "jmx/cloudwatch/0"), + pipeline.MustNewIDWithName("metrics", "jmx/amp/0"), + pipeline.MustNewIDWithName("metrics", "jmx/cloudwatch/1"), + pipeline.MustNewIDWithName("metrics", "jmx/amp/1"), }, }, } diff --git a/translator/translate/otel/pipeline/nop/translator.go b/translator/translate/otel/pipeline/nop/translator.go index 0efd23b470..ad21899cb7 100644 --- a/translator/translate/otel/pipeline/nop/translator.go +++ b/translator/translate/otel/pipeline/nop/translator.go @@ -9,6 +9,7 @@ 
import ( "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/confmap" "go.opentelemetry.io/collector/exporter/nopexporter" + "go.opentelemetry.io/collector/pipeline" "go.opentelemetry.io/collector/receiver/nopreceiver" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/common" @@ -30,14 +31,14 @@ var ( type translator struct { } -var _ common.Translator[*common.ComponentTranslators] = (*translator)(nil) +var _ common.PipelineTranslator = (*translator)(nil) -func NewTranslator() common.Translator[*common.ComponentTranslators] { +func NewTranslator() common.PipelineTranslator { return &translator{} } -func (t *translator) ID() component.ID { - return component.NewIDWithName(component.DataTypeMetrics, pipelineName) +func (t *translator) ID() pipeline.ID { + return pipeline.NewIDWithName(pipeline.SignalMetrics, pipelineName) } func (t *translator) Translate(conf *confmap.Conf) (*common.ComponentTranslators, error) { @@ -47,9 +48,9 @@ func (t *translator) Translate(conf *confmap.Conf) (*common.ComponentTranslators translators := &common.ComponentTranslators{ Receivers: common.NewTranslatorMap(receiver.NewDefaultTranslator(nopreceiver.NewFactory())), - Processors: common.NewTranslatorMap[component.Config](), + Processors: common.NewTranslatorMap[component.Config, component.ID](), Exporters: common.NewTranslatorMap(exporter.NewDefaultTranslator(nopexporter.NewFactory())), - Extensions: common.NewTranslatorMap[component.Config](), + Extensions: common.NewTranslatorMap[component.Config, component.ID](), } return translators, nil } diff --git a/translator/translate/otel/pipeline/prometheus/translator.go b/translator/translate/otel/pipeline/prometheus/translator.go index a83f777faa..62787e67c1 100644 --- a/translator/translate/otel/pipeline/prometheus/translator.go +++ b/translator/translate/otel/pipeline/prometheus/translator.go @@ -7,8 +7,8 @@ import ( "fmt" "time" - "go.opentelemetry.io/collector/component" 
"go.opentelemetry.io/collector/confmap" + "go.opentelemetry.io/collector/pipeline" "github.com/aws/amazon-cloudwatch-agent/translator/translate/logs/metrics_collected/prometheus" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/common" @@ -32,9 +32,9 @@ type translator struct { common.DestinationProvider } -var _ common.Translator[*common.ComponentTranslators] = (*translator)(nil) +var _ common.PipelineTranslator = (*translator)(nil) -func NewTranslator(opts ...common.TranslatorOption) common.Translator[*common.ComponentTranslators] { +func NewTranslator(opts ...common.TranslatorOption) common.PipelineTranslator { t := &translator{name: common.PipelineNamePrometheus} for _, opt := range opts { opt(t) @@ -45,8 +45,8 @@ func NewTranslator(opts ...common.TranslatorOption) common.Translator[*common.Co return t } -func (t *translator) ID() component.ID { - return component.NewIDWithName(component.DataTypeMetrics, t.name) +func (t *translator) ID() pipeline.ID { + return pipeline.NewIDWithName(pipeline.SignalMetrics, t.name) } // Translate creates a pipeline for prometheus if the logs.metrics_collected.prometheus @@ -71,8 +71,8 @@ func (t *translator) Translate(conf *confmap.Conf) (*common.ComponentTranslators batchprocessor.NewTranslatorWithNameAndSection(t.name, common.LogsKey), // prometheus sits under metrics_collected in "logs" ), Exporters: common.NewTranslatorMap(awsemf.NewTranslatorWithName(common.PipelineNamePrometheus)), - Extensions: common.NewTranslatorMap(agenthealth.NewTranslator(component.DataTypeLogs, []string{agenthealth.OperationPutLogEvents}), - agenthealth.NewTranslatorWithStatusCode(component.MustNewType("statuscode"), nil, true)), + Extensions: common.NewTranslatorMap(agenthealth.NewTranslator(agenthealth.LogsName, []string{agenthealth.OperationPutLogEvents}), + agenthealth.NewTranslatorWithStatusCode(agenthealth.StatusCodeName, nil, true)), }, nil case common.AMPKey: if !conf.IsSet(MetricsKey) { diff --git 
a/translator/translate/otel/pipeline/prometheus/translator_test.go b/translator/translate/otel/pipeline/prometheus/translator_test.go index 2b04cb4467..0c0c2006ae 100644 --- a/translator/translate/otel/pipeline/prometheus/translator_test.go +++ b/translator/translate/otel/pipeline/prometheus/translator_test.go @@ -11,6 +11,7 @@ import ( "github.com/stretchr/testify/require" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/confmap" + "go.opentelemetry.io/collector/pipeline" "github.com/aws/amazon-cloudwatch-agent/internal/util/collections" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/common" @@ -35,7 +36,7 @@ func TestTranslator(t *testing.T) { input: map[string]any{}, destination: common.AMPKey, wantErr: &common.MissingKeyError{ - ID: component.NewIDWithName(component.DataTypeMetrics, "prometheus/amp"), + ID: pipeline.NewIDWithName(pipeline.SignalMetrics, "prometheus/amp"), JsonKey: "metrics::metrics_collected::prometheus or logs::metrics_collected::prometheus", }, }, @@ -43,7 +44,7 @@ func TestTranslator(t *testing.T) { input: map[string]any{}, destination: common.CloudWatchLogsKey, wantErr: &common.MissingKeyError{ - ID: component.NewIDWithName(component.DataTypeMetrics, "prometheus/cloudwatchlogs"), + ID: pipeline.NewIDWithName(pipeline.SignalMetrics, "prometheus/cloudwatchlogs"), JsonKey: "metrics::metrics_collected::prometheus or logs::metrics_collected::prometheus", }, }, diff --git a/translator/translate/otel/pipeline/prometheus/translators.go b/translator/translate/otel/pipeline/prometheus/translators.go index 6ee08f4e2e..f2fc0a4261 100644 --- a/translator/translate/otel/pipeline/prometheus/translators.go +++ b/translator/translate/otel/pipeline/prometheus/translators.go @@ -5,13 +5,13 @@ package prometheus import ( "go.opentelemetry.io/collector/confmap" + "go.opentelemetry.io/collector/pipeline" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/common" - 
"github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/pipeline" ) -func NewTranslators(conf *confmap.Conf) pipeline.TranslatorMap { - translators := common.NewTranslatorMap[*common.ComponentTranslators]() +func NewTranslators(conf *confmap.Conf) common.PipelineTranslatorMap { + translators := common.NewTranslatorMap[*common.ComponentTranslators, pipeline.ID]() var destinations []string if conf.IsSet(LogsKey) { destinations = append(destinations, common.CloudWatchLogsKey) diff --git a/translator/translate/otel/pipeline/prometheus/translators_test.go b/translator/translate/otel/pipeline/prometheus/translators_test.go index 00ec31e83e..616eee54fd 100644 --- a/translator/translate/otel/pipeline/prometheus/translators_test.go +++ b/translator/translate/otel/pipeline/prometheus/translators_test.go @@ -8,8 +8,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/confmap" + "go.opentelemetry.io/collector/pipeline" _ "github.com/aws/amazon-cloudwatch-agent/translator/registerrules" ) @@ -18,11 +18,11 @@ func TestTranslators(t *testing.T) { testCases := map[string]struct { input map[string]any - want []component.ID + want []pipeline.ID }{ "WithEmpty": { input: map[string]any{}, - want: []component.ID{}, + want: []pipeline.ID{}, }, "WithMetricsWithoutDestinations": { input: map[string]any{ @@ -32,8 +32,8 @@ func TestTranslators(t *testing.T) { }, }, }, - want: []component.ID{ - component.MustNewIDWithName("metrics", "prometheus/amp"), + want: []pipeline.ID{ + pipeline.MustNewIDWithName("metrics", "prometheus/amp"), }, }, "WithLogsWithoutDestinations": { @@ -44,8 +44,8 @@ func TestTranslators(t *testing.T) { }, }, }, - want: []component.ID{ - component.MustNewIDWithName("metrics", "prometheus/cloudwatchlogs"), + want: []pipeline.ID{ + pipeline.MustNewIDWithName("metrics", "prometheus/cloudwatchlogs"), }, }, "WithMetricsWithCloudWatchDestination": { @@ -59,8 
+59,8 @@ func TestTranslators(t *testing.T) { }, }, }, - want: []component.ID{ - component.MustNewIDWithName("metrics", "prometheus/amp"), + want: []pipeline.ID{ + pipeline.MustNewIDWithName("metrics", "prometheus/amp"), }, }, "WithMetricsWithAMP": { @@ -76,8 +76,8 @@ func TestTranslators(t *testing.T) { }, }, }, - want: []component.ID{ - component.MustNewIDWithName("metrics", "prometheus/amp"), + want: []pipeline.ID{ + pipeline.MustNewIDWithName("metrics", "prometheus/amp"), }, }, "WithLogsWithCloudWatch": { @@ -93,8 +93,8 @@ func TestTranslators(t *testing.T) { }, }, }, - want: []component.ID{ - component.MustNewIDWithName("metrics", "prometheus/cloudwatchlogs"), + want: []pipeline.ID{ + pipeline.MustNewIDWithName("metrics", "prometheus/cloudwatchlogs"), }, }, "WithMultiple/Destinations": { @@ -116,9 +116,9 @@ func TestTranslators(t *testing.T) { }, }, }, - want: []component.ID{ - component.MustNewIDWithName("metrics", "prometheus/amp"), - component.MustNewIDWithName("metrics", "prometheus/cloudwatchlogs"), + want: []pipeline.ID{ + pipeline.MustNewIDWithName("metrics", "prometheus/amp"), + pipeline.MustNewIDWithName("metrics", "prometheus/cloudwatchlogs"), }, }, } diff --git a/translator/translate/otel/pipeline/translator.go b/translator/translate/otel/pipeline/translator.go index e6dce92018..ef0bfcd05a 100644 --- a/translator/translate/otel/pipeline/translator.go +++ b/translator/translate/otel/pipeline/translator.go @@ -8,6 +8,7 @@ import ( "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/confmap" + "go.opentelemetry.io/collector/pipeline" "go.opentelemetry.io/collector/service/pipelines" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/common" @@ -17,23 +18,23 @@ var ( ErrNoPipelines = errors.New("no valid pipelines") ) -type Translator = common.Translator[*common.ComponentTranslators] +type Translator = common.PipelineTranslator -type TranslatorMap = common.TranslatorMap[*common.ComponentTranslators] +type 
TranslatorMap = common.TranslatorMap[*common.ComponentTranslators, pipeline.ID] type Translation struct { - // Pipelines is a map of component IDs to service pipelines. + // Pipelines is a map of pipeline IDs to service pipelines. Pipelines pipelines.Config Translators common.ComponentTranslators } type translator struct { - translators common.TranslatorMap[*common.ComponentTranslators] + translators common.TranslatorMap[*common.ComponentTranslators, pipeline.ID] } -var _ common.Translator[*Translation] = (*translator)(nil) +var _ common.Translator[*Translation, component.ID] = (*translator)(nil) // ID doesn't really matter -func NewTranslator(translators common.TranslatorMap[*common.ComponentTranslators]) common.Translator[*Translation] { +func NewTranslator(translators common.TranslatorMap[*common.ComponentTranslators, pipeline.ID]) common.Translator[*Translation, component.ID] { return &translator{translators: translators} } @@ -47,13 +48,13 @@ func (t *translator) Translate(conf *confmap.Conf) (*Translation, error) { translation := Translation{ Pipelines: make(pipelines.Config), Translators: common.ComponentTranslators{ - Receivers: common.NewTranslatorMap[component.Config](), - Processors: common.NewTranslatorMap[component.Config](), - Exporters: common.NewTranslatorMap[component.Config](), - Extensions: common.NewTranslatorMap[component.Config](), + Receivers: common.NewTranslatorMap[component.Config, component.ID](), + Processors: common.NewTranslatorMap[component.Config, component.ID](), + Exporters: common.NewTranslatorMap[component.Config, component.ID](), + Extensions: common.NewTranslatorMap[component.Config, component.ID](), }, } - t.translators.Range(func(pt common.Translator[*common.ComponentTranslators]) { + t.translators.Range(func(pt common.PipelineTranslator) { if pipeline, _ := pt.Translate(conf); pipeline != nil { translation.Pipelines[pt.ID()] = &pipelines.PipelineConfig{ Receivers: pipeline.Receivers.Keys(), diff --git 
a/translator/translate/otel/pipeline/translator_test.go b/translator/translate/otel/pipeline/translator_test.go index afaf0fe1b0..e3f90313d5 100644 --- a/translator/translate/otel/pipeline/translator_test.go +++ b/translator/translate/otel/pipeline/translator_test.go @@ -9,6 +9,7 @@ import ( "github.com/stretchr/testify/require" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/confmap" + "go.opentelemetry.io/collector/pipeline" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/common" ) @@ -17,28 +18,27 @@ type testTranslator struct { result *common.ComponentTranslators } -var _ common.Translator[*common.ComponentTranslators] = (*testTranslator)(nil) +var _ common.PipelineTranslator = (*testTranslator)(nil) func (t testTranslator) Translate(_ *confmap.Conf) (*common.ComponentTranslators, error) { return t.result, nil } -func (t testTranslator) ID() component.ID { - newType, _ := component.NewType("") - return component.NewID(newType) +func (t testTranslator) ID() pipeline.ID { + return pipeline.NewIDWithName(pipeline.SignalLogs, "test") } func TestTranslator(t *testing.T) { - pt := NewTranslator(common.NewTranslatorMap[*common.ComponentTranslators]()) + pt := NewTranslator(common.NewTranslatorMap[*common.ComponentTranslators, pipeline.ID]()) got, err := pt.Translate(confmap.New()) require.Equal(t, ErrNoPipelines, err) require.Nil(t, got) pt = NewTranslator(common.NewTranslatorMap[*common.ComponentTranslators](&testTranslator{ result: &common.ComponentTranslators{ - Receivers: common.NewTranslatorMap[component.Config](), - Processors: common.NewTranslatorMap[component.Config](), - Exporters: common.NewTranslatorMap[component.Config](), - Extensions: common.NewTranslatorMap[component.Config](), + Receivers: common.NewTranslatorMap[component.Config, component.ID](), + Processors: common.NewTranslatorMap[component.Config, component.ID](), + Exporters: common.NewTranslatorMap[component.Config, component.ID](), + Extensions: 
common.NewTranslatorMap[component.Config, component.ID](), }, })) got, err = pt.Translate(confmap.New()) diff --git a/translator/translate/otel/pipeline/xray/translator.go b/translator/translate/otel/pipeline/xray/translator.go index 1da9220e51..557712b49b 100644 --- a/translator/translate/otel/pipeline/xray/translator.go +++ b/translator/translate/otel/pipeline/xray/translator.go @@ -8,6 +8,7 @@ import ( "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/confmap" + "go.opentelemetry.io/collector/pipeline" "go.opentelemetry.io/collector/processor/batchprocessor" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/common" @@ -30,14 +31,14 @@ var ( type translator struct { } -var _ common.Translator[*common.ComponentTranslators] = (*translator)(nil) +var _ common.PipelineTranslator = (*translator)(nil) -func NewTranslator() common.Translator[*common.ComponentTranslators] { +func NewTranslator() common.PipelineTranslator { return &translator{} } -func (t *translator) ID() component.ID { - return component.NewIDWithName(component.DataTypeTraces, pipelineName) +func (t *translator) ID() pipeline.ID { + return pipeline.NewIDWithName(pipeline.SignalTraces, pipelineName) } func (t *translator) Translate(conf *confmap.Conf) (*common.ComponentTranslators, error) { @@ -45,18 +46,18 @@ func (t *translator) Translate(conf *confmap.Conf) (*common.ComponentTranslators return nil, &common.MissingKeyError{ID: t.ID(), JsonKey: fmt.Sprint(xrayKey, " or ", otlpKey)} } translators := &common.ComponentTranslators{ - Receivers: common.NewTranslatorMap[component.Config](), + Receivers: common.NewTranslatorMap[component.Config, component.ID](), Processors: common.NewTranslatorMap(processor.NewDefaultTranslatorWithName(pipelineName, batchprocessor.NewFactory())), Exporters: common.NewTranslatorMap(awsxrayexporter.NewTranslator()), - Extensions: common.NewTranslatorMap(agenthealth.NewTranslator(component.DataTypeTraces, 
[]string{agenthealth.OperationPutTraceSegments}), - agenthealth.NewTranslatorWithStatusCode(component.MustNewType("statuscode"), nil, true)), + Extensions: common.NewTranslatorMap(agenthealth.NewTranslator(agenthealth.TracesName, []string{agenthealth.OperationPutTraceSegments}), + agenthealth.NewTranslatorWithStatusCode(agenthealth.StatusCodeName, nil, true)), } if conf.IsSet(xrayKey) { translators.Receivers.Set(awsxrayreceiver.NewTranslator()) } if conf.IsSet(otlpKey) { translators.Receivers.Set(otlp.NewTranslator( - otlp.WithDataType(component.DataTypeTraces), + otlp.WithSignal(pipeline.SignalTraces), otlp.WithConfigKey(otlpKey)), ) } diff --git a/translator/translate/otel/processor/awsapplicationsignals/translator.go b/translator/translate/otel/processor/awsapplicationsignals/translator.go index 7c087bdf17..ca0143c6d8 100644 --- a/translator/translate/otel/processor/awsapplicationsignals/translator.go +++ b/translator/translate/otel/processor/awsapplicationsignals/translator.go @@ -10,6 +10,7 @@ import ( "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/confmap" + "go.opentelemetry.io/collector/pipeline" "go.opentelemetry.io/collector/processor" "github.com/aws/amazon-cloudwatch-agent/plugins/processors/awsapplicationsignals" @@ -22,9 +23,9 @@ import ( ) type translator struct { - name string - dataType component.DataType - factory processor.Factory + name string + signal pipeline.Signal + factory processor.Factory } type Option interface { @@ -37,17 +38,17 @@ func (o optionFunc) apply(t *translator) { o(t) } -// WithDataType determines where the translator should look to find +// WithSignal determines where the translator should look to find // the configuration. 
-func WithDataType(dataType component.DataType) Option { +func WithSignal(signal pipeline.Signal) Option { return optionFunc(func(t *translator) { - t.dataType = dataType + t.signal = signal }) } -var _ common.Translator[component.Config] = (*translator)(nil) +var _ common.ComponentTranslator = (*translator)(nil) -func NewTranslator(opts ...Option) common.Translator[component.Config] { +func NewTranslator(opts ...Option) common.ComponentTranslator { t := &translator{factory: awsapplicationsignals.NewFactory()} for _, opt := range opts { opt.apply(t) @@ -60,7 +61,7 @@ func (t *translator) ID() component.ID { } func (t *translator) Translate(conf *confmap.Conf) (component.Config, error) { - configKey := common.AppSignalsConfigKeys[t.dataType] + configKey := common.AppSignalsConfigKeys[t.signal] cfg := t.factory.CreateDefaultConfig().(*appsignalsconfig.Config) hostedIn, hostedInConfigured := common.GetHostedIn(conf) diff --git a/translator/translate/otel/processor/awsapplicationsignals/translator_test.go b/translator/translate/otel/processor/awsapplicationsignals/translator_test.go index 9f222d0116..496143a7cf 100644 --- a/translator/translate/otel/processor/awsapplicationsignals/translator_test.go +++ b/translator/translate/otel/processor/awsapplicationsignals/translator_test.go @@ -11,8 +11,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/confmap" + "go.opentelemetry.io/collector/pipeline" "github.com/aws/amazon-cloudwatch-agent/plugins/processors/awsapplicationsignals" "github.com/aws/amazon-cloudwatch-agent/plugins/processors/awsapplicationsignals/config" @@ -45,7 +45,7 @@ func TestTranslate(t *testing.T) { json.Unmarshal([]byte(validAppSignalsRulesConfig), &validJsonMap) json.Unmarshal([]byte(invalidAppSignalsRulesConfig), &invalidJsonMap) - tt := NewTranslator(WithDataType(component.DataTypeMetrics)) + tt := 
NewTranslator(WithSignal(pipeline.SignalMetrics)) testCases := map[string]struct { input map[string]interface{} want string diff --git a/translator/translate/otel/processor/awsentity/translator.go b/translator/translate/otel/processor/awsentity/translator.go index 0dd5a4608d..f43df46629 100644 --- a/translator/translate/otel/processor/awsentity/translator.go +++ b/translator/translate/otel/processor/awsentity/translator.go @@ -29,13 +29,13 @@ type translator struct { scrapeDatapointAttribute bool } -func NewTranslator() common.Translator[component.Config] { +func NewTranslator() common.ComponentTranslator { return &translator{ factory: awsentity.NewFactory(), } } -func NewTranslatorWithEntityType(entityType string, name string, scrapeDatapointAttribute bool) common.Translator[component.Config] { +func NewTranslatorWithEntityType(entityType string, name string, scrapeDatapointAttribute bool) common.ComponentTranslator { pipelineName := strings.ToLower(entityType) if name != "" { pipelineName = pipelineName + "/" + name diff --git a/translator/translate/otel/processor/batchprocessor/translator.go b/translator/translate/otel/processor/batchprocessor/translator.go index b2e5739f5a..af9bef3aa6 100644 --- a/translator/translate/otel/processor/batchprocessor/translator.go +++ b/translator/translate/otel/processor/batchprocessor/translator.go @@ -26,9 +26,9 @@ type translator struct { factory processor.Factory } -var _ common.Translator[component.Config] = (*translator)(nil) +var _ common.ComponentTranslator = (*translator)(nil) -func NewTranslatorWithNameAndSection(name string, telemetrySectionKey string) common.Translator[component.Config] { +func NewTranslatorWithNameAndSection(name string, telemetrySectionKey string) common.ComponentTranslator { return &translator{name, telemetrySectionKey, batchprocessor.NewFactory()} } diff --git a/translator/translate/otel/processor/batchprocessor/translator_test.go 
b/translator/translate/otel/processor/batchprocessor/translator_test.go index 2c6beb7055..4de64a35b6 100644 --- a/translator/translate/otel/processor/batchprocessor/translator_test.go +++ b/translator/translate/otel/processor/batchprocessor/translator_test.go @@ -9,7 +9,6 @@ import ( "time" "github.com/stretchr/testify/require" - "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/confmap" "go.opentelemetry.io/collector/processor/batchprocessor" @@ -18,7 +17,7 @@ import ( func TestTranslator(t *testing.T) { testCases := map[string]struct { - translator common.Translator[component.Config] + translator common.ComponentTranslator input map[string]interface{} want *batchprocessor.Config wantErr error diff --git a/translator/translate/otel/processor/cumulativetodeltaprocessor/translator.go b/translator/translate/otel/processor/cumulativetodeltaprocessor/translator.go index 650ac1aa0d..9dda4f6ab4 100644 --- a/translator/translate/otel/processor/cumulativetodeltaprocessor/translator.go +++ b/translator/translate/otel/processor/cumulativetodeltaprocessor/translator.go @@ -56,10 +56,10 @@ type translator struct { keys []string } -var _ common.Translator[component.Config] = (*translator)(nil) +var _ common.ComponentTranslator = (*translator)(nil) var _ common.NameSetter = (*translator)(nil) -func NewTranslator(opts ...common.TranslatorOption) common.Translator[component.Config] { +func NewTranslator(opts ...common.TranslatorOption) common.ComponentTranslator { t := &translator{factory: cumulativetodeltaprocessor.NewFactory()} for _, opt := range opts { opt(t) diff --git a/translator/translate/otel/processor/ec2taggerprocessor/translator.go b/translator/translate/otel/processor/ec2taggerprocessor/translator.go index 2add6ecf55..2b46b4dd6b 100644 --- a/translator/translate/otel/processor/ec2taggerprocessor/translator.go +++ b/translator/translate/otel/processor/ec2taggerprocessor/translator.go @@ -24,13 +24,13 @@ type translator struct { factory 
processor.Factory } -var _ common.Translator[component.Config] = (*translator)(nil) +var _ common.ComponentTranslator = (*translator)(nil) -func NewTranslator() common.Translator[component.Config] { +func NewTranslator() common.ComponentTranslator { return NewTranslatorWithName("") } -func NewTranslatorWithName(name string) common.Translator[component.Config] { +func NewTranslatorWithName(name string) common.ComponentTranslator { return &translator{name, ec2tagger.NewFactory()} } @@ -59,13 +59,15 @@ func (t *translator) Translate(conf *confmap.Conf) (component.Config, error) { } } + cfg.RefreshTagsInterval = time.Duration(0) + cfg.RefreshVolumesInterval = time.Duration(0) if value, ok := common.GetString(conf, common.ConfigKey(common.MetricsKey, common.MetricsCollectedKey, common.DiskKey, common.AppendDimensionsKey, ec2tagger.AttributeVolumeId)); ok && value == ec2tagger.ValueAppendDimensionVolumeId { + cfg.RefreshVolumesInterval = 5 * time.Minute cfg.EBSDeviceKeys = []string{"*"} cfg.DiskDeviceTagKey = "device" } cfg.MiddlewareID = &agenthealth.StatusCodeID - cfg.RefreshIntervalSeconds = time.Duration(0) cfg.IMDSRetries = retryer.GetDefaultRetryNumber() return cfg, nil diff --git a/translator/translate/otel/processor/ec2taggerprocessor/translator_test.go b/translator/translate/otel/processor/ec2taggerprocessor/translator_test.go index 376f2d862f..b36c89e45a 100644 --- a/translator/translate/otel/processor/ec2taggerprocessor/translator_test.go +++ b/translator/translate/otel/processor/ec2taggerprocessor/translator_test.go @@ -29,7 +29,7 @@ func TestTranslator(t *testing.T) { JsonKey: Ec2taggerKey, }, }, - "FullEc2TaggerProcessorConfig": { + "FullEc2TaggerProcessorNoVolumeConfig": { input: map[string]interface{}{ "metrics": map[string]interface{}{ "append_dimensions": map[string]interface{}{ @@ -41,7 +41,8 @@ func TestTranslator(t *testing.T) { }, }, want: &ec2tagger.Config{ - RefreshIntervalSeconds: 0 * time.Second, + RefreshTagsInterval: 0 * time.Second, + 
RefreshVolumesInterval: 0 * time.Minute, EC2MetadataTags: []string{"ImageId", "InstanceId", "InstanceType"}, EC2InstanceTagKeys: []string{"AutoScalingGroupName"}, }, @@ -65,7 +66,8 @@ func TestTranslator(t *testing.T) { }, }, want: &ec2tagger.Config{ - RefreshIntervalSeconds: 0 * time.Second, + RefreshTagsInterval: 0 * time.Second, + RefreshVolumesInterval: 5 * time.Minute, EC2MetadataTags: []string{"ImageId", "InstanceId", "InstanceType"}, EC2InstanceTagKeys: []string{"AutoScalingGroupName"}, DiskDeviceTagKey: "device", @@ -82,7 +84,8 @@ func TestTranslator(t *testing.T) { require.NotNil(t, got) gotCfg, ok := got.(*ec2tagger.Config) require.True(t, ok) - require.Equal(t, tc.want.RefreshIntervalSeconds, gotCfg.RefreshIntervalSeconds) + require.Equal(t, tc.want.RefreshTagsInterval, gotCfg.RefreshTagsInterval) + require.Equal(t, tc.want.RefreshVolumesInterval, gotCfg.RefreshVolumesInterval) sort.Strings(gotCfg.EC2MetadataTags) require.Equal(t, tc.want.EC2MetadataTags, gotCfg.EC2MetadataTags) require.Equal(t, tc.want.EC2InstanceTagKeys, gotCfg.EC2InstanceTagKeys) diff --git a/translator/translate/otel/processor/filterprocessor/filter_containerinsights_config.yaml b/translator/translate/otel/processor/filterprocessor/filter_containerinsights_config.yaml new file mode 100644 index 0000000000..167528866c --- /dev/null +++ b/translator/translate/otel/processor/filterprocessor/filter_containerinsights_config.yaml @@ -0,0 +1,9 @@ +metrics: + exclude: + match_type: strict + metric_names: + - up + - scrape_duration_seconds + - scrape_samples_scraped + - scrape_series_added + - scrape_samples_post_metric_relabeling \ No newline at end of file diff --git a/translator/translate/otel/processor/filterprocessor/translator.go b/translator/translate/otel/processor/filterprocessor/translator.go index b6faafa234..1cdc80987f 100644 --- a/translator/translate/otel/processor/filterprocessor/translator.go +++ b/translator/translate/otel/processor/filterprocessor/translator.go @@ -23,15 
+23,18 @@ const ( //go:embed filter_jmx_config.yaml var containerInsightsJmxConfig string +//go:embed filter_containerinsights_config.yaml +var containerInsightsConfig string + type translator struct { common.NameProvider common.IndexProvider factory processor.Factory } -var _ common.Translator[component.Config] = (*translator)(nil) +var _ common.ComponentTranslator = (*translator)(nil) -func NewTranslator(opts ...common.TranslatorOption) common.Translator[component.Config] { +func NewTranslator(opts ...common.TranslatorOption) common.ComponentTranslator { t := &translator{factory: filterprocessor.NewFactory()} t.SetIndex(-1) for _, opt := range opts { @@ -43,7 +46,7 @@ func NewTranslator(opts ...common.TranslatorOption) common.Translator[component. return t } -var _ common.Translator[component.Config] = (*translator)(nil) +var _ common.ComponentTranslator = (*translator)(nil) func (t *translator) ID() component.ID { return component.NewIDWithName(t.factory.Type(), t.Name()) @@ -52,7 +55,8 @@ func (t *translator) ID() component.ID { // Translate creates a processor config based on the fields in the // Metrics section of the JSON config. 
func (t *translator) Translate(conf *confmap.Conf) (component.Config, error) { - if conf == nil || (!conf.IsSet(common.JmxConfigKey) && t.Name() != common.PipelineNameContainerInsightsJmx) { + // also checking for container insights pipeline to add default filtering for prometheus metadata + if conf == nil || (t.Name() != common.PipelineNameContainerInsights && t.Name() != common.PipelineNameKueue && t.Name() != common.PipelineNameContainerInsightsJmx && !conf.IsSet(common.JmxConfigKey)) { return nil, &common.MissingKeyError{ID: t.ID(), JsonKey: common.JmxConfigKey} } @@ -60,6 +64,9 @@ func (t *translator) Translate(conf *confmap.Conf) (component.Config, error) { if t.Name() == common.PipelineNameContainerInsightsJmx { return common.GetYamlFileToYamlConfig(cfg, containerInsightsJmxConfig) } + if t.Name() == common.PipelineNameContainerInsights || t.Name() == common.PipelineNameKueue { + return common.GetYamlFileToYamlConfig(cfg, containerInsightsConfig) + } jmxMap := common.GetIndexedMap(conf, common.JmxConfigKey, t.Index()) diff --git a/translator/translate/otel/processor/filterprocessor/translator_test.go b/translator/translate/otel/processor/filterprocessor/translator_test.go index d87fff55ff..bf558c18a9 100644 --- a/translator/translate/otel/processor/filterprocessor/translator_test.go +++ b/translator/translate/otel/processor/filterprocessor/translator_test.go @@ -144,3 +144,17 @@ func TestContainerInsightsJmx(t *testing.T) { assert.True(t, ok) assert.Equal(t, len(expectedCfg.Metrics.Include.MetricNames), len(actualCfg.Metrics.Include.MetricNames)) } + +func TestContainerInsights(t *testing.T) { + transl := NewTranslator(common.WithName(common.PipelineNameContainerInsights)).(*translator) + expectedCfg := transl.factory.CreateDefaultConfig().(*filterprocessor.Config) + c := testutil.GetConf(t, "filter_containerinsights_config.yaml") + require.NoError(t, c.Unmarshal(&expectedCfg)) + + conf := confmap.NewFromStringMap(testutil.GetJson(t, 
filepath.Join("testdata", "config.json"))) + translatedCfg, err := transl.Translate(conf) + assert.NoError(t, err) + actualCfg, ok := translatedCfg.(*filterprocessor.Config) + assert.True(t, ok) + assert.Equal(t, len(expectedCfg.Metrics.Exclude.MetricNames), len(actualCfg.Metrics.Exclude.MetricNames)) +} diff --git a/translator/translate/otel/processor/gpu/translator.go b/translator/translate/otel/processor/gpu/translator.go index ab7538ee0f..d3451e3e5f 100644 --- a/translator/translate/otel/processor/gpu/translator.go +++ b/translator/translate/otel/processor/gpu/translator.go @@ -17,9 +17,9 @@ type translator struct { factory processor.Factory } -var _ common.Translator[component.Config] = (*translator)(nil) +var _ common.ComponentTranslator = (*translator)(nil) -func NewTranslatorWithName(name string) common.Translator[component.Config] { +func NewTranslatorWithName(name string) common.ComponentTranslator { return &translator{name, gpuattributes.NewFactory()} } diff --git a/translator/translate/otel/processor/kueue/translator.go b/translator/translate/otel/processor/kueue/translator.go index d072144b4d..5e75886ec4 100644 --- a/translator/translate/otel/processor/kueue/translator.go +++ b/translator/translate/otel/processor/kueue/translator.go @@ -17,9 +17,9 @@ type translator struct { factory processor.Factory } -var _ common.Translator[component.Config] = (*translator)(nil) +var _ common.ComponentTranslator = (*translator)(nil) -func NewTranslatorWithName(name string) common.Translator[component.Config] { +func NewTranslatorWithName(name string) common.ComponentTranslator { return &translator{name, kueueattributes.NewFactory()} } diff --git a/translator/translate/otel/processor/metricsdecorator/translator.go b/translator/translate/otel/processor/metricsdecorator/translator.go index f2cc4e70a5..a658aa3b29 100644 --- a/translator/translate/otel/processor/metricsdecorator/translator.go +++ b/translator/translate/otel/processor/metricsdecorator/translator.go @@ 
-29,7 +29,7 @@ type ContextStatement struct { } type Translator interface { - common.Translator[component.Config] + common.ComponentTranslator // IsSet determines whether the config has the fields needed for the translator. IsSet(conf *confmap.Conf) bool } @@ -115,7 +115,7 @@ func (t *translator) Translate(conf *confmap.Conf) (component.Config, error) { } c := confmap.NewFromStringMap(map[string]any{ - "metric_statements": contextStatement, + "metric_statements": []ContextStatement{contextStatement}, }) if err := c.Unmarshal(&cfg); err != nil { return nil, fmt.Errorf("unable to unmarshal metric decoration processor: %w", err) diff --git a/translator/translate/otel/processor/metricsdecorator/translator_unix_test.go b/translator/translate/otel/processor/metricsdecorator/translator_unix_test.go index d00e60d538..357c172e7d 100644 --- a/translator/translate/otel/processor/metricsdecorator/translator_unix_test.go +++ b/translator/translate/otel/processor/metricsdecorator/translator_unix_test.go @@ -35,9 +35,9 @@ func TestTranslate(t *testing.T) { conf := confmap.NewFromStringMap(testutil.GetJson(t, filepath.Join("testdata", "unix", "config.json"))) translatedCfg, err := transl.Translate(conf) - assert.NoError(t, err) + require.NoError(t, err) actualCfg, ok := translatedCfg.(*transformprocessor.Config) - assert.True(t, ok) + require.True(t, ok) // sort the statements for consistency assert.Len(t, expectedCfg.MetricStatements, 1) @@ -64,7 +64,7 @@ func TestMetricDecoration(t *testing.T) { metrics.AddGaugeMetricDataPoint("other_metric", "none", 0.0, 0, 0, nil) ctx := context.Background() - proc, err := transl.factory.CreateMetricsProcessor(ctx, processortest.NewNopCreateSettings(), cfg, sink) + proc, err := transl.factory.CreateMetrics(ctx, processortest.NewNopSettings(), cfg, sink) require.NotNil(t, proc) require.NoError(t, err) actualMetrics := pmetric.NewMetrics() diff --git a/translator/translate/otel/processor/metricsdecorator/translator_windows_test.go 
b/translator/translate/otel/processor/metricsdecorator/translator_windows_test.go index 4b0a8c9593..5793e10ff0 100644 --- a/translator/translate/otel/processor/metricsdecorator/translator_windows_test.go +++ b/translator/translate/otel/processor/metricsdecorator/translator_windows_test.go @@ -67,7 +67,7 @@ func TestMetricDecoration(t *testing.T) { metrics.AddGaugeMetricDataPoint("Connections_Established", "none", 0.0, 0, 0, nil) ctx := context.Background() - proc, err := transl.factory.CreateMetricsProcessor(ctx, processortest.NewNopCreateSettings(), cfg, sink) + proc, err := transl.factory.CreateMetrics(ctx, processortest.NewNopSettings(), cfg, sink) require.NotNil(t, proc) require.NoError(t, err) actualMetrics := pmetric.NewMetrics() diff --git a/translator/translate/otel/processor/metricstransformprocessor/translator.go b/translator/translate/otel/processor/metricstransformprocessor/translator.go index e05e64c673..2db32741b8 100644 --- a/translator/translate/otel/processor/metricstransformprocessor/translator.go +++ b/translator/translate/otel/processor/metricstransformprocessor/translator.go @@ -59,9 +59,9 @@ type translator struct { factory processor.Factory } -var _ common.Translator[component.Config] = (*translator)(nil) +var _ common.ComponentTranslator = (*translator)(nil) -func NewTranslatorWithName(name string) common.Translator[component.Config] { +func NewTranslatorWithName(name string) common.ComponentTranslator { return &translator{name, metricstransformprocessor.NewFactory()} } diff --git a/translator/translate/otel/processor/metricstransformprocessor/translator_test.go b/translator/translate/otel/processor/metricstransformprocessor/translator_test.go index 6dea9b36b1..ec8c85cb1d 100644 --- a/translator/translate/otel/processor/metricstransformprocessor/translator_test.go +++ b/translator/translate/otel/processor/metricstransformprocessor/translator_test.go @@ -11,7 +11,6 @@ import ( 
"github.com/open-telemetry/opentelemetry-collector-contrib/processor/metricstransformprocessor" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/confmap" "github.com/aws/amazon-cloudwatch-agent/internal/util/testutil" @@ -20,7 +19,7 @@ import ( func TestTranslator(t *testing.T) { testCases := map[string]struct { - translator common.Translator[component.Config] + translator common.ComponentTranslator input map[string]interface{} want *confmap.Conf wantErr error diff --git a/translator/translate/otel/processor/resourcedetection/translator.go b/translator/translate/otel/processor/resourcedetection/translator.go index 76b146fd77..ce8737d019 100644 --- a/translator/translate/otel/processor/resourcedetection/translator.go +++ b/translator/translate/otel/processor/resourcedetection/translator.go @@ -9,6 +9,7 @@ import ( "github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/confmap" + "go.opentelemetry.io/collector/pipeline" "go.opentelemetry.io/collector/processor" "github.com/aws/amazon-cloudwatch-agent/translator/config" @@ -25,9 +26,9 @@ var appSignalsDefaultResourceDetectionConfig string var appSignalsECSResourceDetectionConfig string type translator struct { - name string - dataType component.DataType - factory processor.Factory + name string + signal pipeline.Signal + factory processor.Factory } type Option interface { @@ -40,17 +41,17 @@ func (o optionFunc) apply(t *translator) { o(t) } -// WithDataType determines where the translator should look to find +// WithSignal determines where the translator should look to find // the configuration. 
-func WithDataType(dataType component.DataType) Option { +func WithSignal(signal pipeline.Signal) Option { return optionFunc(func(t *translator) { - t.dataType = dataType + t.signal = signal }) } -var _ common.Translator[component.Config] = (*translator)(nil) +var _ common.ComponentTranslator = (*translator)(nil) -func NewTranslator(opts ...Option) common.Translator[component.Config] { +func NewTranslator(opts ...Option) common.ComponentTranslator { t := &translator{factory: resourcedetectionprocessor.NewFactory()} for _, opt := range opts { opt.apply(t) diff --git a/translator/translate/otel/processor/resourcedetection/translator_test.go b/translator/translate/otel/processor/resourcedetection/translator_test.go index 0133c6a483..aa01ae25cb 100644 --- a/translator/translate/otel/processor/resourcedetection/translator_test.go +++ b/translator/translate/otel/processor/resourcedetection/translator_test.go @@ -9,8 +9,8 @@ import ( "github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/confmap" + "go.opentelemetry.io/collector/pipeline" translatorconfig "github.com/aws/amazon-cloudwatch-agent/translator/config" "github.com/aws/amazon-cloudwatch-agent/translator/context" @@ -18,7 +18,7 @@ import ( ) func TestTranslate(t *testing.T) { - tt := NewTranslator(WithDataType(component.DataTypeTraces)) + tt := NewTranslator(WithSignal(pipeline.SignalTraces)) testCases := map[string]struct { input map[string]interface{} mode string diff --git a/translator/translate/otel/processor/resourceprocessor/translator.go b/translator/translate/otel/processor/resourceprocessor/translator.go index 49edeaca3e..a24f8c6d39 100644 --- a/translator/translate/otel/processor/resourceprocessor/translator.go +++ b/translator/translate/otel/processor/resourceprocessor/translator.go @@ -26,12 +26,12 @@ type 
translator struct { } var ( - baseKey = common.ConfigKey(common.LogsKey, common.MetricsCollectedKey) - k8sKey = common.ConfigKey(baseKey, common.KubernetesKey) - _ common.Translator[component.Config] = (*translator)(nil) + baseKey = common.ConfigKey(common.LogsKey, common.MetricsCollectedKey) + k8sKey = common.ConfigKey(baseKey, common.KubernetesKey) + _ common.ComponentTranslator = (*translator)(nil) ) -func NewTranslator(opts ...common.TranslatorOption) common.Translator[component.Config] { +func NewTranslator(opts ...common.TranslatorOption) common.ComponentTranslator { t := &translator{factory: resourceprocessor.NewFactory()} t.SetIndex(-1) for _, opt := range opts { @@ -43,7 +43,7 @@ func NewTranslator(opts ...common.TranslatorOption) common.Translator[component. return t } -var _ common.Translator[component.Config] = (*translator)(nil) +var _ common.ComponentTranslator = (*translator)(nil) func (t *translator) ID() component.ID { return component.NewIDWithName(t.factory.Type(), t.Name()) diff --git a/translator/translate/otel/processor/rollupprocessor/translator.go b/translator/translate/otel/processor/rollupprocessor/translator.go index 855693e3a5..1ac0f62e46 100644 --- a/translator/translate/otel/processor/rollupprocessor/translator.go +++ b/translator/translate/otel/processor/rollupprocessor/translator.go @@ -20,13 +20,13 @@ type translator struct { factory processor.Factory } -var _ common.Translator[component.Config] = (*translator)(nil) +var _ common.ComponentTranslator = (*translator)(nil) -func NewTranslator() common.Translator[component.Config] { +func NewTranslator() common.ComponentTranslator { return NewTranslatorWithName("") } -func NewTranslatorWithName(name string) common.Translator[component.Config] { +func NewTranslatorWithName(name string) common.ComponentTranslator { return &translator{name: name, factory: rollupprocessor.NewFactory()} } diff --git a/translator/translate/otel/processor/transformprocessor/translate.go 
b/translator/translate/otel/processor/transformprocessor/translate.go index f0306a689f..b801db9bce 100644 --- a/translator/translate/otel/processor/transformprocessor/translate.go +++ b/translator/translate/otel/processor/transformprocessor/translate.go @@ -26,9 +26,9 @@ type translator struct { factory processor.Factory } -var _ common.Translator[component.Config] = (*translator)(nil) +var _ common.ComponentTranslator = (*translator)(nil) -func NewTranslatorWithName(name string) common.Translator[component.Config] { +func NewTranslatorWithName(name string) common.ComponentTranslator { return &translator{name, transformprocessor.NewFactory()} } diff --git a/translator/translate/otel/processor/translator.go b/translator/translate/otel/processor/translator.go index 4c126fe18c..f697e05dd3 100644 --- a/translator/translate/otel/processor/translator.go +++ b/translator/translate/otel/processor/translator.go @@ -16,11 +16,11 @@ type translator struct { factory processor.Factory } -func NewDefaultTranslator(factory processor.Factory) common.Translator[component.Config] { +func NewDefaultTranslator(factory processor.Factory) common.ComponentTranslator { return NewDefaultTranslatorWithName("", factory) } -func NewDefaultTranslatorWithName(name string, factory processor.Factory) common.Translator[component.Config] { +func NewDefaultTranslatorWithName(name string, factory processor.Factory) common.ComponentTranslator { return &translator{name, factory} } diff --git a/translator/translate/otel/receiver/adapter/translator.go b/translator/translate/otel/receiver/adapter/translator.go index d180ef6190..0df7c64b04 100644 --- a/translator/translate/otel/receiver/adapter/translator.go +++ b/translator/translate/otel/receiver/adapter/translator.go @@ -33,14 +33,14 @@ type translator struct { defaultMetricCollectionInterval time.Duration } -var _ common.Translator[component.Config] = (*translator)(nil) +var _ common.ComponentTranslator = (*translator)(nil) // NewTranslator creates a 
new adapter receiver translator. -func NewTranslator(inputName, cfgKey string, defaultMetricCollectionInterval time.Duration) common.Translator[component.Config] { +func NewTranslator(inputName, cfgKey string, defaultMetricCollectionInterval time.Duration) common.ComponentTranslator { return NewTranslatorWithName("", inputName, cfgKey, time.Duration(0), defaultMetricCollectionInterval) } -func NewTranslatorWithName(name, inputName, cfgKey string, preferMetricCollectionInterval, defaultMetricCollectionInterval time.Duration) common.Translator[component.Config] { +func NewTranslatorWithName(name, inputName, cfgKey string, preferMetricCollectionInterval, defaultMetricCollectionInterval time.Duration) common.ComponentTranslator { return &translator{name, adapter.Type(inputName), cfgKey, preferMetricCollectionInterval, defaultMetricCollectionInterval} } diff --git a/translator/translate/otel/receiver/adapter/translators.go b/translator/translate/otel/receiver/adapter/translators.go index f47740aa4f..6607f8b089 100644 --- a/translator/translate/otel/receiver/adapter/translators.go +++ b/translator/translate/otel/receiver/adapter/translators.go @@ -74,8 +74,8 @@ var ( // plugins require adapter translators. Logs is processed first, so any // colliding metrics translators will override them. This follows the rule // setup. 
-func FindReceiversInConfig(conf *confmap.Conf, os string) (common.TranslatorMap[component.Config], error) { - translators := common.NewTranslatorMap[component.Config]() +func FindReceiversInConfig(conf *confmap.Conf, os string) (common.TranslatorMap[component.Config, component.ID], error) { + translators := common.NewTranslatorMap[component.Config, component.ID]() translators.Merge(fromLogs(conf)) metricTranslators, err := fromMetrics(conf, os) translators.Merge(metricTranslators) @@ -84,8 +84,8 @@ func FindReceiversInConfig(conf *confmap.Conf, os string) (common.TranslatorMap[ // fromMetrics creates adapter receiver translators based on the os-specific // metrics section in the config. -func fromMetrics(conf *confmap.Conf, os string) (common.TranslatorMap[component.Config], error) { - translators := common.NewTranslatorMap[component.Config]() +func fromMetrics(conf *confmap.Conf, os string) (common.TranslatorMap[component.Config, component.ID], error) { + translators := common.NewTranslatorMap[component.Config, component.ID]() switch os { case translatorconfig.OS_TYPE_LINUX, translatorconfig.OS_TYPE_DARWIN: translators.Merge(fromLinuxMetrics(conf)) @@ -99,7 +99,7 @@ func fromMetrics(conf *confmap.Conf, os string) (common.TranslatorMap[component. // fromLinuxMetrics creates a translator for each subsection within the // metrics::metrics_collected section of the config. Can be anything. -func fromLinuxMetrics(conf *confmap.Conf) common.TranslatorMap[component.Config] { +func fromLinuxMetrics(conf *confmap.Conf) common.TranslatorMap[component.Config, component.ID] { var validInputs map[string]bool if _, ok := conf.Get(common.ConfigKey(metricKey)).(map[string]interface{}); ok { rule := &metrics_collect.CollectMetrics{} @@ -113,8 +113,8 @@ func fromLinuxMetrics(conf *confmap.Conf) common.TranslatorMap[component.Config] // within the metrics::metrics_collected section. See windowsInputSet for // allow list. 
If non-allow-listed subsections exist, they will be grouped // under a windows performance counter adapter translator. -func fromWindowsMetrics(conf *confmap.Conf) common.TranslatorMap[component.Config] { - translators := common.NewTranslatorMap[component.Config]() +func fromWindowsMetrics(conf *confmap.Conf) common.TranslatorMap[component.Config, component.ID] { + translators := common.NewTranslatorMap[component.Config, component.ID]() if inputs, ok := conf.Get(metricKey).(map[string]interface{}); ok { for inputName := range inputs { if otelReceivers.Contains(inputName) { @@ -138,13 +138,13 @@ func fromWindowsMetrics(conf *confmap.Conf) common.TranslatorMap[component.Confi // fromLogs creates a translator for each subsection within logs::logs_collected // along with a socket listener translator if "emf" or "structuredlog" are present // within the logs:metrics_collected section. -func fromLogs(conf *confmap.Conf) common.TranslatorMap[component.Config] { +func fromLogs(conf *confmap.Conf) common.TranslatorMap[component.Config, component.ID] { return fromInputs(conf, nil, logKey) } // fromInputs converts all the keys in the section into adapter translators. 
-func fromInputs(conf *confmap.Conf, validInputs map[string]bool, baseKey string) common.TranslatorMap[component.Config] { - translators := common.NewTranslatorMap[component.Config]() +func fromInputs(conf *confmap.Conf, validInputs map[string]bool, baseKey string) common.TranslatorMap[component.Config, component.ID] { + translators := common.NewTranslatorMap[component.Config, component.ID]() if inputs, ok := conf.Get(baseKey).(map[string]interface{}); ok { for inputName := range inputs { if skipInputSet.Contains(inputName) { @@ -183,8 +183,8 @@ func fromInputs(conf *confmap.Conf, validInputs map[string]bool, baseKey string) // to provide a unique identifier for the receivers and easy in compare with the alias // https://github.com/influxdata/telegraf/blob/d8db3ca3a293bc24a9120b590984b09e2de1851a/models/running_input.go#L60 // and generate the appropriate running input when starting adapter -func fromMultipleInput(conf *confmap.Conf, inputName, os string) common.TranslatorMap[component.Config] { - translators := common.NewTranslatorMap[component.Config]() +func fromMultipleInput(conf *confmap.Conf, inputName, os string) common.TranslatorMap[component.Config, component.ID] { + translators := common.NewTranslatorMap[component.Config, component.ID]() cfgKey := common.ConfigKey(metricKey, inputName) if inputName == procstat.SectionKey { diff --git a/translator/translate/otel/receiver/awscontainerinsight/translator.go b/translator/translate/otel/receiver/awscontainerinsight/translator.go index 6994e4fe7a..696989680d 100644 --- a/translator/translate/otel/receiver/awscontainerinsight/translator.go +++ b/translator/translate/otel/receiver/awscontainerinsight/translator.go @@ -41,14 +41,14 @@ type translator struct { services []*collections.Pair[string, string] } -var _ common.Translator[component.Config] = (*translator)(nil) +var _ common.ComponentTranslator = (*translator)(nil) // NewTranslator creates a new aws container insight receiver translator. 
-func NewTranslator() common.Translator[component.Config] { +func NewTranslator() common.ComponentTranslator { return NewTranslatorWithName("") } -func NewTranslatorWithName(name string) common.Translator[component.Config] { +func NewTranslatorWithName(name string) common.ComponentTranslator { baseKey := common.ConfigKey(common.LogsKey, common.MetricsCollectedKey) return &translator{ name: name, diff --git a/translator/translate/otel/receiver/awscontainerinsightskueue/translator.go b/translator/translate/otel/receiver/awscontainerinsightskueue/translator.go index 7b24c714fb..e62600380f 100644 --- a/translator/translate/otel/receiver/awscontainerinsightskueue/translator.go +++ b/translator/translate/otel/receiver/awscontainerinsightskueue/translator.go @@ -24,14 +24,14 @@ type translator struct { factory receiver.Factory } -var _ common.Translator[component.Config] = (*translator)(nil) +var _ common.ComponentTranslator = (*translator)(nil) // NewTranslator creates a new aws container insight receiver translator. 
-func NewTranslator() common.Translator[component.Config] { +func NewTranslator() common.ComponentTranslator { return NewTranslatorWithName("") } -func NewTranslatorWithName(name string) common.Translator[component.Config] { +func NewTranslatorWithName(name string) common.ComponentTranslator { return &translator{ name: name, factory: awscontainerinsightskueuereceiver.NewFactory(), diff --git a/translator/translate/otel/receiver/awsxray/translator.go b/translator/translate/otel/receiver/awsxray/translator.go index 36834a9b3a..44bb88ab40 100644 --- a/translator/translate/otel/receiver/awsxray/translator.go +++ b/translator/translate/otel/receiver/awsxray/translator.go @@ -36,13 +36,13 @@ type translator struct { factory receiver.Factory } -var _ common.Translator[component.Config] = (*translator)(nil) +var _ common.ComponentTranslator = (*translator)(nil) -func NewTranslator() common.Translator[component.Config] { +func NewTranslator() common.ComponentTranslator { return NewTranslatorWithName("") } -func NewTranslatorWithName(name string) common.Translator[component.Config] { +func NewTranslatorWithName(name string) common.ComponentTranslator { return &translator{name, awsxrayreceiver.NewFactory()} } diff --git a/translator/translate/otel/receiver/jmx/translator.go b/translator/translate/otel/receiver/jmx/translator.go index 3f4f4b0308..7b366f283c 100644 --- a/translator/translate/otel/receiver/jmx/translator.go +++ b/translator/translate/otel/receiver/jmx/translator.go @@ -64,9 +64,9 @@ func WithIndex(index int) Option { } } -var _ common.Translator[component.Config] = (*translator)(nil) +var _ common.ComponentTranslator = (*translator)(nil) -func NewTranslator(opts ...Option) common.Translator[component.Config] { +func NewTranslator(opts ...Option) common.ComponentTranslator { t := &translator{index: -1, factory: jmxreceiver.NewFactory()} for _, opt := range opts { opt(t) diff --git a/translator/translate/otel/receiver/otlp/translator.go 
b/translator/translate/otel/receiver/otlp/translator.go index 756d578db7..0f6e43fa7f 100644 --- a/translator/translate/otel/receiver/otlp/translator.go +++ b/translator/translate/otel/receiver/otlp/translator.go @@ -11,6 +11,7 @@ import ( "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/config/configtls" "go.opentelemetry.io/collector/confmap" + "go.opentelemetry.io/collector/pipeline" "go.opentelemetry.io/collector/receiver" "go.opentelemetry.io/collector/receiver/otlpreceiver" @@ -29,16 +30,16 @@ type translator struct { common.NameProvider common.IndexProvider configKey string - dataType component.DataType + signal pipeline.Signal factory receiver.Factory } -// WithDataType determines where the translator should look to find +// WithSignal determines where the translator should look to find // the configuration. -func WithDataType(dataType component.DataType) common.TranslatorOption { +func WithSignal(signal pipeline.Signal) common.TranslatorOption { return func(target any) { if t, ok := target.(*translator); ok { - t.dataType = dataType + t.signal = signal } } } @@ -51,16 +52,16 @@ func WithConfigKey(configKey string) common.TranslatorOption { } } -var _ common.Translator[component.Config] = (*translator)(nil) +var _ common.ComponentTranslator = (*translator)(nil) -func NewTranslator(opts ...common.TranslatorOption) common.Translator[component.Config] { +func NewTranslator(opts ...common.TranslatorOption) common.ComponentTranslator { t := &translator{factory: otlpreceiver.NewFactory()} t.SetIndex(-1) for _, opt := range opts { opt(t) } - if t.Name() == "" && t.dataType.String() != "" { - t.SetName(t.dataType.String()) + if t.Name() == "" && t.signal.String() != "" { + t.SetName(t.signal.String()) if t.Index() != -1 { t.SetName(t.Name() + "/" + strconv.Itoa(t.Index())) } @@ -87,9 +88,9 @@ func (t *translator) Translate(conf *confmap.Conf) (component.Config, error) { cfg.HTTP.Endpoint = defaultHttpEndpoint if t.Name() == common.AppSignals 
{ - appSignalsConfigKeys, ok := common.AppSignalsConfigKeys[t.dataType] + appSignalsConfigKeys, ok := common.AppSignalsConfigKeys[t.signal] if !ok { - return nil, fmt.Errorf("no application_signals config key defined for data type: %s", t.dataType) + return nil, fmt.Errorf("no application_signals config key defined for signal: %s", t.signal) } if conf.IsSet(appSignalsConfigKeys[0]) { configKey = appSignalsConfigKeys[0] diff --git a/translator/translate/otel/receiver/otlp/translator_test.go b/translator/translate/otel/receiver/otlp/translator_test.go index e45435f018..91cc176274 100644 --- a/translator/translate/otel/receiver/otlp/translator_test.go +++ b/translator/translate/otel/receiver/otlp/translator_test.go @@ -9,8 +9,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/confmap" + "go.opentelemetry.io/collector/pipeline" "go.opentelemetry.io/collector/receiver/otlpreceiver" "github.com/aws/amazon-cloudwatch-agent/internal/util/testutil" @@ -26,7 +26,7 @@ func TestTranslatorWithoutDataType(t *testing.T) { } func TestTracesTranslator(t *testing.T) { - tt := NewTranslator(WithDataType(component.DataTypeTraces), WithConfigKey(common.ConfigKey(common.TracesKey, common.TracesCollectedKey, common.OtlpKey))) + tt := NewTranslator(WithSignal(pipeline.SignalTraces), WithConfigKey(common.ConfigKey(common.TracesKey, common.TracesCollectedKey, common.OtlpKey))) testCases := map[string]struct { input map[string]interface{} want *confmap.Conf @@ -117,7 +117,7 @@ func TestMetricsTranslator(t *testing.T) { input: map[string]interface{}{"metrics": map[string]interface{}{}}, index: -1, wantErr: &common.MissingKeyError{ - ID: NewTranslator(WithDataType(component.DataTypeMetrics)).ID(), + ID: NewTranslator(WithSignal(pipeline.SignalMetrics)).ID(), JsonKey: common.ConfigKey(common.MetricsKey, common.MetricsCollectedKey, common.OtlpKey), }, }, @@ -173,9 +173,9 @@ func 
TestMetricsTranslator(t *testing.T) { for name, testCase := range testCases { t.Run(name, func(t *testing.T) { conf := confmap.NewFromStringMap(testCase.input) - tt := NewTranslator(WithDataType(component.DataTypeMetrics), WithConfigKey(common.ConfigKey(common.MetricsKey, common.MetricsCollectedKey, common.OtlpKey))) + tt := NewTranslator(WithSignal(pipeline.SignalMetrics), WithConfigKey(common.ConfigKey(common.MetricsKey, common.MetricsCollectedKey, common.OtlpKey))) if testCase.index != -1 { - tt = NewTranslator(WithDataType(component.DataTypeMetrics), WithConfigKey(common.ConfigKey(common.MetricsKey, common.MetricsCollectedKey, common.OtlpKey)), common.WithIndex(testCase.index)) + tt = NewTranslator(WithSignal(pipeline.SignalMetrics), WithConfigKey(common.ConfigKey(common.MetricsKey, common.MetricsCollectedKey, common.OtlpKey)), common.WithIndex(testCase.index)) } got, err := tt.Translate(conf) assert.Equal(t, testCase.wantErr, err) @@ -214,7 +214,7 @@ func TestMetricsEmfTranslator(t *testing.T) { input: map[string]interface{}{"logs": map[string]interface{}{}}, index: -1, wantErr: &common.MissingKeyError{ - ID: NewTranslator(WithDataType(component.DataTypeMetrics)).ID(), + ID: NewTranslator(WithSignal(pipeline.SignalMetrics)).ID(), JsonKey: common.ConfigKey(common.LogsKey, common.MetricsCollectedKey, common.OtlpKey), }, }, @@ -271,7 +271,7 @@ func TestMetricsEmfTranslator(t *testing.T) { t.Run(name, func(t *testing.T) { conf := confmap.NewFromStringMap(testCase.input) tt := NewTranslator( - WithDataType(component.DataTypeMetrics), + WithSignal(pipeline.SignalMetrics), WithConfigKey(common.ConfigKey(common.LogsKey, common.MetricsCollectedKey, common.OtlpKey)), common.WithIndex(testCase.index), ) @@ -290,7 +290,7 @@ func TestMetricsEmfTranslator(t *testing.T) { } func TestTranslateAppSignals(t *testing.T) { - tt := NewTranslator(common.WithName(common.AppSignals), WithDataType(component.DataTypeTraces)) + tt := NewTranslator(common.WithName(common.AppSignals), 
WithSignal(pipeline.SignalTraces)) testCases := map[string]struct { input map[string]interface{} want *confmap.Conf diff --git a/translator/translate/otel/receiver/prometheus/translator.go b/translator/translate/otel/receiver/prometheus/translator.go index f0f9912586..656c22b143 100644 --- a/translator/translate/otel/receiver/prometheus/translator.go +++ b/translator/translate/otel/receiver/prometheus/translator.go @@ -35,9 +35,9 @@ type translator struct { type Option func(any) -var _ common.Translator[component.Config] = (*translator)(nil) +var _ common.ComponentTranslator = (*translator)(nil) -func NewTranslator(opts ...Option) common.Translator[component.Config] { +func NewTranslator(opts ...Option) common.ComponentTranslator { t := &translator{factory: prometheusreceiver.NewFactory()} for _, opt := range opts { opt(t) diff --git a/translator/translate/otel/receiver/tcplog/translator.go b/translator/translate/otel/receiver/tcplog/translator.go index 7bd34e9ed6..3db0906c74 100644 --- a/translator/translate/otel/receiver/tcplog/translator.go +++ b/translator/translate/otel/receiver/tcplog/translator.go @@ -21,7 +21,7 @@ type translator struct { factory receiver.Factory } -var _ common.Translator[component.Config] = (*translator)(nil) +var _ common.ComponentTranslator = (*translator)(nil) var ( baseKey = common.ConfigKey(common.LogsKey, common.MetricsCollectedKey, common.Emf) @@ -34,12 +34,12 @@ const ( ) // NewTranslator creates a new tcp logs receiver translator. -func NewTranslator() common.Translator[component.Config] { +func NewTranslator() common.ComponentTranslator { return NewTranslatorWithName("") } // NewTranslatorWithName creates a new tcp logs receiver translator. 
-func NewTranslatorWithName(name string) common.Translator[component.Config] { +func NewTranslatorWithName(name string) common.ComponentTranslator { return &translator{name, tcplogreceiver.NewFactory()} } diff --git a/translator/translate/otel/receiver/translator.go b/translator/translate/otel/receiver/translator.go index c07b95a3c6..ef01ed7b30 100644 --- a/translator/translate/otel/receiver/translator.go +++ b/translator/translate/otel/receiver/translator.go @@ -16,11 +16,11 @@ type translator struct { factory receiver.Factory } -func NewDefaultTranslator(factory receiver.Factory) common.Translator[component.Config] { +func NewDefaultTranslator(factory receiver.Factory) common.ComponentTranslator { return NewDefaultTranslatorWithName("", factory) } -func NewDefaultTranslatorWithName(name string, factory receiver.Factory) common.Translator[component.Config] { +func NewDefaultTranslatorWithName(name string, factory receiver.Factory) common.ComponentTranslator { return &translator{name, factory} } diff --git a/translator/translate/otel/receiver/udplog/translator.go b/translator/translate/otel/receiver/udplog/translator.go index 8b0237fe93..c315bf9818 100644 --- a/translator/translate/otel/receiver/udplog/translator.go +++ b/translator/translate/otel/receiver/udplog/translator.go @@ -21,7 +21,7 @@ type translator struct { factory receiver.Factory } -var _ common.Translator[component.Config] = (*translator)(nil) +var _ common.ComponentTranslator = (*translator)(nil) var ( baseKey = common.ConfigKey(common.LogsKey, common.MetricsCollectedKey, common.Emf) @@ -34,12 +34,12 @@ const ( ) // NewTranslator creates a new udp logs receiver translator. -func NewTranslator() common.Translator[component.Config] { +func NewTranslator() common.ComponentTranslator { return NewTranslatorWithName("") } // NewTranslatorWithName creates a new udp logs receiver translator. 
-func NewTranslatorWithName(name string) common.Translator[component.Config] { +func NewTranslatorWithName(name string) common.ComponentTranslator { return &translator{name, udplogreceiver.NewFactory()} } diff --git a/translator/translate/otel/translate_otel.go b/translator/translate/otel/translate_otel.go index c1ece60e04..ea1bb6f137 100644 --- a/translator/translate/otel/translate_otel.go +++ b/translator/translate/otel/translate_otel.go @@ -13,6 +13,7 @@ import ( "go.opentelemetry.io/collector/config/configtelemetry" "go.opentelemetry.io/collector/confmap" "go.opentelemetry.io/collector/otelcol" + "go.opentelemetry.io/collector/pipeline" "go.opentelemetry.io/collector/service" "go.opentelemetry.io/collector/service/telemetry" "go.uber.org/multierr" @@ -22,7 +23,7 @@ import ( "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/common" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/extension/entitystore" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/extension/server" - "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/pipeline" + pipelinetranslator "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/pipeline" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/pipeline/applicationsignals" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/pipeline/containerinsights" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/pipeline/containerinsightsjmx" @@ -35,9 +36,9 @@ import ( "github.com/aws/amazon-cloudwatch-agent/translator/util/ecsutil" ) -var registry = common.NewTranslatorMap[*common.ComponentTranslators]() +var registry = common.NewTranslatorMap[*common.ComponentTranslators, pipeline.ID]() -func RegisterPipeline(translators ...pipeline.Translator) { +func RegisterPipeline(translators ...pipelinetranslator.Translator) { for _, translator := range translators { registry.Set(translator) } @@ -55,7 +56,7 @@ func Translate(jsonConfig 
interface{}, os string) (*otelcol.Config, error) { log.Printf("W! CSM has already been deprecated") } - translators := common.NewTranslatorMap[*common.ComponentTranslators]() + translators := common.NewTranslatorMap[*common.ComponentTranslators, pipeline.ID]() metricsHostTranslators, err := host.NewTranslators(conf, host.MetricsKey, os) if err != nil { return nil, err @@ -68,18 +69,18 @@ func Translate(jsonConfig interface{}, os string) (*otelcol.Config, error) { translators.Merge(logsHostTranslators) containerInsightsTranslators := containerinsights.NewTranslators(conf) translators.Merge(containerInsightsTranslators) - translators.Set(applicationsignals.NewTranslator(component.DataTypeTraces)) - translators.Set(applicationsignals.NewTranslator(component.DataTypeMetrics)) + translators.Set(applicationsignals.NewTranslator(pipeline.SignalTraces)) + translators.Set(applicationsignals.NewTranslator(pipeline.SignalMetrics)) translators.Merge(prometheus.NewTranslators(conf)) translators.Set(emf_logs.NewTranslator()) translators.Set(xray.NewTranslator()) translators.Set(containerinsightsjmx.NewTranslator()) translators.Merge(jmx.NewTranslators(conf)) translators.Merge(registry) - pipelines, err := pipeline.NewTranslator(translators).Translate(conf) - if pipelines == nil { + pipelines, err := pipelinetranslator.NewTranslator(translators).Translate(conf) + if err != nil { translators.Set(nop.NewTranslator()) - pipelines, err = pipeline.NewTranslator(translators).Translate(conf) + pipelines, err = pipelinetranslator.NewTranslator(translators).Translate(conf) if err != nil { return nil, err } @@ -91,6 +92,7 @@ func Translate(jsonConfig interface{}, os string) (*otelcol.Config, error) { if context.CurrentContext().KubernetesMode() != "" { pipelines.Translators.Extensions.Set(server.NewTranslator()) } + cfg := &otelcol.Config{ Receivers: map[component.ID]component.Config{}, Exporters: map[component.ID]component.Config{}, @@ -100,6 +102,7 @@ func Translate(jsonConfig 
interface{}, os string) (*otelcol.Config, error) { Telemetry: telemetry.Config{ Logs: getLoggingConfig(conf), Metrics: telemetry.MetricsConfig{Level: configtelemetry.LevelNone}, + Traces: telemetry.TracesConfig{Level: configtelemetry.LevelNone}, }, Pipelines: pipelines.Pipelines, Extensions: pipelines.Translators.Extensions.Keys(), @@ -165,17 +168,17 @@ func build(conf *confmap.Conf, cfg *otelcol.Config, translators common.Component } // buildComponents attempts to translate a component for each ID in the set. -func buildComponents[C component.Config]( +func buildComponents[C component.Config, ID common.TranslatorID]( conf *confmap.Conf, - ids []component.ID, - components map[component.ID]C, - getTranslator func(component.ID) (common.Translator[C], bool), + ids []ID, + components map[ID]C, + getTranslator func(ID) (common.Translator[C, ID], bool), ) error { var errs error for _, id := range ids { translator, ok := getTranslator(id) if !ok { - errs = multierr.Append(errs, fmt.Errorf("missing translator for %v", id.Type())) + errs = multierr.Append(errs, fmt.Errorf("missing translator for %v", id.Name())) continue } cfg, err := translator.Translate(conf) diff --git a/translator/translate/otel/translate_otel_test.go b/translator/translate/otel/translate_otel_test.go index 309743caad..bb75406c7a 100644 --- a/translator/translate/otel/translate_otel_test.go +++ b/translator/translate/otel/translate_otel_test.go @@ -8,8 +8,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/confmap" + "go.opentelemetry.io/collector/pipeline" "github.com/aws/amazon-cloudwatch-agent/tool/testutil" "github.com/aws/amazon-cloudwatch-agent/translator" @@ -229,7 +229,7 @@ func TestTranslator(t *testing.T) { } type testTranslator struct { - id component.ID + id pipeline.ID version int } @@ -237,23 +237,25 @@ func (t testTranslator) Translate(_ *confmap.Conf) 
(*common.ComponentTranslators return nil, nil } -func (t testTranslator) ID() component.ID { +func (t testTranslator) ID() pipeline.ID { return t.id } -var _ common.Translator[*common.ComponentTranslators] = (*testTranslator)(nil) +var _ common.PipelineTranslator = (*testTranslator)(nil) func TestRegisterPipeline(t *testing.T) { - testType, _ := component.NewType("test") - original := &testTranslator{id: component.NewID(testType), version: 1} + + original := &testTranslator{id: pipeline.NewID(pipeline.SignalLogs), version: 1} tm := common.NewTranslatorMap[*common.ComponentTranslators](original) assert.Equal(t, 0, registry.Len()) - first := &testTranslator{id: component.NewID(testType), version: 2} - second := &testTranslator{id: component.NewID(testType), version: 3} + + first := &testTranslator{id: pipeline.NewID(pipeline.SignalLogs), version: 2} + second := &testTranslator{id: pipeline.NewID(pipeline.SignalLogs), version: 3} RegisterPipeline(first, second) assert.Equal(t, 1, registry.Len()) + tm.Merge(registry) - got, ok := tm.Get(component.NewID(testType)) + got, ok := tm.Get(pipeline.NewID(pipeline.SignalLogs)) assert.True(t, ok) assert.Equal(t, second.version, got.(*testTranslator).version) assert.NotEqual(t, first.version, got.(*testTranslator).version) diff --git a/translator/util/eksdetector/eksdetector.go b/translator/util/eksdetector/eksdetector.go index 58830883d6..a8677b98ed 100644 --- a/translator/util/eksdetector/eksdetector.go +++ b/translator/util/eksdetector/eksdetector.go @@ -5,7 +5,10 @@ package eksdetector import ( "context" + "encoding/base64" + "encoding/json" "fmt" + "strings" "sync" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -15,6 +18,7 @@ import ( type Detector interface { getConfigMap(namespace string, name string) (map[string]string, error) + getIssuer() (string, error) } type EksDetector struct { @@ -55,7 +59,8 @@ var ( } // IsEKS checks if the agent is running on EKS. 
This is done by using the kubernetes API to determine if the aws-auth - // configmap exists in the kube-system namespace + // configmap exists in the kube-system namespace or by extracting the "iss" field from the service account token and + // checking if it contains "eks" as a fall-back IsEKS = func() IsEKSCache { once.Do(func() { var errors error @@ -71,6 +76,11 @@ var ( awsAuth, err := eksDetector.getConfigMap(authConfigNamespace, authConfigConfigMap) if err == nil { value = awsAuth != nil + } else { + issuer, err := eksDetector.getIssuer() + if err == nil { + value = strings.Contains(strings.ToLower(issuer), "eks") + } } } isEKSCacheSingleton = IsEKSCache{Value: value, Err: errors} @@ -90,6 +100,41 @@ func (d *EksDetector) getConfigMap(namespace string, name string) (map[string]st return configMap.Data, nil } +// getIssuer retrieves the issuer ("iss") from the service account token +func (d *EksDetector) getIssuer() (string, error) { + conf, err := getInClusterConfig() + if err != nil { + return "", fmt.Errorf("failed to get in-cluster config: %w", err) + } + + token := conf.BearerToken + if token == "" { + return "", fmt.Errorf("empty token in config") + } + + parts := strings.Split(token, ".") + if len(parts) < 2 { + return "", fmt.Errorf("missing payload") + } + + decoded, err := base64.RawURLEncoding.DecodeString(parts[1]) + if err != nil { + return "", fmt.Errorf("failed to decode token payload: %w", err) + } + + var claims map[string]interface{} + if err = json.Unmarshal(decoded, &claims); err != nil { + return "", fmt.Errorf("failed to unmarshal token payload: %w", err) + } + + iss, ok := claims["iss"].(string) + if !ok { + return "", fmt.Errorf("issuer field not found in token") + } + + return iss, nil +} + func getClient() (kubernetes.Interface, error) { //Get cluster config confs, err := getInClusterConfig() diff --git a/translator/util/eksdetector/eksdetector_test.go b/translator/util/eksdetector/eksdetector_test.go index a5dafaf446..7a569e3963 
100644 --- a/translator/util/eksdetector/eksdetector_test.go +++ b/translator/util/eksdetector/eksdetector_test.go @@ -4,6 +4,7 @@ package eksdetector import ( + "encoding/base64" "fmt" "testing" @@ -54,9 +55,17 @@ func TestEKS(t *testing.T) { } testDetector.On("getConfigMap", authConfigNamespace, authConfigConfigMap).Return(map[string]string{conventions.AttributeK8SClusterName: "my-cluster"}, nil) + isEks := IsEKS() assert.True(t, isEks.Value) assert.NoError(t, isEks.Err) + + testDetector.On("getConfigMap", authConfigNamespace, authConfigConfigMap).Return(nil, fmt.Errorf("configmap not found")) + testDetector.On("getIssuer").Return("https://oidc.eks.us-west-2.amazonaws.com/id/someid", nil) + + isEks = IsEKS() + assert.True(t, isEks.Value) + assert.NoError(t, isEks.Err) } func Test_getConfigMap(t *testing.T) { @@ -82,6 +91,23 @@ func Test_getConfigMap(t *testing.T) { assert.NotNil(t, res) } +func Test_getIssuer(t *testing.T) { + client := fake.NewSimpleClientset() + testDetector := &EksDetector{Clientset: client} + + payload := `{"iss":"https://oidc.eks.us-west-2.amazonaws.com/id/someid"}` + encodedPayload := base64.RawURLEncoding.EncodeToString([]byte(payload)) + dummyToken := "header." 
+ encodedPayload + ".signature" + + getInClusterConfig = func() (*rest.Config, error) { + return &rest.Config{BearerToken: dummyToken}, nil + } + + issuer, err := testDetector.getIssuer() + assert.NoError(t, err) + assert.Equal(t, "https://oidc.eks.us-west-2.amazonaws.com/id/someid", issuer) +} + func Test_getClientError(t *testing.T) { //InClusterConfig error getInClusterConfig = func() (*rest.Config, error) { diff --git a/translator/util/eksdetector/eksdetectortestutil.go b/translator/util/eksdetector/eksdetectortestutil.go index c3b14a48c5..dc449d9482 100644 --- a/translator/util/eksdetector/eksdetectortestutil.go +++ b/translator/util/eksdetector/eksdetectortestutil.go @@ -25,7 +25,7 @@ var ( return &EksDetector{Clientset: fake.NewSimpleClientset()}, nil } - // TestIsEKSCacheEKS os used for unit testing EKS route + // TestIsEKSCacheEKS is used for unit testing EKS route TestIsEKSCacheEKS = func() IsEKSCache { return IsEKSCache{Value: true, Err: nil} } @@ -44,3 +44,8 @@ func (detector *MockDetector) getConfigMap(namespace string, name string) (map[s args := detector.Called(namespace, name) return args.Get(0).(map[string]string), args.Error(1) } + +func (detector *MockDetector) getIssuer() (string, error) { + args := detector.Called() + return args.Get(0).(string), args.Error(1) +} From 43561432b3630f6bfb93dcc8be5d143c1a9d767e Mon Sep 17 00:00:00 2001 From: musa-asad Date: Mon, 10 Mar 2025 02:33:35 -0400 Subject: [PATCH 02/17] fix lint --- extension/k8smetadata/extension.go | 3 +-- plugins/processors/awsentity/processor.go | 4 ++-- .../translate/otel/processor/resourceprocessor/translator.go | 4 +--- 3 files changed, 4 insertions(+), 7 deletions(-) diff --git a/extension/k8smetadata/extension.go b/extension/k8smetadata/extension.go index ebeecad117..5a664a0a2d 100644 --- a/extension/k8smetadata/extension.go +++ b/extension/k8smetadata/extension.go @@ -31,14 +31,13 @@ type KubernetesMetadata struct { ready atomic.Bool safeStopCh *k8sclient.SafeChannel mu 
sync.Mutex - clientset kubernetes.Interface endpointSliceWatcher *k8sclient.EndpointSliceWatcher } var _ extension.Extension = (*KubernetesMetadata)(nil) func jitterSleep(seconds int) { - jitter := time.Duration(rand.Intn(seconds)) * time.Second + jitter := time.Duration(rand.Intn(seconds)) * time.Second // nolint:gosec time.Sleep(jitter) } diff --git a/plugins/processors/awsentity/processor.go b/plugins/processors/awsentity/processor.go index 6886475414..d54e69ed8a 100644 --- a/plugins/processors/awsentity/processor.go +++ b/plugins/processors/awsentity/processor.go @@ -106,7 +106,7 @@ var getServiceNameSource = func() (string, string) { return es.GetMetricServiceNameAndSource() } -var getPodMeta = func(ctx context.Context) k8sclient.PodMetadata { +var getPodMeta = func() k8sclient.PodMetadata { podMeta := k8sclient.PodMetadata{} k8sMetadata := k8smetadata.GetKubernetesMetadata() @@ -193,7 +193,7 @@ func (p *awsEntityProcessor) processMetrics(ctx context.Context, md pmetric.Metr } } if p.config.KubernetesMode != "" { - p.k8sscraper.Scrape(rm.At(i).Resource(), getPodMeta(ctx)) + p.k8sscraper.Scrape(rm.At(i).Resource(), getPodMeta()) if p.config.Platform == config.ModeEC2 { ec2Info = getEC2InfoFromEntityStore() } diff --git a/translator/translate/otel/processor/resourceprocessor/translator.go b/translator/translate/otel/processor/resourceprocessor/translator.go index a24f8c6d39..9427fe0c4a 100644 --- a/translator/translate/otel/processor/resourceprocessor/translator.go +++ b/translator/translate/otel/processor/resourceprocessor/translator.go @@ -26,9 +26,7 @@ type translator struct { } var ( - baseKey = common.ConfigKey(common.LogsKey, common.MetricsCollectedKey) - k8sKey = common.ConfigKey(baseKey, common.KubernetesKey) - _ common.ComponentTranslator = (*translator)(nil) + _ common.ComponentTranslator = (*translator)(nil) ) func NewTranslator(opts ...common.TranslatorOption) common.ComponentTranslator { From 57bfa367cb2f86ced8a5df8986315979c294233b Mon Sep 17 
00:00:00 2001 From: Musa Date: Mon, 10 Mar 2025 04:43:09 -0400 Subject: [PATCH 03/17] Fix lint --- plugins/processors/awsentity/processor.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/processors/awsentity/processor.go b/plugins/processors/awsentity/processor.go index d54e69ed8a..b8686952ac 100644 --- a/plugins/processors/awsentity/processor.go +++ b/plugins/processors/awsentity/processor.go @@ -138,7 +138,7 @@ func newAwsEntityProcessor(config *Config, logger *zap.Logger) *awsEntityProcess } } -func (p *awsEntityProcessor) processMetrics(ctx context.Context, md pmetric.Metrics) (pmetric.Metrics, error) { +func (p *awsEntityProcessor) processMetrics(_ context.Context, md pmetric.Metrics) (pmetric.Metrics, error) { // Get the following metric attributes from the EntityStore: PlatformType, EC2.InstanceId, EC2.AutoScalingGroup rm := md.ResourceMetrics() From 9906d562261324593d5cc4059180cc345327e2c5 Mon Sep 17 00:00:00 2001 From: Musa Date: Tue, 11 Mar 2025 20:49:37 -0400 Subject: [PATCH 04/17] Fix comment. Co-authored-by: Lisa Guo --- internal/k8sCommon/k8sclient/endpointslicewatcher.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/k8sCommon/k8sclient/endpointslicewatcher.go b/internal/k8sCommon/k8sclient/endpointslicewatcher.go index 764fa6520b..53995ddb3a 100644 --- a/internal/k8sCommon/k8sclient/endpointslicewatcher.go +++ b/internal/k8sCommon/k8sclient/endpointslicewatcher.go @@ -39,7 +39,7 @@ type kvPair struct { value PodMetadata // value: {"workload", "namespace", "node"} } -// NewEndpointSliceWatcher creates an EndpointSlice watcher for the new approach (when USE_LIST_POD=false). 
+// NewEndpointSliceWatcher creates an EndpointSlice watcher func NewEndpointSliceWatcher( logger *zap.Logger, factory informers.SharedInformerFactory, From ca56a5bff65eecdcead740cc5dc98c5a47a9ed75 Mon Sep 17 00:00:00 2001 From: musa-asad Date: Tue, 11 Mar 2025 20:54:02 -0400 Subject: [PATCH 05/17] rename --- extension/k8smetadata/extension.go | 10 +++++----- extension/k8smetadata/extension_test.go | 2 +- extension/k8smetadata/factory.go | 16 ++++++++-------- 3 files changed, 14 insertions(+), 14 deletions(-) diff --git a/extension/k8smetadata/extension.go b/extension/k8smetadata/extension.go index 5a664a0a2d..e0a1eb56b6 100644 --- a/extension/k8smetadata/extension.go +++ b/extension/k8smetadata/extension.go @@ -25,7 +25,7 @@ const ( jitterKubernetesAPISeconds = 10 ) -type KubernetesMetadata struct { +type kubernetesMetadata struct { logger *zap.Logger config *Config ready atomic.Bool @@ -34,14 +34,14 @@ type KubernetesMetadata struct { endpointSliceWatcher *k8sclient.EndpointSliceWatcher } -var _ extension.Extension = (*KubernetesMetadata)(nil) +var _ extension.Extension = (*kubernetesMetadata)(nil) func jitterSleep(seconds int) { jitter := time.Duration(rand.Intn(seconds)) * time.Second // nolint:gosec time.Sleep(jitter) } -func (e *KubernetesMetadata) Start(_ context.Context, _ component.Host) error { +func (e *kubernetesMetadata) Start(_ context.Context, _ component.Host) error { e.mu.Lock() defer e.mu.Unlock() @@ -79,14 +79,14 @@ func (e *KubernetesMetadata) Start(_ context.Context, _ component.Host) error { return nil } -func (e *KubernetesMetadata) Shutdown(_ context.Context) error { +func (e *kubernetesMetadata) Shutdown(_ context.Context) error { e.mu.Lock() defer e.mu.Unlock() e.safeStopCh.Close() return nil } -func (e *KubernetesMetadata) GetPodMetadata(ip string) k8sclient.PodMetadata { +func (e *kubernetesMetadata) GetPodMetadata(ip string) k8sclient.PodMetadata { pm, ok := e.endpointSliceWatcher.IPToPodMetadata.Load(ip) if !ok { 
e.logger.Debug("GetPodMetadata: no mapping found for IP", zap.String("ip", ip)) diff --git a/extension/k8smetadata/extension_test.go b/extension/k8smetadata/extension_test.go index 02aa7655d3..57be0a4754 100644 --- a/extension/k8smetadata/extension_test.go +++ b/extension/k8smetadata/extension_test.go @@ -26,7 +26,7 @@ func TestKubernetesMetadata_GetPodMetadata(t *testing.T) { } esw.IPToPodMetadata.Store(testIP, expected) - kMeta := &KubernetesMetadata{ + kMeta := &kubernetesMetadata{ logger: zap.NewNop(), endpointSliceWatcher: esw, } diff --git a/extension/k8smetadata/factory.go b/extension/k8smetadata/factory.go index 25d77edf1b..0e0cd1c5c9 100644 --- a/extension/k8smetadata/factory.go +++ b/extension/k8smetadata/factory.go @@ -12,16 +12,16 @@ import ( ) var ( - TypeStr, _ = component.NewType("k8smetadata") - kubernetesMetadata *KubernetesMetadata - mutex sync.RWMutex + TypeStr, _ = component.NewType("k8smetadata") + kubernetesMetadataExt *kubernetesMetadata + mutex sync.RWMutex ) -func GetKubernetesMetadata() *KubernetesMetadata { +func GetKubernetesMetadata() *kubernetesMetadata { mutex.RLock() defer mutex.RUnlock() - if kubernetesMetadata != nil && kubernetesMetadata.ready.Load() { - return kubernetesMetadata + if kubernetesMetadataExt != nil && kubernetesMetadataExt.ready.Load() { + return kubernetesMetadataExt } return nil } @@ -42,9 +42,9 @@ func createDefaultConfig() component.Config { func createExtension(_ context.Context, settings extension.Settings, cfg component.Config) (extension.Extension, error) { mutex.Lock() defer mutex.Unlock() - kubernetesMetadata = &KubernetesMetadata{ + kubernetesMetadataExt = &kubernetesMetadata{ logger: settings.Logger, config: cfg.(*Config), } - return kubernetesMetadata, nil + return kubernetesMetadataExt, nil } From b4f386a5c6bd9d59b514096987bbba464bb4e485 Mon Sep 17 00:00:00 2001 From: musa-asad Date: Wed, 12 Mar 2025 12:09:02 -0400 Subject: [PATCH 06/17] Revert "rename" This reverts commit 
ca56a5bff65eecdcead740cc5dc98c5a47a9ed75. --- extension/k8smetadata/extension.go | 10 +++++----- extension/k8smetadata/extension_test.go | 2 +- extension/k8smetadata/factory.go | 16 ++++++++-------- 3 files changed, 14 insertions(+), 14 deletions(-) diff --git a/extension/k8smetadata/extension.go b/extension/k8smetadata/extension.go index e0a1eb56b6..5a664a0a2d 100644 --- a/extension/k8smetadata/extension.go +++ b/extension/k8smetadata/extension.go @@ -25,7 +25,7 @@ const ( jitterKubernetesAPISeconds = 10 ) -type kubernetesMetadata struct { +type KubernetesMetadata struct { logger *zap.Logger config *Config ready atomic.Bool @@ -34,14 +34,14 @@ type kubernetesMetadata struct { endpointSliceWatcher *k8sclient.EndpointSliceWatcher } -var _ extension.Extension = (*kubernetesMetadata)(nil) +var _ extension.Extension = (*KubernetesMetadata)(nil) func jitterSleep(seconds int) { jitter := time.Duration(rand.Intn(seconds)) * time.Second // nolint:gosec time.Sleep(jitter) } -func (e *kubernetesMetadata) Start(_ context.Context, _ component.Host) error { +func (e *KubernetesMetadata) Start(_ context.Context, _ component.Host) error { e.mu.Lock() defer e.mu.Unlock() @@ -79,14 +79,14 @@ func (e *kubernetesMetadata) Start(_ context.Context, _ component.Host) error { return nil } -func (e *kubernetesMetadata) Shutdown(_ context.Context) error { +func (e *KubernetesMetadata) Shutdown(_ context.Context) error { e.mu.Lock() defer e.mu.Unlock() e.safeStopCh.Close() return nil } -func (e *kubernetesMetadata) GetPodMetadata(ip string) k8sclient.PodMetadata { +func (e *KubernetesMetadata) GetPodMetadata(ip string) k8sclient.PodMetadata { pm, ok := e.endpointSliceWatcher.IPToPodMetadata.Load(ip) if !ok { e.logger.Debug("GetPodMetadata: no mapping found for IP", zap.String("ip", ip)) diff --git a/extension/k8smetadata/extension_test.go b/extension/k8smetadata/extension_test.go index 57be0a4754..02aa7655d3 100644 --- a/extension/k8smetadata/extension_test.go +++ 
b/extension/k8smetadata/extension_test.go @@ -26,7 +26,7 @@ func TestKubernetesMetadata_GetPodMetadata(t *testing.T) { } esw.IPToPodMetadata.Store(testIP, expected) - kMeta := &kubernetesMetadata{ + kMeta := &KubernetesMetadata{ logger: zap.NewNop(), endpointSliceWatcher: esw, } diff --git a/extension/k8smetadata/factory.go b/extension/k8smetadata/factory.go index 0e0cd1c5c9..25d77edf1b 100644 --- a/extension/k8smetadata/factory.go +++ b/extension/k8smetadata/factory.go @@ -12,16 +12,16 @@ import ( ) var ( - TypeStr, _ = component.NewType("k8smetadata") - kubernetesMetadataExt *kubernetesMetadata - mutex sync.RWMutex + TypeStr, _ = component.NewType("k8smetadata") + kubernetesMetadata *KubernetesMetadata + mutex sync.RWMutex ) -func GetKubernetesMetadata() *kubernetesMetadata { +func GetKubernetesMetadata() *KubernetesMetadata { mutex.RLock() defer mutex.RUnlock() - if kubernetesMetadataExt != nil && kubernetesMetadataExt.ready.Load() { - return kubernetesMetadataExt + if kubernetesMetadata != nil && kubernetesMetadata.ready.Load() { + return kubernetesMetadata } return nil } @@ -42,9 +42,9 @@ func createDefaultConfig() component.Config { func createExtension(_ context.Context, settings extension.Settings, cfg component.Config) (extension.Extension, error) { mutex.Lock() defer mutex.Unlock() - kubernetesMetadataExt = &kubernetesMetadata{ + kubernetesMetadata = &KubernetesMetadata{ logger: settings.Logger, config: cfg.(*Config), } - return kubernetesMetadataExt, nil + return kubernetesMetadata, nil } From e024ef69e520a341366af353f5de27d1554b3ef4 Mon Sep 17 00:00:00 2001 From: musa-asad Date: Thu, 13 Mar 2025 01:59:31 -0400 Subject: [PATCH 07/17] add tests; fix race condition --- extension/k8smetadata/extension_test.go | 22 +++++++ .../k8sclient/endpointslicewatcher.go | 10 ++- .../k8sclient/endpointslicewatcher_test.go | 6 +- .../k8sCommon/k8sclient/kubernetes_utils.go | 9 ++- .../k8sclient/kubernetes_utils_test.go | 6 ++ .../processors/awsentity/processor_test.go | 
65 +++++++++++++++++++ 6 files changed, 110 insertions(+), 8 deletions(-) diff --git a/extension/k8smetadata/extension_test.go b/extension/k8smetadata/extension_test.go index 02aa7655d3..3128d9a650 100644 --- a/extension/k8smetadata/extension_test.go +++ b/extension/k8smetadata/extension_test.go @@ -37,3 +37,25 @@ func TestKubernetesMetadata_GetPodMetadata(t *testing.T) { unknown := kMeta.GetPodMetadata("9.9.9.9") assert.Equal(t, k8sclient.PodMetadata{}, unknown, "GetPodMetadata should return empty if the IP is not present") } + +func TestKubernetesMetadata_GetPodMetadata_Incomplete(t *testing.T) { + esw := &k8sclient.EndpointSliceWatcher{ + IPToPodMetadata: &sync.Map{}, + } + + const testIP = "2.2.2.2" + expected := k8sclient.PodMetadata{ + Workload: "incomplete-workload", + Namespace: "", + Node: "", + } + esw.IPToPodMetadata.Store(testIP, expected) + + kMeta := &KubernetesMetadata{ + logger: zap.NewNop(), + endpointSliceWatcher: esw, + } + + got := kMeta.GetPodMetadata(testIP) + assert.Equal(t, expected, got, "GetPodMetadata should return the stored incomplete PodMetadata for IP %s", testIP) +} diff --git a/internal/k8sCommon/k8sclient/endpointslicewatcher.go b/internal/k8sCommon/k8sclient/endpointslicewatcher.go index 53995ddb3a..2db5f8c132 100644 --- a/internal/k8sCommon/k8sclient/endpointslicewatcher.go +++ b/internal/k8sCommon/k8sclient/endpointslicewatcher.go @@ -78,7 +78,7 @@ func (w *EndpointSliceWatcher) Run(stopCh chan struct{}) { func (w *EndpointSliceWatcher) WaitForCacheSync(stopCh chan struct{}) { if !cache.WaitForNamedCacheSync("endpointSliceWatcher", stopCh, w.informer.HasSynced) { - w.logger.Fatal("timed out waiting for endpointSliceWatcher cache to sync") + w.logger.Error("timed out waiting for endpointSliceWatcher cache to sync") } w.logger.Info("endpointSliceWatcher: Cache synced") } @@ -226,7 +226,9 @@ func (w *EndpointSliceWatcher) handleSliceUpdate(oldObj, newObj interface{}) { // 3) For each key in oldKeys that doesn't exist in newKeys, 
remove it for k := range oldKeysSet { if _, stillPresent := newKeysSet[k]; !stillPresent { - w.deleter.DeleteWithDelay(w.IPToPodMetadata, k) + if oldVal, ok := w.IPToPodMetadata.Load(k); ok { + w.deleter.DeleteWithDelay(w.IPToPodMetadata, k, oldVal) + } } } @@ -267,7 +269,9 @@ func (w *EndpointSliceWatcher) removeSliceKeys(slice *discv1.EndpointSlice) { keys := val.([]string) for _, k := range keys { - w.deleter.DeleteWithDelay(w.IPToPodMetadata, k) + if currentVal, ok := w.IPToPodMetadata.Load(k); ok { + w.deleter.DeleteWithDelay(w.IPToPodMetadata, k, currentVal) + } } w.sliceToKeysMap.Delete(sliceUID) } diff --git a/internal/k8sCommon/k8sclient/endpointslicewatcher_test.go b/internal/k8sCommon/k8sclient/endpointslicewatcher_test.go index 05ef32bea1..dbd3ec7402 100644 --- a/internal/k8sCommon/k8sclient/endpointslicewatcher_test.go +++ b/internal/k8sCommon/k8sclient/endpointslicewatcher_test.go @@ -21,8 +21,10 @@ import ( // MockDeleter deletes a key immediately, useful for testing. type MockDeleter struct{} -func (md *MockDeleter) DeleteWithDelay(m *sync.Map, key interface{}) { - m.Delete(key) +func (md *MockDeleter) DeleteWithDelay(m *sync.Map, key interface{}, expected interface{}) { + if current, ok := m.Load(key); ok && reflect.DeepEqual(current, expected) { + m.Delete(key) + } } var mockDeleter = &MockDeleter{} diff --git a/internal/k8sCommon/k8sclient/kubernetes_utils.go b/internal/k8sCommon/k8sclient/kubernetes_utils.go index fffd3faf58..08677e4b60 100644 --- a/internal/k8sCommon/k8sclient/kubernetes_utils.go +++ b/internal/k8sCommon/k8sclient/kubernetes_utils.go @@ -5,6 +5,7 @@ package k8sclient import ( "fmt" + "reflect" "regexp" "sync" "time" @@ -104,7 +105,7 @@ func (sc *SafeChannel) Close() { // Deleter represents a type that can delete a key from a map after a certain delay. 
type Deleter interface { - DeleteWithDelay(m *sync.Map, key interface{}) + DeleteWithDelay(m *sync.Map, key interface{}, expected interface{}) } // TimedDeleter deletes a key after a specified delay. @@ -112,9 +113,11 @@ type TimedDeleter struct { Delay time.Duration } -func (td *TimedDeleter) DeleteWithDelay(m *sync.Map, key interface{}) { +func (td *TimedDeleter) DeleteWithDelay(m *sync.Map, key interface{}, expected interface{}) { go func() { time.Sleep(td.Delay) - m.Delete(key) + if current, ok := m.Load(key); ok && reflect.DeepEqual(current, expected) { + m.Delete(key) + } }() } diff --git a/internal/k8sCommon/k8sclient/kubernetes_utils_test.go b/internal/k8sCommon/k8sclient/kubernetes_utils_test.go index 21f3b0f3d5..621898bbcf 100644 --- a/internal/k8sCommon/k8sclient/kubernetes_utils_test.go +++ b/internal/k8sCommon/k8sclient/kubernetes_utils_test.go @@ -68,6 +68,12 @@ func TestInferWorkloadName(t *testing.T) { service: "service", expected: "service", }, + { + name: "No match, empty fallback returns full pod name", + podName: "custom-app-xyz123", + service: "", + expected: "custom-app-xyz123", + }, } for _, tc := range testCases { diff --git a/plugins/processors/awsentity/processor_test.go b/plugins/processors/awsentity/processor_test.go index 5ed975be01..30e8930366 100644 --- a/plugins/processors/awsentity/processor_test.go +++ b/plugins/processors/awsentity/processor_test.go @@ -16,6 +16,7 @@ import ( "go.uber.org/zap/zapcore" "github.com/aws/amazon-cloudwatch-agent/extension/entitystore" + "github.com/aws/amazon-cloudwatch-agent/internal/k8sCommon/k8sclient" "github.com/aws/amazon-cloudwatch-agent/plugins/processors/awsentity/entityattributes" "github.com/aws/amazon-cloudwatch-agent/translator/config" ) @@ -82,6 +83,16 @@ func newMockSetAutoScalingGroup(es *mockEntityStore) func(string) { } } +func newMockPodMeta(workload, namespace, node string) func() k8sclient.PodMetadata { + return func() k8sclient.PodMetadata { + return k8sclient.PodMetadata{ + 
Workload: workload, + Namespace: namespace, + Node: node, + } + } +} + // This helper function creates a test logger // so that it can send the log messages into a // temporary buffer for pattern matching @@ -919,6 +930,60 @@ func TestProcessMetricsDatapointAttributeScraping(t *testing.T) { } } +func TestAwsEntityProcessor_AddsEntityFieldsFromPodMeta_WithMock(t *testing.T) { + logger, _ := zap.NewDevelopment() + + tests := []struct { + name string + metrics pmetric.Metrics + mockGetPodMeta func() k8sclient.PodMetadata + want map[string]any + }{ + { + name: "PodMetaFromMockFunction", + metrics: generateMetrics(), + mockGetPodMeta: newMockPodMeta( + "test-workload", + "test-namespace", + "test-node", + ), + want: map[string]any{ + "com.amazonaws.cloudwatch.entity.internal.k8s.workload.name": "test-workload", + "com.amazonaws.cloudwatch.entity.internal.k8s.namespace.name": "test-namespace", + "com.amazonaws.cloudwatch.entity.internal.k8s.node.name": "test-node", + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + origGetPodMeta := getPodMeta + getPodMeta = tt.mockGetPodMeta + defer func() { getPodMeta = origGetPodMeta }() + + metrics := tt.metrics + rm := metrics.ResourceMetrics().At(0) + rm.Resource().Attributes().Clear() + + processor := newAwsEntityProcessor(&Config{ + EntityType: attributeService, + ClusterName: "test-cluster", + }, logger) + processor.config.KubernetesMode = config.ModeEKS + + _, err := processor.processMetrics(context.Background(), metrics) + assert.NoError(t, err) + + attrs := rm.Resource().Attributes().AsRaw() + for key, expectedVal := range tt.want { + actualVal, exists := attrs[key] + assert.True(t, exists, "expected attribute %s to be set", key) + assert.Equal(t, expectedVal, actualVal, "mismatch for attribute %s", key) + } + }) + } +} + func generateMetrics(resourceAttrs ...string) pmetric.Metrics { md := pmetric.NewMetrics() generateResource(md, resourceAttrs...) 
From 2a30bf76f8a3a551aa2a18e72293062249c70f47 Mon Sep 17 00:00:00 2001 From: musa-asad Date: Thu, 13 Mar 2025 11:24:40 -0400 Subject: [PATCH 08/17] debug --- plugins/processors/awsentity/processor.go | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/plugins/processors/awsentity/processor.go b/plugins/processors/awsentity/processor.go index b8686952ac..b59d9a5074 100644 --- a/plugins/processors/awsentity/processor.go +++ b/plugins/processors/awsentity/processor.go @@ -5,6 +5,8 @@ package awsentity import ( "context" + "go.opentelemetry.io/collector/client" + "net" "strings" "github.com/go-playground/validator/v10" @@ -106,14 +108,21 @@ var getServiceNameSource = func() (string, string) { return es.GetMetricServiceNameAndSource() } -var getPodMeta = func() k8sclient.PodMetadata { +var getPodMeta = func(ctx context.Context) k8sclient.PodMetadata { podMeta := k8sclient.PodMetadata{} k8sMetadata := k8smetadata.GetKubernetesMetadata() if k8sMetadata != nil { podIP := "" - // Get the pod IP from the context + switch addr := client.FromContext(ctx).Addr.(type) { + case *net.UDPAddr: + podIP = addr.IP.String() + case *net.TCPAddr: + podIP = addr.IP.String() + case *net.IPAddr: + podIP = addr.IP.String() + } podMeta = k8sMetadata.GetPodMetadata(podIP) } @@ -138,7 +147,7 @@ func newAwsEntityProcessor(config *Config, logger *zap.Logger) *awsEntityProcess } } -func (p *awsEntityProcessor) processMetrics(_ context.Context, md pmetric.Metrics) (pmetric.Metrics, error) { +func (p *awsEntityProcessor) processMetrics(ctx context.Context, md pmetric.Metrics) (pmetric.Metrics, error) { // Get the following metric attributes from the EntityStore: PlatformType, EC2.InstanceId, EC2.AutoScalingGroup rm := md.ResourceMetrics() @@ -193,7 +202,7 @@ func (p *awsEntityProcessor) processMetrics(_ context.Context, md pmetric.Metric } } if p.config.KubernetesMode != "" { - p.k8sscraper.Scrape(rm.At(i).Resource(), getPodMeta()) + 
p.k8sscraper.Scrape(rm.At(i).Resource(), getPodMeta(ctx)) if p.config.Platform == config.ModeEC2 { ec2Info = getEC2InfoFromEntityStore() } From 4945626813810c1d1c1c5430cc12ff5b3edeb5c7 Mon Sep 17 00:00:00 2001 From: musa-asad Date: Thu, 13 Mar 2025 11:43:38 -0400 Subject: [PATCH 09/17] revert fix --- internal/k8sCommon/k8sclient/endpointslicewatcher.go | 8 ++------ .../k8sCommon/k8sclient/endpointslicewatcher_test.go | 4 +--- internal/k8sCommon/k8sclient/kubernetes_utils.go | 9 +++------ 3 files changed, 6 insertions(+), 15 deletions(-) diff --git a/internal/k8sCommon/k8sclient/endpointslicewatcher.go b/internal/k8sCommon/k8sclient/endpointslicewatcher.go index 2db5f8c132..1a7d6fbaa4 100644 --- a/internal/k8sCommon/k8sclient/endpointslicewatcher.go +++ b/internal/k8sCommon/k8sclient/endpointslicewatcher.go @@ -226,9 +226,7 @@ func (w *EndpointSliceWatcher) handleSliceUpdate(oldObj, newObj interface{}) { // 3) For each key in oldKeys that doesn't exist in newKeys, remove it for k := range oldKeysSet { if _, stillPresent := newKeysSet[k]; !stillPresent { - if oldVal, ok := w.IPToPodMetadata.Load(k); ok { - w.deleter.DeleteWithDelay(w.IPToPodMetadata, k, oldVal) - } + w.deleter.DeleteWithDelay(w.IPToPodMetadata, k) } } @@ -269,9 +267,7 @@ func (w *EndpointSliceWatcher) removeSliceKeys(slice *discv1.EndpointSlice) { keys := val.([]string) for _, k := range keys { - if currentVal, ok := w.IPToPodMetadata.Load(k); ok { - w.deleter.DeleteWithDelay(w.IPToPodMetadata, k, currentVal) - } + w.deleter.DeleteWithDelay(w.IPToPodMetadata, k, currentVal) } w.sliceToKeysMap.Delete(sliceUID) } diff --git a/internal/k8sCommon/k8sclient/endpointslicewatcher_test.go b/internal/k8sCommon/k8sclient/endpointslicewatcher_test.go index dbd3ec7402..c8f642b910 100644 --- a/internal/k8sCommon/k8sclient/endpointslicewatcher_test.go +++ b/internal/k8sCommon/k8sclient/endpointslicewatcher_test.go @@ -22,9 +22,7 @@ import ( type MockDeleter struct{} func (md *MockDeleter) DeleteWithDelay(m 
*sync.Map, key interface{}, expected interface{}) { - if current, ok := m.Load(key); ok && reflect.DeepEqual(current, expected) { - m.Delete(key) - } + m.Delete(key) } var mockDeleter = &MockDeleter{} diff --git a/internal/k8sCommon/k8sclient/kubernetes_utils.go b/internal/k8sCommon/k8sclient/kubernetes_utils.go index 08677e4b60..fffd3faf58 100644 --- a/internal/k8sCommon/k8sclient/kubernetes_utils.go +++ b/internal/k8sCommon/k8sclient/kubernetes_utils.go @@ -5,7 +5,6 @@ package k8sclient import ( "fmt" - "reflect" "regexp" "sync" "time" @@ -105,7 +104,7 @@ func (sc *SafeChannel) Close() { // Deleter represents a type that can delete a key from a map after a certain delay. type Deleter interface { - DeleteWithDelay(m *sync.Map, key interface{}, expected interface{}) + DeleteWithDelay(m *sync.Map, key interface{}) } // TimedDeleter deletes a key after a specified delay. @@ -113,11 +112,9 @@ type TimedDeleter struct { Delay time.Duration } -func (td *TimedDeleter) DeleteWithDelay(m *sync.Map, key interface{}, expected interface{}) { +func (td *TimedDeleter) DeleteWithDelay(m *sync.Map, key interface{}) { go func() { time.Sleep(td.Delay) - if current, ok := m.Load(key); ok && reflect.DeepEqual(current, expected) { - m.Delete(key) - } + m.Delete(key) }() } From 377100799462cf91453997529624f90bbdf6a0cc Mon Sep 17 00:00:00 2001 From: musa-asad Date: Thu, 13 Mar 2025 11:44:01 -0400 Subject: [PATCH 10/17] typo --- internal/k8sCommon/k8sclient/endpointslicewatcher.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/k8sCommon/k8sclient/endpointslicewatcher.go b/internal/k8sCommon/k8sclient/endpointslicewatcher.go index 1a7d6fbaa4..3f090f07ac 100644 --- a/internal/k8sCommon/k8sclient/endpointslicewatcher.go +++ b/internal/k8sCommon/k8sclient/endpointslicewatcher.go @@ -267,7 +267,7 @@ func (w *EndpointSliceWatcher) removeSliceKeys(slice *discv1.EndpointSlice) { keys := val.([]string) for _, k := range keys { - 
w.deleter.DeleteWithDelay(w.IPToPodMetadata, k, currentVal) + w.deleter.DeleteWithDelay(w.IPToPodMetadata, k) } w.sliceToKeysMap.Delete(sliceUID) } From df95221865140f3732f88331cfcd7c78b226eaa0 Mon Sep 17 00:00:00 2001 From: musa-asad Date: Thu, 13 Mar 2025 11:45:15 -0400 Subject: [PATCH 11/17] typo --- internal/k8sCommon/k8sclient/endpointslicewatcher_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/k8sCommon/k8sclient/endpointslicewatcher_test.go b/internal/k8sCommon/k8sclient/endpointslicewatcher_test.go index c8f642b910..05ef32bea1 100644 --- a/internal/k8sCommon/k8sclient/endpointslicewatcher_test.go +++ b/internal/k8sCommon/k8sclient/endpointslicewatcher_test.go @@ -21,7 +21,7 @@ import ( // MockDeleter deletes a key immediately, useful for testing. type MockDeleter struct{} -func (md *MockDeleter) DeleteWithDelay(m *sync.Map, key interface{}, expected interface{}) { +func (md *MockDeleter) DeleteWithDelay(m *sync.Map, key interface{}) { m.Delete(key) } From a4914eb458af676847cf6d79411dd8e81cf1efe1 Mon Sep 17 00:00:00 2001 From: musa-asad Date: Thu, 13 Mar 2025 14:53:39 -0400 Subject: [PATCH 12/17] fix typo --- internal/k8sCommon/k8sclient/endpointslicewatcher.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/k8sCommon/k8sclient/endpointslicewatcher.go b/internal/k8sCommon/k8sclient/endpointslicewatcher.go index 3f090f07ac..d9db228af5 100644 --- a/internal/k8sCommon/k8sclient/endpointslicewatcher.go +++ b/internal/k8sCommon/k8sclient/endpointslicewatcher.go @@ -67,7 +67,7 @@ func (w *EndpointSliceWatcher) Run(stopCh chan struct{}) { w.handleSliceAdd(obj) }, UpdateFunc: func(oldObj, newObj interface{}) { - w.handleSliceUpdate(newObj, oldObj) + w.handleSliceUpdate(oldObj, newObj) }, DeleteFunc: func(obj interface{}) { w.handleSliceDelete(obj) From 7ead094f484f152e9cb2ed79c3d03aed5c8c873a Mon Sep 17 00:00:00 2001 From: musa-asad Date: Thu, 13 Mar 2025 16:02:53 -0400 Subject: [PATCH 13/17] remove 
logging --- extension/k8smetadata/extension.go | 9 ++------- .../k8sCommon/k8sclient/endpointslicewatcher.go | 14 ++------------ 2 files changed, 4 insertions(+), 19 deletions(-) diff --git a/extension/k8smetadata/extension.go b/extension/k8smetadata/extension.go index 5a664a0a2d..5d0ed57957 100644 --- a/extension/k8smetadata/extension.go +++ b/extension/k8smetadata/extension.go @@ -89,15 +89,10 @@ func (e *KubernetesMetadata) Shutdown(_ context.Context) error { func (e *KubernetesMetadata) GetPodMetadata(ip string) k8sclient.PodMetadata { pm, ok := e.endpointSliceWatcher.IPToPodMetadata.Load(ip) if !ok { - e.logger.Debug("GetPodMetadata: no mapping found for IP", zap.String("ip", ip)) + e.logger.Debug("GetPodMetadata: no mapping found for IP") return k8sclient.PodMetadata{} } metadata := pm.(k8sclient.PodMetadata) - e.logger.Debug("GetPodMetadata: found metadata", - zap.String("ip", ip), - zap.String("workload", metadata.Workload), - zap.String("namespace", metadata.Namespace), - zap.String("node", metadata.Node), - ) + e.logger.Debug("GetPodMetadata: found metadata") return metadata } diff --git a/internal/k8sCommon/k8sclient/endpointslicewatcher.go b/internal/k8sCommon/k8sclient/endpointslicewatcher.go index d9db228af5..7fa4f11808 100644 --- a/internal/k8sCommon/k8sclient/endpointslicewatcher.go +++ b/internal/k8sCommon/k8sclient/endpointslicewatcher.go @@ -108,15 +108,11 @@ func (w *EndpointSliceWatcher) extractEndpointSliceKeyValuePairs(slice *discv1.E nodeName = *endpoint.NodeName } - w.logger.Debug("Processing endpoint", - zap.String("podName", podName), - zap.String("namespace", ns), - zap.String("nodeName", nodeName), - ) + w.logger.Debug("Processing endpoint") derivedWorkload := inferWorkloadName(podName, svcName) if derivedWorkload == "" { - w.logger.Warn("failed to infer workload name from Pod name", zap.String("podName", podName)) + w.logger.Warn("failed to infer workload name from Pod name") continue } fullWl := @@ -158,9 +154,7 @@ func (w 
*EndpointSliceWatcher) extractEndpointSliceKeyValuePairs(slice *discv1.E func (w *EndpointSliceWatcher) handleSliceAdd(obj interface{}) { newSlice := obj.(*discv1.EndpointSlice) w.logger.Debug("Received EndpointSlice Add", - zap.String("sliceName", newSlice.Name), zap.String("uid", string(newSlice.UID)), - zap.String("namespace", newSlice.Namespace), ) sliceUID := string(newSlice.UID) @@ -193,8 +187,6 @@ func (w *EndpointSliceWatcher) handleSliceUpdate(oldObj, newObj interface{}) { w.logger.Debug("Received EndpointSlice Update", zap.String("oldSliceUID", string(oldSlice.UID)), zap.String("newSliceUID", string(newSlice.UID)), - zap.String("name", newSlice.Name), - zap.String("namespace", newSlice.Namespace), ) oldUID := string(oldSlice.UID) @@ -252,8 +244,6 @@ func (w *EndpointSliceWatcher) handleSliceDelete(obj interface{}) { slice := obj.(*discv1.EndpointSlice) w.logger.Debug("Received EndpointSlice Delete", zap.String("uid", string(slice.UID)), - zap.String("name", slice.Name), - zap.String("namespace", slice.Namespace), ) w.removeSliceKeys(slice) } From c8696fb152720ea7972ce162997334ae9a0e9469 Mon Sep 17 00:00:00 2001 From: musa-asad Date: Thu, 13 Mar 2025 16:06:12 -0400 Subject: [PATCH 14/17] fmt --- plugins/processors/awsentity/processor.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/plugins/processors/awsentity/processor.go b/plugins/processors/awsentity/processor.go index b59d9a5074..2d38e9cb86 100644 --- a/plugins/processors/awsentity/processor.go +++ b/plugins/processors/awsentity/processor.go @@ -5,10 +5,11 @@ package awsentity import ( "context" - "go.opentelemetry.io/collector/client" "net" "strings" + "go.opentelemetry.io/collector/client" + "github.com/go-playground/validator/v10" "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/pmetric" From eb7038109143254fceadb7eab48660909a62e067 Mon Sep 17 00:00:00 2001 From: musa-asad Date: Thu, 13 Mar 2025 16:10:15 -0400 Subject: [PATCH 15/17] fix import 
--- plugins/processors/awsentity/processor.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/plugins/processors/awsentity/processor.go b/plugins/processors/awsentity/processor.go index 2d38e9cb86..729973a241 100644 --- a/plugins/processors/awsentity/processor.go +++ b/plugins/processors/awsentity/processor.go @@ -8,9 +8,8 @@ import ( "net" "strings" - "go.opentelemetry.io/collector/client" - "github.com/go-playground/validator/v10" + "go.opentelemetry.io/collector/client" "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/pmetric" semconv "go.opentelemetry.io/collector/semconv/v1.22.0" From cb5a2a7303894c5ae2c882d4f791a70120b63b77 Mon Sep 17 00:00:00 2001 From: musa-asad Date: Thu, 13 Mar 2025 16:21:53 -0400 Subject: [PATCH 16/17] remove ctx --- plugins/processors/awsentity/processor.go | 17 ++++------------- 1 file changed, 4 insertions(+), 13 deletions(-) diff --git a/plugins/processors/awsentity/processor.go b/plugins/processors/awsentity/processor.go index 729973a241..86295c312b 100644 --- a/plugins/processors/awsentity/processor.go +++ b/plugins/processors/awsentity/processor.go @@ -5,11 +5,9 @@ package awsentity import ( "context" - "net" "strings" "github.com/go-playground/validator/v10" - "go.opentelemetry.io/collector/client" "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/pmetric" semconv "go.opentelemetry.io/collector/semconv/v1.22.0" @@ -108,21 +106,14 @@ var getServiceNameSource = func() (string, string) { return es.GetMetricServiceNameAndSource() } -var getPodMeta = func(ctx context.Context) k8sclient.PodMetadata { +var getPodMeta = func() k8sclient.PodMetadata { podMeta := k8sclient.PodMetadata{} k8sMetadata := k8smetadata.GetKubernetesMetadata() if k8sMetadata != nil { podIP := "" - switch addr := client.FromContext(ctx).Addr.(type) { - case *net.UDPAddr: - podIP = addr.IP.String() - case *net.TCPAddr: - podIP = addr.IP.String() - case *net.IPAddr: - podIP = 
addr.IP.String() - } + // Get Pod IP from connection context. podMeta = k8sMetadata.GetPodMetadata(podIP) } @@ -147,7 +138,7 @@ func newAwsEntityProcessor(config *Config, logger *zap.Logger) *awsEntityProcess } } -func (p *awsEntityProcessor) processMetrics(ctx context.Context, md pmetric.Metrics) (pmetric.Metrics, error) { +func (p *awsEntityProcessor) processMetrics(_ context.Context, md pmetric.Metrics) (pmetric.Metrics, error) { // Get the following metric attributes from the EntityStore: PlatformType, EC2.InstanceId, EC2.AutoScalingGroup rm := md.ResourceMetrics() @@ -202,7 +193,7 @@ func (p *awsEntityProcessor) processMetrics(ctx context.Context, md pmetric.Metr } } if p.config.KubernetesMode != "" { - p.k8sscraper.Scrape(rm.At(i).Resource(), getPodMeta(ctx)) + p.k8sscraper.Scrape(rm.At(i).Resource(), getPodMeta()) if p.config.Platform == config.ModeEC2 { ec2Info = getEC2InfoFromEntityStore() } From 72a78e85bc86a5a268408714d85415f5847cde32 Mon Sep 17 00:00:00 2001 From: musa-asad Date: Mon, 17 Mar 2025 11:27:06 -0400 Subject: [PATCH 17/17] add comment --- extension/k8smetadata/extension.go | 1 + 1 file changed, 1 insertion(+) diff --git a/extension/k8smetadata/extension.go b/extension/k8smetadata/extension.go index 5d0ed57957..2f7a76e59f 100644 --- a/extension/k8smetadata/extension.go +++ b/extension/k8smetadata/extension.go @@ -59,6 +59,7 @@ func (e *KubernetesMetadata) Start(_ context.Context, _ component.Host) error { } e.logger.Debug("Kubernetes clientset created successfully") + // jitter calls to the kubernetes api (a precaution to prevent overloading api server) jitterSleep(jitterKubernetesAPISeconds) timedDeleter := &k8sclient.TimedDeleter{Delay: deletionDelay}