diff --git a/.azure-pipelines/pipelines.yml b/.azure-pipelines/pipelines.yml index 4efd687e546c..8e26b1f61ff2 100644 --- a/.azure-pipelines/pipelines.yml +++ b/.azure-pipelines/pipelines.yml @@ -70,7 +70,6 @@ stages: inputs: pathtoPublish: "$(Build.SourcesDirectory)/generated/docs" artifactName: docs - condition: and(succeeded(), eq(variables['Build.Reason'], 'PullRequest')) - task: InstallSSHKey@0 inputs: @@ -78,7 +77,7 @@ stages: sshPublicKey: "$(DocsPublicKey)" sshPassphrase: "$(SshDeployKeyPassphrase)" sshKeySecureFile: "$(DocsPrivateKey)" - condition: and(succeeded(), ne(variables['Build.Reason'], 'PullRequest'), eq(variables['PostSubmit'], true)) + condition: and(succeeded(), eq(variables['PostSubmit'], true), ne(variables['NoSync'], true)) - script: docs/publish.sh displayName: "Publish to GitHub" @@ -86,10 +85,25 @@ stages: env: AZP_BRANCH: $(Build.SourceBranch) AZP_SHA1: $(Build.SourceVersion) - condition: and(succeeded(), ne(variables['Build.Reason'], 'PullRequest'), eq(variables['PostSubmit'], true)) + condition: and(succeeded(), eq(variables['PostSubmit'], true), ne(variables['NoSync'], true)) + + - job: dependencies + dependsOn: [] # this removes the implicit dependency on the previous stage and causes this job to run in parallel. + pool: + vmImage: "ubuntu-18.04" + steps: + - script: ci/run_envoy_docker.sh 'ci/do_ci.sh deps' + workingDirectory: $(Build.SourcesDirectory) + env: + ENVOY_DOCKER_BUILD_DIR: $(Build.StagingDirectory) + BAZEL_REMOTE_CACHE: grpcs://remotebuildexecution.googleapis.com + BAZEL_REMOTE_INSTANCE: projects/envoy-ci/instances/default_instance + GCP_SERVICE_ACCOUNT_KEY: $(GcpServiceAccountKey) + GITHUB_TOKEN: $(GitHubPublicRepoOnlyAccessToken) + displayName: "Verify dependency information" - stage: sync - condition: and(succeeded(), eq(variables['PostSubmit'], true)) + condition: and(succeeded(), eq(variables['PostSubmit'], true), ne(variables['NoSync'], true)) dependsOn: [] jobs: - job: filter_example @@ -130,6 +144,8 @@ stages: - job: go_control_plane dependsOn: [] + pool: + vmImage: "ubuntu-18.04" steps: - task: InstallSSHKey@0 inputs: @@ -152,12 +168,11 @@ stages: - stage: linux_x64 dependsOn: ["precheck"] - # For master builds, continue even if precheck fails - condition: and(not(canceled()), or(succeeded(), ne(variables['Build.Reason'], 'PullRequest'))) + # For post-submit builds, continue even if precheck fails + condition: and(not(canceled()), or(succeeded(), eq(variables['PostSubmit'], true))) jobs: - job: release - # For master builds, continue even if format fails - timeoutInMinutes: 360 + timeoutInMinutes: 120 pool: vmImage: "ubuntu-18.04" steps: @@ -167,11 +182,11 @@ stages: - stage: linux_arm64 dependsOn: ["precheck"] - # For master builds, continue even if precheck fails - condition: and(not(canceled()), or(succeeded(), ne(variables['Build.Reason'], 'PullRequest'))) + # For post-submit builds, continue even if precheck fails + condition: and(not(canceled()), or(succeeded(), eq(variables['PostSubmit'], true))) jobs: - job: release - timeoutInMinutes: 360 + timeoutInMinutes: 120 pool: "arm-large" steps: - template: bazel.yml @@ -203,7 +218,7 @@ stages: CI_TARGET: "bazel.tsan" compile_time_options: CI_TARGET: "bazel.compile_time_options" - timeoutInMinutes: 360 + timeoutInMinutes: 120 pool: vmImage: "ubuntu-18.04" steps: @@ -214,7 +229,7 @@ stages: - job: coverage displayName: "linux_x64" dependsOn: [] - timeoutInMinutes: 360 + timeoutInMinutes: 120 pool: "x64-large" strategy: maxParallel: 2 @@ -313,7 +328,7 @@ stages: dependsOn: ["precheck"] jobs: - 
job: test - timeoutInMinutes: 360 + timeoutInMinutes: 180 pool: vmImage: "macos-latest" steps: @@ -347,7 +362,7 @@ stages: dependsOn: ["precheck"] jobs: - job: release - timeoutInMinutes: 360 + timeoutInMinutes: 120 pool: vmImage: "windows-latest" steps: @@ -368,7 +383,7 @@ - job: docker dependsOn: ["release"] - timeoutInMinutes: 360 + timeoutInMinutes: 120 pool: vmImage: "windows-latest" steps: diff --git a/.bazelrc b/.bazelrc index dd242a8aacd1..16d8843d6a88 100644 --- a/.bazelrc +++ b/.bazelrc @@ -3,10 +3,10 @@ # Bazel doesn't need more than 200MB of memory for local build based on memory profiling: # https://docs.bazel.build/versions/master/skylark/performance.html#memory-profiling # The default JVM max heapsize is 1/4 of physical memory up to 32GB which could be large -# enough to consume all memory constrained by cgroup in large host, which is the case in CircleCI. +# enough to consume all memory constrained by cgroup in a large host. # Limiting JVM heapsize here to let it do GC more when approaching the limit to # leave room for compiler/linker. -# The number 2G is choosed heuristically to both support in CircleCI and large enough for RBE. +# The number 2G is chosen heuristically to support both large and small VMs with RBE. # Startup options cannot be selected via config. startup --host_jvm_args=-Xmx2g diff --git a/CODEOWNERS b/CODEOWNERS index b3c9af89a7a8..9696b370c4c7 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -142,7 +142,6 @@ extensions/filters/common/original_src @snowp @klarose /*/extensions/filters/http/decompressor @rojkov @dio # Watchdog Extensions /*/extensions/watchdog/profile_action @kbaichoo @antoniovicente -/*/extensions/watchdog/abort_action @kbaichoo @antoniovicente # Core upstream code extensions/upstreams/http @alyssawilk @snowp @mattklein123 extensions/upstreams/http/http @alyssawilk @snowp @mattklein123 diff --git a/DEPENDENCY_POLICY.md b/DEPENDENCY_POLICY.md index 0944ad59030b..777d7e64adf4 100644 --- a/DEPENDENCY_POLICY.md +++ b/DEPENDENCY_POLICY.md @@ -45,8 +45,11 @@ Dependency declarations must: version is being used. * Provide accurate entries for `use_category`. Please think carefully about whether there are data or control plane implications of the dependency. -* Reflect the date (YYYY-MM-DD) at which they were last updated in the `last_updated` field. This - date is preferably the date at which the PR is created. +* Reflect the UTC date (YYYY-MM-DD format) for the dependency release. This is when + the dependency was updated in its repository. For dependencies that have + releases, this is the date of the release. For dependencies without releases + or for scenarios where we temporarily need to use a commit, this date should + be the date of the commit in UTC. * CPEs are compulsory for all dependencies that are not purely build/test. [CPEs](https://en.wikipedia.org/wiki/Common_Platform_Enumeration) provide metadata that allow us to correlate with related CVEs in dashboards and other tooling, and also provide a machine diff --git a/DEVELOPER.md b/DEVELOPER.md index b9bb204e66fe..6786925fa7e8 100644 --- a/DEVELOPER.md +++ b/DEVELOPER.md @@ -1,6 +1,7 @@ # Developer documentation -Envoy is built using the Bazel build system. CircleCI builds, tests, and runs coverage against all pull requests and the master branch. +Envoy is built using the Bazel build system. Our CI on Azure Pipelines builds, tests, and runs coverage against +all pull requests and the master branch. 
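As a concrete illustration of the `release_date` policy above (a sketch for illustration only, not part of this patch): the field wants the UTC date of the upstream release or commit, and `load_repository_locations` in `external_deps.bzl` (further down in this patch) enforces the `YYYY-MM-DD` shape with a positional check because Starlark has no regex support. In plain Python, the derivation and the same check might look like this; the epoch timestamp is a made-up example value:

```python
from datetime import datetime, timezone

def utc_release_date(epoch_seconds):
    # Convert an upstream release/commit timestamp to the UTC
    # YYYY-MM-DD string expected in repository_locations.bzl.
    return datetime.fromtimestamp(epoch_seconds, tz=timezone.utc).strftime("%Y-%m-%d")

def check_release_date(release_date):
    # The same shape check used in external_deps.bzl: exactly 10
    # characters, with dashes at indices 4 and 7.
    if len(release_date) != 10 or release_date[4] != "-" or release_date[7] != "-":
        raise ValueError("release_date must match YYYY-MM-DD: " + release_date)

check_release_date(utc_release_date(1598486400))  # "2020-08-27"
```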
To get started building Envoy locally, see the [Bazel quick start](https://github.com/envoyproxy/envoy/blob/master/bazel/README.md#quick-start-bazel-build-for-developers). To run tests, there are Bazel [targets](https://github.com/envoyproxy/envoy/blob/master/bazel/README.md#testing-envoy-with-bazel) for Google Test. diff --git a/README.md b/README.md index 597e2c3ff07c..03c0aa0432d6 100644 --- a/README.md +++ b/README.md @@ -10,7 +10,6 @@ involved and how Envoy plays a role, read the CNCF [![CII Best Practices](https://bestpractices.coreinfrastructure.org/projects/1266/badge)](https://bestpractices.coreinfrastructure.org/projects/1266) [![Azure Pipelines](https://dev.azure.com/cncf/envoy/_apis/build/status/11?branchName=master)](https://dev.azure.com/cncf/envoy/_build/latest?definitionId=11&branchName=master) -[![CircleCI](https://circleci.com/gh/envoyproxy/envoy/tree/master.svg?style=shield)](https://circleci.com/gh/envoyproxy/envoy/tree/master) [![Fuzzing Status](https://oss-fuzz-build-logs.storage.googleapis.com/badges/envoy.svg)](https://bugs.chromium.org/p/oss-fuzz/issues/list?sort=-opened&can=1&q=proj:envoy) [![Jenkins](https://powerci.osuosl.org/buildStatus/icon?job=build-envoy-static-master&subject=ppc64le%20build)](https://powerci.osuosl.org/job/build-envoy-static-master/) diff --git a/REPO_LAYOUT.md b/REPO_LAYOUT.md index cd87e015ac5b..e4f2452a1417 100644 --- a/REPO_LAYOUT.md +++ b/REPO_LAYOUT.md @@ -4,7 +4,8 @@ This is a high level overview of how the repository is laid out to both aid in c as well as to clearly specify how extensions are added to the repository. The top level directories are: -* [.circleci/](.circleci/): Configuration for [CircleCI](https://circleci.com/gh/envoyproxy). +* [.azure-pipelines/](.azure-pipelines/): Configuration for +[Azure Pipelines](https://azure.microsoft.com/en-us/services/devops/pipelines/). * [api/](api/): Envoy data plane API. * [bazel/](bazel/): Configuration for Envoy's use of [Bazel](https://bazel.build/). * [ci/](ci/): Scripts used both during CI as well as to build Docker containers. diff --git a/STYLE.md b/STYLE.md index f9328cf528f7..ee2deadf170b 100644 --- a/STYLE.md +++ b/STYLE.md @@ -1,7 +1,7 @@ # C++ coding style * The Envoy source code is formatted using clang-format. Thus all white spaces, etc. - issues are taken care of automatically. The CircleCI tests will automatically check + issues are taken care of automatically. Azure Pipelines will automatically check the code format and fail. There are make targets that can both check the format (check_format) as well as fix the code format for you (fix_format). Errors in .clang-tidy are enforced while other warnings are suggestions. Note that code and diff --git a/api/BUILD b/api/BUILD index ed8743b793e3..345732128a0d 100644 --- a/api/BUILD +++ b/api/BUILD @@ -248,7 +248,6 @@ proto_library( "//envoy/extensions/upstreams/http/http/v3:pkg", "//envoy/extensions/upstreams/http/tcp/v3:pkg", "//envoy/extensions/wasm/v3:pkg", - "//envoy/extensions/watchdog/abort_action/v3alpha:pkg", "//envoy/extensions/watchdog/profile_action/v3alpha:pkg", "//envoy/service/accesslog/v3:pkg", "//envoy/service/auth/v3:pkg", @@ -272,6 +271,7 @@ proto_library( "//envoy/type/metadata/v3:pkg", "//envoy/type/tracing/v3:pkg", "//envoy/type/v3:pkg", + "//envoy/watchdog/v3alpha:pkg", ], ) diff --git a/api/CONTRIBUTING.md b/api/CONTRIBUTING.md index 773248f2e2ea..01ba39b500b8 100644 --- a/api/CONTRIBUTING.md +++ b/api/CONTRIBUTING.md @@ -50,11 +50,11 @@ generated RST files are also viewable in `generated/rst`. 
Note also that the generated documentation can be viewed in CI: -1. Open docs job in CircleCI. -2. Navigate to "artifacts" tab. -3. Expand files and click on `index.html`. +1. Open the docs job in Azure Pipelines. +2. Navigate to the "Upload Docs to GCS" log. +3. Click on the link there. -If you do not see an artifacts tab this is a bug in CircleCI. Try logging out and logging back in. +If you do not see "Upload Docs to GCS" or it is failing, that means the docs did not build correctly. ### Documentation guidelines diff --git a/api/bazel/external_deps.bzl b/api/bazel/external_deps.bzl index 588879c4bd0a..e8283e4fee10 100644 --- a/api/bazel/external_deps.bzl +++ b/api/bazel/external_deps.bzl @@ -17,14 +17,18 @@ DEPENDENCY_ANNOTATIONS = [ # Envoy (see the external dependency at the given version for information). "implied_untracked_deps", - # When the dependency was last updated in Envoy. - "last_updated", - # Project metadata. "project_desc", "project_name", "project_url", + # Reflects the UTC date (YYYY-MM-DD format) for the dependency release. This + # is when the dependency was updated in its repository. For dependencies + # that have releases, this is the date of the release. For dependencies + # without releases or for scenarios where we temporarily need to use a + # commit, this date should be the date of the commit in UTC. + "release_date", + # List of the categories describing how the dependency is being used. This attribute is used # for automatic tracking of security posture of Envoy's dependencies. # Possible values are documented in the USE_CATEGORIES list below. @@ -63,8 +67,7 @@ USE_CATEGORIES = [ "devtools", ] -# Components with these use categories are not required to specify the 'cpe' -# and 'last_updated' annotation. +# Components with these use categories are not required to specify the 'cpe' annotation. USE_CATEGORIES_WITH_CPE_OPTIONAL = ["build", "other", "test_only", "api"] def _fail_missing_attribute(attr, key): @@ -106,13 +109,13 @@ def load_repository_locations(repository_locations_spec): if "extensions" not in location: _fail_missing_attribute("extensions", key) - if "last_updated" not in location: - _fail_missing_attribute("last_updated", key) - last_updated = location["last_updated"] + if "release_date" not in location: + _fail_missing_attribute("release_date", key) + release_date = location["release_date"] # Starlark doesn't have regexes. 
- if len(last_updated) != 10 or last_updated[4] != "-" or last_updated[7] != "-": - fail("last_updated must match YYYY-DD-MM: " + last_updated) + if len(release_date) != 10 or release_date[4] != "-" or release_date[7] != "-": + fail("release_date must match YYYY-MM-DD: " + release_date) if "cpe" in location: cpe = location["cpe"] diff --git a/api/bazel/repository_locations.bzl b/api/bazel/repository_locations.bzl index bdcf31e867d2..e46f7d77f8e5 100644 --- a/api/bazel/repository_locations.bzl +++ b/api/bazel/repository_locations.bzl @@ -7,7 +7,7 @@ REPOSITORY_LOCATIONS_SPEC = dict( version = "1.0.3", sha256 = "1c531376ac7e5a180e0237938a2536de0c54d93f5c278634818e0efc952dd56c", urls = ["https://github.com/bazelbuild/bazel-skylib/releases/download/{version}/bazel-skylib-{version}.tar.gz"], - last_updated = "2020-08-27", + release_date = "2020-08-27", use_category = ["api"], ), com_envoyproxy_protoc_gen_validate = dict( @@ -18,7 +18,7 @@ REPOSITORY_LOCATIONS_SPEC = dict( sha256 = "e368733c9fb7f8489591ffaf269170d7658cc0cd1ee322b601512b769446d3c8", strip_prefix = "protoc-gen-validate-{version}", urls = ["https://github.com/envoyproxy/protoc-gen-validate/archive/{version}.tar.gz"], - last_updated = "2020-06-09", + release_date = "2020-06-08", use_category = ["api"], ), com_github_cncf_udpa = dict( @@ -29,7 +29,7 @@ REPOSITORY_LOCATIONS_SPEC = dict( sha256 = "83a7dcc316d741031f34c0409021432b74a39c4811845a177133f02f948fe2d8", strip_prefix = "udpa-{version}", urls = ["https://github.com/cncf/udpa/archive/v{version}.tar.gz"], - last_updated = "2020-09-23", + release_date = "2020-06-29", use_category = ["api"], ), com_github_openzipkin_zipkinapi = dict( @@ -40,7 +40,7 @@ REPOSITORY_LOCATIONS_SPEC = dict( sha256 = "688c4fe170821dd589f36ec45aaadc03a618a40283bc1f97da8fa11686fc816b", strip_prefix = "zipkin-api-{version}", urls = ["https://github.com/openzipkin/zipkin-api/archive/{version}.tar.gz"], - last_updated = "2020-09-23", + release_date = "2019-08-23", use_category = ["api"], ), com_google_googleapis = dict( @@ -52,7 +52,7 @@ REPOSITORY_LOCATIONS_SPEC = dict( sha256 = "a45019af4d3290f02eaeb1ce10990166978c807cb33a9692141a076ba46d1405", strip_prefix = "googleapis-{version}", urls = ["https://github.com/googleapis/googleapis/archive/{version}.tar.gz"], - last_updated = "2019-12-02", + release_date = "2019-12-02", use_category = ["api"], ), opencensus_proto = dict( @@ -63,7 +63,7 @@ REPOSITORY_LOCATIONS_SPEC = dict( sha256 = "b7e13f0b4259e80c3070b583c2f39e53153085a6918718b1c710caf7037572b0", strip_prefix = "opencensus-proto-{version}/src", urls = ["https://github.com/census-instrumentation/opencensus-proto/archive/v{version}.tar.gz"], - last_updated = "2020-06-20", + release_date = "2020-07-21", use_category = ["api"], ), prometheus_metrics_model = dict( @@ -74,7 +74,7 @@ REPOSITORY_LOCATIONS_SPEC = dict( sha256 = "6748b42f6879ad4d045c71019d2512c94be3dd86f60965e9e31e44a3f464323e", strip_prefix = "client_model-{version}", urls = ["https://github.com/prometheus/client_model/archive/{version}.tar.gz"], - last_updated = "2020-06-23", + release_date = "2020-06-23", use_category = ["api"], ), rules_proto = dict( @@ -85,7 +85,7 @@ REPOSITORY_LOCATIONS_SPEC = dict( sha256 = "aa1ee19226f707d44bee44c720915199c20c84a23318bb0597ed4e5c873ccbd5", strip_prefix = "rules_proto-{version}", urls = ["https://github.com/bazelbuild/rules_proto/archive/{version}.tar.gz"], - last_updated = "2020-08-17", + release_date = "2020-08-17", use_category = ["api"], ), ) diff --git a/api/envoy/config/common/tap/v2alpha/BUILD 
b/api/envoy/config/common/tap/v2alpha/BUILD index 0f944d868c1a..3aed5a34a400 100644 --- a/api/envoy/config/common/tap/v2alpha/BUILD +++ b/api/envoy/config/common/tap/v2alpha/BUILD @@ -6,7 +6,6 @@ licenses(["notice"]) # Apache 2 api_proto_package( deps = [ - "//envoy/api/v2/core:pkg", "//envoy/service/tap/v2alpha:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], diff --git a/api/envoy/config/common/tap/v2alpha/common.proto b/api/envoy/config/common/tap/v2alpha/common.proto index 262557b35623..6db1ecceddc4 100644 --- a/api/envoy/config/common/tap/v2alpha/common.proto +++ b/api/envoy/config/common/tap/v2alpha/common.proto @@ -2,7 +2,6 @@ syntax = "proto3"; package envoy.config.common.tap.v2alpha; -import "envoy/api/v2/core/config_source.proto"; import "envoy/service/tap/v2alpha/common.proto"; import "udpa/annotations/migrate.proto"; @@ -19,15 +18,6 @@ option (udpa.annotations.file_status).package_version_status = FROZEN; // Common configuration for all tap extensions. message CommonExtensionConfig { - // [#not-implemented-hide:] - message TapDSConfig { - // Configuration for the source of TapDS updates for this Cluster. - api.v2.core.ConfigSource config_source = 1 [(validate.rules).message = {required: true}]; - - // Tap config to request from XDS server. - string name = 2 [(validate.rules).string = {min_bytes: 1}]; - } - oneof config_type { option (validate.required) = true; @@ -37,9 +27,6 @@ message CommonExtensionConfig { // If specified, the tap filter will be configured via a static configuration that cannot be // changed. service.tap.v2alpha.TapConfig static_config = 2; - - // [#not-implemented-hide:] Configuration to use for TapDS updates for the filter. - TapDSConfig tapds_config = 3; } } diff --git a/api/envoy/extensions/common/tap/v3/BUILD b/api/envoy/extensions/common/tap/v3/BUILD index eea9dcac5d23..2ad1221bb717 100644 --- a/api/envoy/extensions/common/tap/v3/BUILD +++ b/api/envoy/extensions/common/tap/v3/BUILD @@ -7,7 +7,6 @@ licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/config/common/tap/v2alpha:pkg", - "//envoy/config/core/v3:pkg", "//envoy/config/tap/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", "@com_github_cncf_udpa//udpa/core/v1:pkg", diff --git a/api/envoy/extensions/common/tap/v3/common.proto b/api/envoy/extensions/common/tap/v3/common.proto index aa7ae8264757..c71bff14008b 100644 --- a/api/envoy/extensions/common/tap/v3/common.proto +++ b/api/envoy/extensions/common/tap/v3/common.proto @@ -2,7 +2,6 @@ syntax = "proto3"; package envoy.extensions.common.tap.v3; -import "envoy/config/core/v3/config_source.proto"; import "envoy/config/tap/v3/common.proto"; import "udpa/core/v1/resource_locator.proto"; @@ -24,23 +23,6 @@ message CommonExtensionConfig { option (udpa.annotations.versioning).previous_message_type = "envoy.config.common.tap.v2alpha.CommonExtensionConfig"; - // [#not-implemented-hide:] - message TapDSConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.common.tap.v2alpha.CommonExtensionConfig.TapDSConfig"; - - // Configuration for the source of TapDS updates for this Cluster. - config.core.v3.ConfigSource config_source = 1 [(validate.rules).message = {required: true}]; - - // Tap config to request from XDS server. - string name = 2 [(udpa.annotations.field_migrate).oneof_promotion = "name_specifier"]; - - // Resource locator for TAP. This is mutually exclusive to *name*. 
- // [#not-implemented-hide:] - udpa.core.v1.ResourceLocator tap_resource_locator = 3 - [(udpa.annotations.field_migrate).oneof_promotion = "name_specifier"]; - } - oneof config_type { option (validate.required) = true; @@ -50,9 +32,6 @@ message CommonExtensionConfig { // If specified, the tap filter will be configured via a static configuration that cannot be // changed. config.tap.v3.TapConfig static_config = 2; - - // [#not-implemented-hide:] Configuration to use for TapDS updates for the filter. - TapDSConfig tapds_config = 3; } } diff --git a/api/envoy/extensions/common/tap/v4alpha/BUILD b/api/envoy/extensions/common/tap/v4alpha/BUILD index 22f844ecba26..37f19ce2acd1 100644 --- a/api/envoy/extensions/common/tap/v4alpha/BUILD +++ b/api/envoy/extensions/common/tap/v4alpha/BUILD @@ -6,7 +6,6 @@ licenses(["notice"]) # Apache 2 api_proto_package( deps = [ - "//envoy/config/core/v4alpha:pkg", "//envoy/config/tap/v4alpha:pkg", "//envoy/extensions/common/tap/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", diff --git a/api/envoy/extensions/common/tap/v4alpha/common.proto b/api/envoy/extensions/common/tap/v4alpha/common.proto index efa7744e357f..b40101232ba2 100644 --- a/api/envoy/extensions/common/tap/v4alpha/common.proto +++ b/api/envoy/extensions/common/tap/v4alpha/common.proto @@ -2,7 +2,6 @@ syntax = "proto3"; package envoy.extensions.common.tap.v4alpha; -import "envoy/config/core/v4alpha/config_source.proto"; import "envoy/config/tap/v4alpha/common.proto"; import "udpa/core/v1/resource_locator.proto"; @@ -23,25 +22,6 @@ message CommonExtensionConfig { option (udpa.annotations.versioning).previous_message_type = "envoy.extensions.common.tap.v3.CommonExtensionConfig"; - // [#not-implemented-hide:] - message TapDSConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.common.tap.v3.CommonExtensionConfig.TapDSConfig"; - - // Configuration for the source of TapDS updates for this Cluster. - config.core.v4alpha.ConfigSource config_source = 1 - [(validate.rules).message = {required: true}]; - - oneof name_specifier { - // Tap config to request from XDS server. - string name = 2; - - // Resource locator for TAP. This is mutually exclusive to *name*. - // [#not-implemented-hide:] - udpa.core.v1.ResourceLocator tap_resource_locator = 3; - } - } - oneof config_type { option (validate.required) = true; @@ -51,9 +31,6 @@ message CommonExtensionConfig { // If specified, the tap filter will be configured via a static configuration that cannot be // changed. config.tap.v4alpha.TapConfig static_config = 2; - - // [#not-implemented-hide:] Configuration to use for TapDS updates for the filter. 
- TapDSConfig tapds_config = 3; } } diff --git a/api/envoy/extensions/watchdog/abort_action/v3alpha/abort_action.proto b/api/envoy/extensions/watchdog/abort_action/v3alpha/abort_action.proto deleted file mode 100644 index 7d793be82012..000000000000 --- a/api/envoy/extensions/watchdog/abort_action/v3alpha/abort_action.proto +++ /dev/null @@ -1,32 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.watchdog.abort_action.v3alpha; - -import "google/protobuf/duration.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.watchdog.abort_action.v3alpha"; -option java_outer_classname = "AbortActionProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).work_in_progress = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Watchdog Action that sends a SIGABRT to kill the process.] -// [#extension: envoy.watchdog.abort_action] - -// A GuardDogAction that will terminate the process by sending SIGABRT to the -// stuck thread. This would allow easier access to the call stack of the stuck -// thread since we would run signal handlers on that thread. This would be -// more useful than the default watchdog kill behaviors since those PANIC -// from the watchdog's thread. - -// This is currently only implemented for systems that support kill to send -// signals. -message AbortActionConfig { - // How long to wait for the thread to respond to the SIGABRT before killing the - // process from this action. This is a blocking action. - google.protobuf.Duration wait_duration = 1; -} diff --git a/api/envoy/extensions/watchdog/abort_action/v3alpha/BUILD b/api/envoy/watchdog/v3alpha/BUILD similarity index 100% rename from api/envoy/extensions/watchdog/abort_action/v3alpha/BUILD rename to api/envoy/watchdog/v3alpha/BUILD diff --git a/api/envoy/watchdog/v3alpha/README.md b/api/envoy/watchdog/v3alpha/README.md new file mode 100644 index 000000000000..c8433b9c05b5 --- /dev/null +++ b/api/envoy/watchdog/v3alpha/README.md @@ -0,0 +1,2 @@ +This contains watchdog actions that are part of core Envoy, and therefore cannot +be in the extensions directory. diff --git a/api/envoy/watchdog/v3alpha/abort_action.proto b/api/envoy/watchdog/v3alpha/abort_action.proto new file mode 100644 index 000000000000..3f47fddaa77e --- /dev/null +++ b/api/envoy/watchdog/v3alpha/abort_action.proto @@ -0,0 +1,29 @@ +syntax = "proto3"; + +package envoy.watchdog.v3alpha; + +import "google/protobuf/duration.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.watchdog.v3alpha"; +option java_outer_classname = "AbortActionProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).work_in_progress = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Watchdog Action that kills a stuck thread to terminate the process.] + +// A GuardDogAction that will terminate the process by killing the +// stuck thread. This would allow easier access to the call stack of the stuck +// thread since we would run signal handlers on that thread. By default +// this will be registered to run as the last watchdog action on KILL and +// MULTIKILL events if those are enabled. 
+message AbortActionConfig { + // How long to wait for the thread to respond to the thread kill function + // before killing the process from this action. This is a blocking action. + // By default this is 5 seconds. + google.protobuf.Duration wait_duration = 1; +} diff --git a/api/versioning/BUILD b/api/versioning/BUILD index d44a54640ca4..2e0a1cd4997d 100644 --- a/api/versioning/BUILD +++ b/api/versioning/BUILD @@ -131,7 +131,6 @@ proto_library( "//envoy/extensions/upstreams/http/http/v3:pkg", "//envoy/extensions/upstreams/http/tcp/v3:pkg", "//envoy/extensions/wasm/v3:pkg", - "//envoy/extensions/watchdog/abort_action/v3alpha:pkg", "//envoy/extensions/watchdog/profile_action/v3alpha:pkg", "//envoy/service/accesslog/v3:pkg", "//envoy/service/auth/v3:pkg", @@ -155,6 +154,7 @@ proto_library( "//envoy/type/metadata/v3:pkg", "//envoy/type/tracing/v3:pkg", "//envoy/type/v3:pkg", + "//envoy/watchdog/v3alpha:pkg", ], ) diff --git a/bazel/external/wee8.genrule_cmd b/bazel/external/wee8.genrule_cmd index d8cbd1981a64..bb5b9b7f8490 100644 --- a/bazel/external/wee8.genrule_cmd +++ b/bazel/external/wee8.genrule_cmd @@ -2,12 +2,12 @@ set -e -# This works only on Linux-{x86_64,s390x,aarch64} and macOS-x86_64. +# This works only on Linux-{x86_64,s390x,aarch64,ppc64le} and macOS-x86_64. case "$$(uname -s)-$$(uname -m)" in -Linux-x86_64|Linux-s390x|Linux-aarch64|Darwin-x86_64) +Linux-x86_64|Linux-s390x|Linux-aarch64|Linux-ppc64le|Darwin-x86_64) ;; *) - echo "ERROR: wee8 is currently supported only on Linux-{x86_64,s390x,aarch64} and macOS-x86_64." >&2 + echo "ERROR: wee8 is currently supported only on Linux-{x86_64,s390x,aarch64,ppc64le} and macOS-x86_64." >&2 exit 1 esac @@ -88,6 +88,11 @@ WEE8_BUILD_ARGS+=" v8_enable_shared_ro_heap=false" if [[ `uname -m` == "aarch64" ]]; then WEE8_BUILD_ARGS+=" target_cpu=\"arm64\"" fi +# Support ppc64 +# Only tested with gn 5da62d5 +if [[ `uname -m` == "ppc64le" ]]; then + WEE8_BUILD_ARGS+=" target_cpu=\"ppc64\"" +fi # Build wee8. if [[ -f /etc/centos-release ]] && [[ $$(cat /etc/centos-release) =~ "CentOS Linux release 7" ]] && [[ -x "$$(command -v gn)" ]]; then diff --git a/bazel/repositories.bzl b/bazel/repositories.bzl index 1ba42eaa6ae2..5047a52141f0 100644 --- a/bazel/repositories.bzl +++ b/bazel/repositories.bzl @@ -13,7 +13,6 @@ WINDOWS_SKIP_TARGETS = [ "envoy.tracers.lightstep", "envoy.tracers.datadog", "envoy.tracers.opencensus", - "envoy.watchdog.abort_action", ] # Make all contents of an external repository accessible under a filegroup. 
Used for external HTTP diff --git a/bazel/repositories_extra.bzl b/bazel/repositories_extra.bzl index 3aafc9528d80..70fe69b6fa40 100644 --- a/bazel/repositories_extra.bzl +++ b/bazel/repositories_extra.bzl @@ -14,7 +14,7 @@ def _python_deps(): # project_name = "PyYAML", # project_url = "https://github.com/yaml/pyyaml", # version = "5.3.1", - # last_update = "2020-03-18" + # release_date = "2020-03-18" # use_category = ["devtools"], # cpe = "cpe:2.3:a:pyyaml:pyyaml:*", ) @@ -26,14 +26,14 @@ def _python_deps(): # project_name = "Jinja", # project_url = "http://palletsprojects.com/p/jinja", # version = "2.11.2", - # last_update = "2020-04-13" + # release_date = "2020-04-13" # use_category = ["test"], # cpe = "cpe:2.3:a:palletsprojects:jinja:*", # project_name = "MarkupSafe", # project_url = "https://markupsafe.palletsprojects.com/en/1.1.x/", # version = "1.1.1", - # last_update = "2019-02-23" + # release_date = "2019-02-23" # use_category = ["test"], ) pip3_import( @@ -44,14 +44,14 @@ def _python_deps(): # project_name = "Jinja", # project_url = "http://palletsprojects.com/p/jinja", # version = "2.11.2", - # last_update = "2020-04-13" + # release_date = "2020-04-13" # use_category = ["test"], # cpe = "cpe:2.3:a:palletsprojects:jinja:*", # project_name = "MarkupSafe", # project_url = "https://markupsafe.palletsprojects.com/en/1.1.x/", # version = "1.1.1", - # last_update = "2019-02-23" + # release_date = "2019-02-23" # use_category = ["test"], ) pip3_import( @@ -62,7 +62,7 @@ def _python_deps(): # project_name = "Clang", # project_url = "https://clang.llvm.org/", # version = "10.0.1", - # last_update = "2020-07-21" + # release_date = "2020-07-21" # use_category = ["devtools"], # cpe = "cpe:2.3:a:llvm:clang:*", ) @@ -74,7 +74,7 @@ def _python_deps(): # project_name = "PyYAML", # project_url = "https://github.com/yaml/pyyaml", # version = "5.3.1", - # last_update = "2020-03-18" + # release_date = "2020-03-18" # use_category = ["docs"], # cpe = "cpe:2.3:a:pyyaml:pyyaml:*", ) @@ -86,14 +86,14 @@ def _python_deps(): # project_name = "Apache Thrift", # project_url = "http://thrift.apache.org/", # version = "0.11.0", - # last_update = "2017-12-07" + # release_date = "2017-12-07" # use_category = ["test"], # cpe = "cpe:2.3:a:apache:thrift:*", # project_name = "Six: Python 2 and 3 Compatibility Library", # project_url = "https://six.readthedocs.io/", # version = "1.15.0", - # last_update = "2020-05-21" + # release_date = "2020-05-21" # use_category = ["test"], ) diff --git a/bazel/repository_locations.bzl b/bazel/repository_locations.bzl index 6a631c5a3e6b..8d60741072d4 100644 --- a/bazel/repository_locations.bzl +++ b/bazel/repository_locations.bzl @@ -8,7 +8,7 @@ REPOSITORY_LOCATIONS_SPEC = dict( sha256 = "bcecfd622c4ef272fd4ba42726a52e140b961c4eac23025f18b346c968a8cfb4", strip_prefix = "bazel-compilation-database-{version}", urls = ["https://github.com/grailbio/bazel-compilation-database/archive/{version}.tar.gz"], - last_updated = "2020-08-01", + release_date = "2020-08-01", use_category = ["build"], ), bazel_gazelle = dict( @@ -18,7 +18,7 @@ REPOSITORY_LOCATIONS_SPEC = dict( version = "0.21.1", sha256 = "cdb02a887a7187ea4d5a27452311a75ed8637379a1287d8eeb952138ea485f7d", urls = ["https://github.com/bazelbuild/bazel-gazelle/releases/download/v{version}/bazel-gazelle-v{version}.tar.gz"], - last_updated = "2020-05-28", + release_date = "2020-05-28", use_category = ["build"], ), bazel_toolchains = dict( @@ -32,7 +32,7 @@ REPOSITORY_LOCATIONS_SPEC = dict( 
"https://github.com/bazelbuild/bazel-toolchains/releases/download/{version}/bazel-toolchains-{version}.tar.gz", "https://mirror.bazel.build/github.com/bazelbuild/bazel-toolchains/archive/{version}.tar.gz", ], - last_updated = "2020-08-10", + release_date = "2020-08-10", use_category = ["build"], ), build_bazel_rules_apple = dict( @@ -42,7 +42,7 @@ REPOSITORY_LOCATIONS_SPEC = dict( version = "0.19.0", sha256 = "7a7afdd4869bb201c9352eed2daf37294d42b093579b70423490c1b4d4f6ce42", urls = ["https://github.com/bazelbuild/rules_apple/releases/download/{version}/rules_apple.{version}.tar.gz"], - last_updated = "2020-10-10", + release_date = "2019-10-10", use_category = ["build"], ), envoy_build_tools = dict( @@ -53,7 +53,7 @@ REPOSITORY_LOCATIONS_SPEC = dict( sha256 = "dc3881d16e7b0c855a7279f5757d55e4aa55fe2befbd9e34215b971818622f9e", strip_prefix = "envoy-build-tools-{version}", urls = ["https://github.com/envoyproxy/envoy-build-tools/archive/{version}.tar.gz"], - last_updated = "2020-10-01", + release_date = "2020-10-01", use_category = ["build"], ), boringssl = dict( @@ -71,7 +71,7 @@ REPOSITORY_LOCATIONS_SPEC = dict( # chromium-86.0.4240.80 urls = ["https://github.com/google/boringssl/archive/{version}.tar.gz"], use_category = ["controlplane", "dataplane_core"], - last_updated = "2020-07-30", + release_date = "2020-07-30", cpe = "cpe:2.3:a:google:boringssl:*", ), boringssl_fips = dict( @@ -82,7 +82,7 @@ REPOSITORY_LOCATIONS_SPEC = dict( sha256 = "3b5fdf23274d4179c2077b5e8fa625d9debd7a390aac1d165b7e47234f648bb8", urls = ["https://commondatastorage.googleapis.com/chromium-boringssl-fips/boringssl-ae223d6138807a13006342edfeef32e813246b39.tar.xz"], use_category = ["controlplane", "dataplane_core"], - last_updated = "2019-08-08", + release_date = "2019-08-08", cpe = "cpe:2.3:a:google:boringssl:*", ), com_google_absl = dict( @@ -94,7 +94,7 @@ REPOSITORY_LOCATIONS_SPEC = dict( strip_prefix = "abseil-cpp-{version}", urls = ["https://github.com/abseil/abseil-cpp/archive/{version}.tar.gz"], use_category = ["dataplane_core", "controlplane"], - last_updated = "2020-10-01", + release_date = "2020-10-01", cpe = "N/A", ), com_github_c_ares_c_ares = dict( @@ -106,7 +106,7 @@ REPOSITORY_LOCATIONS_SPEC = dict( strip_prefix = "c-ares-{version}", urls = ["https://github.com/c-ares/c-ares/releases/download/cares-{underscore_version}/c-ares-{version}.tar.gz"], use_category = ["dataplane_core", "controlplane"], - last_updated = "2020-05-11", + release_date = "2020-05-11", cpe = "cpe:2.3:a:c-ares_project:c-ares:*", ), com_github_circonus_labs_libcircllhist = dict( @@ -118,7 +118,7 @@ REPOSITORY_LOCATIONS_SPEC = dict( strip_prefix = "libcircllhist-{version}", urls = ["https://github.com/circonus-labs/libcircllhist/archive/{version}.tar.gz"], use_category = ["controlplane", "observability_core", "dataplane_core"], - last_updated = "2019-02-11", + release_date = "2019-02-11", cpe = "N/A", ), com_github_cyan4973_xxhash = dict( @@ -130,7 +130,7 @@ REPOSITORY_LOCATIONS_SPEC = dict( strip_prefix = "xxHash-{version}", urls = ["https://github.com/Cyan4973/xxHash/archive/v{version}.tar.gz"], use_category = ["dataplane_core", "controlplane"], - last_updated = "2020-03-04", + release_date = "2020-03-05", cpe = "N/A", ), com_github_envoyproxy_sqlparser = dict( @@ -146,7 +146,7 @@ REPOSITORY_LOCATIONS_SPEC = dict( "envoy.filters.network.mysql_proxy", "envoy.filters.network.postgres_proxy", ], - last_updated = "2020-06-10", + release_date = "2020-06-10", cpe = "N/A", ), com_github_mirror_tclap = dict( @@ -157,7 +157,7 @@ 
REPOSITORY_LOCATIONS_SPEC = dict( sha256 = "f0ede0721dddbb5eba3a47385a6e8681b14f155e1129dd39d1a959411935098f", strip_prefix = "tclap-tclap-{version}-release-final", urls = ["https://github.com/mirror/tclap/archive/tclap-{version}-release-final.tar.gz"], - last_updated = "2017-11-10", + release_date = "2011-04-16", use_category = ["other"], ), com_github_fmtlib_fmt = dict( @@ -169,7 +169,7 @@ REPOSITORY_LOCATIONS_SPEC = dict( strip_prefix = "fmt-{version}", urls = ["https://github.com/fmtlib/fmt/releases/download/{version}/fmt-{version}.zip"], use_category = ["dataplane_core", "controlplane"], - last_updated = "2020-08-07", + release_date = "2020-08-06", cpe = "cpe:2.3:a:fmt:fmt:*", ), com_github_gabime_spdlog = dict( @@ -181,7 +181,7 @@ REPOSITORY_LOCATIONS_SPEC = dict( strip_prefix = "spdlog-{version}", urls = ["https://github.com/gabime/spdlog/archive/v{version}.tar.gz"], use_category = ["dataplane_core", "controlplane"], - last_updated = "2020-07-09", + release_date = "2020-07-09", cpe = "N/A", ), com_github_google_libprotobuf_mutator = dict( @@ -192,7 +192,7 @@ REPOSITORY_LOCATIONS_SPEC = dict( sha256 = "49a26dbe77c75f2eca1dd8a9fbdb31c4496d9af42df027ff57569c5a7a5d980d", strip_prefix = "libprotobuf-mutator-{version}", urls = ["https://github.com/google/libprotobuf-mutator/archive/{version}.tar.gz"], - last_updated = "2020-08-18", + release_date = "2020-08-18", use_category = ["test_only"], ), com_github_google_tcmalloc = dict( @@ -204,7 +204,7 @@ REPOSITORY_LOCATIONS_SPEC = dict( strip_prefix = "tcmalloc-{version}", urls = ["https://github.com/google/tcmalloc/archive/{version}.tar.gz"], use_category = ["dataplane_core", "controlplane"], - last_updated = "2020-09-16", + release_date = "2020-09-16", cpe = "N/A", ), com_github_gperftools_gperftools = dict( @@ -215,7 +215,7 @@ REPOSITORY_LOCATIONS_SPEC = dict( sha256 = "240deacdd628b6459671b83eb0c4db8e97baadf659f25b92e9a078d536bd513e", strip_prefix = "gperftools-{version}", urls = ["https://github.com/gperftools/gperftools/releases/download/gperftools-{version}/gperftools-{version}.tar.gz"], - last_updated = "2020-07-06", + release_date = "2020-07-06", use_category = ["dataplane_core", "controlplane"], cpe = "cpe:2.3:a:gperftools_project:gperftools:*", ), @@ -230,7 +230,7 @@ REPOSITORY_LOCATIONS_SPEC = dict( strip_prefix = "grpc-{version}", urls = ["https://github.com/grpc/grpc/archive/{version}.tar.gz"], use_category = ["dataplane_core", "controlplane"], - last_updated = "2020-02-11", + release_date = "2020-02-11", cpe = "cpe:2.3:a:grpc:grpc:*", ), com_github_luajit_luajit = dict( @@ -243,7 +243,7 @@ REPOSITORY_LOCATIONS_SPEC = dict( sha256 = "20a159c38a98ecdb6368e8d655343b6036622a29a1621da9dc303f7ed9bf37f3", strip_prefix = "LuaJIT-{version}", urls = ["https://github.com/LuaJIT/LuaJIT/archive/{version}.tar.gz"], - last_updated = "2020-10-13", + release_date = "2020-10-12", use_category = ["dataplane_ext"], extensions = ["envoy.filters.http.lua"], cpe = "cpe:2.3:a:luajit:luajit:*", @@ -258,7 +258,7 @@ REPOSITORY_LOCATIONS_SPEC = dict( urls = ["https://github.com/moonjit/moonjit/archive/{version}.tar.gz"], use_category = ["dataplane_ext"], extensions = ["envoy.filters.http.lua"], - last_updated = "2020-01-14", + release_date = "2020-01-14", cpe = "cpe:2.3:a:moonjit_project:moonjit:*", ), com_github_nghttp2_nghttp2 = dict( @@ -270,7 +270,7 @@ REPOSITORY_LOCATIONS_SPEC = dict( strip_prefix = "nghttp2-{version}", urls = ["https://github.com/nghttp2/nghttp2/releases/download/v{version}/nghttp2-{version}.tar.gz"], use_category = ["controlplane", 
"dataplane_core"], - last_updated = "2020-06-03", + release_date = "2020-06-02", cpe = "cpe:2.3:a:nghttp2:nghttp2:*", ), io_opentracing_cpp = dict( @@ -287,7 +287,7 @@ REPOSITORY_LOCATIONS_SPEC = dict( "envoy.tracers.dynamic_ot", "envoy.tracers.lightstep", ], - last_updated = "2019-01-16", + release_date = "2019-01-16", cpe = "N/A", ), com_lightstep_tracer_cpp = dict( @@ -300,7 +300,7 @@ REPOSITORY_LOCATIONS_SPEC = dict( urls = ["https://github.com/lightstep/lightstep-tracer-cpp/archive/{version}.tar.gz"], use_category = ["observability_ext"], extensions = ["envoy.tracers.lightstep"], - last_updated = "2020-08-24", + release_date = "2020-08-25", cpe = "N/A", ), com_github_datadog_dd_opentracing_cpp = dict( @@ -313,7 +313,7 @@ REPOSITORY_LOCATIONS_SPEC = dict( urls = ["https://github.com/DataDog/dd-opentracing-cpp/archive/v{version}.tar.gz"], use_category = ["observability_ext"], extensions = ["envoy.tracers.datadog"], - last_updated = "2020-05-15", + release_date = "2020-05-15", cpe = "N/A", ), com_github_google_benchmark = dict( @@ -325,7 +325,7 @@ REPOSITORY_LOCATIONS_SPEC = dict( strip_prefix = "benchmark-{version}", urls = ["https://github.com/google/benchmark/archive/v{version}.tar.gz"], use_category = ["test_only"], - last_updated = "2020-06-09", + release_date = "2020-06-09", ), com_github_libevent_libevent = dict( project_name = "libevent", @@ -346,7 +346,7 @@ REPOSITORY_LOCATIONS_SPEC = dict( strip_prefix = "libevent-{version}", urls = ["https://github.com/libevent/libevent/archive/{version}.tar.gz"], use_category = ["dataplane_core", "controlplane"], - last_updated = "2020-07-31", + release_date = "2020-07-28", cpe = "cpe:2.3:a:libevent_project:libevent:*", ), # This should be removed, see https://github.com/envoyproxy/envoy/issues/13261. @@ -362,7 +362,7 @@ REPOSITORY_LOCATIONS_SPEC = dict( strip_prefix = "zlib-{version}", urls = ["https://github.com/madler/zlib/archive/{version}.tar.gz"], use_category = ["controlplane", "dataplane_core"], - last_updated = "2019-04-14", + release_date = "2019-04-14", cpe = "cpe:2.3:a:gnu:zlib:*", ), com_github_zlib_ng_zlib_ng = dict( @@ -374,7 +374,7 @@ REPOSITORY_LOCATIONS_SPEC = dict( strip_prefix = "zlib-ng-{version}", urls = ["https://github.com/zlib-ng/zlib-ng/archive/{version}.tar.gz"], use_category = ["controlplane", "dataplane_core"], - last_updated = "2020-08-16", + release_date = "2020-08-16", cpe = "N/A", ), com_github_jbeder_yaml_cpp = dict( @@ -388,7 +388,7 @@ REPOSITORY_LOCATIONS_SPEC = dict( # YAML is also used for runtime as well as controlplane. It shouldn't appear on the # dataplane but we can't verify this automatically due to code structure today. 
use_category = ["controlplane", "dataplane_core"], - last_updated = "2020-07-28", + release_date = "2020-07-27", cpe = "cpe:2.3:a:yaml-cpp_project:yaml-cpp:*", ), com_github_msgpack_msgpack_c = dict( @@ -401,7 +401,7 @@ REPOSITORY_LOCATIONS_SPEC = dict( urls = ["https://github.com/msgpack/msgpack-c/releases/download/cpp-{version}/msgpack-{version}.tar.gz"], use_category = ["observability_ext"], extensions = ["envoy.tracers.datadog"], - last_updated = "2020-06-05", + release_date = "2020-06-05", cpe = "N/A", ), com_github_google_jwt_verify = dict( @@ -414,7 +414,7 @@ REPOSITORY_LOCATIONS_SPEC = dict( urls = ["https://github.com/google/jwt_verify_lib/archive/{version}.tar.gz"], use_category = ["dataplane_ext"], extensions = ["envoy.filters.http.jwt_authn"], - last_updated = "2020-07-09", + release_date = "2020-07-10", cpe = "N/A", ), com_github_nodejs_http_parser = dict( @@ -428,7 +428,7 @@ REPOSITORY_LOCATIONS_SPEC = dict( strip_prefix = "http-parser-{version}", urls = ["https://github.com/nodejs/http-parser/archive/{version}.tar.gz"], use_category = ["controlplane", "dataplane_core"], - last_updated = "2020-07-10", + release_date = "2020-07-10", cpe = "cpe:2.3:a:nodejs:node.js:*", ), com_github_tencent_rapidjson = dict( @@ -442,7 +442,7 @@ REPOSITORY_LOCATIONS_SPEC = dict( # We're mostly using com_google_protobuf for JSON, but there are some extensions and hard to # disentangle uses on the dataplane, e.g. header_formatter, Squash filter. use_category = ["controlplane", "dataplane_core"], - last_updated = "2019-12-02", + release_date = "2019-12-03", cpe = "cpe:2.3:a:tencent:rapidjson:*", ), com_github_twitter_common_lang = dict( @@ -453,7 +453,7 @@ REPOSITORY_LOCATIONS_SPEC = dict( sha256 = "56d1d266fd4767941d11c27061a57bc1266a3342e551bde3780f9e9eb5ad0ed1", strip_prefix = "twitter.common.lang-{version}/src", urls = ["https://files.pythonhosted.org/packages/08/bc/d6409a813a9dccd4920a6262eb6e5889e90381453a5f58938ba4cf1d9420/twitter.common.lang-{version}.tar.gz"], - last_updated = "2018-06-26", + release_date = "2016-10-17", use_category = ["test_only"], ), com_github_twitter_common_rpc = dict( @@ -464,7 +464,7 @@ REPOSITORY_LOCATIONS_SPEC = dict( sha256 = "0792b63fb2fb32d970c2e9a409d3d00633190a22eb185145fe3d9067fdaa4514", strip_prefix = "twitter.common.rpc-{version}/src", urls = ["https://files.pythonhosted.org/packages/be/97/f5f701b703d0f25fbf148992cd58d55b4d08d3db785aad209255ee67e2d0/twitter.common.rpc-{version}.tar.gz"], - last_updated = "2018-06-26", + release_date = "2016-10-17", use_category = ["test_only"], ), com_github_twitter_common_finagle_thrift = dict( @@ -475,7 +475,7 @@ REPOSITORY_LOCATIONS_SPEC = dict( sha256 = "1e3a57d11f94f58745e6b83348ecd4fa74194618704f45444a15bc391fde497a", strip_prefix = "twitter.common.finagle-thrift-{version}/src", urls = ["https://files.pythonhosted.org/packages/f9/e7/4f80d582578f8489226370762d2cf6bc9381175d1929eba1754e03f70708/twitter.common.finagle-thrift-{version}.tar.gz"], - last_updated = "2018-06-26", + release_date = "2016-10-17", use_category = ["test_only"], ), com_google_googletest = dict( @@ -488,7 +488,7 @@ REPOSITORY_LOCATIONS_SPEC = dict( sha256 = "7897bfaa5ad39a479177cfb5c3ce010184dbaee22a7c3727b212282871918751", strip_prefix = "googletest-{version}", urls = ["https://github.com/google/googletest/archive/{version}.tar.gz"], - last_updated = "2020-09-10", + release_date = "2020-09-10", use_category = ["test_only"], ), com_google_protobuf = dict( @@ -500,7 +500,7 @@ REPOSITORY_LOCATIONS_SPEC = dict( strip_prefix = "protobuf-{version}", 
urls = ["https://github.com/protocolbuffers/protobuf/releases/download/v{version}/protobuf-all-{version}.tar.gz"], use_category = ["dataplane_core", "controlplane"], - last_updated = "2020-10-24", + release_date = "2019-10-24", cpe = "cpe:2.3:a:google:protobuf:*", ), grpc_httpjson_transcoding = dict( @@ -513,7 +513,7 @@ REPOSITORY_LOCATIONS_SPEC = dict( urls = ["https://github.com/grpc-ecosystem/grpc-httpjson-transcoding/archive/{version}.tar.gz"], use_category = ["dataplane_ext"], extensions = ["envoy.filters.http.grpc_json_transcoder"], - last_updated = "2020-03-02", + release_date = "2020-03-02", cpe = "N/A", ), io_bazel_rules_go = dict( @@ -524,7 +524,7 @@ REPOSITORY_LOCATIONS_SPEC = dict( sha256 = "0310e837aed522875791750de44408ec91046c630374990edd51827cb169f616", urls = ["https://github.com/bazelbuild/rules_go/releases/download/v{version}/rules_go-v{version}.tar.gz"], use_category = ["build", "api"], - last_updated = "2020-08-06", + release_date = "2020-08-06", implied_untracked_deps = [ "com_github_golang_protobuf", "io_bazel_rules_nogo", @@ -541,7 +541,7 @@ REPOSITORY_LOCATIONS_SPEC = dict( sha256 = "9d48151ea71b3e225adfb6867e6d2c7d0dce46cbdc8710d9a9a628574dfd40a0", strip_prefix = "rules_cc-{version}", urls = ["https://github.com/bazelbuild/rules_cc/archive/{version}.tar.gz"], - last_updated = "2020-05-13", + release_date = "2020-05-13", use_category = ["build"], ), rules_foreign_cc = dict( @@ -552,7 +552,7 @@ REPOSITORY_LOCATIONS_SPEC = dict( sha256 = "2b1cf88de0b6e0195f6571cfde3a5bd406d11b42117d6adef2395c9525a1902e", strip_prefix = "rules_foreign_cc-{version}", urls = ["https://github.com/bazelbuild/rules_foreign_cc/archive/{version}.tar.gz"], - last_updated = "2020-08-21", + release_date = "2020-08-21", use_category = ["build"], ), rules_python = dict( @@ -564,7 +564,7 @@ REPOSITORY_LOCATIONS_SPEC = dict( sha256 = "76a8fd4e7eca2a3590f816958faa0d83c9b2ce9c32634c5c375bcccf161d3bb5", strip_prefix = "rules_python-{version}", urls = ["https://github.com/bazelbuild/rules_python/archive/{version}.tar.gz"], - last_updated = "2020-04-09", + release_date = "2020-04-09", use_category = ["build"], ), six = dict( @@ -574,7 +574,7 @@ REPOSITORY_LOCATIONS_SPEC = dict( version = "1.12.0", sha256 = "d16a0141ec1a18405cd4ce8b4613101da75da0e9a7aec5bdd4fa804d0e0eba73", urls = ["https://files.pythonhosted.org/packages/dd/bf/4138e7bfb757de47d1f4b6994648ec67a51efe58fa907c1e11e350cddfca/six-{version}.tar.gz"], - last_updated = "2019-11-17", + release_date = "2018-12-09", use_category = ["other"], ), org_llvm_llvm = dict( @@ -585,7 +585,7 @@ REPOSITORY_LOCATIONS_SPEC = dict( sha256 = "df83a44b3a9a71029049ec101fb0077ecbbdf5fe41e395215025779099a98fdf", strip_prefix = "llvm-{version}.src", urls = ["https://github.com/llvm/llvm-project/releases/download/llvmorg-{version}/llvm-{version}.src.tar.xz"], - last_updated = "2020-10-09", + release_date = "2020-03-23", use_category = ["dataplane_ext"], extensions = [ "envoy.access_loggers.wasm", @@ -604,7 +604,7 @@ REPOSITORY_LOCATIONS_SPEC = dict( sha256 = "cc3fcaf05d57010c9cf8eb920234679dede6c780137b55001fd34e4d14806f7c", strip_prefix = "WAVM-{version}", urls = ["https://github.com/WAVM/WAVM/archive/{version}.tar.gz"], - last_updated = "2020-10-09", + release_date = "2020-07-07", use_category = ["dataplane_ext"], extensions = [ "envoy.access_loggers.wasm", @@ -625,7 +625,7 @@ REPOSITORY_LOCATIONS_SPEC = dict( urls = ["https://github.com/census-instrumentation/opencensus-cpp/archive/{version}.tar.gz"], use_category = ["observability_ext"], extensions = 
["envoy.tracers.opencensus"], - last_updated = "2020-10-13", + release_date = "2020-10-08", cpe = "N/A", ), # This should be removed, see https://github.com/envoyproxy/envoy/issues/11816. @@ -644,7 +644,7 @@ REPOSITORY_LOCATIONS_SPEC = dict( "envoy.grpc_credentials.aws_iam", "envoy.tracers.opencensus", ], - last_updated = "2020-08-19", + release_date = "2020-08-19", cpe = "cpe:2.3:a:haxx:curl:*", ), com_googlesource_chromium_v8 = dict( @@ -664,7 +664,7 @@ REPOSITORY_LOCATIONS_SPEC = dict( "envoy.filters.network.wasm", "envoy.stat_sinks.wasm", ], - last_updated = "2020-08-31", + release_date = "2020-08-17", cpe = "cpe:2.3:a:google:v8:*", ), com_googlesource_quiche = dict( @@ -677,7 +677,7 @@ REPOSITORY_LOCATIONS_SPEC = dict( urls = ["https://storage.googleapis.com/quiche-envoy-integration/{version}.tar.gz"], use_category = ["dataplane_ext"], extensions = ["envoy.transport_sockets.quic"], - last_updated = "2020-09-18", + release_date = "2020-09-17", cpe = "N/A", ), com_googlesource_googleurl = dict( @@ -690,7 +690,7 @@ REPOSITORY_LOCATIONS_SPEC = dict( urls = ["https://storage.googleapis.com/quiche-envoy-integration/googleurl_{version}.tar.gz"], use_category = ["dataplane_ext"], extensions = [], - last_updated = "2020-08-05", + release_date = "2020-07-30", cpe = "N/A", ), com_google_cel_cpp = dict( @@ -711,7 +711,7 @@ REPOSITORY_LOCATIONS_SPEC = dict( "envoy.filters.network.wasm", "envoy.stat_sinks.wasm", ], - last_updated = "2020-07-14", + release_date = "2020-07-14", cpe = "N/A", ), com_github_google_flatbuffers = dict( @@ -730,7 +730,7 @@ REPOSITORY_LOCATIONS_SPEC = dict( "envoy.filters.network.wasm", "envoy.stat_sinks.wasm", ], - last_updated = "2020-07-29", + release_date = "2020-04-02", cpe = "N/A", ), com_googlesource_code_re2 = dict( @@ -742,7 +742,7 @@ REPOSITORY_LOCATIONS_SPEC = dict( strip_prefix = "re2-{version}", urls = ["https://github.com/google/re2/archive/{version}.tar.gz"], use_category = ["controlplane", "dataplane_core"], - last_updated = "2020-07-06", + release_date = "2020-07-06", cpe = "N/A", ), # Included to access FuzzedDataProvider.h. This is compiler agnostic but @@ -757,7 +757,7 @@ REPOSITORY_LOCATIONS_SPEC = dict( # Only allow peeking at fuzzer related files for now. 
strip_prefix = "compiler-rt-{version}.src", urls = ["https://github.com/llvm/llvm-project/releases/download/llvmorg-{version}/compiler-rt-{version}.src.tar.xz"], - last_updated = "2020-03-24", + release_date = "2020-03-23", use_category = ["test_only"], ), upb = dict( @@ -769,7 +769,7 @@ REPOSITORY_LOCATIONS_SPEC = dict( strip_prefix = "upb-{version}", urls = ["https://github.com/protocolbuffers/upb/archive/{version}.tar.gz"], use_category = ["controlplane"], - last_updated = "2019-11-19", + release_date = "2019-11-19", cpe = "N/A", ), kafka_source = dict( @@ -782,7 +782,7 @@ REPOSITORY_LOCATIONS_SPEC = dict( urls = ["https://github.com/apache/kafka/archive/{version}.zip"], use_category = ["dataplane_ext"], extensions = ["envoy.filters.network.kafka_broker"], - last_updated = "2020-08-26", + release_date = "2020-03-03", cpe = "cpe:2.3:a:apache:kafka:*", ), kafka_server_binary = dict( @@ -793,7 +793,7 @@ REPOSITORY_LOCATIONS_SPEC = dict( sha256 = "2177cbd14118999e1d76fec628ca78ace7e6f841219dbc6035027c796bbe1a2a", strip_prefix = "kafka_2.12-{version}", urls = ["https://mirrors.gigenet.com/apache/kafka/{version}/kafka_2.12-{version}.tgz"], - last_updated = "2020-08-26", + release_date = "2020-03-12", use_category = ["test_only"], ), kafka_python_client = dict( @@ -804,7 +804,7 @@ REPOSITORY_LOCATIONS_SPEC = dict( sha256 = "05f7c6eecb402f11fcb7e524c903f1ba1c38d3bdc9bf42bc8ec3cf7567b9f979", strip_prefix = "kafka-python-{version}", urls = ["https://github.com/dpkp/kafka-python/archive/{version}.tar.gz"], - last_updated = "2020-08-26", + release_date = "2020-02-20", use_category = ["test_only"], ), proxy_wasm_cpp_sdk = dict( @@ -823,7 +823,7 @@ REPOSITORY_LOCATIONS_SPEC = dict( "envoy.filters.network.wasm", "envoy.stat_sinks.wasm", ], - last_updated = "2020-10-09", + release_date = "2020-09-11", cpe = "N/A", ), proxy_wasm_cpp_host = dict( @@ -842,7 +842,7 @@ REPOSITORY_LOCATIONS_SPEC = dict( "envoy.filters.network.wasm", "envoy.stat_sinks.wasm", ], - last_updated = "2020-10-16", + release_date = "2020-10-16", cpe = "N/A", ), # TODO: upgrade to the latest version (1.41 currently fails tests) @@ -855,7 +855,7 @@ REPOSITORY_LOCATIONS_SPEC = dict( strip_prefix = "emsdk-{version}", urls = ["https://github.com/emscripten-core/emsdk/archive/{version}.tar.gz"], use_category = ["build"], - last_updated = "2020-10-09", + release_date = "2020-01-15", ), io_bazel_rules_rust = dict( project_name = "Bazel rust rules", @@ -868,7 +868,7 @@ REPOSITORY_LOCATIONS_SPEC = dict( strip_prefix = "rules_rust-{version}", urls = ["https://github.com/bazelbuild/rules_rust/archive/{version}.tar.gz"], use_category = ["test_only"], - last_updated = "2020-10-15", + release_date = "2020-10-15", ), rules_antlr = dict( project_name = "ANTLR Rules for Bazel", @@ -887,7 +887,7 @@ REPOSITORY_LOCATIONS_SPEC = dict( "envoy.filters.network.wasm", "envoy.stat_sinks.wasm", ], - last_updated = "2020-07-29", + release_date = "2019-06-21", cpe = "N/A", ), antlr4_runtimes = dict( @@ -906,7 +906,7 @@ REPOSITORY_LOCATIONS_SPEC = dict( "envoy.filters.network.wasm", "envoy.stat_sinks.wasm", ], - last_updated = "2020-10-09", + release_date = "2018-12-18", cpe = "N/A", ), ) diff --git a/bazel/wasm/wasm.bzl b/bazel/wasm/wasm.bzl index 5a20b46837a1..a3d89067e496 100644 --- a/bazel/wasm/wasm.bzl +++ b/bazel/wasm/wasm.bzl @@ -88,9 +88,9 @@ def wasm_cc_binary(name, tags = [], repository = "", **kwargs): kwargs.setdefault("additional_linker_inputs", ["@proxy_wasm_cpp_sdk//:jslib", "@envoy//source/extensions/common/wasm/ext:jslib"]) if repository == 
"@envoy": - envoy_js = "--js-library source/extensions/common/wasm/ext/envoy_wasm_intrinsics.js" - else: envoy_js = "--js-library external/envoy/source/extensions/common/wasm/ext/envoy_wasm_intrinsics.js" + else: + envoy_js = "--js-library source/extensions/common/wasm/ext/envoy_wasm_intrinsics.js" kwargs.setdefault("linkopts", [ envoy_js, "--js-library external/proxy_wasm_cpp_sdk/proxy_wasm_intrinsics.js", @@ -112,7 +112,7 @@ def wasm_cc_binary(name, tags = [], repository = "", **kwargs): ) def envoy_wasm_cc_binary(name, tags = [], **kwargs): - wasm_cc_binary(name, tags, repository = "@envoy", **kwargs) + wasm_cc_binary(name, tags, repository = "", **kwargs) def wasm_rust_binary(name, tags = [], **kwargs): wasm_name = "_wasm_" + name.replace(".", "_") diff --git a/ci/README.md b/ci/README.md index ccef23fb5bf2..028e31263b30 100644 --- a/ci/README.md +++ b/ci/README.md @@ -5,7 +5,7 @@ and an image based on Windows2019. ## Ubuntu Envoy image -The Ubuntu based Envoy Docker image at [`envoyproxy/envoy-build:`](https://hub.docker.com/r/envoyproxy/envoy-build/) is used for CircleCI checks, +The Ubuntu based Envoy Docker image at [`envoyproxy/envoy-build:`](https://hub.docker.com/r/envoyproxy/envoy-build/) is used for CI checks, where `` is specified in [`envoy_build_sha.sh`](https://github.com/envoyproxy/envoy/blob/master/ci/envoy_build_sha.sh). Developers may work with the latest build image SHA in [envoy-build-tools](https://github.com/envoyproxy/envoy-build-tools/blob/master/toolchains/rbe_toolchains_config.bzl#L8) repo to provide a self-contained environment for building Envoy binaries and running tests that reflects the latest built Ubuntu Envoy image. @@ -189,10 +189,10 @@ This build the Ubuntu based `envoyproxy/envoy-build-ubuntu` image, and the final # macOS Build Flow -The macOS CI build is part of the [CircleCI](https://circleci.com/gh/envoyproxy/envoy) workflow. +The macOS CI build is part of the [Azure Pipelines](https://dev.azure.com/cncf/envoy/_build) workflow. Dependencies are installed by the `ci/mac_ci_setup.sh` script, via [Homebrew](https://brew.sh), -which is pre-installed on the CircleCI macOS image. The dependencies are cached are re-installed -on every build. The `ci/mac_ci_steps.sh` script executes the specific commands that +which is pre-installed on the [Azure Pipelines macOS image](https://github.com/actions/virtual-environments/blob/main/images/macos/macos-10.15-Readme.md). +The dependencies are cached and re-installed on every build. The `ci/mac_ci_steps.sh` script executes the specific commands that build and test Envoy. Note that the full version of Xcode (not just Command Line Tools) is required. # Coverity Scan Build Flow diff --git a/ci/check_repository_locations.sh b/ci/check_repository_locations.sh new file mode 100755 index 000000000000..bd799f1b05d9 --- /dev/null +++ b/ci/check_repository_locations.sh @@ -0,0 +1,13 @@ +#!/bin/bash + +set -e + +function no_change { + echo "No change to **/repository_locations.bzl" + exit 0 +} + +(./tools/git/modified_since_last_github_commit.sh . bzl | grep repository_locations) || no_change + +./tools/dependency/release_dates.sh ./bazel/repository_locations.bzl +./tools/dependency/release_dates.sh ./api/bazel/repository_locations.bzl diff --git a/ci/do_ci.sh b/ci/do_ci.sh index 967e34558512..0a1faa8b8b24 100755 --- a/ci/do_ci.sh +++ b/ci/do_ci.sh @@ -411,14 +411,18 @@ elif [[ "$CI_TARGET" == "fix_spelling_pedantic" ]]; then exit 0 elif [[ "$CI_TARGET" == "docs" ]]; then echo "generating docs..." 
- # Validate dependency relationships between core/extensions and external deps. - tools/dependency/validate_test.py - tools/dependency/validate.py - # Validate the CVE scanner works. TODO(htuch): create a dedicated tools CI target. - python3.8 tools/dependency/cve_scan_test.py # Build docs. BAZEL_BUILD_OPTIONS="${BAZEL_BUILD_OPTIONS[*]}" docs/build.sh exit 0 +elif [[ "$CI_TARGET" == "deps" ]]; then + echo "verifying dependencies..." + # Validate dependency relationships between core/extensions and external deps. + ./tools/dependency/validate_test.py + ./tools/dependency/validate.py + # Validate the CVE scanner works. + python3.8 tools/dependency/cve_scan_test.py + ./ci/check_repository_locations.sh + exit 0 elif [[ "$CI_TARGET" == "verify_examples" ]]; then echo "verify examples..." docker load < "$ENVOY_DOCKER_BUILD_DIR/docker/envoy-docker-images.tar.xz" @@ -438,6 +442,7 @@ elif [[ "$CI_TARGET" == "verify_examples" ]]; then sudo apt-get install -y -qq --no-install-recommends redis-tools export DOCKER_NO_PULL=1 umask 027 + chmod -R o-rwx examples/ ci/verify_examples.sh exit 0 else diff --git a/ci/do_circle_ci.sh b/ci/do_circle_ci.sh deleted file mode 100755 index 3602f6a00239..000000000000 --- a/ci/do_circle_ci.sh +++ /dev/null @@ -1,51 +0,0 @@ -#!/bin/bash - -set -e - -# Workaround for argument too long issue in protoc -ulimit -s 16384 - -# bazel uses jgit internally and the default circle-ci .gitconfig says to -# convert https://github.com to ssh://git@github.com, which jgit does not support. -if [[ -e "${HOME}/.gitconfig" ]]; then - mv ~/.gitconfig ~/.gitconfig_save -fi - -# Workaround for not using ci/run_envoy_docker.sh -# Create a fake home. Python site libs tries to do getpwuid(3) if we don't and the CI -# Docker image gets confused as it has no passwd entry when running non-root -# unless we do this. -FAKE_HOME=/tmp/fake_home -mkdir -p "${FAKE_HOME}" -export HOME="${FAKE_HOME}" -export PYTHONUSERBASE="${FAKE_HOME}" -export USER=bazel - -ENVOY_SRCDIR="$(pwd)" -export ENVOY_SRCDIR - -# xlarge resource_class. -# See note: https://circleci.com/docs/2.0/configuration-reference/#resource_class for why we -# hard code this (basically due to how docker works). -export NUM_CPUS=6 - -# CircleCI doesn't support IPv6 by default, so we run all tests with IPv4 only. -# IPv6 tests are run with Azure Pipelines. -export BAZEL_BUILD_EXTRA_OPTIONS+=" \ - --test_env=ENVOY_IP_TEST_VERSIONS=v4only \ - --local_cpu_resources=${NUM_CPUS} \ - --action_env=HOME \ - --action_env=PYTHONUSERBASE \ - --test_env=HOME \ - --test_env=PYTHONUSERBASE" - -function finish { - echo "disk space at end of build:" - df -h -} -trap finish EXIT - -echo "disk space at beginning of build:" -df -h - -ci/do_ci.sh "$@" diff --git a/ci/mac_ci_setup.sh b/ci/mac_ci_setup.sh index 755852b4ffa0..ef29e6c92587 100755 --- a/ci/mac_ci_setup.sh +++ b/ci/mac_ci_setup.sh @@ -41,8 +41,8 @@ function retry () { } if ! retry brew update; then - echo "Failed to update homebrew" - exit 1 + # Do not exit early if update fails. + echo "Failed to update homebrew" fi DEPS="automake cmake coreutils go libtool wget ninja" @@ -51,12 +51,6 @@ do is_installed "${DEP}" || install "${DEP}" done -if [ -n "$CIRCLECI" ]; then - # bazel uses jgit internally and the default circle-ci .gitconfig says to - # convert https://github.com to ssh://git@github.com, which jgit does not support. 
- mv ~/.gitconfig ~/.gitconfig_save -fi - # Required as bazel and a foreign bazelisk are installed in the latest macos vm image, we have # to unlink/overwrite them to install bazelisk echo "Installing bazelisk"
diff --git a/ci/repokitteh/modules/azure_pipelines.star b/ci/repokitteh/modules/azure_pipelines.star index 7d80c149b5cd..655ba0e50863 100644 --- a/ci/repokitteh/modules/azure_pipelines.star +++ b/ci/repokitteh/modules/azure_pipelines.star @@ -25,7 +25,7 @@ def _get_azp_checks(): return checks def _retry(config, comment_id, command): - msgs = "Retrying Azure Pipelines, to retry CircleCI checks, use `/retest-circle`.\n" + msgs = "Retrying Azure Pipelines.\n" checks = _get_azp_checks() retried_checks = []
diff --git a/ci/run_envoy_docker.sh b/ci/run_envoy_docker.sh index 842b51b6ce89..e0f204e67fcf 100755 --- a/ci/run_envoy_docker.sh +++ b/ci/run_envoy_docker.sh @@ -86,6 +86,7 @@ docker run --rm \ -e SYSTEM_PULLREQUEST_TARGETBRANCH \ -e SYSTEM_PULLREQUEST_PULLREQUESTNUMBER \ -e GCS_ARTIFACT_BUCKET \ + -e GITHUB_TOKEN \ -e BUILD_SOURCEBRANCHNAME \ -e BAZELISK_BASE_URL \ -e ENVOY_BUILD_ARCH \
diff --git a/docs/README.md b/docs/README.md index 216e7bafcbad..5cd5444d670b 100644 --- a/docs/README.md +++ b/docs/README.md @@ -34,20 +34,17 @@ To use this method you will need a minimum of 4-5GB of disk space available to a # Creating a Pull Request with documentation changes -When you create a Pull Request the documentation is rendered by CircleCI. - -If you are logged in to CircleCI (it is possible to authenticate using your Github account), you can view -the rendered changes. +When you create a Pull Request the documentation is rendered by Azure Pipelines. To view the rendered docs: -- click `Details` in the `ci/circleci: docs` check at the bottom of the Pull Request. -- click `ARTIFACTS` in the CircleCI dashboard -- browse to the documentation root at `generated/docs/index.html`. +1. Open the docs job in Azure Pipelines. +2. Navigate to the "Upload Docs to GCS" log. +3. Click on the link there. # How the Envoy website and docs are updated 1. The docs are published to [docs/envoy/latest](https://github.com/envoyproxy/envoyproxy.github.io/tree/master/docs/envoy/latest) - on every commit to master. This process is handled by CircleCI with the + on every commit to master. This process is handled by Azure Pipelines with the [`publish.sh`](https://github.com/envoyproxy/envoy/blob/master/docs/publish.sh) script. 2. The docs are published to [docs/envoy](https://github.com/envoyproxy/envoyproxy.github.io/tree/master/docs/envoy)
diff --git a/docs/generate_external_dep_rst.py b/docs/generate_external_dep_rst.py index 07488d20831e..4c4008e64dd0 100755 --- a/docs/generate_external_dep_rst.py +++ b/docs/generate_external_dep_rst.py @@ -53,43 +53,23 @@ def RenderTitle(title): # SHA. Otherwise, return the tarball download. def GetVersionUrl(metadata): # Figure out if it's a GitHub repo. - github_repo = None - github_version = None - for url in metadata['urls']: - if url.startswith('https://github.com/'): - components = url.split('/') - github_repo = f'https://github.com/{components[3]}/{components[4]}' - if components[5] == 'archive': - # Only support .tar.gz, .zip today. Figure out the release tag from this - # filename. - if components[6].endswith('.tar.gz'): - github_version = components[6][:-len('.tar.gz')] - else: - assert (components[6].endswith('.zip')) - github_version = components[6][:-len('.zip')] - else: - # Release tag is a path component.
- assert (components[5] == 'releases') - github_version = components[7] - break + github_release = dep_utils.GetGitHubReleaseFromUrls(metadata['urls']) # If not, direct download link for tarball - download_url = metadata['urls'][0] - if not github_repo: - return download_url - # If it's not a GH hash, it's a tagged release. - tagged_release = len(metadata['version']) != 40 - if tagged_release: + if not github_release: + return metadata['urls'][0] + github_repo = f'https://github.com/{github_release.organization}/{github_release.project}' + if github_release.tagged: # The GitHub version should look like the metadata version, but might have # something like a "v" prefix. - return f'{github_repo}/releases/tag/{github_version}' - assert (metadata['version'] == github_version) - return f'{github_repo}/tree/{github_version}' + return f'{github_repo}/releases/tag/{github_release.version}' + assert (metadata['version'] == github_release.version) + return f'{github_repo}/tree/{github_release.version}' if __name__ == '__main__': security_rst_root = sys.argv[1] - Dep = namedtuple('Dep', ['name', 'sort_name', 'version', 'cpe', 'last_updated']) + Dep = namedtuple('Dep', ['name', 'sort_name', 'version', 'cpe', 'release_date']) use_categories = defaultdict(lambda: defaultdict(list)) # Bin rendered dependencies into per-use category lists. for k, v in dep_utils.RepositoryLocations().items(): @@ -102,14 +82,14 @@ def GetVersionUrl(metadata): project_url = v['project_url'] name = RstLink(project_name, project_url) version = RstLink(RenderVersion(v['version']), GetVersionUrl(v)) - last_updated = v['last_updated'] - dep = Dep(name, project_name.lower(), version, cpe, last_updated) + release_date = v['release_date'] + dep = Dep(name, project_name.lower(), version, cpe, release_date) for category in v['use_category']: for ext in v.get('extensions', ['core']): use_categories[category][ext].append(dep) def CsvRow(dep): - return [dep.name, dep.version, dep.last_updated, dep.cpe] + return [dep.name, dep.version, dep.release_date, dep.cpe] # Generate per-use category RST with CSV tables. for category, exts in use_categories.items(): @@ -118,6 +98,6 @@ def CsvRow(dep): if ext_name != 'core': content += RenderTitle(ext_name) output_path = pathlib.Path(security_rst_root, f'external_dep_{category}.rst') - content += CsvTable(['Name', 'Version', 'Last updated', 'CPE'], [2, 1, 1, 2], + content += CsvTable(['Name', 'Version', 'Release date', 'CPE'], [2, 1, 1, 2], [CsvRow(dep) for dep in sorted(deps, key=lambda d: d.sort_name)]) output_path.write_text(content) diff --git a/docs/publish.sh b/docs/publish.sh index c56e9bb31066..11b75f1b77c9 100755 --- a/docs/publish.sh +++ b/docs/publish.sh @@ -1,6 +1,6 @@ #!/bin/bash -# This is run on every commit that CircleCI picks up. It assumes that docs have already been built +# This is run on every commit that Azure Pipelines picks up. It assumes that docs have already been built # via docs/build.sh. The push behavior differs depending on the nature of the commit: # * Tag commit (e.g. v1.6.0): pushes docs to versioned location, e.g. # https://www.envoyproxy.io/docs/envoy/v1.6.0/. 
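An aside on the `GetVersionUrl` rewrite above: the only signal distinguishing a pinned commit from a tagged release is the version string itself. A minimal C++ sketch of that heuristic, for illustration only (the hex-digit check is an extra assumption here; the Python code above tests only the length):

```cpp
#include <cctype>
#include <string>

// A 40-character hex string is treated as a pinned Git commit SHA; anything
// else is assumed to be a release tag (mirrors the `tagged` distinction above).
bool isTaggedRelease(const std::string& version) {
  if (version.size() != 40) {
    return true;
  }
  for (char c : version) {
    if (!std::isxdigit(static_cast<unsigned char>(c))) {
      return true; // Contains a non-hex character, so it cannot be a commit SHA.
    }
  }
  return false;
}

// Build the browse URL that the docs link to: releases/tag/<tag> for tagged
// releases, tree/<sha> for pinned commits.
std::string versionUrl(const std::string& github_repo, const std::string& version) {
  return isTaggedRelease(version) ? github_repo + "/releases/tag/" + version
                                  : github_repo + "/tree/" + version;
}
```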
diff --git a/docs/root/api-v3/config/watchdog/watchdog.rst b/docs/root/api-v3/config/watchdog/watchdog.rst index f3fd56c327ec..f5906b3390d3 100644 --- a/docs/root/api-v3/config/watchdog/watchdog.rst +++ b/docs/root/api-v3/config/watchdog/watchdog.rst @@ -6,4 +6,4 @@ Watchdog :maxdepth: 2 ../../extensions/watchdog/profile_action/v3alpha/* - ../../extensions/watchdog/abort_action/v3alpha/* + ../../watchdog/v3alpha/*
diff --git a/docs/root/configuration/http/http_filters/compressor_filter.rst b/docs/root/configuration/http/http_filters/compressor_filter.rst index 862af5304065..2fa10f00d6bf 100644 --- a/docs/root/configuration/http/http_filters/compressor_filter.rst +++ b/docs/root/configuration/http/http_filters/compressor_filter.rst @@ -77,8 +77,8 @@ the extension. When compression is *applied*: - The *content-length* is removed from response headers. -- Response headers contain "*transfer-encoding: chunked*" and do not contain - "*content-encoding*" header. +- Response headers contain "*transfer-encoding: chunked*", and + "*content-encoding*" with the compression scheme used (e.g., ``gzip``). - The "*vary: accept-encoding*" header is inserted on every response. Also the "*vary: accept-encoding*" header may be inserted even if compression is *not*
diff --git a/docs/root/configuration/http/http_filters/lua_filter.rst b/docs/root/configuration/http/http_filters/lua_filter.rst index ebd60a7c0ded..3f067fdb7ea0 100644 --- a/docs/root/configuration/http/http_filters/lua_filter.rst +++ b/docs/root/configuration/http/http_filters/lua_filter.rst @@ -643,6 +643,26 @@ protocol() Returns the string representation of :repo:`HTTP protocol ` used by the current request. The possible values are: *HTTP/1.0*, *HTTP/1.1*, and *HTTP/2*. +downstreamLocalAddress() +^^^^^^^^^^^^^^^^^^^^^^^^ + +.. code-block:: lua + + streamInfo:downstreamLocalAddress() + +Returns the string representation of :repo:`downstream local address ` +used by the current request. + +downstreamDirectRemoteAddress() +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. code-block:: lua + + streamInfo:downstreamDirectRemoteAddress() + +Returns the string representation of :repo:`downstream directly connected address ` +used by the current request. This is equivalent to the address of the physical connection. + dynamicMetadata() ^^^^^^^^^^^^^^^^^
diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index f3f42050f399..dd0f6af6bbbf 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -11,6 +11,7 @@ Minor Behavior Changes * build: the Alpine based debug images are no longer built in CI, use Ubuntu based images instead. * ext_authz filter: the deprecated field :ref:`use_alpha ` is no longer supported and cannot be set anymore. +* watchdog: the watchdog action :ref:`abort_action ` is now the default action to terminate the process if watchdog kill / multikill is enabled. Bug Fixes --------- @@ -35,6 +36,7 @@ New Features * hds: added support for delta updates in the :ref:`HealthCheckSpecifier `, making only the Endpoints and Health Checkers that changed be reconstructed on receiving a new message, rather than the entire HDS. * health_check: added option to use :ref:`no_traffic_healthy_interval ` which allows a different no traffic interval when the host is healthy. * listener: added an optional :ref:`default filter chain `. If this field is supplied, and none of the :ref:`filter_chains ` matches, this default filter chain is used to serve the connection.
+* lua: added `downstreamDirectRemoteAddress()` and `downstreamLocalAddress()` APIs to :ref:`streamInfo() `. * mongo_proxy: the list of commands to produce metrics for is now :ref:`configurable `. * ratelimit: added support for use of various :ref:`metadata ` as a ratelimit action. * ratelimit: added :ref:`disable_x_envoy_ratelimited_header ` option to disable `X-Envoy-RateLimited` header. diff --git a/docs/root/version_history/v1.16.0.rst b/docs/root/version_history/v1.16.0.rst index c4dc4ee126c4..259ed30d223c 100644 --- a/docs/root/version_history/v1.16.0.rst +++ b/docs/root/version_history/v1.16.0.rst @@ -159,7 +159,7 @@ New Features * watchdog: support randomizing the watchdog's kill timeout to prevent synchronized kills via a maximium jitter parameter :ref:`max_kill_timeout_jitter`. * watchdog: supports an extension point where actions can be registered to fire on watchdog events such as miss, megamiss, kill and multikill. See :ref:`watchdog actions`. * watchdog: watchdog action extension that does cpu profiling. See :ref:`Profile Action `. -* watchdog: watchdog action extension that sends SIGABRT to the stuck thread to terminate the process. See :ref:`Abort Action `. +* watchdog: watchdog action extension that sends SIGABRT to the stuck thread to terminate the process. See :ref:`Abort Action `. * xds: added :ref:`extension config discovery` support for HTTP filters. * xds: added support for mixed v2/v3 discovery response, which enable type url downgrade and upgrade. This feature is disabled by default and is controlled by runtime guard `envoy.reloadable_features.enable_type_url_downgrade_and_upgrade`. * zlib: added option to use `zlib-ng `_ as zlib library. diff --git a/examples/cache/docker-compose.yaml b/examples/cache/docker-compose.yaml index 0d4614f0a555..3f067efb0c06 100644 --- a/examples/cache/docker-compose.yaml +++ b/examples/cache/docker-compose.yaml @@ -13,8 +13,6 @@ services: ports: - "8000:8000" - "8001:8001" - environment: - - ENVOY_UID=0 service1: build: diff --git a/examples/wasm/BUILD b/examples/wasm/BUILD index 81f139c4b3e5..09cdc34d9ac4 100644 --- a/examples/wasm/BUILD +++ b/examples/wasm/BUILD @@ -2,13 +2,13 @@ load( "//bazel:envoy_build_system.bzl", "envoy_package", ) -load("//bazel/wasm:wasm.bzl", "wasm_cc_binary") +load("//bazel/wasm:wasm.bzl", "envoy_wasm_cc_binary") licenses(["notice"]) # Apache 2 envoy_package() -wasm_cc_binary( +envoy_wasm_cc_binary( name = "envoy_filter_http_wasm_example.wasm", srcs = ["envoy_filter_http_wasm_example.cc"], deps = [ diff --git a/generated_api_shadow/bazel/external_deps.bzl b/generated_api_shadow/bazel/external_deps.bzl index 588879c4bd0a..e8283e4fee10 100644 --- a/generated_api_shadow/bazel/external_deps.bzl +++ b/generated_api_shadow/bazel/external_deps.bzl @@ -17,14 +17,18 @@ DEPENDENCY_ANNOTATIONS = [ # Envoy (see the external dependency at the given version for information). "implied_untracked_deps", - # When the dependency was last updated in Envoy. - "last_updated", - # Project metadata. "project_desc", "project_name", "project_url", + # Reflects the UTC date (YYYY-MM-DD format) for the dependency release. This + # is when the dependency was updated in its repository. For dependencies + # that have releases, this is the date of the release. For dependencies + # without releases or for scenarios where we temporarily need to use a + # commit, this date should be the date of the commit in UTC. + "release_date", + # List of the categories describing how the dependency is being used. 
This attribute is used # for automatic tracking of security posture of Envoy's dependencies. # Possible values are documented in the USE_CATEGORIES list below. @@ -63,8 +67,7 @@ USE_CATEGORIES = [ "devtools", ] -# Components with these use categories are not required to specify the 'cpe' -# and 'last_updated' annotation. +# Components with these use categories are not required to specify the 'cpe' annotation. USE_CATEGORIES_WITH_CPE_OPTIONAL = ["build", "other", "test_only", "api"] def _fail_missing_attribute(attr, key): @@ -106,13 +109,13 @@ def load_repository_locations(repository_locations_spec): if "extensions" not in location: _fail_missing_attribute("extensions", key) - if "last_updated" not in location: - _fail_missing_attribute("last_updated", key) - last_updated = location["last_updated"] + if "release_date" not in location: + _fail_missing_attribute("release_date", key) + release_date = location["release_date"] # Starlark doesn't have regexes. - if len(last_updated) != 10 or last_updated[4] != "-" or last_updated[7] != "-": - fail("last_updated must match YYYY-DD-MM: " + last_updated) + if len(release_date) != 10 or release_date[4] != "-" or release_date[7] != "-": + fail("release_date must match YYYY-MM-DD: " + release_date) if "cpe" in location: cpe = location["cpe"]
diff --git a/generated_api_shadow/bazel/repository_locations.bzl b/generated_api_shadow/bazel/repository_locations.bzl index bdcf31e867d2..e46f7d77f8e5 100644 --- a/generated_api_shadow/bazel/repository_locations.bzl +++ b/generated_api_shadow/bazel/repository_locations.bzl @@ -7,7 +7,7 @@ REPOSITORY_LOCATIONS_SPEC = dict( version = "1.0.3", sha256 = "1c531376ac7e5a180e0237938a2536de0c54d93f5c278634818e0efc952dd56c", urls = ["https://github.com/bazelbuild/bazel-skylib/releases/download/{version}/bazel-skylib-{version}.tar.gz"], - last_updated = "2020-08-27", + release_date = "2020-08-27", use_category = ["api"], ), com_envoyproxy_protoc_gen_validate = dict( @@ -18,7 +18,7 @@ REPOSITORY_LOCATIONS_SPEC = dict( sha256 = "e368733c9fb7f8489591ffaf269170d7658cc0cd1ee322b601512b769446d3c8", strip_prefix = "protoc-gen-validate-{version}", urls = ["https://github.com/envoyproxy/protoc-gen-validate/archive/{version}.tar.gz"], - last_updated = "2020-06-09", + release_date = "2020-06-08", use_category = ["api"], ), com_github_cncf_udpa = dict( @@ -29,7 +29,7 @@ REPOSITORY_LOCATIONS_SPEC = dict( sha256 = "83a7dcc316d741031f34c0409021432b74a39c4811845a177133f02f948fe2d8", strip_prefix = "udpa-{version}", urls = ["https://github.com/cncf/udpa/archive/v{version}.tar.gz"], - last_updated = "2020-09-23", + release_date = "2020-06-29", use_category = ["api"], ), com_github_openzipkin_zipkinapi = dict( @@ -40,7 +40,7 @@ REPOSITORY_LOCATIONS_SPEC = dict( sha256 = "688c4fe170821dd589f36ec45aaadc03a618a40283bc1f97da8fa11686fc816b", strip_prefix = "zipkin-api-{version}", urls = ["https://github.com/openzipkin/zipkin-api/archive/{version}.tar.gz"], - last_updated = "2020-09-23", + release_date = "2019-08-23", use_category = ["api"], ), com_google_googleapis = dict( @@ -52,7 +52,7 @@ REPOSITORY_LOCATIONS_SPEC = dict( sha256 = "a45019af4d3290f02eaeb1ce10990166978c807cb33a9692141a076ba46d1405", strip_prefix = "googleapis-{version}", urls = ["https://github.com/googleapis/googleapis/archive/{version}.tar.gz"], - last_updated = "2019-12-02", + release_date = "2019-12-02", use_category = ["api"], ), opencensus_proto = dict( @@ -63,7 +63,7 @@ REPOSITORY_LOCATIONS_SPEC = dict( sha256 = "b7e13f0b4259e80c3070b583c2f39e53153085a6918718b1c710caf7037572b0",
strip_prefix = "opencensus-proto-{version}/src", urls = ["https://github.com/census-instrumentation/opencensus-proto/archive/v{version}.tar.gz"], - last_updated = "2020-06-20", + release_date = "2020-07-21", use_category = ["api"], ), prometheus_metrics_model = dict( @@ -74,7 +74,7 @@ REPOSITORY_LOCATIONS_SPEC = dict( sha256 = "6748b42f6879ad4d045c71019d2512c94be3dd86f60965e9e31e44a3f464323e", strip_prefix = "client_model-{version}", urls = ["https://github.com/prometheus/client_model/archive/{version}.tar.gz"], - last_updated = "2020-06-23", + release_date = "2020-06-23", use_category = ["api"], ), rules_proto = dict( @@ -85,7 +85,7 @@ REPOSITORY_LOCATIONS_SPEC = dict( sha256 = "aa1ee19226f707d44bee44c720915199c20c84a23318bb0597ed4e5c873ccbd5", strip_prefix = "rules_proto-{version}", urls = ["https://github.com/bazelbuild/rules_proto/archive/{version}.tar.gz"], - last_updated = "2020-08-17", + release_date = "2020-08-17", use_category = ["api"], ), ) diff --git a/generated_api_shadow/envoy/config/common/tap/v2alpha/BUILD b/generated_api_shadow/envoy/config/common/tap/v2alpha/BUILD index 0f944d868c1a..3aed5a34a400 100644 --- a/generated_api_shadow/envoy/config/common/tap/v2alpha/BUILD +++ b/generated_api_shadow/envoy/config/common/tap/v2alpha/BUILD @@ -6,7 +6,6 @@ licenses(["notice"]) # Apache 2 api_proto_package( deps = [ - "//envoy/api/v2/core:pkg", "//envoy/service/tap/v2alpha:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], diff --git a/generated_api_shadow/envoy/config/common/tap/v2alpha/common.proto b/generated_api_shadow/envoy/config/common/tap/v2alpha/common.proto index 262557b35623..6db1ecceddc4 100644 --- a/generated_api_shadow/envoy/config/common/tap/v2alpha/common.proto +++ b/generated_api_shadow/envoy/config/common/tap/v2alpha/common.proto @@ -2,7 +2,6 @@ syntax = "proto3"; package envoy.config.common.tap.v2alpha; -import "envoy/api/v2/core/config_source.proto"; import "envoy/service/tap/v2alpha/common.proto"; import "udpa/annotations/migrate.proto"; @@ -19,15 +18,6 @@ option (udpa.annotations.file_status).package_version_status = FROZEN; // Common configuration for all tap extensions. message CommonExtensionConfig { - // [#not-implemented-hide:] - message TapDSConfig { - // Configuration for the source of TapDS updates for this Cluster. - api.v2.core.ConfigSource config_source = 1 [(validate.rules).message = {required: true}]; - - // Tap config to request from XDS server. - string name = 2 [(validate.rules).string = {min_bytes: 1}]; - } - oneof config_type { option (validate.required) = true; @@ -37,9 +27,6 @@ message CommonExtensionConfig { // If specified, the tap filter will be configured via a static configuration that cannot be // changed. service.tap.v2alpha.TapConfig static_config = 2; - - // [#not-implemented-hide:] Configuration to use for TapDS updates for the filter. 
- TapDSConfig tapds_config = 3; } } diff --git a/generated_api_shadow/envoy/extensions/common/tap/v3/BUILD b/generated_api_shadow/envoy/extensions/common/tap/v3/BUILD index eea9dcac5d23..2ad1221bb717 100644 --- a/generated_api_shadow/envoy/extensions/common/tap/v3/BUILD +++ b/generated_api_shadow/envoy/extensions/common/tap/v3/BUILD @@ -7,7 +7,6 @@ licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/config/common/tap/v2alpha:pkg", - "//envoy/config/core/v3:pkg", "//envoy/config/tap/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", "@com_github_cncf_udpa//udpa/core/v1:pkg", diff --git a/generated_api_shadow/envoy/extensions/common/tap/v3/common.proto b/generated_api_shadow/envoy/extensions/common/tap/v3/common.proto index aa7ae8264757..c71bff14008b 100644 --- a/generated_api_shadow/envoy/extensions/common/tap/v3/common.proto +++ b/generated_api_shadow/envoy/extensions/common/tap/v3/common.proto @@ -2,7 +2,6 @@ syntax = "proto3"; package envoy.extensions.common.tap.v3; -import "envoy/config/core/v3/config_source.proto"; import "envoy/config/tap/v3/common.proto"; import "udpa/core/v1/resource_locator.proto"; @@ -24,23 +23,6 @@ message CommonExtensionConfig { option (udpa.annotations.versioning).previous_message_type = "envoy.config.common.tap.v2alpha.CommonExtensionConfig"; - // [#not-implemented-hide:] - message TapDSConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.common.tap.v2alpha.CommonExtensionConfig.TapDSConfig"; - - // Configuration for the source of TapDS updates for this Cluster. - config.core.v3.ConfigSource config_source = 1 [(validate.rules).message = {required: true}]; - - // Tap config to request from XDS server. - string name = 2 [(udpa.annotations.field_migrate).oneof_promotion = "name_specifier"]; - - // Resource locator for TAP. This is mutually exclusive to *name*. - // [#not-implemented-hide:] - udpa.core.v1.ResourceLocator tap_resource_locator = 3 - [(udpa.annotations.field_migrate).oneof_promotion = "name_specifier"]; - } - oneof config_type { option (validate.required) = true; @@ -50,9 +32,6 @@ message CommonExtensionConfig { // If specified, the tap filter will be configured via a static configuration that cannot be // changed. config.tap.v3.TapConfig static_config = 2; - - // [#not-implemented-hide:] Configuration to use for TapDS updates for the filter. 
- TapDSConfig tapds_config = 3; } } diff --git a/generated_api_shadow/envoy/extensions/common/tap/v4alpha/BUILD b/generated_api_shadow/envoy/extensions/common/tap/v4alpha/BUILD index 22f844ecba26..37f19ce2acd1 100644 --- a/generated_api_shadow/envoy/extensions/common/tap/v4alpha/BUILD +++ b/generated_api_shadow/envoy/extensions/common/tap/v4alpha/BUILD @@ -6,7 +6,6 @@ licenses(["notice"]) # Apache 2 api_proto_package( deps = [ - "//envoy/config/core/v4alpha:pkg", "//envoy/config/tap/v4alpha:pkg", "//envoy/extensions/common/tap/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", diff --git a/generated_api_shadow/envoy/extensions/common/tap/v4alpha/common.proto b/generated_api_shadow/envoy/extensions/common/tap/v4alpha/common.proto index efa7744e357f..b40101232ba2 100644 --- a/generated_api_shadow/envoy/extensions/common/tap/v4alpha/common.proto +++ b/generated_api_shadow/envoy/extensions/common/tap/v4alpha/common.proto @@ -2,7 +2,6 @@ syntax = "proto3"; package envoy.extensions.common.tap.v4alpha; -import "envoy/config/core/v4alpha/config_source.proto"; import "envoy/config/tap/v4alpha/common.proto"; import "udpa/core/v1/resource_locator.proto"; @@ -23,25 +22,6 @@ message CommonExtensionConfig { option (udpa.annotations.versioning).previous_message_type = "envoy.extensions.common.tap.v3.CommonExtensionConfig"; - // [#not-implemented-hide:] - message TapDSConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.common.tap.v3.CommonExtensionConfig.TapDSConfig"; - - // Configuration for the source of TapDS updates for this Cluster. - config.core.v4alpha.ConfigSource config_source = 1 - [(validate.rules).message = {required: true}]; - - oneof name_specifier { - // Tap config to request from XDS server. - string name = 2; - - // Resource locator for TAP. This is mutually exclusive to *name*. - // [#not-implemented-hide:] - udpa.core.v1.ResourceLocator tap_resource_locator = 3; - } - } - oneof config_type { option (validate.required) = true; @@ -51,9 +31,6 @@ message CommonExtensionConfig { // If specified, the tap filter will be configured via a static configuration that cannot be // changed. config.tap.v4alpha.TapConfig static_config = 2; - - // [#not-implemented-hide:] Configuration to use for TapDS updates for the filter. - TapDSConfig tapds_config = 3; } } diff --git a/generated_api_shadow/envoy/extensions/watchdog/abort_action/v3alpha/abort_action.proto b/generated_api_shadow/envoy/extensions/watchdog/abort_action/v3alpha/abort_action.proto deleted file mode 100644 index 7d793be82012..000000000000 --- a/generated_api_shadow/envoy/extensions/watchdog/abort_action/v3alpha/abort_action.proto +++ /dev/null @@ -1,32 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.watchdog.abort_action.v3alpha; - -import "google/protobuf/duration.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.watchdog.abort_action.v3alpha"; -option java_outer_classname = "AbortActionProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).work_in_progress = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Watchdog Action that sends a SIGABRT to kill the process.] -// [#extension: envoy.watchdog.abort_action] - -// A GuardDogAction that will terminate the process by sending SIGABRT to the -// stuck thread. 
This would allow easier access to the call stack of the stuck -// thread since we would run signal handlers on that thread. This would be -// more useful than the default watchdog kill behaviors since those PANIC -// from the watchdog's thread. - -// This is currently only implemented for systems that support kill to send -// signals. -message AbortActionConfig { - // How long to wait for the thread to respond to the SIGABRT before killing the - // process from this action. This is a blocking action. - google.protobuf.Duration wait_duration = 1; -} diff --git a/generated_api_shadow/envoy/extensions/watchdog/abort_action/v3alpha/BUILD b/generated_api_shadow/envoy/watchdog/v3alpha/BUILD similarity index 100% rename from generated_api_shadow/envoy/extensions/watchdog/abort_action/v3alpha/BUILD rename to generated_api_shadow/envoy/watchdog/v3alpha/BUILD diff --git a/generated_api_shadow/envoy/watchdog/v3alpha/abort_action.proto b/generated_api_shadow/envoy/watchdog/v3alpha/abort_action.proto new file mode 100644 index 000000000000..3f47fddaa77e --- /dev/null +++ b/generated_api_shadow/envoy/watchdog/v3alpha/abort_action.proto @@ -0,0 +1,29 @@ +syntax = "proto3"; + +package envoy.watchdog.v3alpha; + +import "google/protobuf/duration.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.watchdog.v3alpha"; +option java_outer_classname = "AbortActionProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).work_in_progress = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Watchdog Action that kills a stuck thread to kill the process.] + +// A GuardDogAction that will terminate the process by killing the +// stuck thread. This would allow easier access to the call stack of the stuck +// thread since we would run signal handlers on that thread. By default +// this will be registered to run as the last watchdog action on KILL and +// MULTIKILL events if those are enabled. +message AbortActionConfig { + // How long to wait for the thread to respond to the thread kill function + // before killing the process from this action. This is a blocking action. + // By default this is 5 seconds. + google.protobuf.Duration wait_duration = 1; +} diff --git a/include/envoy/network/transport_socket.h b/include/envoy/network/transport_socket.h index db500f86a8a6..fe054ce2f16d 100644 --- a/include/envoy/network/transport_socket.h +++ b/include/envoy/network/transport_socket.h @@ -226,13 +226,6 @@ class TransportSocketFactory { */ virtual TransportSocketPtr createTransportSocket(TransportSocketOptionsSharedPtr options) const PURE; - - /** - * @param a callback to be invoked when the secrets required by the created transport - * sockets are ready. Will be invoked immediately if no secrets are required or if they - * are already loaded. 
- */ - virtual void addReadyCb(std::function<void()> callback) PURE; }; using TransportSocketFactoryPtr = std::unique_ptr<TransportSocketFactory>;
diff --git a/include/envoy/server/watchdog.h b/include/envoy/server/watchdog.h index d230ab48f6fb..cd76f552e244 100644 --- a/include/envoy/server/watchdog.h +++ b/include/envoy/server/watchdog.h @@ -37,7 +37,6 @@ class WatchDog { */ virtual void touch() PURE; virtual Thread::ThreadId threadId() const PURE; - virtual MonotonicTime lastTouchTime() const PURE; }; using WatchDogSharedPtr = std::shared_ptr<WatchDog>;
diff --git a/include/envoy/upstream/upstream.h b/include/envoy/upstream/upstream.h index 2c51a7c261c4..127df14c923a 100644 --- a/include/envoy/upstream/upstream.h +++ b/include/envoy/upstream/upstream.h @@ -112,15 +112,6 @@ class Host : virtual public HostDescription { Network::TransportSocketOptionsSharedPtr transport_socket_options, const envoy::config::core::v3::Metadata* metadata) const PURE; - /** - * Register a callback to be invoked when secrets are ready for the transport socket that - * corresponds to the provided metadata. - * @param callback supplies the callback to be invoked. - * @param metadata supplies the metadata to be used for resolving transport socket matches. - */ - virtual void addReadyCb(std::function<void()> callback, - const envoy::config::core::v3::Metadata* metadata) const PURE; - /** * @return host specific gauges. */
diff --git a/repokitteh.star b/repokitteh.star index 93d86cbd3fb8..bf5919628aff 100644 --- a/repokitteh.star +++ b/repokitteh.star @@ -3,7 +3,6 @@ pin("github.com/repokitteh/modules", "4ee2ed0c3622aad7fcddc04cb5dc866e44a541e6") use("github.com/repokitteh/modules/assign.star") use("github.com/repokitteh/modules/review.star") use("github.com/repokitteh/modules/wait.star") -use("github.com/repokitteh/modules/circleci.star", secret_token=get_secret('circle_token')) use("github.com/envoyproxy/envoy/ci/repokitteh/modules/azure_pipelines.star", secret_token=get_secret('azp_token')) use("github.com/envoyproxy/envoy/ci/repokitteh/modules/newcontributor.star") use( @@ -37,7 +36,6 @@ use( ], ) -alias('retest-circle', 'retry-circle') alias('retest', 'retry-azp') def _backport():
diff --git a/source/common/network/apple_dns_impl.cc b/source/common/network/apple_dns_impl.cc index 05e430076056..543cf4def9c5 100644 --- a/source/common/network/apple_dns_impl.cc +++ b/source/common/network/apple_dns_impl.cc @@ -55,7 +55,7 @@ void AppleDnsResolverImpl::initializeMainSdRef() { // for kDNSServiceFlagsShareConnection in dns_sd.h, and copied (and edited) in this implementation // where relevant. auto error = DNSServiceCreateConnection(&main_sd_ref_); - RELEASE_ASSERT(!error, "error in DNSServiceCreateConnection"); + RELEASE_ASSERT(!error, fmt::format("error ({}) in DNSServiceCreateConnection", error)); auto fd = DNSServiceRefSockFD(main_sd_ref_); RELEASE_ASSERT(fd != -1, "error in DNSServiceRefSockFD"); @@ -71,9 +71,16 @@ void AppleDnsResolverImpl::initializeMainSdRef() { } void AppleDnsResolverImpl::onEventCallback(uint32_t events) { - ENVOY_LOG(debug, "DNS resolver file event"); + ENVOY_LOG(debug, "DNS resolver file event ({})", events); ASSERT(events & Event::FileReadyType::Read); - DNSServiceProcessResult(main_sd_ref_); + DNSServiceErrorType error = DNSServiceProcessResult(main_sd_ref_); + if (error != kDNSServiceErr_NoError) { + ENVOY_LOG(warn, "DNS resolver error ({}) in DNSServiceProcessResult", error); + // Similar to receiving an error in onDNSServiceGetAddrInfoReply, an error while processing fd + // events indicates that the sd_ref state is broken.
+ // Therefore, flush pending queries with with_error == true. + flushPendingQueries(true /* with_error */); + } } ActiveDnsQuery* AppleDnsResolverImpl::resolve(const std::string& dns_name, @@ -85,7 +92,7 @@ ActiveDnsQuery* AppleDnsResolverImpl::resolve(const std::string& dns_name, DNSServiceErrorType error = pending_resolution->dnsServiceGetAddrInfo(dns_lookup_family); if (error != kDNSServiceErr_NoError) { - ENVOY_LOG(warn, "DNS resolver error in dnsServiceGetAddrInfo for {}", dns_name); + ENVOY_LOG(warn, "DNS resolver error ({}) in dnsServiceGetAddrInfo for {}", error, dns_name); return nullptr; } @@ -150,7 +157,14 @@ void AppleDnsResolverImpl::flushPendingQueries(const bool with_error) { AppleDnsResolverImpl::PendingResolution::~PendingResolution() { ENVOY_LOG(debug, "Destroying PendingResolution for {}", dns_name_); - DNSServiceRefDeallocate(individual_sd_ref_); + // It is possible that DNSServiceGetAddrInfo returns a synchronous error, with a NULLed + // DNSServiceRef, in AppleDnsResolverImpl::resolve. + // Additionally, it is also possible that the query is cancelled before resolution starts, and + // thus the DNSServiceRef is null. + // Therefore, only deallocate if the ref is not null. + if (individual_sd_ref_) { + DNSServiceRefDeallocate(individual_sd_ref_); + } } void AppleDnsResolverImpl::PendingResolution::cancel() {
diff --git a/source/common/network/raw_buffer_socket.h b/source/common/network/raw_buffer_socket.h index 8f17279890aa..fe87bbeda605 100644 --- a/source/common/network/raw_buffer_socket.h +++ b/source/common/network/raw_buffer_socket.h @@ -32,7 +32,6 @@ class RawBufferSocketFactory : public TransportSocketFactory { // Network::TransportSocketFactory TransportSocketPtr createTransportSocket(TransportSocketOptionsSharedPtr options) const override; bool implementsSecureTransport() const override; - void addReadyCb(std::function<void()> callback) override { callback(); } }; } // namespace Network
diff --git a/source/common/thread/BUILD b/source/common/thread/BUILD new file mode 100644 index 000000000000..6f79301e8676 --- /dev/null +++ b/source/common/thread/BUILD @@ -0,0 +1,19 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_library", + "envoy_package", +) + +licenses(["notice"]) # Apache 2 + +envoy_package() + +envoy_cc_library( + name = "terminate_thread_lib", + srcs = ["terminate_thread.cc"], + hdrs = ["terminate_thread.h"], + deps = [ + "//include/envoy/thread:thread_interface", + "//source/common/common:minimal_logger_lib", + ], +)
diff --git a/source/common/thread/terminate_thread.cc b/source/common/thread/terminate_thread.cc new file mode 100644 index 000000000000..435d704e6d24 --- /dev/null +++ b/source/common/thread/terminate_thread.cc @@ -0,0 +1,31 @@ +#include "common/thread/terminate_thread.h" + +#include <sys/types.h> + +#include <csignal> + +#include "common/common/logger.h" + +namespace Envoy { +namespace Thread { +namespace { +#ifdef __linux__ +pid_t toPlatformTid(int64_t tid) { return static_cast<pid_t>(tid); } +#elif defined(__APPLE__) +uint64_t toPlatformTid(int64_t tid) { return static_cast<uint64_t>(tid); } +#endif +} // namespace + +bool terminateThread(const ThreadId& tid) { +#ifndef WIN32 + // Assume a POSIX-compatible system and signal the thread. + return kill(toPlatformTid(tid.getId()), SIGABRT) == 0; +#else + // Windows: terminating a thread is currently unsupported.
+ ENVOY_LOG_MISC(error, "Windows is currently unsupported for terminateThread."); + return false; +#endif +} + +} // namespace Thread +} // namespace Envoy
diff --git a/source/common/thread/terminate_thread.h b/source/common/thread/terminate_thread.h new file mode 100644 index 000000000000..a9a20b1903cf --- /dev/null +++ b/source/common/thread/terminate_thread.h @@ -0,0 +1,19 @@ +#pragma once + +#include "envoy/thread/thread.h" + +namespace Envoy { +namespace Thread { +/** + * Tries to terminate the process by killing the thread specified by + * the ThreadId. The implementation is platform dependent and currently + * only works on platforms that support SIGABRT. + * + * Returns true if the platform specific function to terminate the thread + * succeeded (i.e. kill() == 0). If the platform is currently unsupported, this + * will return false. + */ +bool terminateThread(const ThreadId& tid); + +} // namespace Thread +} // namespace Envoy
diff --git a/source/common/upstream/health_checker_base_impl.cc b/source/common/upstream/health_checker_base_impl.cc index 29e2aa6493c4..f6357559eec8 100644 --- a/source/common/upstream/health_checker_base_impl.cc +++ b/source/common/upstream/health_checker_base_impl.cc @@ -384,14 +384,6 @@ void HealthCheckerImplBase::ActiveHealthCheckSession::onTimeoutBase() { handleFailure(envoy::data::core::v3::NETWORK); } -void HealthCheckerImplBase::ActiveHealthCheckSession::start() { - // Start health checks only after secrets are ready for the transport socket - // that health checks will be performed on. If health checks start - // immediately, they may fail with "network" errors due to TLS credentials - // not yet being loaded, which can result in long startup times. - host_->addReadyCb([this] { onInitialInterval(); }, parent_.transportSocketMatchMetadata().get()); -} - void HealthCheckerImplBase::ActiveHealthCheckSession::onInitialInterval() { if (parent_.initial_jitter_.count() == 0) { onIntervalBase();
diff --git a/source/common/upstream/health_checker_base_impl.h b/source/common/upstream/health_checker_base_impl.h index 9620713cf399..c1e4bb7affff 100644 --- a/source/common/upstream/health_checker_base_impl.h +++ b/source/common/upstream/health_checker_base_impl.h @@ -77,7 +77,7 @@ class HealthCheckerImplBase : public HealthChecker, ~ActiveHealthCheckSession() override; HealthTransition setUnhealthy(envoy::data::core::v3::HealthCheckFailureType type); void onDeferredDeleteBase(); - void start(); + void start() { onInitialInterval(); } protected: ActiveHealthCheckSession(HealthCheckerImplBase& parent, HostSharedPtr host);
diff --git a/source/common/upstream/upstream_impl.cc b/source/common/upstream/upstream_impl.cc index 48d426f65250..8e6a6db3c507 100644 --- a/source/common/upstream/upstream_impl.cc +++ b/source/common/upstream/upstream_impl.cc @@ -356,14 +356,6 @@ HostImpl::createConnection(Event::Dispatcher& dispatcher, const ClusterInfo& clu return connection; } -void HostImpl::addReadyCb(std::function<void()> callback, - const envoy::config::core::v3::Metadata* metadata) const { - Network::TransportSocketFactory& factory = - (metadata != nullptr) ?
cluster_->transportSocketMatcher().resolve(metadata).factory_ : socket_factory_; - factory.addReadyCb(callback); -} - void HostImpl::weight(uint32_t new_weight) { weight_ = std::max(1U, new_weight); } std::vector<HostsPerLocalityConstSharedPtr> HostsPerLocalityImpl::filter(
diff --git a/source/common/upstream/upstream_impl.h b/source/common/upstream/upstream_impl.h index ed8ef917b047..c74e489384f0 100644 --- a/source/common/upstream/upstream_impl.h +++ b/source/common/upstream/upstream_impl.h @@ -189,8 +189,6 @@ class HostImpl : public HostDescriptionImpl, createHealthCheckConnection(Event::Dispatcher& dispatcher, Network::TransportSocketOptionsSharedPtr transport_socket_options, const envoy::config::core::v3::Metadata* metadata) const override; - void addReadyCb(std::function<void()> callback, - const envoy::config::core::v3::Metadata* metadata) const override; std::vector> gauges() const override {
diff --git a/source/extensions/watchdog/abort_action/BUILD b/source/common/watchdog/BUILD similarity index 63% rename from source/extensions/watchdog/abort_action/BUILD rename to source/common/watchdog/BUILD index 67305bc86e33..db6b6162ba75 100644 --- a/source/extensions/watchdog/abort_action/BUILD +++ b/source/common/watchdog/BUILD @@ -1,13 +1,12 @@ load( "//bazel:envoy_build_system.bzl", - "envoy_cc_extension", "envoy_cc_library", - "envoy_extension_package", + "envoy_package", ) licenses(["notice"]) # Apache 2 -envoy_extension_package() +envoy_package() envoy_cc_library( name = "abort_action_lib", @@ -19,22 +18,21 @@ envoy_cc_library( "//include/envoy/thread:thread_interface", "//source/common/common:assert_lib", "//source/common/protobuf:utility_lib", - "@envoy_api//envoy/extensions/watchdog/abort_action/v3alpha:pkg_cc_proto", + "//source/common/thread:terminate_thread_lib", + "@envoy_api//envoy/watchdog/v3alpha:pkg_cc_proto", ], ) -envoy_cc_extension( - name = "config", - srcs = ["config.cc"], - hdrs = ["config.h"], - security_posture = "data_plane_agnostic", - status = "alpha", +envoy_cc_library( + name = "abort_action_config", + srcs = ["abort_action_config.cc"], + hdrs = ["abort_action_config.h"], deps = [ ":abort_action_lib", "//include/envoy/registry", "//source/common/config:utility_lib", "//source/common/protobuf", "//source/common/protobuf:message_validator_lib", - "@envoy_api//envoy/extensions/watchdog/abort_action/v3alpha:pkg_cc_proto", + "@envoy_api//envoy/watchdog/v3alpha:pkg_cc_proto", ], )
diff --git a/source/common/watchdog/README.md b/source/common/watchdog/README.md new file mode 100644 index 000000000000..c8433b9c05b5 --- /dev/null +++ b/source/common/watchdog/README.md @@ -0,0 +1,2 @@ +This contains watchdog actions that are part of core Envoy, and therefore cannot +be in the extensions directory.
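To make the mechanism behind `terminateThread` above concrete, here is a hedged, standalone POSIX sketch of the same idea (illustrative only, not Envoy code): deliver SIGABRT to a thread id, then give the signal handlers a grace period before the caller escalates, which is what the abort action below does with `wait_duration_`.

```cpp
#include <signal.h>     // kill(), SIGABRT
#include <sys/types.h>  // pid_t
#include <unistd.h>     // sleep()

// Returns true if the signal was delivered; the signaled thread is then
// expected to crash the process from its signal handler.
bool signalThreadToAbort(pid_t tid, unsigned grace_seconds) {
  // On Linux, thread ids share the pid number space, which is why a tid can be
  // handed to kill() here, as toPlatformTid() does above.
  if (kill(tid, SIGABRT) != 0) {
    return false;
  }
  sleep(grace_seconds); // Grace period, mirroring wait_duration_.
  return true;
}
```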
diff --git a/source/common/watchdog/abort_action.cc b/source/common/watchdog/abort_action.cc new file mode 100644 index 000000000000..d629b0bce70e --- /dev/null +++ b/source/common/watchdog/abort_action.cc @@ -0,0 +1,54 @@ +#include "common/watchdog/abort_action.h" + +#include "envoy/thread/thread.h" + +#include "common/common/assert.h" +#include "common/common/fmt.h" +#include "common/common/logger.h" +#include "common/protobuf/utility.h" +#include "common/thread/terminate_thread.h" + +namespace Envoy { +namespace Watchdog { +namespace { +constexpr uint64_t DefaultWaitDurationMs = 5000; +} // end namespace + +AbortAction::AbortAction(envoy::watchdog::v3alpha::AbortActionConfig& config, + Server::Configuration::GuardDogActionFactoryContext& /*context*/) + : wait_duration_(absl::Milliseconds( + PROTOBUF_GET_MS_OR_DEFAULT(config, wait_duration, DefaultWaitDurationMs))) {} + +void AbortAction::run( + envoy::config::bootstrap::v3::Watchdog::WatchdogAction::WatchdogEvent /*event*/, + const std::vector<std::pair<Thread::ThreadId, MonotonicTime>>& thread_last_checkin_pairs, + MonotonicTime /*now*/) { + + if (thread_last_checkin_pairs.empty()) { + ENVOY_LOG_MISC(warn, "Watchdog AbortAction called without any thread."); + return; + } + + // The following lines of code won't be considered covered by code coverage + // tools since they would run in DEATH tests. + const auto& thread_id = thread_last_checkin_pairs[0].first; + const std::string tid_string = thread_id.debugString(); + ENVOY_LOG_MISC(error, "Watchdog AbortAction terminating thread with tid {}.", tid_string); + + if (Thread::terminateThread(thread_id)) { + // Successfully signaled the thread to terminate; sleep for wait_duration. + absl::SleepFor(wait_duration_); + } else { + ENVOY_LOG_MISC(error, "Failed to terminate tid {}", tid_string); + } + + // Abort from the action since the signaled thread hasn't yet crashed the process. + // Panicking in the action gives flexibility since it doesn't depend on + // external code to kill the process if the signal fails. + PANIC(fmt::format( + "Failed to terminate thread with id {}, aborting from Watchdog AbortAction instead.", + tid_string)); +} + +} // namespace Watchdog +} // namespace Envoy
diff --git a/source/extensions/watchdog/abort_action/abort_action.h b/source/common/watchdog/abort_action.h similarity index 53% rename from source/extensions/watchdog/abort_action/abort_action.h rename to source/common/watchdog/abort_action.h index 90b64393080b..5170c8bbea00 100644 --- a/source/extensions/watchdog/abort_action/abort_action.h +++ b/source/common/watchdog/abort_action.h @@ -1,24 +1,18 @@ #pragma once -#include <memory> - -#include "envoy/extensions/watchdog/abort_action/v3alpha/abort_action.pb.h" #include "envoy/server/guarddog_config.h" #include "envoy/thread/thread.h" +#include "envoy/watchdog/v3alpha/abort_action.pb.h" namespace Envoy { -namespace Extensions { namespace Watchdog { -namespace AbortAction { - /** - * A GuardDogAction that will terminate the process by sending SIGABRT to the - * stuck thread. This is currently only implemented for systems that - * support kill to send signals. + * A GuardDogAction that will terminate the process by killing the + * stuck thread.
*/ class AbortAction : public Server::Configuration::GuardDogAction { public: - AbortAction(envoy::extensions::watchdog::abort_action::v3alpha::AbortActionConfig& config, + AbortAction(envoy::watchdog::v3alpha::AbortActionConfig& config, Server::Configuration::GuardDogActionFactoryContext& context); void run(envoy::config::bootstrap::v3::Watchdog::WatchdogAction::WatchdogEvent event, @@ -26,12 +20,10 @@ class AbortAction : public Server::Configuration::GuardDogAction { MonotonicTime now) override; private: - const envoy::extensions::watchdog::abort_action::v3alpha::AbortActionConfig config_; + const absl::Duration wait_duration_; }; using AbortActionPtr = std::unique_ptr<AbortAction>; -} // namespace AbortAction } // namespace Watchdog -} // namespace Extensions } // namespace Envoy
diff --git a/source/extensions/watchdog/abort_action/config.cc b/source/common/watchdog/abort_action_config.cc similarity index 59% rename from source/extensions/watchdog/abort_action/config.cc rename to source/common/watchdog/abort_action_config.cc index f59c62c94cf4..916864386ace 100644 --- a/source/extensions/watchdog/abort_action/config.cc +++ b/source/common/watchdog/abort_action_config.cc @@ -1,32 +1,27 @@ -#include "extensions/watchdog/abort_action/config.h" +#include "common/watchdog/abort_action_config.h" #include "envoy/registry/registry.h" #include "common/config/utility.h" #include "common/protobuf/message_validator_impl.h" - -#include "extensions/watchdog/abort_action/abort_action.h" +#include "common/watchdog/abort_action.h" namespace Envoy { -namespace Extensions { namespace Watchdog { -namespace AbortAction { Server::Configuration::GuardDogActionPtr AbortActionFactory::createGuardDogActionFromProto( const envoy::config::bootstrap::v3::Watchdog::WatchdogAction& config, Server::Configuration::GuardDogActionFactoryContext& context) { - auto message = createEmptyConfigProto(); + AbortActionConfig message; Config::Utility::translateOpaqueConfig(config.config().typed_config(), ProtobufWkt::Struct(), - ProtobufMessage::getStrictValidationVisitor(), *message); - return std::make_unique<AbortAction>(dynamic_cast<AbortActionConfig&>(*message), context); + ProtobufMessage::getStrictValidationVisitor(), message); + return std::make_unique<AbortAction>(message, context); } /** - * Static registration for the fixed heap resource monitor factory. @see RegistryFactory. + * Static registration for the Abort Action factory. @see RegisterFactory.
*/ REGISTER_FACTORY(AbortActionFactory, Server::Configuration::GuardDogActionFactory); -} // namespace AbortAction } // namespace Watchdog -} // namespace Extensions } // namespace Envoy diff --git a/source/extensions/watchdog/abort_action/config.h b/source/common/watchdog/abort_action_config.h similarity index 72% rename from source/extensions/watchdog/abort_action/config.h rename to source/common/watchdog/abort_action_config.h index d9f11c562b71..27f65ea16b60 100644 --- a/source/extensions/watchdog/abort_action/config.h +++ b/source/common/watchdog/abort_action_config.h @@ -1,14 +1,12 @@ #pragma once -#include "envoy/extensions/watchdog/abort_action/v3alpha/abort_action.pb.h" #include "envoy/server/guarddog_config.h" +#include "envoy/watchdog/v3alpha/abort_action.pb.h" #include "common/protobuf/protobuf.h" namespace Envoy { -namespace Extensions { namespace Watchdog { -namespace AbortAction { class AbortActionFactory : public Server::Configuration::GuardDogActionFactory { public: @@ -24,10 +22,8 @@ class AbortActionFactory : public Server::Configuration::GuardDogActionFactory { std::string name() const override { return "envoy.watchdog.abort_action"; } - using AbortActionConfig = envoy::extensions::watchdog::abort_action::v3alpha::AbortActionConfig; + using AbortActionConfig = envoy::watchdog::v3alpha::AbortActionConfig; }; -} // namespace AbortAction } // namespace Watchdog -} // namespace Extensions } // namespace Envoy diff --git a/source/docs/repokitteh.md b/source/docs/repokitteh.md index 1d2b747bdacb..0d07ba9ddc26 100644 --- a/source/docs/repokitteh.md +++ b/source/docs/repokitteh.md @@ -75,17 +75,6 @@ Sets the label `waiting:any` on a PR. When a new commit is pushed or any comment [Demo PR](https://github.com/envoyproxy/envoybot/pull/15) -### [CircleCI Retest](https://github.com/repokitteh/modules/blob/master/circleci.star) -Restart failed CircleCI tests. - -Example: -``` -/retest-circle -``` -Restarts all failed CircleCI tests, as reported in the commit statuses. - -[Demo PR](https://github.com/envoyproxy/envoy/pull/12613#issuecomment-676141200) - ### [Azure Pipelines Retest](https://github.com/envoyproxy/envoy/blob/master/ci/repokitteh/modules/azure_pipelines.star) Restart failed Azure pipelines. 
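The config move above keeps Envoy's usual static-registration idiom: `REGISTER_FACTORY` plants a static object whose constructor adds the factory to a name-keyed registry before `main()` runs. A rough, self-contained sketch of that idiom with hypothetical names (the real `Registry`/`GuardDogActionFactory` APIs differ in detail):

```cpp
#include <functional>
#include <map>
#include <memory>
#include <string>

struct GuardDogActionSketch {
  virtual ~GuardDogActionSketch() = default;
};

using FactoryFn =
    std::function<std::unique_ptr<GuardDogActionSketch>(const std::string& typed_config)>;

// Name-to-factory map; Envoy's Registry::FactoryRegistry plays this role.
std::map<std::string, FactoryFn>& actionRegistry() {
  static std::map<std::string, FactoryFn> registry;
  return registry;
}

// REGISTER_FACTORY expands to a static registration object along these lines,
// keyed by the factory's name() ("envoy.watchdog.abort_action" above).
struct RegisterAbortAction {
  RegisterAbortAction() {
    actionRegistry()["envoy.watchdog.abort_action"] = [](const std::string&) {
      // The real factory first parses the opaque typed_config into an
      // AbortActionConfig via translateOpaqueConfig, then constructs the action.
      return std::make_unique<GuardDogActionSketch>();
    };
  }
};
static RegisterAbortAction register_abort_action;
```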
diff --git a/source/extensions/common/tap/extension_config_base.cc b/source/extensions/common/tap/extension_config_base.cc index 6578c02fc37b..46dbcbf469bf 100644 --- a/source/extensions/common/tap/extension_config_base.cc +++ b/source/extensions/common/tap/extension_config_base.cc @@ -40,9 +40,6 @@ ExtensionConfigBase::ExtensionConfigBase( ENVOY_LOG(debug, "initializing tap extension with static config"); break; } - case envoy::extensions::common::tap::v3::CommonExtensionConfig::ConfigTypeCase::kTapdsConfig: { - NOT_IMPLEMENTED_GCOVR_EXCL_LINE; - } default: { NOT_REACHED_GCOVR_EXCL_LINE; } diff --git a/source/extensions/extensions_build_config.bzl b/source/extensions/extensions_build_config.bzl index ddc3dc9a0d50..e3ec724d9339 100644 --- a/source/extensions/extensions_build_config.bzl +++ b/source/extensions/extensions_build_config.bzl @@ -214,7 +214,6 @@ EXTENSIONS = { # Watchdog actions # "envoy.watchdog.profile_action": "//source/extensions/watchdog/profile_action:config", - "envoy.watchdog.abort_action": "//source/extensions/watchdog/abort_action:config", } diff --git a/source/extensions/filters/common/ext_authz/ext_authz_http_impl.h b/source/extensions/filters/common/ext_authz/ext_authz_http_impl.h index 96b6d1d3173a..218a01f6c179 100644 --- a/source/extensions/filters/common/ext_authz/ext_authz_http_impl.h +++ b/source/extensions/filters/common/ext_authz/ext_authz_http_impl.h @@ -119,11 +119,10 @@ class ClientConfig { private: static MatcherSharedPtr - toRequestMatchers(const envoy::type::matcher::v3::ListStringMatcher& matcher); + toRequestMatchers(const envoy::type::matcher::v3::ListStringMatcher& list); + static MatcherSharedPtr toClientMatchers(const envoy::type::matcher::v3::ListStringMatcher& list); static MatcherSharedPtr - toClientMatchers(const envoy::type::matcher::v3::ListStringMatcher& matcher); - static MatcherSharedPtr - toUpstreamMatchers(const envoy::type::matcher::v3::ListStringMatcher& matcher); + toUpstreamMatchers(const envoy::type::matcher::v3::ListStringMatcher& list); const MatcherSharedPtr request_header_matchers_; const MatcherSharedPtr client_header_matchers_; diff --git a/source/extensions/filters/http/admission_control/config.cc b/source/extensions/filters/http/admission_control/config.cc index 360cc67fe8c8..01aef0125bbf 100644 --- a/source/extensions/filters/http/admission_control/config.cc +++ b/source/extensions/filters/http/admission_control/config.cc @@ -1,5 +1,6 @@ #include "extensions/filters/http/admission_control/config.h" +#include "envoy/common/exception.h" #include "envoy/extensions/filters/http/admission_control/v3alpha/admission_control.pb.h" #include "envoy/extensions/filters/http/admission_control/v3alpha/admission_control.pb.validate.h" #include "envoy/registry/registry.h" @@ -21,6 +22,10 @@ Http::FilterFactoryCb AdmissionControlFilterFactory::createFilterFactoryFromProt const envoy::extensions::filters::http::admission_control::v3alpha::AdmissionControl& config, const std::string& stats_prefix, Server::Configuration::FactoryContext& context) { + if (config.has_sr_threshold() && config.sr_threshold().default_value().value() == 0) { + throw EnvoyException("Success Rate Threshold cannot be zero percent"); + } + const std::string prefix = stats_prefix + "admission_control."; // Create the thread-local controller. 
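The new `sr_threshold` check above is a load-time guard: rejecting a zero success-rate threshold in the factory surfaces the misconfiguration at startup rather than as odd runtime behavior (the filter's rejection-probability math presumes a non-zero threshold). A minimal sketch of the same guard, with assumed field names standing in for the proto accessors:

```cpp
#include <optional>
#include <stdexcept>

struct AdmissionControlSketch {
  // Stand-in for config.sr_threshold().default_value().value().
  std::optional<double> sr_threshold_percent;
};

void validateConfig(const AdmissionControlSketch& config) {
  if (config.sr_threshold_percent.has_value() && *config.sr_threshold_percent == 0.0) {
    // Mirrors the EnvoyException thrown by the factory above.
    throw std::runtime_error("Success Rate Threshold cannot be zero percent");
  }
}
```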
diff --git a/source/extensions/filters/http/lua/wrappers.cc b/source/extensions/filters/http/lua/wrappers.cc index cb31e695f8be..daf9d864f246 100644 --- a/source/extensions/filters/http/lua/wrappers.cc +++ b/source/extensions/filters/http/lua/wrappers.cc @@ -129,6 +129,16 @@ int StreamInfoWrapper::luaDownstreamSslConnection(lua_State* state) { return 1; } +int StreamInfoWrapper::luaDownstreamLocalAddress(lua_State* state) { + lua_pushstring(state, stream_info_.downstreamLocalAddress()->asString().c_str()); + return 1; +} + +int StreamInfoWrapper::luaDownstreamDirectRemoteAddress(lua_State* state) { + lua_pushstring(state, stream_info_.downstreamDirectRemoteAddress()->asString().c_str()); + return 1; +} + DynamicMetadataMapIterator::DynamicMetadataMapIterator(DynamicMetadataMapWrapper& parent) : parent_{parent}, current_{parent_.streamInfo().dynamicMetadata().filter_metadata().begin()} {}
diff --git a/source/extensions/filters/http/lua/wrappers.h b/source/extensions/filters/http/lua/wrappers.h index 89f7cb6d2d24..bf19843dd8d1 100644 --- a/source/extensions/filters/http/lua/wrappers.h +++ b/source/extensions/filters/http/lua/wrappers.h @@ -184,6 +184,8 @@ class StreamInfoWrapper : public Filters::Common::Lua::BaseLuaObject queue_data_list; std::vector broker_data_list; for (auto& host_set : cluster->prioritySet().hostSetsPerPriority()) {
diff --git a/source/extensions/quic_listeners/quiche/quic_transport_socket_factory.h b/source/extensions/quic_listeners/quiche/quic_transport_socket_factory.h index 4fbe83286b38..2ada9e2de17b 100644 --- a/source/extensions/quic_listeners/quiche/quic_transport_socket_factory.h +++ b/source/extensions/quic_listeners/quiche/quic_transport_socket_factory.h @@ -24,9 +24,6 @@ class QuicTransportSocketFactoryBase : public Network::TransportSocketFactory { NOT_REACHED_GCOVR_EXCL_LINE; } bool implementsSecureTransport() const override { return true; } - - // TODO(mpuncel) only invoke callback() once secrets are ready. - void addReadyCb(std::function<void()> callback) override { callback(); }; }; // TODO(danzh): when implement ProofSource, examine of it's necessary to
-  void addReadyCb(std::function<void()> callback) override { callback(); };
-
 private:
  HandshakerFactory handshaker_factory_;
  HandshakeValidator handshake_validator_;
diff --git a/source/extensions/transport_sockets/proxy_protocol/proxy_protocol.h b/source/extensions/transport_sockets/proxy_protocol/proxy_protocol.h
index bcddef7bf547..4a191ebf539d 100644
--- a/source/extensions/transport_sockets/proxy_protocol/proxy_protocol.h
+++ b/source/extensions/transport_sockets/proxy_protocol/proxy_protocol.h
@@ -49,7 +49,6 @@ class UpstreamProxyProtocolSocketFactory : public Network::TransportSocketFactor
   Network::TransportSocketPtr
   createTransportSocket(Network::TransportSocketOptionsSharedPtr options) const override;
   bool implementsSecureTransport() const override;
-  void addReadyCb(std::function<void()> callback) override { callback(); };
 
 private:
   Network::TransportSocketFactoryPtr transport_socket_factory_;
diff --git a/source/extensions/transport_sockets/tap/tap.h b/source/extensions/transport_sockets/tap/tap.h
index d04712b2a50a..33156b705153 100644
--- a/source/extensions/transport_sockets/tap/tap.h
+++ b/source/extensions/transport_sockets/tap/tap.h
@@ -41,8 +41,6 @@ class TapSocketFactory : public Network::TransportSocketFactory,
   Network::TransportSocketPtr
   createTransportSocket(Network::TransportSocketOptionsSharedPtr options) const override;
   bool implementsSecureTransport() const override;
-  // TODO(mpuncel) only invoke callback() once secrets are ready.
-  void addReadyCb(std::function<void()> callback) override { callback(); };
 
 private:
   Network::TransportSocketFactoryPtr transport_socket_factory_;
diff --git a/source/extensions/transport_sockets/tap/tap_config_impl.cc b/source/extensions/transport_sockets/tap/tap_config_impl.cc
index 5dbf4d1f753a..93887c30758e 100644
--- a/source/extensions/transport_sockets/tap/tap_config_impl.cc
+++ b/source/extensions/transport_sockets/tap/tap_config_impl.cc
@@ -19,6 +19,9 @@ PerSocketTapperImpl::PerSocketTapperImpl(SocketTapConfigSharedPtr config,
       connection_(connection), statuses_(config_->createMatchStatusVector()) {
   config_->rootMatcher().onNewStream(statuses_);
   if (config_->streaming() && config_->rootMatcher().matchStatus(statuses_).matches_) {
+    // TODO(mattklein123): For IP client connections, local address will not be populated until
+    // connection. We should re-emit connection information after connection so the streaming
+    // trace gets the local address.
     TapCommon::TraceWrapperPtr trace = makeTraceSegment();
     fillConnectionInfo(*trace->mutable_socket_streamed_trace_segment()->mutable_connection());
     sink_handle_->submitTrace(std::move(trace));
@@ -26,8 +29,11 @@ PerSocketTapperImpl::PerSocketTapperImpl(SocketTapConfigSharedPtr config,
 }
 
 void PerSocketTapperImpl::fillConnectionInfo(envoy::data::tap::v3::Connection& connection) {
-  Network::Utility::addressToProtobufAddress(*connection_.localAddress(),
-                                             *connection.mutable_local_address());
+  if (connection_.localAddress() != nullptr) {
+    // Local address might not be populated before a client connection is connected.
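The null check being added here treats connection metadata that is only populated post-connect as optional during serialization. A rough sketch of the shape of that guard, with hypothetical Address/Connection/ConnectionProto types standing in for Envoy's network and tap proto types:

#include <iostream>
#include <memory>
#include <string>

// Hypothetical stand-ins for Envoy's address and tap proto types.
struct Address { std::string as_string; };
struct ConnectionProto { std::string local_address; std::string remote_address; };

struct Connection {
  std::shared_ptr<Address> local;  // may be null before connect() completes
  std::shared_ptr<Address> remote; // always known for an outbound connection
};

// Mirrors fillConnectionInfo(): only copy the local address when present,
// so tracing a not-yet-connected client socket cannot dereference null.
void fillConnectionInfo(const Connection& conn, ConnectionProto& proto) {
  if (conn.local != nullptr) {
    proto.local_address = conn.local->as_string;
  }
  proto.remote_address = conn.remote->as_string;
}

int main() {
  Connection conn{nullptr, std::make_shared<Address>(Address{"10.0.0.2:443"})};
  ConnectionProto proto;
  fillConnectionInfo(conn, proto); // safe despite the missing local address
  std::cout << proto.remote_address << "\n";
}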
+ Network::Utility::addressToProtobufAddress(*connection_.localAddress(), + *connection.mutable_local_address()); + } Network::Utility::addressToProtobufAddress(*connection_.remoteAddress(), *connection.mutable_remote_address()); } diff --git a/source/extensions/transport_sockets/tls/ssl_socket.cc b/source/extensions/transport_sockets/tls/ssl_socket.cc index 523242f8fada..485468443096 100644 --- a/source/extensions/transport_sockets/tls/ssl_socket.cc +++ b/source/extensions/transport_sockets/tls/ssl_socket.cc @@ -355,39 +355,13 @@ bool ClientSslSocketFactory::implementsSecureTransport() const { return true; } void ClientSslSocketFactory::onAddOrUpdateSecret() { ENVOY_LOG(debug, "Secret is updated."); - bool should_run_callbacks = false; { absl::WriterMutexLock l(&ssl_ctx_mu_); ssl_ctx_ = manager_.createSslClientContext(stats_scope_, *config_); - if (ssl_ctx_) { - should_run_callbacks = true; - } - } - if (should_run_callbacks) { - for (const auto& cb : secrets_ready_callbacks_) { - cb(); - } - secrets_ready_callbacks_.clear(); } stats_.ssl_context_update_by_sds_.inc(); } -void ClientSslSocketFactory::addReadyCb(std::function callback) { - bool immediately_run_callback = false; - { - absl::ReaderMutexLock l(&ssl_ctx_mu_); - if (ssl_ctx_) { - immediately_run_callback = true; - } - } - - if (immediately_run_callback) { - callback(); - } else { - secrets_ready_callbacks_.push_back(callback); - } -} - ServerSslSocketFactory::ServerSslSocketFactory(Envoy::Ssl::ServerContextConfigPtr config, Envoy::Ssl::ContextManager& manager, Stats::Scope& stats_scope, @@ -422,39 +396,13 @@ bool ServerSslSocketFactory::implementsSecureTransport() const { return true; } void ServerSslSocketFactory::onAddOrUpdateSecret() { ENVOY_LOG(debug, "Secret is updated."); - bool should_run_callbacks = false; { absl::WriterMutexLock l(&ssl_ctx_mu_); ssl_ctx_ = manager_.createSslServerContext(stats_scope_, *config_, server_names_); - - if (ssl_ctx_) { - should_run_callbacks = true; - } - } - if (should_run_callbacks) { - for (const auto& cb : secrets_ready_callbacks_) { - cb(); - } - secrets_ready_callbacks_.clear(); } stats_.ssl_context_update_by_sds_.inc(); } -void ServerSslSocketFactory::addReadyCb(std::function callback) { - bool immediately_run_callback = false; - { - absl::ReaderMutexLock l(&ssl_ctx_mu_); - if (ssl_ctx_) { - immediately_run_callback = true; - } - } - if (immediately_run_callback) { - callback(); - } else { - secrets_ready_callbacks_.push_back(callback); - } -} - } // namespace Tls } // namespace TransportSockets } // namespace Extensions diff --git a/source/extensions/transport_sockets/tls/ssl_socket.h b/source/extensions/transport_sockets/tls/ssl_socket.h index b0dcb139a319..c14cb502bed1 100644 --- a/source/extensions/transport_sockets/tls/ssl_socket.h +++ b/source/extensions/transport_sockets/tls/ssl_socket.h @@ -109,8 +109,6 @@ class ClientSslSocketFactory : public Network::TransportSocketFactory, createTransportSocket(Network::TransportSocketOptionsSharedPtr options) const override; bool implementsSecureTransport() const override; - void addReadyCb(std::function callback) override; - // Secret::SecretCallbacks void onAddOrUpdateSecret() override; @@ -121,7 +119,6 @@ class ClientSslSocketFactory : public Network::TransportSocketFactory, Envoy::Ssl::ClientContextConfigPtr config_; mutable absl::Mutex ssl_ctx_mu_; Envoy::Ssl::ClientContextSharedPtr ssl_ctx_ ABSL_GUARDED_BY(ssl_ctx_mu_); - std::list> secrets_ready_callbacks_; }; class ServerSslSocketFactory : public Network::TransportSocketFactory, @@ 
-136,8 +133,6 @@ class ServerSslSocketFactory : public Network::TransportSocketFactory,
   createTransportSocket(Network::TransportSocketOptionsSharedPtr options) const override;
   bool implementsSecureTransport() const override;
 
-  void addReadyCb(std::function<void()> callback) override;
-
   // Secret::SecretCallbacks
   void onAddOrUpdateSecret() override;
 
@@ -149,7 +144,6 @@ class ServerSslSocketFactory : public Network::TransportSocketFactory,
   const std::vector<std::string> server_names_;
   mutable absl::Mutex ssl_ctx_mu_;
   Envoy::Ssl::ServerContextSharedPtr ssl_ctx_ ABSL_GUARDED_BY(ssl_ctx_mu_);
-  std::list<std::function<void()>> secrets_ready_callbacks_;
 };
 
 } // namespace Tls
diff --git a/source/extensions/watchdog/abort_action/abort_action.cc b/source/extensions/watchdog/abort_action/abort_action.cc
deleted file mode 100644
index 3a4c3ade615e..000000000000
--- a/source/extensions/watchdog/abort_action/abort_action.cc
+++ /dev/null
@@ -1,67 +0,0 @@
-#include "extensions/watchdog/abort_action/abort_action.h"
-
-#include <sys/types.h>
-
-#include <csignal>
-
-#include "envoy/thread/thread.h"
-
-#include "common/common/assert.h"
-#include "common/common/fmt.h"
-#include "common/common/logger.h"
-#include "common/protobuf/utility.h"
-
-namespace Envoy {
-namespace Extensions {
-namespace Watchdog {
-namespace AbortAction {
-namespace {
-#ifdef __linux__
-pid_t toPlatformTid(int64_t tid) { return static_cast<pid_t>(tid); }
-#elif defined(__APPLE__)
-uint64_t toPlatformTid(int64_t tid) { return static_cast<uint64_t>(tid); }
-#endif
-} // namespace
-
-AbortAction::AbortAction(
-    envoy::extensions::watchdog::abort_action::v3alpha::AbortActionConfig& config,
-    Server::Configuration::GuardDogActionFactoryContext& /*context*/)
-    : config_(config){};
-
-void AbortAction::run(
-    envoy::config::bootstrap::v3::Watchdog::WatchdogAction::WatchdogEvent /*event*/,
-    const std::vector<std::pair<Thread::ThreadId, MonotonicTime>>& thread_last_checkin_pairs,
-    MonotonicTime /*now*/) {
-
-  if (thread_last_checkin_pairs.empty()) {
-    ENVOY_LOG_MISC(warn, "Watchdog AbortAction called without any thread.");
-    return;
-  }
-
-  // The following lines of code won't be considered covered by code coverage
-  // tools since they would run in DEATH tests.
-  int64_t raw_tid = thread_last_checkin_pairs[0].first.getId();
-
-  // Assume POSIX-compatible system and signal to the thread.
-  ENVOY_LOG_MISC(error, "Watchdog AbortAction sending abort signal to thread with tid {}.",
-                 raw_tid);
-
-  if (kill(toPlatformTid(raw_tid), SIGABRT) == 0) {
-    // Successfully sent signal, sleep for wait_duration.
-    absl::SleepFor(absl::Milliseconds(PROTOBUF_GET_MS_OR_DEFAULT(config_, wait_duration, 0)));
-  } else {
-    // Failed to send the signal, abort?
-    ENVOY_LOG_MISC(error, "Failed to send signal to tid {}", raw_tid);
-  }
-
-  // Abort from the action since the signaled thread hasn't yet crashed the process.
-  // panicing in the action gives flexibility since it doesn't depend on
-  // external code to kill the process if the signal fails.
- PANIC(fmt::format("Failed to kill thread with id {}, aborting from Watchdog AbortAction instead.", - raw_tid)); -} - -} // namespace AbortAction -} // namespace Watchdog -} // namespace Extensions -} // namespace Envoy diff --git a/source/server/BUILD b/source/server/BUILD index 6c587d9d95d7..852cc2bda604 100644 --- a/source/server/BUILD +++ b/source/server/BUILD @@ -127,7 +127,9 @@ envoy_cc_library( "//source/common/event:libevent_lib", "//source/common/protobuf:utility_lib", "//source/common/stats:symbol_table_lib", + "//source/common/watchdog:abort_action_config", "@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto", + "@envoy_api//envoy/watchdog/v3alpha:pkg_cc_proto", ], ) diff --git a/source/server/connection_handler_impl.cc b/source/server/connection_handler_impl.cc index f819f5843c7b..d28f892b22ae 100644 --- a/source/server/connection_handler_impl.cc +++ b/source/server/connection_handler_impl.cc @@ -458,6 +458,10 @@ void ConnectionHandlerImpl::ActiveTcpListener::resumeListening() { void ConnectionHandlerImpl::ActiveTcpListener::newConnection( Network::ConnectionSocketPtr&& socket, std::unique_ptr stream_info) { + // Refresh local address in case it was restored by a listener filter like the original_dst + // filter. + stream_info->setDownstreamLocalAddress(socket->localAddress()); + // Find matching filter chain. const auto filter_chain = config_->filterChainManager().findFilterChain(*socket); if (filter_chain == nullptr) { diff --git a/source/server/guarddog_impl.cc b/source/server/guarddog_impl.cc index 7c512d20310e..c831b198a1bf 100644 --- a/source/server/guarddog_impl.cc +++ b/source/server/guarddog_impl.cc @@ -13,6 +13,7 @@ #include "envoy/server/guarddog.h" #include "envoy/server/guarddog_config.h" #include "envoy/stats/scope.h" +#include "envoy/watchdog/v3alpha/abort_action.pb.h" #include "common/common/assert.h" #include "common/common/fmt.h" @@ -64,7 +65,23 @@ GuardDogImpl::GuardDogImpl(Stats::Scope& stats_scope, const Server::Configuratio Configuration::GuardDogActionFactoryContext context = {api, *dispatcher_, stats_scope, name}; - const auto& actions = config.actions(); + auto actions = config.actions(); + + // Add default abort_action if kill and/or multi-kill is enabled. + if (config.killTimeout().count() > 0) { + envoy::watchdog::v3alpha::AbortActionConfig abort_config; + WatchDogAction* abort_action_config = actions.Add(); + abort_action_config->set_event(WatchDogAction::KILL); + abort_action_config->mutable_config()->mutable_typed_config()->PackFrom(abort_config); + } + + if (config.multiKillTimeout().count() > 0) { + envoy::watchdog::v3alpha::AbortActionConfig abort_config; + WatchDogAction* abort_action_config = actions.Add(); + abort_action_config->set_event(WatchDogAction::MULTIKILL); + abort_action_config->mutable_config()->mutable_typed_config()->PackFrom(abort_config); + } + for (const auto& action : actions) { // Get factory and add the created cb auto& factory = Config::Utility::getAndCheckFactory( @@ -85,11 +102,13 @@ GuardDogImpl::GuardDogImpl(Stats::Scope& stats_scope, const Server::Configuratio GuardDogImpl::~GuardDogImpl() { stop(); } void GuardDogImpl::step() { - { - Thread::LockGuard guard(mutex_); - if (!run_thread_) { - return; - } + // Hold mutex_ for the duration of the step() function to ensure that watchdog still alive checks + // and test interlocks happen in the expected order. Calls to forceCheckForTest() should result in + // a full iteration of the step() function to process recent watchdog touches and monotonic time + // changes. 
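The comment above describes the new locking discipline for step(). Reduced to standard-library primitives, the pattern looks roughly like the following illustrative sketch; the PeriodicChecker class and its condition-variable interlock are stand-ins for the actual GuardDog and TestInterlockHook types, not Envoy's implementation.

#include <condition_variable>
#include <mutex>

// Hold the mutex across the whole periodic check so a test interlock that
// waits on the same mutex is guaranteed to observe a complete pass of
// step(), never a partially applied one.
class PeriodicChecker {
public:
  void step() {
    std::unique_lock<std::mutex> lock(mutex_); // held for the entire pass
    if (!running_) {
      return;
    }
    scanWatchedThreads(); // all alive-checks happen under the lock
    step_count_++;
    step_done_.notify_all(); // test interlock signaled while lock is held
  }

  // Test-only helper: block until another full step() has completed.
  void waitForStepForTest(int steps_seen) {
    std::unique_lock<std::mutex> lock(mutex_);
    step_done_.wait(lock, [&] { return step_count_ > steps_seen; });
  }

private:
  void scanWatchedThreads() {} // placeholder for the real check loop

  std::mutex mutex_;
  std::condition_variable step_done_;
  bool running_{true};
  int step_count_{0};
};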
+  Thread::LockGuard guard(mutex_);
+  if (!run_thread_) {
+    return;
   }
 
   const auto now = time_source_.monotonicTime();
@@ -106,7 +125,13 @@ void GuardDogImpl::step() {
       static_cast<size_t>(ceil(multi_kill_fraction_ * watched_dogs_.size())));
 
   for (auto& watched_dog : watched_dogs_) {
-    const auto last_checkin = watched_dog->dog_->lastTouchTime();
+    if (watched_dog->dog_->getTouchedAndReset()) {
+      // Watchdog was touched since the guard dog last checked; update last check-in time.
+      watched_dog->last_checkin_ = now;
+      continue;
+    }
+
+    const auto last_checkin = watched_dog->last_checkin_;
     const auto tid = watched_dog->dog_->threadId();
     const auto delta = now - last_checkin;
     if (watched_dog->last_alert_time_ && watched_dog->last_alert_time_.value() < last_checkin) {
@@ -133,19 +158,12 @@
     }
     if (killEnabled() && delta > kill_timeout_) {
       invokeGuardDogActions(WatchDogAction::KILL, {{tid, last_checkin}}, now);
-
-      PANIC(fmt::format("GuardDog: one thread ({}) stuck for more than watchdog_kill_timeout",
-                        watched_dog->dog_->threadId().debugString()));
     }
     if (multikillEnabled() && delta > multi_kill_timeout_) {
       multi_kill_threads.emplace_back(tid, last_checkin);
 
       if (multi_kill_threads.size() >= required_for_multi_kill) {
         invokeGuardDogActions(WatchDogAction::MULTIKILL, multi_kill_threads, now);
-
-        PANIC(fmt::format("GuardDog: At least {} threads ({},...) stuck for more than "
-                          "watchdog_multikill_timeout",
-                          multi_kill_threads.size(), tid.debugString()));
       }
     }
   }
@@ -160,12 +178,9 @@ void GuardDogImpl::step() {
     invokeGuardDogActions(WatchDogAction::MISS, miss_threads, now);
   }
 
-  {
-    Thread::LockGuard guard(mutex_);
-    test_interlock_hook_->signalFromImpl(now);
-    if (run_thread_) {
-      loop_timer_->enableTimer(loop_interval_);
-    }
+  test_interlock_hook_->signalFromImpl();
+  if (run_thread_) {
+    loop_timer_->enableTimer(loop_interval_);
   }
 }
 
@@ -176,14 +191,13 @@ WatchDogSharedPtr GuardDogImpl::createWatchDog(Thread::ThreadId thread_id,
   // accessed out of the locked section below is const (time_source_ has no
   // state).
   const auto wd_interval = loop_interval_ / 2;
-  WatchDogSharedPtr new_watchdog =
-      std::make_shared<WatchDogImpl>(std::move(thread_id), time_source_, wd_interval);
+  auto new_watchdog = std::make_shared<WatchDogImpl>(std::move(thread_id), wd_interval);
   WatchedDogPtr watched_dog = std::make_unique<WatchedDog>(stats_scope_, thread_name, new_watchdog);
+  new_watchdog->touch();
   {
     Thread::LockGuard guard(wd_lock_);
     watched_dogs_.push_back(std::move(watched_dog));
   }
-  new_watchdog->touch();
 
   return new_watchdog;
 }
@@ -232,7 +246,7 @@ void GuardDogImpl::invokeGuardDogActions(
 }
 
 GuardDogImpl::WatchedDog::WatchedDog(Stats::Scope& stats_scope, const std::string& thread_name,
-                                     const WatchDogSharedPtr& watch_dog)
+                                     const WatchDogImplSharedPtr& watch_dog)
     : dog_(watch_dog),
       miss_counter_(stats_scope.counterFromStatName(
          Stats::StatNameManagedStorage(fmt::format("server.{}.watchdog_miss", thread_name),
diff --git a/source/server/guarddog_impl.h b/source/server/guarddog_impl.h
index 3d8503ec9530..ebc4040e1c81 100644
--- a/source/server/guarddog_impl.h
+++ b/source/server/guarddog_impl.h
@@ -18,6 +18,8 @@
 #include "common/common/thread.h"
 #include "common/event/libevent.h"
 
+#include "server/watchdog_impl.h"
+
 #include "absl/types/optional.h"
 
 namespace Envoy {
@@ -45,16 +47,17 @@ class GuardDogImpl : public GuardDog {
     virtual ~TestInterlockHook() = default;
 
     /**
-     * Called from GuardDogImpl to indicate that it has evaluated all watch-dogs
-     * up to a particular point in time.
+     * Called from GuardDogImpl to indicate that it has evaluated all watch-dogs up to a particular
+     * point in time. Called while the GuardDog mutex is held.
      */
-    virtual void signalFromImpl(MonotonicTime) {}
+    virtual void signalFromImpl() {}
 
     /**
-     * Called from GuardDog tests to block until the implementation has reached
-     * the desired point in time.
+     * Called from GuardDog tests to block until the implementation has reached the desired
+     * condition. Called while the GuardDog mutex is held.
+     * @param mutex The GuardDog's mutex for use by Thread::CondVar::wait.
      */
-    virtual void waitFromTest(Thread::MutexBasicLockable&, MonotonicTime) {}
+    virtual void waitFromTest(Thread::MutexBasicLockable& /*mutex*/) {}
   };
 
   /**
@@ -79,15 +82,13 @@ class GuardDogImpl : public GuardDog {
   const std::chrono::milliseconds loopIntervalForTest() const { return loop_interval_; }
 
   /**
-   * Test hook to force a step() to catch up with the current simulated
-   * time. This is inlined so that it does not need to be present in the
-   * production binary.
+   * Test hook to force a step() to catch up with the current watchdog state and simulated time.
+   * This is inlined so that it does not need to be present in the production binary.
    */
   void forceCheckForTest() {
     Thread::LockGuard guard(mutex_);
-    MonotonicTime now = time_source_.monotonicTime();
     loop_timer_->enableTimer(std::chrono::milliseconds(0));
-    test_interlock_hook_->waitFromTest(mutex_, now);
+    test_interlock_hook_->waitFromTest(mutex_);
   }
 
   // Server::GuardDog
@@ -111,11 +112,13 @@ class GuardDogImpl : public GuardDog {
       std::vector<std::pair<Thread::ThreadId, MonotonicTime>> thread_last_checkin_pairs,
       MonotonicTime now);
 
+  using WatchDogImplSharedPtr = std::shared_ptr<WatchDogImpl>;
   struct WatchedDog {
     WatchedDog(Stats::Scope& stats_scope, const std::string& thread_name,
-               const WatchDogSharedPtr& watch_dog);
+               const WatchDogImplSharedPtr& watch_dog);
 
-    const WatchDogSharedPtr dog_;
+    const WatchDogImplSharedPtr dog_;
+    MonotonicTime last_checkin_;
     absl::optional<MonotonicTime> last_alert_time_;
     bool miss_alerted_{};
     bool megamiss_alerted_{};
diff --git a/source/server/watchdog_impl.h b/source/server/watchdog_impl.h
index ea4ea82e5c28..a18034745885 100644
--- a/source/server/watchdog_impl.h
+++ b/source/server/watchdog_impl.h
@@ -19,26 +19,24 @@ class WatchDogImpl : public WatchDog {
   /**
    * @param interval WatchDog timer interval (used after startWatchdog())
    */
-  WatchDogImpl(Thread::ThreadId thread_id, TimeSource& tsource, std::chrono::milliseconds interval)
-      : thread_id_(thread_id), time_source_(tsource),
-        latest_touch_time_since_epoch_(tsource.monotonicTime().time_since_epoch()),
-        timer_interval_(interval) {}
+  WatchDogImpl(Thread::ThreadId thread_id, std::chrono::milliseconds interval)
+      : thread_id_(thread_id), timer_interval_(interval) {}
 
   Thread::ThreadId threadId() const override { return thread_id_; }
-  MonotonicTime lastTouchTime() const override {
-    return MonotonicTime(latest_touch_time_since_epoch_.load());
-  }
+  // Used by GuardDogImpl to determine if the watchdog was touched recently and reset the touch
+  // status.
+  bool getTouchedAndReset() { return touched_.exchange(false, std::memory_order_relaxed); }
 
   // Server::WatchDog
   void startWatchdog(Event::Dispatcher& dispatcher) override;
   void touch() override {
-    latest_touch_time_since_epoch_.store(time_source_.monotonicTime().time_since_epoch());
+    // Set touched_ if not already set.
+    bool expected = false;
+    touched_.compare_exchange_strong(expected, true, std::memory_order_relaxed);
   }
 
 private:
   const Thread::ThreadId thread_id_;
-  TimeSource& time_source_;
-  std::atomic<MonotonicTime::duration> latest_touch_time_since_epoch_;
+  std::atomic<bool> touched_{false};
   Event::TimerPtr timer_;
   const std::chrono::milliseconds timer_interval_;
 };
diff --git a/test/common/upstream/BUILD b/test/common/upstream/BUILD
index 54bc7d6a5375..b64cd622fc48 100644
--- a/test/common/upstream/BUILD
+++ b/test/common/upstream/BUILD
@@ -242,6 +242,54 @@ envoy_cc_test(
     ],
 )
 
+envoy_cc_test_library(
+    name = "load_balancer_fuzz_lib",
+    srcs = ["load_balancer_fuzz_base.cc"],
+    hdrs = ["load_balancer_fuzz_base.h"],
+    deps = [
+        ":load_balancer_fuzz_proto_cc_proto",
+        ":utility_lib",
+        "//source/common/upstream:load_balancer_lib",
+        "//test/fuzz:random_lib",
+        "//test/mocks:common_lib",
+        "//test/mocks/runtime:runtime_mocks",
+        "//test/mocks/upstream:cluster_info_mocks",
+        "//test/mocks/upstream:host_set_mocks",
+        "//test/mocks/upstream:load_balancer_context_mock",
+        "//test/mocks/upstream:priority_set_mocks",
+        "@envoy_api//envoy/config/cluster/v3:pkg_cc_proto",
+    ],
+)
+
+envoy_proto_library(
+    name = "load_balancer_fuzz_proto",
+    srcs = ["load_balancer_fuzz.proto"],
+    deps = [
+        "//test/fuzz:common_proto",
+        "@envoy_api//envoy/config/cluster/v3:pkg",
+    ],
+)
+
+envoy_proto_library(
+    name = "random_load_balancer_fuzz_proto",
+    srcs = ["random_load_balancer_fuzz.proto"],
+    deps = [
+        "//test/common/upstream:load_balancer_fuzz_proto",
+    ],
+)
+
+envoy_cc_fuzz_test(
+    name = "random_load_balancer_fuzz_test",
+    srcs = ["random_load_balancer_fuzz_test.cc"],
+    corpus = "//test/common/upstream:random_load_balancer_corpus",
+    deps = [
+        ":load_balancer_fuzz_lib",
+        ":load_balancer_fuzz_proto_cc_proto",
+        ":random_load_balancer_fuzz_proto_cc_proto",
+        ":utility_lib",
+    ],
+)
+
 envoy_cc_test(
     name = "load_balancer_simulation_test",
     srcs = ["load_balancer_simulation_test.cc"],
diff --git a/test/common/upstream/health_checker_impl_test.cc b/test/common/upstream/health_checker_impl_test.cc
index c73876ebefc1..cd03e0130459 100644
--- a/test/common/upstream/health_checker_impl_test.cc
+++ b/test/common/upstream/health_checker_impl_test.cc
@@ -961,8 +961,6 @@ TEST_F(HttpHealthCheckerImplTest, TlsOptions) {
       Network::TransportSocketFactoryPtr(socket_factory));
   cluster_->info_->transport_socket_matcher_.reset(transport_socket_match);
 
-  EXPECT_CALL(*socket_factory, addReadyCb(_))
-      .WillOnce(Invoke([&](std::function<void()> callback) -> void { callback(); }));
   EXPECT_CALL(*socket_factory, createTransportSocket(ApplicationProtocolListEq("http1")));
 
   allocHealthChecker(yaml);
@@ -2450,19 +2448,13 @@ TEST_F(HttpHealthCheckerImplTest, TransportSocketMatchCriteria) {
       ALL_TRANSPORT_SOCKET_MATCH_STATS(POOL_COUNTER_PREFIX(stats_store, "test"))};
   auto health_check_only_socket_factory = std::make_unique<Network::MockTransportSocketFactory>();
 
-  // We expect resolve() to be called 3 times, once for endpoint socket matching (with no metadata
-  // in this test) and twice for health check socket matching (once for checking if secrets are
-  // ready on the transport socket, and again for actually getting the health check transport socket
-  // to create a connection). In the latter 2 calls, we expect metadata that matches the above
-  // object.
+  // We expect resolve() to be called twice, once for endpoint socket matching (with no metadata in
+  // this test) and once for health check socket matching. In the latter we expect metadata that
+  // matches the above object.
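Returning to the watchdog change at the top of this hunk: the touched_ flag replaces a stored timestamp with a boolean handshake between the worker thread and the guard dog. A self-contained sketch of that exchange/compare_exchange pattern, assuming (as the production comment does) that relaxed memory ordering suffices for an eventually-visible liveness bit:

#include <atomic>
#include <cassert>

class TouchFlag {
public:
  // Worker thread: mark the watchdog as alive. compare_exchange avoids a
  // redundant store when the flag is already set.
  void touch() {
    bool expected = false;
    touched_.compare_exchange_strong(expected, true, std::memory_order_relaxed);
  }

  // Guard dog thread: read and clear in one atomic step, so a touch that
  // races with the check is never lost, only deferred to the next cycle.
  bool getTouchedAndReset() { return touched_.exchange(false, std::memory_order_relaxed); }

private:
  std::atomic<bool> touched_{false};
};

int main() {
  TouchFlag flag;
  flag.touch();
  assert(flag.getTouchedAndReset());  // first check observes the touch
  assert(!flag.getTouchedAndReset()); // and resets it for the next cycle
}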
EXPECT_CALL(*transport_socket_match, resolve(nullptr)); EXPECT_CALL(*transport_socket_match, resolve(MetadataEq(metadata))) - .Times(2) - .WillRepeatedly(Return(TransportSocketMatcher::MatchData( - *health_check_only_socket_factory, health_transport_socket_stats, "health_check_only"))) - .RetiresOnSaturation(); - EXPECT_CALL(*health_check_only_socket_factory, addReadyCb(_)) - .WillOnce(Invoke([&](std::function callback) -> void { callback(); })); + .WillOnce(Return(TransportSocketMatcher::MatchData( + *health_check_only_socket_factory, health_transport_socket_stats, "health_check_only"))); // The health_check_only_socket_factory should be used to create a transport socket for the health // check connection. EXPECT_CALL(*health_check_only_socket_factory, createTransportSocket(_)); @@ -2498,9 +2490,6 @@ TEST_F(HttpHealthCheckerImplTest, NoTransportSocketMatchCriteria) { )EOF"; auto default_socket_factory = std::make_unique(); - - EXPECT_CALL(*default_socket_factory, addReadyCb(_)) - .WillOnce(Invoke([&](std::function callback) -> void { callback(); })); // The default_socket_factory should be used to create a transport socket for the health check // connection. EXPECT_CALL(*default_socket_factory, createTransportSocket(_)); diff --git a/test/common/upstream/load_balancer_fuzz.proto b/test/common/upstream/load_balancer_fuzz.proto new file mode 100644 index 000000000000..c4b1ead2c7d0 --- /dev/null +++ b/test/common/upstream/load_balancer_fuzz.proto @@ -0,0 +1,60 @@ +syntax = "proto3"; + +package test.common.upstream; + +import "validate/validate.proto"; +import "envoy/config/cluster/v3/cluster.proto"; +import "google/protobuf/empty.proto"; + +message UpdateHealthFlags { + // The host priority determines what host set within the priority set which will get updated. + uint64 host_priority = 1; + // These will determine how many hosts will get placed into health hosts, degraded hosts, and + // excluded hosts from the full host list. + uint32 num_healthy_hosts = 2; + uint32 num_degraded_hosts = 3; + uint32 num_excluded_hosts = 4; + // This is used to determine which hosts get marked as healthy, degraded, and excluded. + bytes random_bytestring = 5 [(validate.rules).bytes = {min_len: 1, max_len: 256}]; +} + +message LbAction { + oneof action_selector { + option (validate.required) = true; + // This updates the health flags of hosts at a certain priority level. The number of hosts in each priority level/in localities is static, + // as untrusted upstreams cannot change that, and can only change their health flags. + UpdateHealthFlags update_health_flags = 1; + // Prefetches a host using the encapsulated specific load balancer. + google.protobuf.Empty prefetch = 2; + // Chooses a host using the encapsulated specific load balancer. + google.protobuf.Empty choose_host = 3; + } +} + +message SetupPriorityLevel { + uint32 num_hosts_in_priority_level = 1 [(validate.rules).uint32.lte = 500]; + uint32 num_hosts_locality_a = 2 [(validate.rules).uint32.lte = 500]; + uint32 num_hosts_locality_b = 3 [(validate.rules).uint32.lte = 500]; + // Hard cap at 3 localities for simplicity + uint32 num_hosts_locality_c = 4 [(validate.rules).uint32.lte = 500]; + // For choosing which hosts go in which locality + bytes random_bytestring = 5 [(validate.rules).bytes = {min_len: 1, max_len: 256}]; +} + +// This message represents what LoadBalancerFuzzBase will interact with, performing setup of host sets and calling into load balancers. 
+// The logic that this message represents and the base class for load balancing fuzzing will be logic that maps to all types of load balancing +// and can be used in a modular way at the highest level for each load balancer. +message LoadBalancerTestCase { + envoy.config.cluster.v3.Cluster.CommonLbConfig common_lb_config = 1 + [(validate.rules).message.required = true]; + repeated LbAction actions = 2; + + // Each generated integer will cause the fuzzer to initialize hosts at a certain priority level, each integer generated adding a priority + // level with integer generated hosts in that new priority level. Capped at 20 for simplicity. + repeated SetupPriorityLevel setup_priority_levels = 3 + [(validate.rules).repeated = {min_items: 1, max_items: 20}]; + + // This number is used to instantiate the prng. The prng takes the place of random() calls, allowing a representative random distribution + // which is also deterministic. + uint64 seed_for_prng = 4 [(validate.rules).uint64.gt = 0]; +} diff --git a/test/common/upstream/load_balancer_fuzz_base.cc b/test/common/upstream/load_balancer_fuzz_base.cc new file mode 100644 index 000000000000..6741f95f7581 --- /dev/null +++ b/test/common/upstream/load_balancer_fuzz_base.cc @@ -0,0 +1,229 @@ +#include "test/common/upstream/load_balancer_fuzz_base.h" + +#include "test/common/upstream/utility.h" + +namespace Envoy { +namespace Upstream { + +namespace { +// TODO(zasweq): This will be relaxed in the future in order to fully represent the state space +// possible within Load Balancing. In it's current state, it is too slow (particularly due to calls +// to makeTestHost()) to scale up hosts. Once this is made more efficient, this number will be +// increased. +constexpr uint32_t MaxNumHostsPerPriorityLevel = 256; + +} // namespace + +void LoadBalancerFuzzBase::initializeASingleHostSet( + const test::common::upstream::SetupPriorityLevel& setup_priority_level, + const uint8_t priority_level, uint16_t& port) { + const uint32_t num_hosts_in_priority_level = setup_priority_level.num_hosts_in_priority_level(); + ENVOY_LOG_MISC(trace, "Will attempt to initialize host set at priority level {} with {} hosts.", + priority_level, num_hosts_in_priority_level); + MockHostSet& host_set = *priority_set_.getMockHostSet(priority_level); + uint32_t hosts_made = 0; + // Cap each host set at 256 hosts for efficiency - Leave port clause in for future changes + while (hosts_made < std::min(num_hosts_in_priority_level, MaxNumHostsPerPriorityLevel) && + port < 65535) { + host_set.hosts_.push_back(makeTestHost(info_, "tcp://127.0.0.1:" + std::to_string(port))); + ++port; + ++hosts_made; + } + + Fuzz::ProperSubsetSelector subset_selector(setup_priority_level.random_bytestring()); + + const std::vector> localities = subset_selector.constructSubsets( + {setup_priority_level.num_hosts_locality_a(), setup_priority_level.num_hosts_locality_b(), + setup_priority_level.num_hosts_locality_c()}, + host_set.hosts_.size()); + + HostVector locality_a = {}; + HostVector locality_b = {}; + HostVector locality_c = {}; + // Used to index into correct locality in iteration through subsets + std::array locality_indexes = {locality_a, locality_b, locality_c}; + + for (uint8_t locality = 0; locality < locality_indexes.size(); locality++) { + for (uint8_t index : localities[locality]) { + locality_indexes[locality].push_back(host_set.hosts_[index]); + locality_indexes_[index] = locality; + } + ENVOY_LOG_MISC(trace, "Added these hosts to locality {}: {}", locality + 1, + 
absl::StrJoin(localities.at(locality), " ")); + } + + host_set.hosts_per_locality_ = makeHostsPerLocality({locality_a, locality_b, locality_c}); +} + +// Initializes random and fixed host sets +void LoadBalancerFuzzBase::initializeLbComponents( + const test::common::upstream::LoadBalancerTestCase& input) { + random_.initializeSeed(input.seed_for_prng()); + uint16_t port = 80; + for (uint8_t priority_of_host_set = 0; + priority_of_host_set < input.setup_priority_levels().size(); ++priority_of_host_set) { + initializeASingleHostSet(input.setup_priority_levels().at(priority_of_host_set), + priority_of_host_set, port); + } + num_priority_levels_ = input.setup_priority_levels().size(); +} + +// Updating host sets is shared amongst all the load balancer tests. Since logically, we're just +// setting the mock priority set to have certain values, and all load balancers interface with host +// sets and their health statuses, this action maps to all load balancers. +void LoadBalancerFuzzBase::updateHealthFlagsForAHostSet(const uint64_t host_priority, + const uint32_t num_healthy_hosts, + const uint32_t num_degraded_hosts, + const uint32_t num_excluded_hosts, + const std::string random_bytestring) { + const uint8_t priority_of_host_set = host_priority % num_priority_levels_; + ENVOY_LOG_MISC(trace, "Updating health flags for host set at priority: {}", priority_of_host_set); + MockHostSet& host_set = *priority_set_.getMockHostSet(priority_of_host_set); + // Remove health flags from hosts with health flags set - previous iterations of this function + // with hosts placed degraded and excluded buckets + for (auto& host : host_set.degraded_hosts_) { + host->healthFlagClear(Host::HealthFlag::DEGRADED_ACTIVE_HC); + } + for (auto& host : host_set.excluded_hosts_) { + host->healthFlagClear(Host::HealthFlag::FAILED_ACTIVE_HC); + } + // This downcast will not overflow because size is capped by port numbers + const uint32_t host_set_size = host_set.hosts_.size(); + host_set.healthy_hosts_.clear(); + host_set.degraded_hosts_.clear(); + host_set.excluded_hosts_.clear(); + + enum HealthStatus { + HEALTHY = 0, + DEGRADED = 1, + EXCLUDED = 2, + }; + + Fuzz::ProperSubsetSelector subset_selector(random_bytestring); + + const std::vector> subsets = subset_selector.constructSubsets( + {num_healthy_hosts, num_degraded_hosts, num_excluded_hosts}, host_set_size); + + // Healthy hosts are first subset + for (uint8_t index : subsets.at(HealthStatus::HEALTHY)) { + host_set.healthy_hosts_.push_back(host_set.hosts_[index]); + // No health flags for healthy + } + ENVOY_LOG_MISC(trace, "Hosts made healthy at priority level {}: {}", priority_of_host_set, + absl::StrJoin(subsets.at(HealthStatus::HEALTHY), " ")); + + // Degraded hosts are second subset + for (uint8_t index : subsets.at(HealthStatus::DEGRADED)) { + host_set.degraded_hosts_.push_back(host_set.hosts_[index]); + // Health flags are not currently directly used by most load balancers, but + // they may be added and also are used by other components. + // There are two health flags that map to Host::Health::Degraded, DEGRADED_ACTIVE_HC and + // DEGRADED_EDS_HEALTH. Choose one hardcoded for simplicity. 
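A note on the bucketing in this function: the fuzzer derives every healthy/degraded/excluded partition deterministically from the corpus entry's bytestring, so a crashing input always reproduces the same host layout. A rough stand-alone sketch of such a selector follows; it is hypothetical and much simpler than Envoy's Fuzz::ProperSubsetSelector, but shows the disjoint-subsets-from-bytes idea.

#include <cstdint>
#include <iostream>
#include <numeric>
#include <string>
#include <vector>

// Builds disjoint index subsets of the requested sizes from [0, n). The
// bytestring seeds a tiny deterministic shuffle, so the same fuzz input
// always yields the same partition (sizes are clamped to what remains).
std::vector<std::vector<uint32_t>>
constructSubsets(const std::vector<uint32_t>& sizes, uint32_t n, const std::string& bytes) {
  std::vector<uint32_t> pool(n);
  std::iota(pool.begin(), pool.end(), 0);
  // Deterministic pseudo-shuffle keyed by the input bytes.
  for (size_t i = 0; i < bytes.size() && n > 1; ++i) {
    std::swap(pool[i % n], pool[static_cast<uint8_t>(bytes[i]) % n]);
  }
  std::vector<std::vector<uint32_t>> subsets;
  size_t next = 0;
  for (uint32_t size : sizes) {
    std::vector<uint32_t> subset;
    while (subset.size() < size && next < pool.size()) {
      subset.push_back(pool[next++]);
    }
    subsets.push_back(std::move(subset));
  }
  return subsets;
}

int main() {
  auto subsets = constructSubsets({2, 3, 4}, 20, "\x01\x02\x03\x04\x05\x06");
  std::cout << "healthy subset size: " << subsets[0].size() << "\n"; // 2
}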
+ host_set.hosts_[index]->healthFlagSet(Host::HealthFlag::DEGRADED_ACTIVE_HC); + } + ENVOY_LOG_MISC(trace, "Hosts made degraded at priority level {}: {}", priority_of_host_set, + absl::StrJoin(subsets.at(HealthStatus::DEGRADED), " ")); + + // Excluded hosts are third subset + for (uint8_t index : subsets.at(HealthStatus::EXCLUDED)) { + host_set.excluded_hosts_.push_back(host_set.hosts_[index]); + // Health flags are not currently directly used by most load balancers, but + // they may be added and also are used by other components. + // There are three health flags that map to Host::Health::Degraded, FAILED_ACTIVE_HC, + // FAILED_OUTLIER_CHECK, and FAILED_EDS_HEALTH. Choose one hardcoded for simplicity. + host_set.hosts_[index]->healthFlagSet(Host::HealthFlag::FAILED_ACTIVE_HC); + } + ENVOY_LOG_MISC(trace, "Hosts made excluded at priority level {}: {}", priority_of_host_set, + absl::StrJoin(subsets.at(HealthStatus::EXCLUDED), " ")); + + // Handle updating health flags for hosts_per_locality_ + enum Locality { + A = 0, + B = 1, + C = 2, + }; + + // The index within the array of the vector represents the locality + std::array healthy_hosts_per_locality; + std::array degraded_hosts_per_locality; + std::array excluded_hosts_per_locality; + + // Wrap those three in an array here, where the index represents health status of + // healthy/degraded/excluded, used for indexing during iteration through subsets + std::array, 3> locality_health_statuses = { + healthy_hosts_per_locality, degraded_hosts_per_locality, excluded_hosts_per_locality}; + + // Iterate through subsets + for (uint8_t health_status = 0; health_status < locality_health_statuses.size(); + health_status++) { + for (uint8_t index : subsets.at(health_status)) { // Each subset logically represents a health + // status + // If the host is in a locality, we have to update the corresponding health status host vector + if (!(locality_indexes_.find(index) == locality_indexes_.end())) { + // After computing the host index subsets, we want to propagate these changes to a host set + // by building and using these host vectors + uint8_t locality = locality_indexes_[index]; + locality_health_statuses[health_status][locality].push_back(host_set.hosts_[index]); + ENVOY_LOG_MISC(trace, "Added host at index {} in locality {} to health status {}", index, + locality_indexes_[index] + 1, health_status + 1); + } + } + } + + // This overrides what is currently present in the host set, thus not having to explicitly call + // vector.clear() + host_set.healthy_hosts_per_locality_ = makeHostsPerLocality( + {healthy_hosts_per_locality[Locality::A], healthy_hosts_per_locality[Locality::B], + healthy_hosts_per_locality[Locality::C]}); + host_set.degraded_hosts_per_locality_ = makeHostsPerLocality( + {degraded_hosts_per_locality[Locality::A], degraded_hosts_per_locality[Locality::B], + degraded_hosts_per_locality[Locality::C]}); + host_set.excluded_hosts_per_locality_ = makeHostsPerLocality( + {excluded_hosts_per_locality[Locality::A], excluded_hosts_per_locality[Locality::B], + excluded_hosts_per_locality[Locality::C]}); + + // These callbacks update load balancing data structures (callbacks are piped into priority set + // in LoadBalancerBase constructor) This won't have any hosts added or removed, as untrusted + // upstreams cannot do that. 
+  host_set.runCallbacks({}, {});
+}
+
+void LoadBalancerFuzzBase::prefetch() {
+  // TODO: context, could generate it in proto action
+  lb_->peekAnotherHost(nullptr);
+}
+
+void LoadBalancerFuzzBase::chooseHost() {
+  // TODO: context, could generate it in proto action
+  lb_->chooseHost(nullptr);
+}
+
+void LoadBalancerFuzzBase::replay(
+    const Protobuf::RepeatedPtrField<test::common::upstream::LbAction>& actions) {
+  constexpr auto max_actions = 64;
+  for (int i = 0; i < std::min(max_actions, actions.size()); ++i) {
+    const auto& event = actions.at(i);
+    ENVOY_LOG_MISC(trace, "Action: {}", event.DebugString());
+    switch (event.action_selector_case()) {
+    case test::common::upstream::LbAction::kUpdateHealthFlags: {
+      updateHealthFlagsForAHostSet(event.update_health_flags().host_priority(),
+                                   event.update_health_flags().num_healthy_hosts(),
+                                   event.update_health_flags().num_degraded_hosts(),
+                                   event.update_health_flags().num_excluded_hosts(),
+                                   event.update_health_flags().random_bytestring());
+      break;
+    }
+    case test::common::upstream::LbAction::kPrefetch:
+      prefetch();
+      break;
+    case test::common::upstream::LbAction::kChooseHost:
+      chooseHost();
+      break;
+    default:
+      break;
+    }
+  }
+}
+
+} // namespace Upstream
+} // namespace Envoy
diff --git a/test/common/upstream/load_balancer_fuzz_base.h b/test/common/upstream/load_balancer_fuzz_base.h
new file mode 100644
index 000000000000..deeb4c82c216
--- /dev/null
+++ b/test/common/upstream/load_balancer_fuzz_base.h
@@ -0,0 +1,67 @@
+#include "envoy/config/cluster/v3/cluster.pb.h"
+
+#include "common/upstream/load_balancer_impl.h"
+
+#include "test/common/upstream/load_balancer_fuzz.pb.validate.h"
+#include "test/fuzz/random.h"
+#include "test/mocks/common.h"
+#include "test/mocks/runtime/mocks.h"
+#include "test/mocks/upstream/cluster_info.h"
+#include "test/mocks/upstream/host_set.h"
+#include "test/mocks/upstream/load_balancer_context.h"
+#include "test/mocks/upstream/priority_set.h"
+
+namespace Envoy {
+namespace Upstream {
+
+// This class implements replay logic, and also handles the initial setup of static host sets and
+// the subsequent updates to those sets.
+class LoadBalancerFuzzBase {
+public:
+  LoadBalancerFuzzBase() : stats_(ClusterInfoImpl::generateStats(stats_store_)){};
+
+  // Initializes load balancer components shared amongst every load balancer, random_, and
+  // priority_set_
+  void initializeLbComponents(const test::common::upstream::LoadBalancerTestCase& input);
+  void updateHealthFlagsForAHostSet(const uint64_t host_priority, const uint32_t num_healthy_hosts,
+                                    const uint32_t num_degraded_hosts,
+                                    const uint32_t num_excluded_hosts,
+                                    const std::string random_bytestring);
+  // These two actions have a lot of logic attached to them. However, all the logic that the load
+  // balancer needs to run its algorithm is already encapsulated within the load balancer. Thus,
+  // once the load balancer is constructed, all this class has to do is call lb_->peekAnotherHost()
+  // and lb_->chooseHost().
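The replay() loop defined above is the generic fuzz driver: decode each action, dispatch to a small fixed set of operations, and cap the iteration count so one input cannot run unbounded. The same skeleton without the protobuf plumbing, using a hypothetical Action type in place of the proto oneof:

#include <algorithm>
#include <cstdio>
#include <vector>

// Hypothetical action encoding; the proto oneof plays this role in the fuzzer.
enum class ActionKind { UpdateHealthFlags, Prefetch, ChooseHost };
struct Action { ActionKind kind; };

struct Driver {
  void updateHealthFlags() { std::puts("update"); }
  void prefetch() { std::puts("prefetch"); }
  void chooseHost() { std::puts("choose"); }

  // Replay at most kMaxActions so a pathological input cannot time out the
  // fuzz target; every action maps to exactly one bounded operation.
  void replay(const std::vector<Action>& actions) {
    constexpr size_t kMaxActions = 64;
    const size_t n = std::min(kMaxActions, actions.size());
    for (size_t i = 0; i < n; ++i) {
      switch (actions[i].kind) {
      case ActionKind::UpdateHealthFlags:
        updateHealthFlags();
        break;
      case ActionKind::Prefetch:
        prefetch();
        break;
      case ActionKind::ChooseHost:
        chooseHost();
        break;
      }
    }
  }
};

int main() {
  Driver d;
  d.replay({{ActionKind::Prefetch}, {ActionKind::ChooseHost}});
}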
+  void prefetch();
+  void chooseHost();
+  ~LoadBalancerFuzzBase() = default;
+  void replay(const Protobuf::RepeatedPtrField<test::common::upstream::LbAction>& actions);
+
+  // These public objects shared amongst all types of load balancers will be used to construct load
+  // balancers in specific load balancer fuzz classes
+  Stats::IsolatedStoreImpl stats_store_;
+  ClusterStats stats_;
+  NiceMock<Runtime::MockLoader> runtime_;
+  Random::PsuedoRandomGenerator64 random_;
+  NiceMock<MockPrioritySet> priority_set_;
+  std::shared_ptr<MockClusterInfo> info_{new NiceMock<MockClusterInfo>()};
+  std::unique_ptr<LoadBalancerBase> lb_;
+
+private:
+  // Untrusted upstreams don't have the ability to change the host set size, so keep it constant
+  // over the fuzz iteration.
+  void
+  initializeASingleHostSet(const test::common::upstream::SetupPriorityLevel& setup_priority_level,
+                           const uint8_t priority_level, uint16_t& port);
+
+  // This is used to choose a host set to update the flags in an update flags event by modding a
+  // random uint64 against this number.
+  uint8_t num_priority_levels_ = 0;
+
+  // This map is used when updating health flags, making sure the health flags are updated for
+  // hosts in localities. Key: index of host within the full host list; value: locality level of
+  // the host at that index.
+  absl::node_hash_map<uint8_t, uint8_t> locality_indexes_;
+};
+
+} // namespace Upstream
+} // namespace Envoy
diff --git a/test/common/upstream/random_load_balancer_corpus/random_256_ports b/test/common/upstream/random_load_balancer_corpus/random_256_ports
new file mode 100644
index 000000000000..1924462a2ee7
--- /dev/null
+++ b/test/common/upstream/random_load_balancer_corpus/random_256_ports
@@ -0,0 +1,41 @@
+load_balancer_test_case {
+common_lb_config {
+
+}
+actions {
+  update_health_flags {
+    host_priority: 0
+    num_healthy_hosts: 256
+    random_bytestring: "\x01\x02\x03\x04\x45\x80"
+  }
+}
+actions {
+  prefetch {
+
+  }
+}
+actions {
+  prefetch {
+
+  }
+}
+actions {
+  choose_host {
+
+  }
+}
+actions {
+  choose_host {
+
+  }
+}
+setup_priority_levels {
+  num_hosts_in_priority_level: 256
+  random_bytestring: "\x01\x02"
+}
+setup_priority_levels {
+  num_hosts_in_priority_level: 256
+  random_bytestring: "\x01\x02"
+}
+seed_for_prng: 4
+}
diff --git a/test/common/upstream/random_load_balancer_corpus/random_NoHosts b/test/common/upstream/random_load_balancer_corpus/random_NoHosts
new file mode 100644
index 000000000000..551225e908e3
--- /dev/null
+++ b/test/common/upstream/random_load_balancer_corpus/random_NoHosts
@@ -0,0 +1,24 @@
+load_balancer_test_case {
+common_lb_config {
+
+}
+actions {
+  prefetch {
+
+  }
+}
+actions {
+  choose_host {
+
+  }
+}
+setup_priority_levels {
+  num_hosts_in_priority_level: 0
+  random_bytestring: "\x01\x02"
+}
+setup_priority_levels {
+  num_hosts_in_priority_level: 0
+  random_bytestring: "\x01\x02"
+}
+seed_for_prng: 2
+}
diff --git a/test/common/upstream/random_load_balancer_corpus/random_Normal b/test/common/upstream/random_load_balancer_corpus/random_Normal
new file mode 100644
index 000000000000..61bb66f8638c
--- /dev/null
+++ b/test/common/upstream/random_load_balancer_corpus/random_Normal
@@ -0,0 +1,41 @@
+load_balancer_test_case {
+common_lb_config {
+
+}
+actions {
+  update_health_flags {
+    host_priority: 0
+    num_healthy_hosts: 2
+    random_bytestring: "\x01\x02"
+  }
+}
+actions {
+  prefetch {
+
+  }
+}
+actions {
+  prefetch {
+
+  }
+}
+actions {
+  choose_host {
+
+  }
+}
+actions {
+  choose_host {
+
+  }
+}
+setup_priority_levels {
+  num_hosts_in_priority_level: 2
+  random_bytestring: "\x01\x02"
+}
+setup_priority_levels {
+  num_hosts_in_priority_level: 0
+  random_bytestring: "\x01\x02"
+}
+seed_for_prng: 1
+}
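The corpus entries above feed seed_for_prng into the fuzzer's deterministic random source, so a crashing input replays identically. A sketch of the role that seeded PRNG plays; the real class is Envoy's Fuzz::PsuedoRandomGenerator64, and this stand-in simply wraps std::mt19937_64:

#include <cstdint>
#include <iostream>
#include <random>

// Every random() call the load balancer makes is drawn from a generator
// seeded by the corpus entry's seed_for_prng, giving a representative but
// fully deterministic random distribution.
class SeededRandom {
public:
  void initializeSeed(uint64_t seed) { engine_.seed(seed); }
  uint64_t random() { return engine_(); }

private:
  std::mt19937_64 engine_;
};

int main() {
  SeededRandom r1, r2;
  r1.initializeSeed(4); // same seed as the corpus entries above
  r2.initializeSeed(4);
  std::cout << (r1.random() == r2.random() ? "deterministic" : "bug") << "\n";
}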
diff --git a/test/common/upstream/random_load_balancer_corpus/random_crash-ba5efdfd9c412a8507087120783fe6529b1ac0cb b/test/common/upstream/random_load_balancer_corpus/random_crash-ba5efdfd9c412a8507087120783fe6529b1ac0cb new file mode 100644 index 000000000000..602a393132bf --- /dev/null +++ b/test/common/upstream/random_load_balancer_corpus/random_crash-ba5efdfd9c412a8507087120783fe6529b1ac0cb @@ -0,0 +1,38 @@ +load_balancer_test_case { +common_lb_config { + healthy_panic_threshold { + value: 2.12199579096527e-314 + } + locality_weighted_lb_config { + } +} +actions { + choose_host { + } +} +actions { + prefetch { + } +} +actions { + prefetch { + } +} +actions { + choose_host { + } +} +actions { + choose_host { + } +} +setup_priority_levels { + num_hosts_in_priority_level: 2 + random_bytestring: "\x01\x02" +} +setup_priority_levels { + num_hosts_in_priority_level: 9007199259945536 + random_bytestring: "\x01\x02" +} +seed_for_prng: 6 +} diff --git a/test/common/upstream/random_load_balancer_corpus/random_largest-port-value b/test/common/upstream/random_load_balancer_corpus/random_largest-port-value new file mode 100644 index 000000000000..2f95ce787ef1 --- /dev/null +++ b/test/common/upstream/random_load_balancer_corpus/random_largest-port-value @@ -0,0 +1,34 @@ +load_balancer_test_case { +common_lb_config { + +} +actions { + choose_host { + } +} +actions { + prefetch { + } +} +actions { + prefetch { + } +} +actions { + choose_host { + } +} +actions { + choose_host { + } +} +nsetup_priority_levels { + num_hosts_in_priority_level: 65455 + random_bytestring: "\x01\x02" +} +setup_priority_levels { + num_hosts_in_priority_level: 65455 + random_bytestring: "\x01\x02" +} +seed_for_prng: 5 +} diff --git a/test/common/upstream/random_load_balancer_corpus/random_many_choose_hosts b/test/common/upstream/random_load_balancer_corpus/random_many_choose_hosts new file mode 100644 index 000000000000..b263d07ec40e --- /dev/null +++ b/test/common/upstream/random_load_balancer_corpus/random_many_choose_hosts @@ -0,0 +1,61 @@ +load_balancer_test_case { +common_lb_config { + +} +actions { + update_health_flags { + host_priority: 0 + num_healthy_hosts: 2 + random_bytestring: "\x01\x02\x03\x04" + } +} +actions { + prefetch { + + } +} +actions { + prefetch { + + } +} +actions { + choose_host { + + } +} +actions { + choose_host { + + } +} +actions { + choose_host { + + } +} +actions { + choose_host { + + } +} +actions { + choose_host { + + } +} +actions { + choose_host { + + } +} +setup_priority_levels { + num_hosts_in_priority_level: 2 + random_bytestring: "\x01\x02" +} +setup_priority_levels { + num_hosts_in_priority_level: 0 + random_bytestring: "\x01\x02" +} +seed_for_prng: 1 +} diff --git a/test/common/upstream/random_load_balancer_corpus/random_max_ports b/test/common/upstream/random_load_balancer_corpus/random_max_ports new file mode 100644 index 000000000000..4a7406d8b765 --- /dev/null +++ b/test/common/upstream/random_load_balancer_corpus/random_max_ports @@ -0,0 +1,41 @@ +load_balancer_test_case { +common_lb_config { + +} +actions { + update_health_flags { + host_priority: 0 + num_healthy_hosts: 2 + random_bytestring: "\x01\x02\x03\x04" + } +} +actions { + prefetch { + + } +} +actions { + prefetch { + + } +} +actions { + choose_host { + + } +} +actions { + choose_host { + + } +} +setup_priority_levels { + num_hosts_in_priority_level: 32726 + random_bytestring: "\x01\x02" +} +setup_priority_levels { + num_hosts_in_priority_level: 32726 + random_bytestring: "\x01\x02" +} +seed_for_prng: 88 +} diff --git 
a/test/common/upstream/random_load_balancer_corpus/random_overflowing_ports b/test/common/upstream/random_load_balancer_corpus/random_overflowing_ports new file mode 100644 index 000000000000..4598c29dbe10 --- /dev/null +++ b/test/common/upstream/random_load_balancer_corpus/random_overflowing_ports @@ -0,0 +1,41 @@ +load_balancer_test_case { +common_lb_config { + +} +actions { + update_health_flags { + host_priority: 0 + num_healthy_hosts: 2 + random_bytestring: "\x01\x02" + } +} +actions { + prefetch { + + } +} +actions { + prefetch { + + } +} +actions { + choose_host { + + } +} +actions { + choose_host { + + } +} +setup_priority_levels { + num_hosts_in_priority_level: 60000 + random_bytestring: "\x01\x02" +} +setup_priority_levels { + num_hosts_in_priority_level: 60000 + random_bytestring: "\x01\x02" +} +seed_for_prng: 4 +} diff --git a/test/common/upstream/random_load_balancer_corpus/random_slow-unit-eed4596101efb3e737f736c8d5bcd4f0815a8728 b/test/common/upstream/random_load_balancer_corpus/random_slow-unit-eed4596101efb3e737f736c8d5bcd4f0815a8728 new file mode 100644 index 000000000000..7bebf1a2cf96 --- /dev/null +++ b/test/common/upstream/random_load_balancer_corpus/random_slow-unit-eed4596101efb3e737f736c8d5bcd4f0815a8728 @@ -0,0 +1,40 @@ +load_balancer_test_case { + common_lb_config { + update_merge_window { + nanos: 512 + } + } + actions { + update_health_flags { + num_healthy_hosts: 2 + random_bytestring: "\001\002\003\004" + } + } + actions { + prefetch { + } + } + actions { + prefetch { + } + } + actions { + choose_host { + } + } + actions { + update_health_flags { + num_healthy_hosts: 2 + random_bytestring: "\001\002\003\004" + } + } + setup_priority_levels { + num_hosts_in_priority_level: 536903638 + random_bytestring: "\001\002" + } + setup_priority_levels { + num_hosts_in_priority_level: 32726 + random_bytestring: "\001\002" + } + seed_for_prng: 88 +} diff --git a/test/common/upstream/random_load_balancer_corpus/random_slow-unit-test b/test/common/upstream/random_load_balancer_corpus/random_slow-unit-test new file mode 100644 index 000000000000..e1f2fcfdd303 --- /dev/null +++ b/test/common/upstream/random_load_balancer_corpus/random_slow-unit-test @@ -0,0 +1,46 @@ +load_balancer_test_case { + common_lb_config { + update_merge_window { + nanos: 512 + } + } + actions { + update_health_flags { + num_healthy_hosts: 2 + random_bytestring: "\001\002\003\004" + } + } + actions { + prefetch { + } + } + actions { + prefetch { + } + } + actions { + choose_host { + } + } + actions { + update_health_flags { + num_healthy_hosts: 2 + random_bytestring: "\001\002\003\004" + } + } + setup_priority_levels { + num_hosts_in_priority_level: 500 + num_hosts_locality_one: 50 + num_hosts_locality_two: 50 + num_hosts_locality_three: 50 + random_bytestring: "\001\002" + } + setup_priority_levels { + num_hosts_in_priority_level: 500 + num_hosts_locality_one: 50 + num_hosts_locality_two: 50 + num_hosts_locality_three: 50 + random_bytestring: "\001\002" + } + seed_for_prng: 88 +} diff --git a/test/common/upstream/random_load_balancer_corpus/random_test_something b/test/common/upstream/random_load_balancer_corpus/random_test_something new file mode 100644 index 000000000000..172e2cb1c051 --- /dev/null +++ b/test/common/upstream/random_load_balancer_corpus/random_test_something @@ -0,0 +1,41 @@ +load_balancer_test_case { +common_lb_config { + +} +actions { + update_health_flags { + host_priority: 0 + num_healthy_hosts: 2 + random_bytestring: "\x01\x02" + } +} +actions { + prefetch { + + } +} +actions { + 
prefetch { + + } +} +actions { + choose_host { + + } +} +actions { + choose_host { + + } +} +setup_priority_levels { + num_hosts_in_priority_level: 250 + random_bytestring: "\x01\x02" +} +setup_priority_levels { + num_hosts_in_priority_level: 250 + random_bytestring: "\x01\x02" +} +seed_for_prng: 4 +} diff --git a/test/common/upstream/random_load_balancer_corpus/random_timeout-6b0d6b83136a4cf0b9ccd468f11207a792859d43 b/test/common/upstream/random_load_balancer_corpus/random_timeout-6b0d6b83136a4cf0b9ccd468f11207a792859d43 new file mode 100644 index 000000000000..96f3d0efd72b --- /dev/null +++ b/test/common/upstream/random_load_balancer_corpus/random_timeout-6b0d6b83136a4cf0b9ccd468f11207a792859d43 @@ -0,0 +1,31 @@ +load_balancer_test_case { + common_lb_config { + healthy_panic_threshold { + value: 1.35807730621777e-312 + } + zone_aware_lb_config { + routing_enabled { + } + min_cluster_size { + value: 7380836839843720192 + } + fail_traffic_on_panic: true + } + consistent_hashing_lb_config { + hash_balance_factor { + value: 27656 + } + } + } + actions { + update_health_flags { + num_excluded_hosts: 268435456 + random_bytestring: "\x01\x02" + } + } + setup_priority_levels { + num_hosts_in_priority_level: 13534154135 + random_bytestring: "\x01\x02" + } + seed_for_prng: 32 +} diff --git a/test/common/upstream/random_load_balancer_corpus/random_timeout-9144cfbb40b5101ecc28b205b10e6c36a72aae83 b/test/common/upstream/random_load_balancer_corpus/random_timeout-9144cfbb40b5101ecc28b205b10e6c36a72aae83 new file mode 100644 index 000000000000..2fca35ed475d --- /dev/null +++ b/test/common/upstream/random_load_balancer_corpus/random_timeout-9144cfbb40b5101ecc28b205b10e6c36a72aae83 @@ -0,0 +1,25 @@ +load_balancer_test_case { + common_lb_config { + healthy_panic_threshold { + value: 4.88907830238399e-311 + } + consistent_hashing_lb_config { + use_hostname_for_hashing: true + hash_balance_factor { + value: 1024 + } + } + } + actions { + update_health_flags { + host_priority: 270582939648 + num_degraded_hosts: 4194304 + random_bytestring: "\x01\x02\x03\x04" + } + } + setup_priority_levels { + num_hosts_in_priority_level: 1024 + random_bytestring: "\x01\x02" + } + seed_for_prng: 62208 +} diff --git a/test/common/upstream/random_load_balancer_corpus/random_with-locality b/test/common/upstream/random_load_balancer_corpus/random_with-locality new file mode 100644 index 000000000000..3f2f1281845f --- /dev/null +++ b/test/common/upstream/random_load_balancer_corpus/random_with-locality @@ -0,0 +1,49 @@ +load_balancer_test_case { +common_lb_config { + +} +actions { + update_health_flags { + host_priority: 0 + num_healthy_hosts: 2 + num_degraded_hosts: 3 + num_excluded_hosts: 4 + random_bytestring: "\x01\x02\x03\x04\x05\x06" + } +} +actions { + prefetch { + + } +} +actions { + prefetch { + + } +} +actions { + choose_host { + + } +} +actions { + choose_host { + + } +} +setup_priority_levels { + num_hosts_in_priority_level: 20 + num_hosts_locality_a: 3 + num_hosts_locality_b: 4 + num_hosts_locality_c: 5 + random_bytestring: "\x01\x02" +} +setup_priority_levels { + num_hosts_in_priority_level: 20 + num_hosts_locality_a: 3 + num_hosts_locality_b: 4 + num_hosts_locality_c: 5 + random_bytestring: "\x01\x02" +} +seed_for_prng: 1 +} diff --git a/test/common/upstream/random_load_balancer_fuzz.proto b/test/common/upstream/random_load_balancer_fuzz.proto new file mode 100644 index 000000000000..ba277976d0fe --- /dev/null +++ b/test/common/upstream/random_load_balancer_fuzz.proto @@ -0,0 +1,11 @@ +syntax = "proto3"; + +package 
test.common.upstream;
+
+import "validate/validate.proto";
+import "test/common/upstream/load_balancer_fuzz.proto";
+
+// This has no specific logic needed for initialization
+message RandomLoadBalancerTestCase {
+  LoadBalancerTestCase load_balancer_test_case = 1 [(validate.rules).message.required = true];
+}
diff --git a/test/common/upstream/random_load_balancer_fuzz_test.cc b/test/common/upstream/random_load_balancer_fuzz_test.cc
new file mode 100644
index 000000000000..6d5700654989
--- /dev/null
+++ b/test/common/upstream/random_load_balancer_fuzz_test.cc
@@ -0,0 +1,36 @@
+#include <memory>
+
+#include "test/common/upstream/load_balancer_fuzz_base.h"
+#include "test/common/upstream/random_load_balancer_fuzz.pb.validate.h"
+#include "test/fuzz/fuzz_runner.h"
+#include "test/test_common/utility.h"
+
+namespace Envoy {
+namespace Upstream {
+
+DEFINE_PROTO_FUZZER(const test::common::upstream::RandomLoadBalancerTestCase& input) {
+  try {
+    TestUtility::validate(input);
+  } catch (const ProtoValidationException& e) {
+    ENVOY_LOG_MISC(debug, "ProtoValidationException: {}", e.what());
+    return;
+  }
+
+  LoadBalancerFuzzBase load_balancer_fuzz = LoadBalancerFuzzBase();
+  load_balancer_fuzz.initializeLbComponents(input.load_balancer_test_case());
+
+  try {
+    load_balancer_fuzz.lb_ = std::make_unique<RandomLoadBalancer>(
+        load_balancer_fuzz.priority_set_, nullptr, load_balancer_fuzz.stats_,
+        load_balancer_fuzz.runtime_, load_balancer_fuzz.random_,
+        input.load_balancer_test_case().common_lb_config());
+  } catch (EnvoyException& e) {
+    ENVOY_LOG_MISC(debug, "EnvoyException; {}", e.what());
+    return;
+  }
+
+  load_balancer_fuzz.replay(input.load_balancer_test_case().actions());
+}
+
+} // namespace Upstream
+} // namespace Envoy
diff --git a/test/common/upstream/transport_socket_matcher_test.cc b/test/common/upstream/transport_socket_matcher_test.cc
index f770d1f4fdd8..cfde130d1d1f 100644
--- a/test/common/upstream/transport_socket_matcher_test.cc
+++ b/test/common/upstream/transport_socket_matcher_test.cc
@@ -33,7 +33,6 @@ class FakeTransportSocketFactory : public Network::TransportSocketFactory {
   MOCK_METHOD(bool, implementsSecureTransport, (), (const));
   MOCK_METHOD(Network::TransportSocketPtr, createTransportSocket,
               (Network::TransportSocketOptionsSharedPtr), (const));
-  MOCK_METHOD(void, addReadyCb, (std::function<void()>));
 
   FakeTransportSocketFactory(std::string id) : id_(std::move(id)) {}
   std::string id() const { return id_; }
@@ -49,7 +48,6 @@ class FooTransportSocketFactory
   MOCK_METHOD(bool, implementsSecureTransport, (), (const));
   MOCK_METHOD(Network::TransportSocketPtr, createTransportSocket,
               (Network::TransportSocketOptionsSharedPtr), (const));
-  MOCK_METHOD(void, addReadyCb, (std::function<void()>));
 
   Network::TransportSocketFactoryPtr
   createTransportSocketFactory(const Protobuf::Message& proto,
diff --git a/test/extensions/watchdog/abort_action/BUILD b/test/common/watchdog/BUILD
similarity index 51%
rename from test/extensions/watchdog/abort_action/BUILD
rename to test/common/watchdog/BUILD
index b79b1205e86e..85edbe118222 100644
--- a/test/extensions/watchdog/abort_action/BUILD
+++ b/test/common/watchdog/BUILD
@@ -1,50 +1,43 @@
 load(
     "//bazel:envoy_build_system.bzl",
+    "envoy_cc_test",
     "envoy_package",
 )
-load(
-    "//test/extensions:extensions_build_system.bzl",
-    "envoy_extension_cc_test",
-)
 
 licenses(["notice"])  # Apache 2
 
 envoy_package()
 
-envoy_extension_cc_test(
+envoy_cc_test(
    name = "abort_action_test",
    srcs = ["abort_action_test.cc"],
-    extension_name = "envoy.watchdog.abort_action",
    external_deps = [
"abseil_synchronization", ], - tags = ["skip_on_windows"], deps = [ "//include/envoy/common:time_interface", "//include/envoy/registry", "//include/envoy/server:guarddog_config_interface", - "//source/extensions/watchdog/abort_action:abort_action_lib", - "//source/extensions/watchdog/abort_action:config", + "//source/common/watchdog:abort_action_config", + "//source/common/watchdog:abort_action_lib", "//test/common/stats:stat_test_utility_lib", "//test/test_common:utility_lib", "@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto", - "@envoy_api//envoy/extensions/watchdog/abort_action/v3alpha:pkg_cc_proto", + "@envoy_api//envoy/watchdog/v3alpha:pkg_cc_proto", ], ) -envoy_extension_cc_test( - name = "config_test", - srcs = ["config_test.cc"], - extension_name = "envoy.watchdog.abort_action", - tags = ["skip_on_windows"], +envoy_cc_test( + name = "abort_action_config_test", + srcs = ["abort_action_config_test.cc"], deps = [ "//include/envoy/registry", "//include/envoy/server:guarddog_config_interface", - "//source/extensions/watchdog/abort_action:abort_action_lib", - "//source/extensions/watchdog/abort_action:config", + "//source/common/watchdog:abort_action_config", + "//source/common/watchdog:abort_action_lib", "//test/common/stats:stat_test_utility_lib", "//test/mocks/event:event_mocks", "//test/test_common:utility_lib", - "@envoy_api//envoy/extensions/watchdog/abort_action/v3alpha:pkg_cc_proto", + "@envoy_api//envoy/watchdog/v3alpha:pkg_cc_proto", ], ) diff --git a/test/extensions/watchdog/abort_action/config_test.cc b/test/common/watchdog/abort_action_config_test.cc similarity index 79% rename from test/extensions/watchdog/abort_action/config_test.cc rename to test/common/watchdog/abort_action_config_test.cc index 7e2b716ac8f3..6c3f729fe1a4 100644 --- a/test/extensions/watchdog/abort_action/config_test.cc +++ b/test/common/watchdog/abort_action_config_test.cc @@ -1,8 +1,8 @@ -#include "envoy/extensions/watchdog/abort_action/v3alpha/abort_action.pb.h" #include "envoy/registry/registry.h" #include "envoy/server/guarddog_config.h" +#include "envoy/watchdog/v3alpha/abort_action.pb.h" -#include "extensions/watchdog/abort_action/config.h" +#include "common/watchdog/abort_action_config.h" #include "test/common/stats/stat_test_utility.h" #include "test/mocks/event/mocks.h" @@ -11,9 +11,7 @@ #include "gtest/gtest.h" namespace Envoy { -namespace Extensions { namespace Watchdog { -namespace AbortAction { namespace { TEST(AbortActionFactoryTest, CanCreateAction) { @@ -31,7 +29,7 @@ TEST(AbortActionFactoryTest, CanCreateAction) { "name": "envoy.watchdog.abort_action", "typed_config": { "@type": "type.googleapis.com/udpa.type.v1.TypedStruct", - "type_url": "type.googleapis.com/envoy.extensions.watchdog.abort_action.v3alpha.AbortActionConfig", + "type_url": "type.googleapis.com/envoy.watchdog.abort_action.v3alpha.AbortActionConfig", "value": { "wait_duration": "2s", } @@ -50,7 +48,5 @@ TEST(AbortActionFactoryTest, CanCreateAction) { } } // namespace -} // namespace AbortAction } // namespace Watchdog -} // namespace Extensions } // namespace Envoy diff --git a/test/extensions/watchdog/abort_action/abort_action_test.cc b/test/common/watchdog/abort_action_test.cc similarity index 90% rename from test/extensions/watchdog/abort_action/abort_action_test.cc rename to test/common/watchdog/abort_action_test.cc index e648fbe66c60..7f2f1bf4606e 100644 --- a/test/extensions/watchdog/abort_action/abort_action_test.cc +++ b/test/common/watchdog/abort_action_test.cc @@ -4,12 +4,12 @@ #include "envoy/common/time.h" 
#include "envoy/config/bootstrap/v3/bootstrap.pb.h" #include "envoy/event/dispatcher.h" -#include "envoy/extensions/watchdog/abort_action/v3alpha/abort_action.pb.h" #include "envoy/server/guarddog_config.h" #include "envoy/thread/thread.h" +#include "envoy/watchdog/v3alpha/abort_action.pb.h" -#include "extensions/watchdog/abort_action/abort_action.h" -#include "extensions/watchdog/abort_action/config.h" +#include "common/watchdog/abort_action.h" +#include "common/watchdog/abort_action_config.h" #include "test/common/stats/stat_test_utility.h" #include "test/test_common/utility.h" @@ -18,12 +18,10 @@ #include "gtest/gtest.h" namespace Envoy { -namespace Extensions { namespace Watchdog { -namespace AbortAction { namespace { -using AbortActionConfig = envoy::extensions::watchdog::abort_action::v3alpha::AbortActionConfig; +using AbortActionConfig = envoy::watchdog::v3alpha::AbortActionConfig; class AbortActionTest : public testing::Test { protected: @@ -54,8 +52,7 @@ TEST_F(AbortActionTest, ShouldNotAbortIfNoTids) { action_->run(envoy::config::bootstrap::v3::Watchdog::WatchdogAction::KILL, tid_ltt_pairs, now); } -// insufficient signal support on Windows. -TEST_F(AbortActionTest, CanKillThread) { +TEST_F(AbortActionTest, ShouldKillTheProcess) { AbortActionConfig config; config.mutable_wait_duration()->set_seconds(1); action_ = std::make_unique(config, context_); @@ -83,6 +80,8 @@ TEST_F(AbortActionTest, CanKillThread) { EXPECT_DEATH(die_function(), ""); } +#ifndef WIN32 +// insufficient signal support on Windows. void handler(int sig, siginfo_t* /*siginfo*/, void* /*context*/) { std::cout << "Eating signal :" << std::to_string(sig) << ". will ignore it." << std::endl; signal(SIGABRT, SIG_IGN); @@ -122,9 +121,8 @@ TEST_F(AbortActionTest, PanicsIfThreadDoesNotDie) { EXPECT_DEATH(die_function(), "aborting from Watchdog AbortAction instead"); } +#endif } // namespace -} // namespace AbortAction } // namespace Watchdog -} // namespace Extensions } // namespace Envoy diff --git a/test/extensions/common/tap/common.cc b/test/extensions/common/tap/common.cc index 8471de4d7686..219910483166 100644 --- a/test/extensions/common/tap/common.cc +++ b/test/extensions/common/tap/common.cc @@ -1,5 +1,7 @@ #include "test/extensions/common/tap/common.h" +#include + #include "envoy/data/tap/v3/wrapper.pb.h" namespace envoy { @@ -21,6 +23,37 @@ namespace Extensions { namespace Common { namespace Tap { +std::vector readTracesFromPath(const std::string& path_prefix) { + // Find the written .pb file and verify it. 
+ auto files = TestUtility::listFiles(path_prefix, false); + auto pb_file_name = std::find_if(files.begin(), files.end(), [](const std::string& s) { + return absl::EndsWith(s, MessageUtil::FileExtensions::get().ProtoBinaryLengthDelimited); + }); + EXPECT_NE(pb_file_name, files.end()); + return readTracesFromFile(*pb_file_name); +} + +std::vector<envoy::data::tap::v3::TraceWrapper> readTracesFromFile(const std::string& file) { + std::vector<envoy::data::tap::v3::TraceWrapper> traces; + std::ifstream pb_file(file, std::ios_base::binary); + Protobuf::io::IstreamInputStream stream(&pb_file); + Protobuf::io::CodedInputStream coded_stream(&stream); + while (true) { + uint32_t message_size; + if (!coded_stream.ReadVarint32(&message_size)) { + break; + } + + traces.emplace_back(); + + auto limit = coded_stream.PushLimit(message_size); + EXPECT_TRUE(traces.back().ParseFromCodedStream(&coded_stream)); + coded_stream.PopLimit(limit); + } + + return traces; +} + MockPerTapSinkHandleManager::MockPerTapSinkHandleManager() = default; MockPerTapSinkHandleManager::~MockPerTapSinkHandleManager() = default; diff --git a/test/extensions/common/tap/common.h b/test/extensions/common/tap/common.h index 03b0d0b42840..023e61a82444 100644 --- a/test/extensions/common/tap/common.h +++ b/test/extensions/common/tap/common.h @@ -36,6 +36,13 @@ MATCHER_P(TraceEqual, rhs, "") { namespace Common { namespace Tap { +// Reads a PROTO_BINARY_LENGTH_DELIMITED set of messages from a file, found within the specified +// path prefix. +std::vector<envoy::data::tap::v3::TraceWrapper> readTracesFromPath(const std::string& path_prefix); + +// Reads a PROTO_BINARY_LENGTH_DELIMITED set of messages from a file. +std::vector<envoy::data::tap::v3::TraceWrapper> readTracesFromFile(const std::string& file); + class MockPerTapSinkHandleManager : public PerTapSinkHandleManager { public: MockPerTapSinkHandleManager();
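For reference, the PROTO_BINARY_LENGTH_DELIMITED framing that readTracesFromFile() parses is just a varint32 length prefix in front of each serialized message. A minimal sketch of the matching writer, as a hypothetical standalone helper (writeLengthDelimited is not part of this change; it uses only stock protobuf APIs):

#include <cstdint>
#include <fstream>
#include <string>
#include <vector>

#include "google/protobuf/io/coded_stream.h"
#include "google/protobuf/io/zero_copy_stream_impl.h"

// Writes each message prefixed by its varint32-encoded size; the inverse of the
// ReadVarint32()/PushLimit() loop in readTracesFromFile().
template <class MessageType>
void writeLengthDelimited(const std::vector<MessageType>& messages, const std::string& file) {
  std::ofstream out(file, std::ios_base::binary);
  google::protobuf::io::OstreamOutputStream stream(&out);
  google::protobuf::io::CodedOutputStream coded_stream(&stream);
  for (const MessageType& message : messages) {
    coded_stream.WriteVarint32(static_cast<uint32_t>(message.ByteSizeLong()));
    message.SerializeToCodedStream(&coded_stream);
  }
}

diff --git a/test/extensions/common/wasm/wasm_vm_test.cc b/test/extensions/common/wasm/wasm_vm_test.cc index e0775dfb0866..af7816f20d0a 100644 --- a/test/extensions/common/wasm/wasm_vm_test.cc +++ b/test/extensions/common/wasm/wasm_vm_test.cc @@ -144,13 +144,6 @@ TEST_P(WasmVmTest, V8BadCode) { } TEST_P(WasmVmTest, V8Code) { -#ifndef NDEBUG - // Do not execute pre-compilation tests in debug mode because V8 will fail to load because the - // flags do not match. TODO: restore this test when the rust toolchain is integrated. - if (GetParam() == 1) { - return; - } -#endif auto wasm_vm = createWasmVm("envoy.wasm.runtime.v8", scope_); ASSERT_TRUE(wasm_vm != nullptr); EXPECT_TRUE(wasm_vm->runtime() == "v8"); @@ -171,13 +164,6 @@ TEST_P(WasmVmTest, V8Code) { } TEST_P(WasmVmTest, V8BadHostFunctions) { -#ifndef NDEBUG - // Do not execute pre-compilation tests in debug mode because V8 will fail to load because the - // flags do not match. TODO: restore this test when the rust toolchain is integrated. - if (GetParam() == 1) { - return; - } -#endif auto wasm_vm = createWasmVm("envoy.wasm.runtime.v8", scope_); ASSERT_TRUE(wasm_vm != nullptr); @@ -199,13 +185,6 @@ TEST_P(WasmVmTest, V8BadHostFunctions) { } TEST_P(WasmVmTest, V8BadModuleFunctions) { -#ifndef NDEBUG - // Do not execute pre-compilation tests in debug mode because V8 will fail to load because the - // flags do not match. TODO: restore this test when the rust toolchain is integrated.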
- if (GetParam() == 1) { - return; - } -#endif auto wasm_vm = createWasmVm("envoy.wasm.runtime.v8", scope_); ASSERT_TRUE(wasm_vm != nullptr); @@ -234,13 +213,6 @@ TEST_P(WasmVmTest, V8BadModuleFunctions) { } TEST_P(WasmVmTest, V8FunctionCalls) { -#ifndef NDEBUG - // Do not execute pre-compilation tests in debug mode because V8 will fail to load because the - // flags do not match. TODO: restore this test when the rust toolchain is integrated. - if (GetParam() == 1) { - return; - } -#endif auto wasm_vm = createWasmVm("envoy.wasm.runtime.v8", scope_); ASSERT_TRUE(wasm_vm != nullptr); @@ -279,13 +251,6 @@ TEST_P(WasmVmTest, V8FunctionCalls) { } TEST_P(WasmVmTest, V8Memory) { -#ifndef NDEBUG - // Do not execute pre-compilation tests in debug mode because V8 will fail to load because the - // flags do not match. TODO: restore this test when the rust toolchain is integrated. - if (GetParam() == 1) { - return; - } -#endif auto wasm_vm = createWasmVm("envoy.wasm.runtime.v8", scope_); ASSERT_TRUE(wasm_vm != nullptr); diff --git a/test/extensions/filters/http/admission_control/BUILD b/test/extensions/filters/http/admission_control/BUILD index 4551c349a40f..809a7c66a4e9 100644 --- a/test/extensions/filters/http/admission_control/BUILD +++ b/test/extensions/filters/http/admission_control/BUILD @@ -37,6 +37,7 @@ envoy_extension_cc_test( "//source/common/http:header_map_lib", "//source/common/http:headers_lib", "//source/extensions/filters/http/admission_control:admission_control_filter_lib", + "//source/extensions/filters/http/admission_control:config", "//test/mocks/http:http_mocks", "//test/mocks/server:factory_context_mocks", "//test/mocks/thread_local:thread_local_mocks", diff --git a/test/extensions/filters/http/admission_control/config_test.cc b/test/extensions/filters/http/admission_control/config_test.cc index df11bbeb6433..2fba5f26016f 100644 --- a/test/extensions/filters/http/admission_control/config_test.cc +++ b/test/extensions/filters/http/admission_control/config_test.cc @@ -6,6 +6,7 @@ #include "common/stats/isolated_store_impl.h" #include "extensions/filters/http/admission_control/admission_control.h" +#include "extensions/filters/http/admission_control/config.h" #include "extensions/filters/http/admission_control/evaluators/success_criteria_evaluator.h" #include "test/mocks/runtime/mocks.h" @@ -46,6 +47,36 @@ class AdmissionControlConfigTest : public testing::Test { NiceMock<Random::MockRandomGenerator> random_; }; +// Ensure config ingestion throws an exception if it is passed a config with a default value of 0 +// for sr_threshold. If the exception were not thrown, a default value of 0 for sr_threshold would +// induce a divide-by-zero error. +TEST_F(AdmissionControlConfigTest, ZeroSuccessRateThreshold) { + AdmissionControlFilterFactory admission_control_filter_factory; + const std::string yaml = R"EOF( +enabled: + default_value: false + runtime_key: "foo.enabled" +sampling_window: 1337s +sr_threshold: + default_value: + value: 0 + runtime_key: "foo.sr_threshold" +aggression: + default_value: 4.2 + runtime_key: "foo.aggression" +success_criteria: + http_criteria: + grpc_criteria: +)EOF"; + + AdmissionControlProto proto; + TestUtility::loadFromYamlAndValidate(yaml, proto); + NiceMock<Server::Configuration::MockFactoryContext> factory_context; + EXPECT_THROW_WITH_MESSAGE(admission_control_filter_factory.createFilterFactoryFromProtoTyped( + proto, "whatever", factory_context), + EnvoyException, "Success Rate Threshold cannot be zero percent"); +} +
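The new ZeroSuccessRateThreshold test above pins down the guard in createFilterFactoryFromProtoTyped(): a zero sr_threshold has to be rejected at config-ingest time because the threshold is later used as a divisor. A minimal sketch of such a check, as a hypothetical standalone helper rather than the filter's actual code:

#include "envoy/common/exception.h"

// Rejects a zero success-rate threshold up front; dividing by it later would
// otherwise be a divide-by-zero. Hypothetical helper for illustration only.
double successRateThresholdOrThrow(double sr_threshold_percent) {
  if (sr_threshold_percent == 0.0) {
    throw Envoy::EnvoyException("Success Rate Threshold cannot be zero percent");
  }
  return sr_threshold_percent / 100.0;
}

// Verify the configuration when all fields are set.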
TEST_F(AdmissionControlConfigTest, BasicTestAllConfigured) { const std::string yaml = R"EOF( diff --git a/test/extensions/filters/http/common/fuzz/filter_corpus/clusterfuzz-testcase-filter_fuzz_test-5914972389113856 b/test/extensions/filters/http/common/fuzz/filter_corpus/clusterfuzz-testcase-filter_fuzz_test-5914972389113856 new file mode 100644 index 000000000000..5bb334c90502 --- /dev/null +++ b/test/extensions/filters/http/common/fuzz/filter_corpus/clusterfuzz-testcase-filter_fuzz_test-5914972389113856 @@ -0,0 +1,12 @@ +config { + name: "envoy.filters.http.admission_control" + typed_config { + type_url: "type.googleapis.com/envoy.extensions.filters.http.admission_control.v3alpha.AdmissionControl" + value: "\022\000\032\000*\003\022\001$" + } +} +data { + http_body { + data: "\022\000" + } +} diff --git a/test/extensions/filters/http/common/fuzz/uber_per_filter.cc b/test/extensions/filters/http/common/fuzz/uber_per_filter.cc index c873b7bdd2da..515f4c632a26 100644 --- a/test/extensions/filters/http/common/fuzz/uber_per_filter.cc +++ b/test/extensions/filters/http/common/fuzz/uber_per_filter.cc @@ -90,25 +90,16 @@ void cleanAttachmentTemplate(Protobuf::Message* message) { void cleanTapConfig(Protobuf::Message* message) { envoy::extensions::filters::http::tap::v3::Tap& config = dynamic_cast(*message); - if (config.common_config().config_type_case() == - envoy::extensions::common::tap::v3::CommonExtensionConfig::ConfigTypeCase::kTapdsConfig) { - config.mutable_common_config()->mutable_static_config()->mutable_match_config()->set_any_match( - true); - } // TODO(samflattery): remove once StreamingGrpcSink is implemented // a static config filter is required to have one sink, but since validation isn't performed on // the filter until after this function runs, we have to manually check that there are sinks // before checking that they are not StreamingGrpc - else if (config.common_config().config_type_case() == - envoy::extensions::common::tap::v3::CommonExtensionConfig::ConfigTypeCase:: - kStaticConfig && - !config.common_config().static_config().output_config().sinks().empty() && - config.common_config() - .static_config() - .output_config() - .sinks(0) - .output_sink_type_case() == - envoy::config::tap::v3::OutputSink::OutputSinkTypeCase::kStreamingGrpc) { + if (config.common_config().config_type_case() == + envoy::extensions::common::tap::v3::CommonExtensionConfig::ConfigTypeCase:: + kStaticConfig && + !config.common_config().static_config().output_config().sinks().empty() && + config.common_config().static_config().output_config().sinks(0).output_sink_type_case() == + envoy::config::tap::v3::OutputSink::OutputSinkTypeCase::kStreamingGrpc) { // will be caught in UberFilterFuzzer::fuzz throw EnvoyException("received input with not implemented output_sink_type StreamingGrpcSink"); } diff --git a/test/extensions/filters/http/lua/BUILD b/test/extensions/filters/http/lua/BUILD index 9980083db8a4..729d383c5db7 100644 --- a/test/extensions/filters/http/lua/BUILD +++ b/test/extensions/filters/http/lua/BUILD @@ -38,6 +38,7 @@ envoy_extension_cc_test( srcs = ["wrappers_test.cc"], extension_name = "envoy.filters.http.lua", deps = [ + "//source/common/network:address_lib", "//source/common/stream_info:stream_info_lib", "//source/extensions/filters/http/lua:wrappers_lib", "//test/extensions/filters/common/lua:lua_wrappers_lib", diff --git a/test/extensions/filters/http/lua/lua_integration_test.cc b/test/extensions/filters/http/lua/lua_integration_test.cc index 7c99faea6df1..92f8e7e141a6 100644 --- 
a/test/extensions/filters/http/lua/lua_integration_test.cc +++ b/test/extensions/filters/http/lua/lua_integration_test.cc @@ -266,6 +266,10 @@ name: lua end request_handle:headers():add("request_protocol", request_handle:streamInfo():protocol()) request_handle:headers():add("request_dynamic_metadata_value", dynamic_metadata_value) + request_handle:headers():add("request_downstream_local_address_value", + request_handle:streamInfo():downstreamLocalAddress()) + request_handle:headers():add("request_downstream_directremote_address_value", + request_handle:streamInfo():downstreamDirectRemoteAddress()) end function envoy_on_response(response_handle) @@ -325,6 +329,20 @@ name: lua ->value() .getStringView()); + EXPECT_TRUE( + absl::StrContains(upstream_request_->headers() + .get(Http::LowerCaseString("request_downstream_local_address_value"))[0] + ->value() + .getStringView(), + GetParam() == Network::Address::IpVersion::v4 ? "127.0.0.1:" : "[::1]:")); + + EXPECT_TRUE(absl::StrContains( + upstream_request_->headers() + .get(Http::LowerCaseString("request_downstream_directremote_address_value"))[0] + ->value() + .getStringView(), + GetParam() == Network::Address::IpVersion::v4 ? "127.0.0.1:" : "[::1]:")); + Http::TestResponseHeaderMapImpl response_headers{{":status", "200"}, {"foo", "bar"}}; upstream_request_->encodeHeaders(response_headers, false); Buffer::OwnedImpl response_data1("good"); diff --git a/test/extensions/filters/http/lua/wrappers_test.cc b/test/extensions/filters/http/lua/wrappers_test.cc index 990016db3f15..8d671df5ce58 100644 --- a/test/extensions/filters/http/lua/wrappers_test.cc +++ b/test/extensions/filters/http/lua/wrappers_test.cc @@ -1,6 +1,7 @@ #include "envoy/config/core/v3/base.pb.h" #include "common/http/utility.h" +#include "common/network/address_impl.h" #include "common/stream_info/stream_info_impl.h" #include "extensions/filters/http/lua/wrappers.h" @@ -11,6 +12,7 @@ using testing::InSequence; using testing::ReturnPointee; +using testing::ReturnRef; namespace Envoy { namespace Extensions { @@ -269,6 +271,35 @@ TEST_F(LuaStreamInfoWrapperTest, ReturnCurrentProtocol) { expectToPrintCurrentProtocol(Http::Protocol::Http2); } +// Verify downstream local addresses and downstream direct remote addresses are available from +// stream info wrapper. +TEST_F(LuaStreamInfoWrapperTest, ReturnCurrentDownstreamAddresses) { + const std::string SCRIPT{R"EOF( + function callMe(object) + testPrint(object:downstreamLocalAddress()) + testPrint(object:downstreamDirectRemoteAddress()) + end + )EOF"}; + + InSequence s; + setup(SCRIPT); + + NiceMock stream_info; + auto address = Network::Address::InstanceConstSharedPtr{ + new Network::Address::Ipv4Instance("127.0.0.1", 8000)}; + auto downstream_direct_remote = + Network::Address::InstanceConstSharedPtr{new Network::Address::Ipv4Instance("8.8.8.8", 3000)}; + ON_CALL(stream_info, downstreamLocalAddress()).WillByDefault(ReturnRef(address)); + ON_CALL(stream_info, downstreamDirectRemoteAddress()) + .WillByDefault(ReturnRef(downstream_direct_remote)); + Filters::Common::Lua::LuaDeathRef wrapper( + StreamInfoWrapper::create(coroutine_->luaState(), stream_info), true); + EXPECT_CALL(printer_, testPrint(address->asString())); + EXPECT_CALL(printer_, testPrint(downstream_direct_remote->asString())); + start("callMe"); + wrapper.reset(); +} + // Set, get and iterate stream info dynamic metadata. 
TEST_F(LuaStreamInfoWrapperTest, SetGetAndIterateDynamicMetadata) { const std::string SCRIPT{R"EOF( diff --git a/test/extensions/filters/http/tap/BUILD b/test/extensions/filters/http/tap/BUILD index 357d6fcecaa4..ff491cc643da 100644 --- a/test/extensions/filters/http/tap/BUILD +++ b/test/extensions/filters/http/tap/BUILD @@ -54,6 +54,7 @@ envoy_extension_cc_test( extension_name = "envoy.filters.http.tap", deps = [ "//source/extensions/filters/http/tap:config", + "//test/extensions/common/tap:common", "//test/integration:http_integration_lib", "//test/test_common:utility_lib", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", diff --git a/test/extensions/filters/http/tap/tap_filter_integration_test.cc b/test/extensions/filters/http/tap/tap_filter_integration_test.cc index fdfa591bc62e..de5562e7c322 100644 --- a/test/extensions/filters/http/tap/tap_filter_integration_test.cc +++ b/test/extensions/filters/http/tap/tap_filter_integration_test.cc @@ -1,8 +1,7 @@ -#include - #include "envoy/config/core/v3/base.pb.h" #include "envoy/data/tap/v3/wrapper.pb.h" +#include "test/extensions/common/tap/common.h" #include "test/integration/http_integration.h" #include "test/test_common/utility.h" @@ -99,35 +98,6 @@ class TapIntegrationTest : public testing::TestWithParam - readTracesFromFile(const std::string& path_prefix) { - // Find the written .pb file and verify it. - auto files = TestUtility::listFiles(path_prefix, false); - auto pb_file_name = std::find_if(files.begin(), files.end(), [](const std::string& s) { - return absl::EndsWith(s, MessageUtil::FileExtensions::get().ProtoBinaryLengthDelimited); - }); - EXPECT_NE(pb_file_name, files.end()); - - std::vector traces; - std::ifstream pb_file(*pb_file_name, std::ios_base::binary); - Protobuf::io::IstreamInputStream stream(&pb_file); - Protobuf::io::CodedInputStream coded_stream(&stream); - while (true) { - uint32_t message_size; - if (!coded_stream.ReadVarint32(&message_size)) { - break; - } - - traces.emplace_back(); - - auto limit = coded_stream.PushLimit(message_size); - EXPECT_TRUE(traces.back().ParseFromCodedStream(&coded_stream)); - coded_stream.PopLimit(limit); - } - - return traces; - } - void verifyStaticFilePerTap(const std::string& filter_config) { const std::string path_prefix = getTempPathPrefix(); initializeFilter(fmt::format(filter_config, path_prefix)); @@ -532,7 +502,8 @@ name: tap codec_client_->close(); test_server_->waitForCounterGe("http.config_test.downstream_cx_destroy", 1); - std::vector traces = readTracesFromFile(path_prefix); + std::vector traces = + Extensions::Common::Tap::readTracesFromPath(path_prefix); ASSERT_EQ(6, traces.size()); EXPECT_TRUE(traces[0].http_streamed_trace_segment().has_request_headers()); EXPECT_EQ("hello", traces[1].http_streamed_trace_segment().request_body_chunk().as_bytes()); @@ -577,7 +548,8 @@ name: tap codec_client_->close(); test_server_->waitForCounterGe("http.config_test.downstream_cx_destroy", 1); - std::vector traces = readTracesFromFile(path_prefix); + std::vector traces = + Extensions::Common::Tap::readTracesFromPath(path_prefix); ASSERT_EQ(6, traces.size()); EXPECT_TRUE(traces[0].http_streamed_trace_segment().has_request_headers()); EXPECT_EQ("hello", traces[1].http_streamed_trace_segment().request_body_chunk().as_bytes()); diff --git a/test/extensions/transport_sockets/tls/integration/BUILD b/test/extensions/transport_sockets/tls/integration/BUILD index d6cb3ad636b3..277e6346cb21 100644 --- a/test/extensions/transport_sockets/tls/integration/BUILD +++ 
b/test/extensions/transport_sockets/tls/integration/BUILD @@ -25,6 +25,7 @@ envoy_cc_test( "//source/extensions/transport_sockets/tls:config", "//source/extensions/transport_sockets/tls:context_config_lib", "//source/extensions/transport_sockets/tls:context_lib", + "//test/extensions/common/tap:common", "//test/integration:http_integration_lib", "//test/mocks/secret:secret_mocks", "//test/test_common:utility_lib", diff --git a/test/extensions/transport_sockets/tls/integration/ssl_integration_test.cc b/test/extensions/transport_sockets/tls/integration/ssl_integration_test.cc index 297f328071e7..d9ee113fbfec 100644 --- a/test/extensions/transport_sockets/tls/integration/ssl_integration_test.cc +++ b/test/extensions/transport_sockets/tls/integration/ssl_integration_test.cc @@ -19,6 +19,7 @@ #include "extensions/transport_sockets/tls/context_config_impl.h" #include "extensions/transport_sockets/tls/context_manager_impl.h" +#include "test/extensions/common/tap/common.h" #include "test/integration/integration.h" #include "test/integration/utility.h" #include "test/test_common/network_utility.h" @@ -463,6 +464,7 @@ class SslTapIntegrationTest : public SslIntegrationTest { if (max_tx_bytes_.has_value()) { output_config->mutable_max_buffered_tx_bytes()->set_value(max_tx_bytes_.value()); } + output_config->set_streaming(streaming_tap_); auto* output_sink = output_config->mutable_sinks()->Add(); output_sink->set_format(format_); @@ -477,6 +479,7 @@ class SslTapIntegrationTest : public SslIntegrationTest { absl::optional max_rx_bytes_; absl::optional max_tx_bytes_; bool upstream_tap_{}; + bool streaming_tap_{}; }; INSTANTIATE_TEST_SUITE_P(IpVersions, SslTapIntegrationTest, @@ -651,5 +654,44 @@ TEST_P(SslTapIntegrationTest, RequestWithJsonBodyAsStringUpstreamTap) { EXPECT_TRUE(trace.socket_buffered_trace().write_truncated()); } +// Validate a single request with length delimited binary proto output. This test uses an upstream +// tap. +TEST_P(SslTapIntegrationTest, RequestWithStreamingUpstreamTap) { + upstream_tap_ = true; + streaming_tap_ = true; + max_rx_bytes_ = 5; + max_tx_bytes_ = 4; + + format_ = envoy::config::tap::v3::OutputSink::PROTO_BINARY_LENGTH_DELIMITED; + ConnectionCreationFunction creator = [&]() -> Network::ClientConnectionPtr { + return makeSslClientConnection({}); + }; + const uint64_t id = Network::ConnectionImpl::nextGlobalIdForTest() + 2; + testRouterRequestAndResponseWithBody(512, 1024, false, false, &creator); + checkStats(); + codec_client_->close(); + test_server_->waitForCounterGe("http.config_test.downstream_cx_destroy", 1); + test_server_.reset(); + + // This must be done after server shutdown so that connection pool connections are closed and + // the tap written. + std::vector traces = + Extensions::Common::Tap::readTracesFromFile( + fmt::format("{}_{}.pb_length_delimited", path_prefix_, id)); + ASSERT_EQ(4, traces.size()); + + // The initial connection message has no local address, but has a remote address (not connected + // yet). + EXPECT_TRUE(traces[0].socket_streamed_trace_segment().has_connection()); + EXPECT_FALSE(traces[0].socket_streamed_trace_segment().connection().has_local_address()); + EXPECT_TRUE(traces[0].socket_streamed_trace_segment().connection().has_remote_address()); + + // Verify truncated request/response data. 
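+ // (max_tx_bytes_ = 4 truncates the captured request write to its first 4 bytes, "POST";
+ // max_rx_bytes_ = 5 truncates the captured response read to its first 5 bytes, "HTTP/".)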
+ EXPECT_EQ(traces[1].socket_streamed_trace_segment().event().write().data().as_bytes(), "POST"); + EXPECT_TRUE(traces[1].socket_streamed_trace_segment().event().write().data().truncated()); + EXPECT_EQ(traces[2].socket_streamed_trace_segment().event().read().data().as_bytes(), "HTTP/"); + EXPECT_TRUE(traces[2].socket_streamed_trace_segment().event().read().data().truncated()); +} + } // namespace Ssl } // namespace Envoy diff --git a/test/extensions/transport_sockets/tls/ssl_socket_test.cc b/test/extensions/transport_sockets/tls/ssl_socket_test.cc index b06a9ac66203..b4bdb84e5737 100644 --- a/test/extensions/transport_sockets/tls/ssl_socket_test.cc +++ b/test/extensions/transport_sockets/tls/ssl_socket_test.cc @@ -59,7 +59,6 @@ using testing::ContainsRegex; using testing::DoAll; using testing::InSequence; using testing::Invoke; -using testing::MockFunction; using testing::NiceMock; using testing::Return; using testing::ReturnRef; @@ -4491,12 +4490,6 @@ TEST_P(SslSocketTest, DownstreamNotReadySslSocket) { ContextManagerImpl manager(time_system_); ServerSslSocketFactory server_ssl_socket_factory(std::move(server_cfg), manager, stats_store, std::vector{}); - - // Add a secrets ready callback that should not be invoked. - MockFunction mock_callback_; - EXPECT_CALL(mock_callback_, Call()).Times(0); - server_ssl_socket_factory.addReadyCb(mock_callback_.AsStdFunction()); - auto transport_socket = server_ssl_socket_factory.createTransportSocket(nullptr); EXPECT_EQ(EMPTY_STRING, transport_socket->protocol()); EXPECT_EQ(nullptr, transport_socket->ssl()); @@ -4532,12 +4525,6 @@ TEST_P(SslSocketTest, UpstreamNotReadySslSocket) { ContextManagerImpl manager(time_system_); ClientSslSocketFactory client_ssl_socket_factory(std::move(client_cfg), manager, stats_store); - - // Add a secrets ready callback that should not be invoked. - MockFunction mock_callback_; - EXPECT_CALL(mock_callback_, Call()).Times(0); - client_ssl_socket_factory.addReadyCb(mock_callback_.AsStdFunction()); - auto transport_socket = client_ssl_socket_factory.createTransportSocket(nullptr); EXPECT_EQ(EMPTY_STRING, transport_socket->protocol()); EXPECT_EQ(nullptr, transport_socket->ssl()); @@ -4549,97 +4536,6 @@ TEST_P(SslSocketTest, UpstreamNotReadySslSocket) { EXPECT_EQ("TLS error: Secret is not supplied by SDS", transport_socket->failureReason()); } -// Validate that secrets callbacks are invoked when secrets become ready. 
-TEST_P(SslSocketTest, ClientAddSecretsReadyCallback) { - Stats::TestUtil::TestStore stats_store; - NiceMock local_info; - testing::NiceMock factory_context; - NiceMock init_manager; - NiceMock dispatcher; - EXPECT_CALL(factory_context, localInfo()).WillOnce(ReturnRef(local_info)); - EXPECT_CALL(factory_context, stats()).WillOnce(ReturnRef(stats_store)); - EXPECT_CALL(factory_context, initManager()).WillRepeatedly(ReturnRef(init_manager)); - EXPECT_CALL(factory_context, dispatcher()).WillRepeatedly(ReturnRef(dispatcher)); - - envoy::extensions::transport_sockets::tls::v3::UpstreamTlsContext tls_context; - auto sds_secret_configs = - tls_context.mutable_common_tls_context()->mutable_tls_certificate_sds_secret_configs()->Add(); - sds_secret_configs->set_name("abc.com"); - sds_secret_configs->mutable_sds_config(); - auto client_cfg = std::make_unique(tls_context, factory_context); - EXPECT_TRUE(client_cfg->tlsCertificates().empty()); - EXPECT_FALSE(client_cfg->isReady()); - - NiceMock context_manager; - ClientSslSocketFactory client_ssl_socket_factory(std::move(client_cfg), context_manager, - stats_store); - - // Add a secrets ready callback. It should not be invoked until onAddOrUpdateSecret() is called. - MockFunction mock_callback_; - EXPECT_CALL(mock_callback_, Call()).Times(0); - client_ssl_socket_factory.addReadyCb(mock_callback_.AsStdFunction()); - - // Call onAddOrUpdateSecret, but return a null ssl_ctx. This should not invoke the callback. - EXPECT_CALL(context_manager, createSslClientContext(_, _)).WillOnce(Return(nullptr)); - client_ssl_socket_factory.onAddOrUpdateSecret(); - - EXPECT_CALL(mock_callback_, Call()); - Ssl::ClientContextSharedPtr mock_context = std::make_shared(); - EXPECT_CALL(context_manager, createSslClientContext(_, _)).WillOnce(Return(mock_context)); - client_ssl_socket_factory.onAddOrUpdateSecret(); - - // Add another callback, it should be invoked immediately. - MockFunction second_callback_; - EXPECT_CALL(second_callback_, Call()); - client_ssl_socket_factory.addReadyCb(second_callback_.AsStdFunction()); -} - -// Validate that secrets callbacks are invoked when secrets become ready. -TEST_P(SslSocketTest, ServerAddSecretsReadyCallback) { - Stats::TestUtil::TestStore stats_store; - NiceMock local_info; - testing::NiceMock factory_context; - NiceMock init_manager; - NiceMock dispatcher; - EXPECT_CALL(factory_context, localInfo()).WillOnce(ReturnRef(local_info)); - EXPECT_CALL(factory_context, stats()).WillOnce(ReturnRef(stats_store)); - EXPECT_CALL(factory_context, initManager()).WillRepeatedly(ReturnRef(init_manager)); - EXPECT_CALL(factory_context, dispatcher()).WillRepeatedly(ReturnRef(dispatcher)); - - envoy::extensions::transport_sockets::tls::v3::DownstreamTlsContext tls_context; - auto sds_secret_configs = - tls_context.mutable_common_tls_context()->mutable_tls_certificate_sds_secret_configs()->Add(); - sds_secret_configs->set_name("abc.com"); - sds_secret_configs->mutable_sds_config(); - auto server_cfg = std::make_unique(tls_context, factory_context); - EXPECT_TRUE(server_cfg->tlsCertificates().empty()); - EXPECT_FALSE(server_cfg->isReady()); - - NiceMock context_manager; - ServerSslSocketFactory server_ssl_socket_factory(std::move(server_cfg), context_manager, - stats_store, std::vector{}); - - // Add a secrets ready callback. It should not be invoked until onAddOrUpdateSecret() is called. 
- MockFunction mock_callback_; - EXPECT_CALL(mock_callback_, Call()).Times(0); - server_ssl_socket_factory.addReadyCb(mock_callback_.AsStdFunction()); - - // Call onAddOrUpdateSecret, but return a null ssl_ctx. This should not invoke the callback. - EXPECT_CALL(context_manager, createSslServerContext(_, _, _)).WillOnce(Return(nullptr)); - server_ssl_socket_factory.onAddOrUpdateSecret(); - - // Now return a ssl context which should result in the callback being invoked. - EXPECT_CALL(mock_callback_, Call()); - Ssl::ServerContextSharedPtr mock_context = std::make_shared(); - EXPECT_CALL(context_manager, createSslServerContext(_, _, _)).WillOnce(Return(mock_context)); - server_ssl_socket_factory.onAddOrUpdateSecret(); - - // Add another callback, it should be invoked immediately. - MockFunction second_callback_; - EXPECT_CALL(second_callback_, Call()); - server_ssl_socket_factory.addReadyCb(second_callback_.AsStdFunction()); -} - TEST_P(SslSocketTest, TestTransportSocketCallback) { // Make MockTransportSocketCallbacks. Network::MockIoHandle io_handle; diff --git a/test/fuzz/BUILD b/test/fuzz/BUILD index 35bd8e0ac197..153f6f0a94f4 100644 --- a/test/fuzz/BUILD +++ b/test/fuzz/BUILD @@ -1,5 +1,6 @@ load( "//bazel:envoy_build_system.bzl", + "envoy_cc_test", "envoy_cc_test_library", "envoy_package", "envoy_proto_library", @@ -68,3 +69,21 @@ envoy_cc_test_library( "@envoy_api//envoy/config/core/v3:pkg_cc_proto", ], ) + +envoy_cc_test_library( + name = "random_lib", + hdrs = ["random.h"], + deps = [ + "//include/envoy/common:random_generator_interface", + "//source/common/common:assert_lib", + "//source/common/common:minimal_logger_lib", + ], +) + +envoy_cc_test( + name = "random_test", + srcs = ["random_test.cc"], + deps = [ + "//test/fuzz:random_lib", + ], +) diff --git a/test/fuzz/random.h b/test/fuzz/random.h new file mode 100644 index 000000000000..01b8f5e83df5 --- /dev/null +++ b/test/fuzz/random.h @@ -0,0 +1,104 @@ +#include +#include +#include +#include + +#include "envoy/common/random_generator.h" + +#include "common/common/assert.h" +#include "common/common/logger.h" + +namespace Envoy { +namespace Random { + +class PsuedoRandomGenerator64 : public RandomGenerator { +public: + PsuedoRandomGenerator64() = default; + ~PsuedoRandomGenerator64() override = default; + + void initializeSeed(uint64_t seed) { prng_ = std::make_unique(seed); } + + // RandomGenerator + uint64_t random() override { + // Makes sure initializeSeed() was already called + ASSERT(prng_ != nullptr); + const uint64_t to_return = (*prng_)(); + ENVOY_LOG_MISC(trace, "random() returned: {}", to_return); + return to_return; + } + std::string uuid() override { return ""; } + std::unique_ptr prng_; +}; + +} // namespace Random + +namespace Fuzz { +class ProperSubsetSelector { +public: + ProperSubsetSelector(const std::string& random_bytestring) + : random_bytestring_(random_bytestring) {} + + /** + * This function does proper subset selection on a certain number of elements. It returns a vector + * of vectors of bytes. Each vector of bytes represents the indexes of a single subset. The + * "randomness" of the subset that the class will use is determined by a bytestring passed into + * the class. Example: call into function with a vector {3, 5} representing subset sizes, and 15 + * as number_of_elements. This function would return something such as {{3, 14, 7}, {2, 1, 13, 8, + * 6}}. 
If the sum of the subset sizes exceeds the number of elements, + * this will stop constructing subsets once all the elements have run out and been + * placed into subsets. So, for a vector {3, 5} of subset sizes and 2 as the number + * of elements, the function would return something such as {{1, 0}}. + */ + + std::vector<std::vector<uint8_t>> + constructSubsets(const std::vector<uint32_t>& number_of_elements_in_each_subset, + uint32_t number_of_elements) { + num_elements_left_ = number_of_elements; + std::vector<uint64_t> index_vector; + index_vector.reserve(number_of_elements); + for (uint32_t i = 0; i < number_of_elements; i++) { + index_vector.push_back(i); + } + std::vector<std::vector<uint8_t>> subsets; + subsets.reserve(number_of_elements_in_each_subset.size()); + for (uint32_t i : number_of_elements_in_each_subset) { + subsets.push_back(constructSubset(i, index_vector)); + } + return subsets; + } + +private: + // Builds a single subset by pulling indexes off index_vector + std::vector<uint8_t> constructSubset(uint32_t number_of_elements_in_subset, + std::vector<uint64_t>& index_vector) { + std::vector<uint8_t> subset; + + for (uint32_t i = 0; i < number_of_elements_in_subset && !(num_elements_left_ == 0); i++) { + // Index of bytestring will wrap around if it "overflows" past the random bytestring's length. + uint64_t index_of_index_vector = + random_bytestring_[index_of_random_bytestring_ % random_bytestring_.length()] % + num_elements_left_; + const uint64_t index = index_vector.at(index_of_index_vector); + subset.push_back(index); + // Move the chosen index to the end of the vector - it will not be chosen again + std::swap(index_vector[index_of_index_vector], index_vector[num_elements_left_ - 1]); + --num_elements_left_; + + ++index_of_random_bytestring_; + } + + return subset; + } + + // This bytestring will be iterated through, representing randomness in order to choose + // subsets + const std::string random_bytestring_; + uint32_t index_of_random_bytestring_ = 0; + + // Used to make subset construction linear time complexity with std::swap - chosen indexes will be + // swapped to the end of the vector, and won't be chosen again due to modding against this integer + uint32_t num_elements_left_; +}; + +} // namespace Fuzz +} // namespace Envoy diff --git a/test/fuzz/random_test.cc b/test/fuzz/random_test.cc new file mode 100644 index 000000000000..9e7fd1012260 --- /dev/null +++ b/test/fuzz/random_test.cc @@ -0,0 +1,27 @@ +#include "test/fuzz/random.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +using testing::ContainerEq; + +namespace Envoy { +namespace Fuzz { + +// Test the subset selection. Since selection is based on a passed-in random bytestring, you can +// work the algorithm out by hand. Pass in 5 elements and expect the first subset to contain +// indexes {1, 3} and the second subset indexes {0, 2, 4}. +TEST(BasicSubsetSelection, RandomTest) { + // \x01 % 5 selects position 1, which is then swapped with the last element; \x03 % 4 selects + // position 3, and so on.
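+ // Full hand trace, with the index vector starting as {0, 1, 2, 3, 4}:
+ // subset one: \x01 % 5 = 1 -> take index 1 and swap it to the back -> {0, 4, 2, 3, 1};
+ // \x03 % 4 = 3 -> take index 3, giving {1, 3}.
+ // subset two: \x09 % 3 = 0 -> take index 0; \x04 % 2 = 0 -> take index 2; \x33 % 1 = 0 ->
+ // take index 4, giving {0, 2, 4}.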
+ std::string random_bytestring = "\x01\x03\x09\x04\x33"; + ProperSubsetSelector subset_selector(random_bytestring); + const std::vector<std::vector<uint8_t>> subsets = subset_selector.constructSubsets({2, 3}, 5); + const std::vector<uint8_t> expected_subset_one = {1, 3}; + const std::vector<uint8_t> expected_subset_two = {0, 2, 4}; + EXPECT_THAT(subsets[0], ContainerEq(expected_subset_one)); + EXPECT_THAT(subsets[1], ContainerEq(expected_subset_two)); +} + +} // namespace Fuzz +} // namespace Envoy diff --git a/test/integration/BUILD b/test/integration/BUILD index e3dbfb5d8731..010d85cc480b 100644 --- a/test/integration/BUILD +++ b/test/integration/BUILD @@ -312,6 +312,30 @@ envoy_cc_test( ], ) +envoy_cc_test( + name = "http2_flood_integration_test", + srcs = [ + "http2_flood_integration_test.cc", + ], + shard_count = 4, + tags = ["flaky_on_windows"], + deps = [ + ":autonomous_upstream_lib", + ":http_integration_lib", + "//test/common/http/http2:http2_frame", + "//test/integration/filters:backpressure_filter_config_lib", + "//test/integration/filters:set_response_code_filter_config_proto_cc_proto", + "//test/integration/filters:set_response_code_filter_lib", + "//test/integration/filters:test_socket_interface_lib", + "//test/mocks/http:http_mocks", + "//test/test_common:utility_lib", + "@com_google_absl//absl/synchronization", + "@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto", + "@envoy_api//envoy/config/cluster/v3:pkg_cc_proto", + "@envoy_api//envoy/extensions/filters/network/http_connection_manager/v3:pkg_cc_proto", + ], +) + envoy_cc_test( name = "http2_integration_test", srcs = [ @@ -321,21 +345,17 @@ envoy_cc_test( shard_count = 4, tags = ["flaky_on_windows"], deps = [ - ":autonomous_upstream_lib", ":http_integration_lib", "//source/common/buffer:buffer_lib", "//source/common/http:header_map_lib", "//source/extensions/filters/http/buffer:config", "//source/extensions/filters/http/health_check:config", - "//test/common/http/http2:http2_frame", - "//test/integration/filters:backpressure_filter_config_lib", "//test/integration/filters:metadata_stop_all_filter_config_lib", "//test/integration/filters:request_metadata_filter_config_lib", "//test/integration/filters:response_metadata_filter_config_lib", "//test/integration/filters:set_response_code_filter_config_proto_cc_proto", "//test/integration/filters:set_response_code_filter_lib", "//test/integration/filters:stop_iteration_and_continue", - "//test/integration/filters:test_socket_interface_lib", "//test/mocks/http:http_mocks", "//test/mocks/upstream:retry_priority_factory_mocks", "//test/mocks/upstream:retry_priority_mocks", @@ -501,10 +521,12 @@ envoy_cc_test_library( ":test_host_predicate_lib", "//include/envoy/event:timer_interface", "//source/common/common:thread_annotations", + "//source/common/network:socket_option_lib", "//source/extensions/filters/http/on_demand:config", "//source/extensions/filters/http/router:config", "//source/extensions/filters/network/http_connection_manager:config", "//source/extensions/transport_sockets/tls:context_lib", + "//test/common/http/http2:http2_frame", "//test/common/upstream:utility_lib", "//test/integration/filters:add_body_filter_config_lib", "//test/integration/filters:add_trailers_filter_config_lib", diff --git a/test/integration/hotrestart_test.sh b/test/integration/hotrestart_test.sh index d16a5a0bc59e..729cc09bdc3c 100755 --- a/test/integration/hotrestart_test.sh +++ b/test/integration/hotrestart_test.sh @@ -1,7 +1,7 @@ #!/bin/bash # For this test we use a slightly modiified test binary, based on -#
source/exe/enovy-static. If this starts failing to run or build, ensure that +# source/exe/envoy-static. If this starts failing to run or build, ensure that # source/exe/main.cc and ./hotrestart_main.cc have not diverged except for # adding the new gauge. export ENVOY_BIN="${TEST_SRCDIR}"/envoy/test/integration/hotrestart_main diff --git a/test/integration/http2_flood_integration_test.cc b/test/integration/http2_flood_integration_test.cc new file mode 100644 index 000000000000..6b47b7de186c --- /dev/null +++ b/test/integration/http2_flood_integration_test.cc @@ -0,0 +1,1059 @@ +#include <string> +#include <vector> + +#include "envoy/config/bootstrap/v3/bootstrap.pb.h" +#include "envoy/config/cluster/v3/cluster.pb.h" +#include "envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.h" + +#include "common/buffer/buffer_impl.h" +#include "common/common/random_generator.h" +#include "common/http/header_map_impl.h" +#include "common/network/socket_option_impl.h" + +#include "test/integration/autonomous_upstream.h" +#include "test/integration/filters/test_socket_interface.h" +#include "test/integration/http_integration.h" +#include "test/integration/utility.h" +#include "test/mocks/http/mocks.h" +#include "test/test_common/network_utility.h" +#include "test/test_common/printers.h" +#include "test/test_common/utility.h" + +#include "gtest/gtest.h" + +using ::testing::HasSubstr; + +namespace Envoy { + +namespace { +const uint32_t ControlFrameFloodLimit = 100; +const uint32_t AllFrameFloodLimit = 1000; +} // namespace + +class SocketInterfaceSwap { +public: + // Object of this class holds the state determining the IoHandle which + // should return EAGAIN from the `writev` call. + struct IoHandleMatcher { + bool shouldReturnEgain(uint32_t port) const { + absl::ReaderMutexLock lock(&mutex_); + return port == port_ && writev_returns_egain_; + } + + void setSourcePort(uint32_t port) { + absl::WriterMutexLock lock(&mutex_); + port_ = port; + } + + void setWritevReturnsEgain() { + absl::WriterMutexLock lock(&mutex_); + writev_returns_egain_ = true; + } + + private: + mutable absl::Mutex mutex_; + uint32_t port_ ABSL_GUARDED_BY(mutex_) = 0; + bool writev_returns_egain_ ABSL_GUARDED_BY(mutex_) = false; + }; + + SocketInterfaceSwap() { + Envoy::Network::SocketInterfaceSingleton::clear(); + test_socket_interface_loader_ = std::make_unique<Envoy::Network::SocketInterfaceLoader>( + std::make_unique<Envoy::Network::TestSocketInterface>( + [writev_matcher = writev_matcher_]( + Envoy::Network::TestIoSocketHandle* io_handle, const Buffer::RawSlice*, + uint64_t) -> absl::optional<Api::IoCallUint64Result> { + if (writev_matcher->shouldReturnEgain(io_handle->localAddress()->ip()->port())) { + return Api::IoCallUint64Result( + 0, Api::IoErrorPtr(Network::IoSocketError::getIoSocketEagainInstance(), + Network::IoSocketError::deleteIoError)); + } + return absl::nullopt; + })); + } + + ~SocketInterfaceSwap() { + test_socket_interface_loader_.reset(); + Envoy::Network::SocketInterfaceSingleton::initialize(previous_socket_interface_); + } + +protected: + Envoy::Network::SocketInterface* const previous_socket_interface_{ + Envoy::Network::SocketInterfaceSingleton::getExisting()}; + std::shared_ptr<IoHandleMatcher> writev_matcher_{std::make_shared<IoHandleMatcher>()}; + std::unique_ptr<Envoy::Network::SocketInterfaceLoader> test_socket_interface_loader_; +}; + +// It is important that the new socket interface is installed before any I/O activity starts and +// the previous one is restored after all I/O activity stops. Since the HttpIntegrationTest +// destructor stops Envoy, the SocketInterfaceSwap destructor needs to run after it.
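+// (C++ destroys base-class subobjects in reverse order of declaration, so declaring
+// SocketInterfaceSwap as the first base below means its destructor runs last.)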
This order of +// multiple inheritance ensures that the SocketInterfaceSwap destructor runs after the +// Http2RawFrameIntegrationTest destructor completes. +class Http2FloodMitigationTest : public SocketInterfaceSwap, + public testing::TestWithParam<Network::Address::IpVersion>, + public Http2RawFrameIntegrationTest { +public: + Http2FloodMitigationTest() : Http2RawFrameIntegrationTest(GetParam()) { + config_helper_.addConfigModifier( + [](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& + hcm) { hcm.mutable_delayed_close_timeout()->set_seconds(1); }); + } + +protected: + void floodServer(const Http2Frame& frame, const std::string& flood_stat, uint32_t num_frames); + void floodServer(absl::string_view host, absl::string_view path, + Http2Frame::ResponseStatus expected_http_status, const std::string& flood_stat, + uint32_t num_frames); + + void setNetworkConnectionBufferSize(); + void beginSession() override; + void prefillOutboundDownstreamQueue(uint32_t data_frame_count, uint32_t data_frame_size = 10); + void triggerListenerDrain(); +}; + +INSTANTIATE_TEST_SUITE_P(IpVersions, Http2FloodMitigationTest, + testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), + TestUtility::ipTestParamsToString); + +void Http2FloodMitigationTest::setNetworkConnectionBufferSize() { + // nghttp2 library has its own internal mitigation for outbound control frames (see + // NGHTTP2_DEFAULT_MAX_OBQ_FLOOD_ITEM). The default nghttp2 mitigation threshold of 1K is modified + // to 10K in the ConnectionImpl::Http2Options::Http2Options. The mitigation is triggered when + // there are more than 10000 PING or SETTINGS frames with ACK flag in the nghttp2 internal + // outbound queue. It is possible to trigger this mitigation in nghttp2 before triggering Envoy's + // own flood mitigation. This can happen when a buffer large enough to contain over 10K PING or + // SETTINGS frames is dispatched to the nghttp2 library. To prevent this from happening, the + // network connection receive buffer needs to be smaller than 90KB (which is 10K SETTINGS frames + // at 9 bytes each). Set it to the arbitrarily chosen value of 32K. Note that this buffer has a + // 16K lower bound. + config_helper_.addConfigModifier([](envoy::config::bootstrap::v3::Bootstrap& bootstrap) -> void { + RELEASE_ASSERT(bootstrap.mutable_static_resources()->listeners_size() >= 1, ""); + auto* listener = bootstrap.mutable_static_resources()->mutable_listeners(0); + + listener->mutable_per_connection_buffer_limit_bytes()->set_value(32 * 1024); + }); +} + +void Http2FloodMitigationTest::beginSession() { + setDownstreamProtocol(Http::CodecClient::Type::HTTP2); + setUpstreamProtocol(FakeHttpConnection::Type::HTTP2); + // Set lower outbound frame limits to make tests run faster. + config_helper_.setOutboundFramesLimits(AllFrameFloodLimit, ControlFrameFloodLimit); + initialize(); + // Set up a raw connection to easily send requests without reading responses. Also, set a small + // TCP receive buffer to speed up connection backup. + auto options = std::make_shared<Network::Socket::Options>(); + options->emplace_back(std::make_shared<Network::SocketOptionImpl>( + envoy::config::core::v3::SocketOption::STATE_PREBIND, + ENVOY_MAKE_SOCKET_OPTION_NAME(SOL_SOCKET, SO_RCVBUF), 1024)); + writev_matcher_->setSourcePort(lookupPort("http")); + tcp_client_ = makeTcpConnection(lookupPort("http"), options); + startHttp2Session(); +} + +// Verify that the server detects the flood of the given frame.
+void Http2FloodMitigationTest::floodServer(const Http2Frame& frame, const std::string& flood_stat, + uint32_t num_frames) { + // Make sure all frames can fit into a 16k buffer. + ASSERT_LE(num_frames, (16u * 1024u) / frame.size()); + std::vector<char> buf(num_frames * frame.size()); + for (auto pos = buf.begin(); pos != buf.end();) { + pos = std::copy(frame.begin(), frame.end(), pos); + } + + ASSERT_TRUE(tcp_client_->write({buf.begin(), buf.end()}, false, false)); + + // Envoy's flood mitigation should kill the connection. + tcp_client_->waitForDisconnect(); + + EXPECT_EQ(1, test_server_->counter(flood_stat)->value()); + test_server_->waitForCounterGe("http.config_test.downstream_cx_delayed_close_timeout", 1); +} + +// Verify that the server detects the flood using specified request parameters. +void Http2FloodMitigationTest::floodServer(absl::string_view host, absl::string_view path, + Http2Frame::ResponseStatus expected_http_status, + const std::string& flood_stat, uint32_t num_frames) { + uint32_t request_idx = 0; + auto request = Http2Frame::makeRequest(Http2Frame::makeClientStreamId(request_idx), host, path); + sendFrame(request); + auto frame = readFrame(); + EXPECT_EQ(Http2Frame::Type::Headers, frame.type()); + EXPECT_EQ(expected_http_status, frame.responseStatus()); + writev_matcher_->setWritevReturnsEgain(); + for (uint32_t frame = 0; frame < num_frames; ++frame) { + request = Http2Frame::makeRequest(Http2Frame::makeClientStreamId(++request_idx), host, path); + sendFrame(request); + } + tcp_client_->waitForDisconnect(); + if (!flood_stat.empty()) { + EXPECT_EQ(1, test_server_->counter(flood_stat)->value()); + } + EXPECT_EQ(1, + test_server_->counter("http.config_test.downstream_cx_delayed_close_timeout")->value()); +} + +void Http2FloodMitigationTest::prefillOutboundDownstreamQueue(uint32_t data_frame_count, + uint32_t data_frame_size) { + // Set large buffer limits so the test is not affected by the flow control. + config_helper_.setBufferLimits(1024 * 1024 * 1024, 1024 * 1024 * 1024); + autonomous_upstream_ = true; + autonomous_allow_incomplete_streams_ = true; + beginSession(); + + // Do not read from the socket and send a request that causes the autonomous upstream to respond + // with the specified number of DATA frames. This pre-fills the downstream outbound frame queue + // such that the next response triggers flood protection. + // Simulate TCP push back on the Envoy's downstream network socket, so that outbound frames + // start to accumulate in the transport socket buffer. + writev_matcher_->setWritevReturnsEgain(); + + const auto request = Http2Frame::makeRequest( + Http2Frame::makeClientStreamId(0), "host", "/test/long/url", + {Http2Frame::Header("response_data_blocks", absl::StrCat(data_frame_count)), + Http2Frame::Header("response_size_bytes", absl::StrCat(data_frame_size)), + Http2Frame::Header("no_trailers", "0")}); + sendFrame(request); + + // Wait for some data to arrive and then wait for the upstream_rq_active to flip to 0 to indicate + // that the first request has completed.
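+ // (The response frames themselves cannot drain to the client while writev keeps returning
+ // EAGAIN, so they accumulate in Envoy's outbound frame queue.)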
+ test_server_->waitForCounterGe("cluster.cluster_0.upstream_cx_rx_bytes_total", 10000); + test_server_->waitForGaugeEq("cluster.cluster_0.upstream_rq_active", 0); + // Verify that pre-fill did not trigger flood protection + EXPECT_EQ(0, test_server_->counter("http2.outbound_flood")->value()); +} + +void Http2FloodMitigationTest::triggerListenerDrain() { + absl::Notification drain_sequence_started; + test_server_->server().dispatcher().post([this, &drain_sequence_started]() { + test_server_->drainManager().startDrainSequence([] {}); + drain_sequence_started.Notify(); + }); + drain_sequence_started.WaitForNotification(); +} + +TEST_P(Http2FloodMitigationTest, Ping) { + setNetworkConnectionBufferSize(); + beginSession(); + writev_matcher_->setWritevReturnsEgain(); + floodServer(Http2Frame::makePingFrame(), "http2.outbound_control_flood", + ControlFrameFloodLimit + 1); +} + +TEST_P(Http2FloodMitigationTest, Settings) { + setNetworkConnectionBufferSize(); + beginSession(); + writev_matcher_->setWritevReturnsEgain(); + floodServer(Http2Frame::makeEmptySettingsFrame(), "http2.outbound_control_flood", + ControlFrameFloodLimit + 1); +} + +// Verify that the server can detect flood of internally generated 404 responses. +TEST_P(Http2FloodMitigationTest, 404) { + // Change the default route to be restrictive, and send a request to a non existent route. + config_helper_.setDefaultHostAndRoute("foo.com", "/found"); + beginSession(); + + // Send requests to a non existent path to generate 404s + floodServer("host", "/notfound", Http2Frame::ResponseStatus::NotFound, "http2.outbound_flood", + AllFrameFloodLimit + 1); +} + +// Verify that the server can detect flood of response DATA frames +TEST_P(Http2FloodMitigationTest, Data) { + // Set large buffer limits so the test is not affected by the flow control. + config_helper_.setBufferLimits(1024 * 1024 * 1024, 1024 * 1024 * 1024); + autonomous_upstream_ = true; + autonomous_allow_incomplete_streams_ = true; + beginSession(); + + // Do not read from the socket and send request that causes autonomous upstream + // to respond with 1000 DATA frames. The Http2FloodMitigationTest::beginSession() + // sets 1000 flood limit for all frame types. Including 1 HEADERS response frame + // 1000 DATA frames should trigger flood protection. + // Simulate TCP push back on the Envoy's downstream network socket, so that outbound frames start + // to accumulate in the transport socket buffer. + writev_matcher_->setWritevReturnsEgain(); + + const auto request = Http2Frame::makeRequest( + 1, "host", "/test/long/url", + {Http2Frame::Header("response_data_blocks", "1000"), Http2Frame::Header("no_trailers", "0")}); + sendFrame(request); + + // Wait for connection to be flooded with outbound DATA frames and disconnected. + tcp_client_->waitForDisconnect(); + + // If the server codec had incorrectly thrown an exception on flood detection it would cause + // the entire upstream to be disconnected. Verify it is still active, and there are no destroyed + // connections. + ASSERT_EQ(1, test_server_->gauge("cluster.cluster_0.upstream_cx_active")->value()); + ASSERT_EQ(0, test_server_->counter("cluster.cluster_0.upstream_cx_destroy")->value()); + // Verify that the flood check was triggered + EXPECT_EQ(1, test_server_->counter("http2.outbound_flood")->value()); +} + +// Verify that the server can detect flood triggered by a DATA frame from a decoder filter call +// to sendLocalReply(). 
+// This test also verifies that RELEASE_ASSERT in the ConnectionImpl::StreamImpl::encodeDataHelper() +// is not fired when it is called by the sendLocalReply() in the dispatching context. +TEST_P(Http2FloodMitigationTest, DataOverflowFromDecoderFilterSendLocalReply) { + config_helper_.addConfigModifier( + [&](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& + hcm) -> void { + const std::string yaml_string = R"EOF( +name: send_local_reply_filter +typed_config: + "@type": type.googleapis.com/test.integration.filters.SetResponseCodeFilterConfig + prefix: "/call_send_local_reply" + code: 404 + body: "something" + )EOF"; + TestUtility::loadFromYaml(yaml_string, *hcm.add_http_filters()); + // keep router the last + auto size = hcm.http_filters_size(); + hcm.mutable_http_filters()->SwapElements(size - 2, size - 1); + }); + + // pre-fill 2 away from overflow + prefillOutboundDownstreamQueue(AllFrameFloodLimit - 2); + + // At this point the outbound downstream frame queue should be 2 away from overflowing. + // Make the SetResponseCodeFilterConfig decoder filter call sendLocalReply with body. + // HEADERS + DATA frames should overflow the queue. + // Verify that connection was disconnected and appropriate counters were set. + auto request2 = + Http2Frame::makeRequest(Http2Frame::makeClientStreamId(1), "host", "/call_send_local_reply"); + sendFrame(request2); + + // Wait for connection to be flooded with outbound DATA frame and disconnected. + tcp_client_->waitForDisconnect(); + + // Verify that the upstream connection is still alive. + ASSERT_EQ(1, test_server_->gauge("cluster.cluster_0.upstream_cx_active")->value()); + ASSERT_EQ(0, test_server_->counter("cluster.cluster_0.upstream_cx_destroy")->value()); + // Verify that the flood check was triggered + EXPECT_EQ(1, test_server_->counter("http2.outbound_flood")->value()); +} + +// Verify that the server can detect flood of response HEADERS frames +TEST_P(Http2FloodMitigationTest, Headers) { + // pre-fill one away from overflow + prefillOutboundDownstreamQueue(AllFrameFloodLimit - 1); + + // Send second request which should trigger headers only response. + // Verify that connection was disconnected and appropriate counters were set. + auto request2 = Http2Frame::makeRequest( + Http2Frame::makeClientStreamId(1), "host", "/test/long/url", + {Http2Frame::Header("response_data_blocks", "0"), Http2Frame::Header("no_trailers", "0")}); + sendFrame(request2); + + // Wait for connection to be flooded with outbound HEADERS frame and disconnected. + tcp_client_->waitForDisconnect(); + + // If the server codec had incorrectly thrown an exception on flood detection it would cause + // the entire upstream to be disconnected. Verify it is still active, and there are no destroyed + // connections. + ASSERT_EQ(1, test_server_->gauge("cluster.cluster_0.upstream_cx_active")->value()); + ASSERT_EQ(0, test_server_->counter("cluster.cluster_0.upstream_cx_destroy")->value()); + // Verify that the flood check was triggered + EXPECT_EQ(1, test_server_->counter("http2.outbound_flood")->value()); +} + +// Verify that the server can detect overflow by 100 continue response sent by Envoy itself +TEST_P(Http2FloodMitigationTest, Envoy100ContinueHeaders) { + // pre-fill one away from overflow + prefillOutboundDownstreamQueue(AllFrameFloodLimit - 1); + + // Send second request which should trigger Envoy to respond with 100 continue. + // Verify that connection was disconnected and appropriate counters were set. 
+ auto request2 = Http2Frame::makeRequest( + Http2Frame::makeClientStreamId(1), "host", "/test/long/url", + {Http2Frame::Header("response_data_blocks", "0"), Http2Frame::Header("no_trailers", "0"), + Http2Frame::Header("expect", "100-continue")}); + sendFrame(request2); + + // Wait for connection to be flooded with outbound HEADERS frame and disconnected. + tcp_client_->waitForDisconnect(); + + // If the server codec had incorrectly thrown an exception on flood detection it would cause + // the entire upstream to be disconnected. Verify it is still active, and there are no destroyed + // connections. + ASSERT_EQ(1, test_server_->gauge("cluster.cluster_0.upstream_cx_active")->value()); + ASSERT_EQ(0, test_server_->counter("cluster.cluster_0.upstream_cx_destroy")->value()); + // The second upstream request should be reset since it is disconnected when sending 100 continue + // response + EXPECT_EQ(1, test_server_->counter("cluster.cluster_0.upstream_rq_tx_reset")->value()); + // Verify that the flood check was triggered + EXPECT_EQ(1, test_server_->counter("http2.outbound_flood")->value()); +} + +// Verify that the server can detect flood triggered by a HEADERS frame from a decoder filter call +// to sendLocalReply(). +// This test also verifies that RELEASE_ASSERT in the +// ConnectionImpl::StreamImpl::encodeHeadersBase() is not fired when it is called by the +// sendLocalReply() in the dispatching context. +TEST_P(Http2FloodMitigationTest, HeadersOverflowFromDecoderFilterSendLocalReply) { + config_helper_.addConfigModifier( + [&](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& + hcm) -> void { + const std::string yaml_string = R"EOF( +name: send_local_reply_filter +typed_config: + "@type": type.googleapis.com/test.integration.filters.SetResponseCodeFilterConfig + prefix: "/call_send_local_reply" + code: 404 + )EOF"; + TestUtility::loadFromYaml(yaml_string, *hcm.add_http_filters()); + // keep router the last + auto size = hcm.http_filters_size(); + hcm.mutable_http_filters()->SwapElements(size - 2, size - 1); + }); + + // pre-fill one away from overflow + prefillOutboundDownstreamQueue(AllFrameFloodLimit - 1); + + // At this point the outbound downstream frame queue should be 1 away from overflowing. + // Make the SetResponseCodeFilterConfig decoder filter call sendLocalReply without body. + // Verify that connection was disconnected and appropriate counters were set. + auto request2 = + Http2Frame::makeRequest(Http2Frame::makeClientStreamId(1), "host", "/call_send_local_reply"); + sendFrame(request2); + + // Wait for connection to be flooded with outbound HEADERS frame and disconnected. + tcp_client_->waitForDisconnect(); + + // Verify that the upstream connection is still alive. + ASSERT_EQ(1, test_server_->gauge("cluster.cluster_0.upstream_cx_active")->value()); + ASSERT_EQ(0, test_server_->counter("cluster.cluster_0.upstream_cx_destroy")->value()); + // Verify that the flood check was triggered + EXPECT_EQ(1, test_server_->counter("http2.outbound_flood")->value()); +} + +// TODO(yanavlasov): add the same tests as above for the encoder filters. 
+// This is currently blocked by the https://github.com/envoyproxy/envoy/pull/13256
+
+// Verify that the server can detect flood of response METADATA frames
+TEST_P(Http2FloodMitigationTest, Metadata) {
+  config_helper_.addConfigModifier([&](envoy::config::bootstrap::v3::Bootstrap& bootstrap) -> void {
+    RELEASE_ASSERT(bootstrap.mutable_static_resources()->clusters_size() >= 1, "");
+    auto* cluster = bootstrap.mutable_static_resources()->mutable_clusters(0);
+    cluster->mutable_http2_protocol_options()->set_allow_metadata(true);
+  });
+  config_helper_.addConfigModifier(
+      [&](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&
+              hcm) -> void { hcm.mutable_http2_protocol_options()->set_allow_metadata(true); });
+
+  // pre-fill one away from overflow
+  prefillOutboundDownstreamQueue(AllFrameFloodLimit - 1);
+
+  // Send second request which should trigger a response with a METADATA frame.
+  auto metadata_map_vector_ptr = std::make_unique<Http::MetadataMapVector>();
+  Http::MetadataMap metadata_map = {
+      {"header_key1", "header_value1"},
+      {"header_key2", "header_value2"},
+  };
+  auto metadata_map_ptr = std::make_unique<Http::MetadataMap>(metadata_map);
+  metadata_map_vector_ptr->push_back(std::move(metadata_map_ptr));
+  static_cast<AutonomousUpstream*>(fake_upstreams_.front().get())
+      ->setPreResponseHeadersMetadata(std::move(metadata_map_vector_ptr));
+
+  // Verify that connection was disconnected and appropriate counters were set.
+  auto request2 = Http2Frame::makeRequest(
+      Http2Frame::makeClientStreamId(1), "host", "/test/long/url",
+      {Http2Frame::Header("response_data_blocks", "0"), Http2Frame::Header("no_trailers", "0")});
+  sendFrame(request2);
+
+  // Wait for connection to be flooded with outbound METADATA frame and disconnected.
+  tcp_client_->waitForDisconnect();
+
+  // If the server codec had incorrectly thrown an exception on flood detection it would cause
+  // the entire upstream to be disconnected. Verify it is still active, and there are no destroyed
+  // connections.
+  ASSERT_EQ(1, test_server_->gauge("cluster.cluster_0.upstream_cx_active")->value());
+  ASSERT_EQ(0, test_server_->counter("cluster.cluster_0.upstream_cx_destroy")->value());
+  // Verify that the flood check was triggered
+  EXPECT_EQ(1, test_server_->counter("http2.outbound_flood")->value());
+}
+
+// Verify that the server can detect flood of response trailers.
+TEST_P(Http2FloodMitigationTest, Trailers) {
+  // Set large buffer limits so the test is not affected by the flow control.
+  config_helper_.setBufferLimits(1024 * 1024 * 1024, 1024 * 1024 * 1024);
+  autonomous_upstream_ = true;
+  autonomous_allow_incomplete_streams_ = true;
+  beginSession();
+
+  // Do not read from the socket and send a request that causes the autonomous upstream
+  // to respond with 999 DATA frames and trailers. The Http2FloodMitigationTest::beginSession()
+  // sets a 1000 frame flood limit for all frame types. Including the 1 HEADERS response frame,
+  // 999 DATA frames and trailers should trigger flood protection.
+  // Simulate TCP push back on the Envoy's downstream network socket, so that outbound frames start
+  // to accumulate in the transport socket buffer.
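+  // (Frame arithmetic for this test: 1 response HEADERS + 999 DATA + 1 trailers HEADERS
+  // = 1001 frames, one over the 1000 frame flood limit set by beginSession().)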
+  writev_matcher_->setWritevReturnsEgain();
+
+  static_cast<AutonomousUpstream*>(fake_upstreams_.front().get())
+      ->setResponseTrailers(std::make_unique<Http::TestResponseTrailerMapImpl>(
+          Http::TestResponseTrailerMapImpl({{"foo", "bar"}})));
+
+  const auto request =
+      Http2Frame::makeRequest(Http2Frame::makeClientStreamId(0), "host", "/test/long/url",
+                              {Http2Frame::Header("response_data_blocks", "999")});
+  sendFrame(request);
+
+  // Wait for connection to be flooded with outbound trailers and disconnected.
+  tcp_client_->waitForDisconnect();
+
+  // If the server codec had incorrectly thrown an exception on flood detection it would cause
+  // the entire upstream to be disconnected. Verify it is still active, and there are no destroyed
+  // connections.
+  ASSERT_EQ(1, test_server_->gauge("cluster.cluster_0.upstream_cx_active")->value());
+  ASSERT_EQ(0, test_server_->counter("cluster.cluster_0.upstream_cx_destroy")->value());
+  // Verify that the flood check was triggered
+  EXPECT_EQ(1, test_server_->counter("http2.outbound_flood")->value());
+}
+
+// Verify flood detection by the WINDOW_UPDATE frame when a decoder filter is resuming reading from
+// the downstream via DecoderFilterBelowWriteBufferLowWatermark.
+TEST_P(Http2FloodMitigationTest, WindowUpdateOnLowWatermarkFlood) {
+  config_helper_.addFilter(R"EOF(
+  name: backpressure-filter
+  )EOF");
+  config_helper_.setBufferLimits(1024 * 1024 * 1024, 1024 * 1024 * 1024);
+  // Set low window sizes in the server codec as nghttp2 sends WINDOW_UPDATE only after it consumes
+  // more than 25% of the window.
+  config_helper_.addConfigModifier(
+      [&](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&
+              hcm) -> void {
+        auto* h2_options = hcm.mutable_http2_protocol_options();
+        h2_options->mutable_initial_stream_window_size()->set_value(70000);
+        h2_options->mutable_initial_connection_window_size()->set_value(70000);
+      });
+  autonomous_upstream_ = true;
+  autonomous_allow_incomplete_streams_ = true;
+  beginSession();
+
+  writev_matcher_->setWritevReturnsEgain();
+
+  // pre-fill two away from overflow
+  const auto request = Http2Frame::makePostRequest(
+      Http2Frame::makeClientStreamId(0), "host", "/test/long/url",
+      {Http2Frame::Header("response_data_blocks", "998"), Http2Frame::Header("no_trailers", "0")});
+  sendFrame(request);
+
+  // The backpressure-filter disables reading when it sees request headers, and it should prevent
+  // WINDOW_UPDATE frames from being sent for the following DATA frames. Send enough DATA to consume
+  // more than 25% of the 70K window so that nghttp2 will send WINDOW_UPDATE on read resumption.
+  auto data_frame =
+      Http2Frame::makeDataFrame(Http2Frame::makeClientStreamId(0), std::string(16384, '0'));
+  sendFrame(data_frame);
+  sendFrame(data_frame);
+  data_frame = Http2Frame::makeDataFrame(Http2Frame::makeClientStreamId(0), std::string(16384, '1'),
+                                         Http2Frame::DataFlags::EndStream);
+  sendFrame(data_frame);
+
+  // Upstream will respond with 998 DATA frames and the backpressure-filter will re-enable
+  // reading on the last DATA frame. This will cause nghttp2 to send two WINDOW_UPDATE frames for
+  // the stream and connection windows. Together with the response DATA frames it should overflow
+  // the outbound frame queue. Wait for connection to be flooded with outbound WINDOW_UPDATE frame
+  // and disconnected.
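+  // (Window arithmetic: the three DATA frames above carry 3 * 16384 = 49152 bytes, well over the
+  // 17500 bytes that make up 25% of the 70000 byte window.)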
+  tcp_client_->waitForDisconnect();
+
+  EXPECT_EQ(1,
+            test_server_->counter("http.config_test.downstream_flow_control_paused_reading_total")
+                ->value());
+
+  // If the server codec had incorrectly thrown an exception on flood detection it would cause
+  // the entire upstream to be disconnected. Verify it is still active, and there are no destroyed
+  // connections.
+  ASSERT_EQ(1, test_server_->gauge("cluster.cluster_0.upstream_cx_active")->value());
+  ASSERT_EQ(0, test_server_->counter("cluster.cluster_0.upstream_cx_destroy")->value());
+  // Verify that the flood check was triggered
+  EXPECT_EQ(1, test_server_->counter("http2.outbound_flood")->value());
+}
+
+// TODO(yanavlasov): add tests for WINDOW_UPDATE overflow from the router filter. These tests need
+// missing support for write resumption from test sockets that were forced to return EAGAIN by the
+// test.
+
+// Verify that the server can detect flood of RST_STREAM frames.
+TEST_P(Http2FloodMitigationTest, RST_STREAM) {
+  // Use invalid HTTP headers to trigger sending RST_STREAM frames.
+  config_helper_.addConfigModifier(
+      [](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&
+             hcm) -> void {
+        hcm.mutable_http2_protocol_options()
+            ->mutable_override_stream_error_on_invalid_http_message()
+            ->set_value(true);
+      });
+  beginSession();
+
+  uint32_t stream_index = 0;
+  auto request =
+      Http::Http2::Http2Frame::makeMalformedRequest(Http2Frame::makeClientStreamId(stream_index));
+  sendFrame(request);
+  auto response = readFrame();
+  // Make sure we've got RST_STREAM from the server
+  EXPECT_EQ(Http2Frame::Type::RstStream, response.type());
+
+  // Simulate TCP push back on the Envoy's downstream network socket, so that outbound frames start
+  // to accumulate in the transport socket buffer.
+  writev_matcher_->setWritevReturnsEgain();
+
+  for (++stream_index; stream_index < ControlFrameFloodLimit + 2; ++stream_index) {
+    request =
+        Http::Http2::Http2Frame::makeMalformedRequest(Http2Frame::makeClientStreamId(stream_index));
+    sendFrame(request);
+  }
+  tcp_client_->waitForDisconnect();
+  EXPECT_EQ(1, test_server_->counter("http2.outbound_control_flood")->value());
+  EXPECT_EQ(1,
+            test_server_->counter("http.config_test.downstream_cx_delayed_close_timeout")->value());
+}
+
+// Verify detection of flood by the RST_STREAM frame sent on pending flush timeout
+TEST_P(Http2FloodMitigationTest, RstStreamOverflowOnPendingFlushTimeout) {
+  config_helper_.addConfigModifier(
+      [](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&
+             hcm) {
+        hcm.mutable_stream_idle_timeout()->set_seconds(0);
+        constexpr uint64_t IdleTimeoutMs = 400;
+        hcm.mutable_stream_idle_timeout()->set_nanos(IdleTimeoutMs * 1000 * 1000);
+      });
+
+  // Pending flush timer is started when the upstream response has completed but there is no window
+  // to send DATA downstream. The test downstream client does not update WINDOW and as such Envoy
+  // will use the default 65535 bytes. First, pre-fill the outbound queue with 65 byte frames, which
+  // should consume 65 * 997 = 64805 bytes of downstream connection window.
+  prefillOutboundDownstreamQueue(AllFrameFloodLimit - 3, 65);
+
+  // At this point the outbound downstream frame queue should be 3 away from overflowing with a 730
+  // byte window. Make the response a single DATA frame with a 1024 byte payload. This should
+  // overflow the available downstream window and start the pending flush timer. Envoy proxies 2
+  // frames downstream, HEADERS and a partial DATA frame, which makes the frame queue 1 away from
+  // overflow.
+  const auto request2 = Http2Frame::makeRequest(
+      Http2Frame::makeClientStreamId(1), "host", "/test/long/url",
+      {Http2Frame::Header("response_data_blocks", "1"),
+       Http2Frame::Header("response_size_bytes", "1024"), Http2Frame::Header("no_trailers", "0")});
+  sendFrame(request2);
+
+  // Pending flush timer sends RST_STREAM frame which should overflow outbound frame queue and
+  // disconnect the connection.
+  tcp_client_->waitForDisconnect();
+
+  // Verify that the flood check was triggered
+  EXPECT_EQ(1, test_server_->counter("http2.outbound_flood")->value());
+  // Verify that pending flush timeout was hit
+  EXPECT_EQ(1, test_server_->counter("http2.tx_flush_timeout")->value());
+}
+
+// Verify detection of frame flood when sending second GOAWAY frame on drain timeout
+TEST_P(Http2FloodMitigationTest, GoAwayOverflowOnDrainTimeout) {
+  config_helper_.addConfigModifier(
+      [](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&
+             hcm) {
+        auto* drain_time_out = hcm.mutable_drain_timeout();
+        std::chrono::milliseconds timeout(1000);
+        auto seconds = std::chrono::duration_cast<std::chrono::seconds>(timeout);
+        drain_time_out->set_seconds(seconds.count());
+
+        auto* http_protocol_options = hcm.mutable_common_http_protocol_options();
+        auto* idle_time_out = http_protocol_options->mutable_idle_timeout();
+        idle_time_out->set_seconds(seconds.count());
+      });
+  // pre-fill two away from overflow
+  prefillOutboundDownstreamQueue(AllFrameFloodLimit - 2);
+
+  // The connection idle timeout will send the first GOAWAY frame and start the drain timer.
+  // The drain timeout will then send a second GOAWAY frame, which should trigger flood protection.
+  // Wait for connection to be flooded with outbound GOAWAY frame and disconnected.
+  tcp_client_->waitForDisconnect();
+
+  // Verify that the flood check was triggered
+  EXPECT_EQ(1, test_server_->counter("http2.outbound_flood")->value());
+}
+
+// Verify detection of overflowing outbound frame queue with the GOAWAY frames sent after the
+// downstream idle connection timeout disconnects the connection.
+// The test verifies protocol constraint violation handling in the
+// Http2::ConnectionImpl::shutdownNotice() method.
+TEST_P(Http2FloodMitigationTest, DownstreamIdleTimeoutTriggersFloodProtection) {
+  config_helper_.addConfigModifier(
+      [](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&
+             hcm) {
+        auto* http_protocol_options = hcm.mutable_common_http_protocol_options();
+        auto* idle_time_out = http_protocol_options->mutable_idle_timeout();
+        std::chrono::milliseconds timeout(1000);
+        auto seconds = std::chrono::duration_cast<std::chrono::seconds>(timeout);
+        idle_time_out->set_seconds(seconds.count());
+      });
+
+  prefillOutboundDownstreamQueue(AllFrameFloodLimit - 1);
+  tcp_client_->waitForDisconnect();
+
+  EXPECT_EQ(1, test_server_->counter("http2.outbound_flood")->value());
+  EXPECT_EQ(1, test_server_->counter("http.config_test.downstream_cx_idle_timeout")->value());
+}
+
+// Verify detection of overflowing outbound frame queue with the GOAWAY frames sent after the
+// downstream connection duration timeout disconnects the connection.
+// The test verifies protocol constraint violation handling in the
+// Http2::ConnectionImpl::shutdownNotice() method.
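+// (As in the idle timeout test above, the queue is pre-filled to 1 below the limit, so the single
+// GOAWAY frame emitted through shutdownNotice() is enough to overflow it.)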
+TEST_P(Http2FloodMitigationTest, DownstreamConnectionDurationTimeoutTriggersFloodProtection) {
+  config_helper_.addConfigModifier(
+      [](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&
+             hcm) {
+        auto* http_protocol_options = hcm.mutable_common_http_protocol_options();
+        auto* max_connection_duration = http_protocol_options->mutable_max_connection_duration();
+        std::chrono::milliseconds timeout(1000);
+        auto seconds = std::chrono::duration_cast<std::chrono::seconds>(timeout);
+        max_connection_duration->set_seconds(seconds.count());
+      });
+  prefillOutboundDownstreamQueue(AllFrameFloodLimit - 1);
+  tcp_client_->waitForDisconnect();
+
+  EXPECT_EQ(1, test_server_->counter("http2.outbound_flood")->value());
+  EXPECT_EQ(1,
+            test_server_->counter("http.config_test.downstream_cx_max_duration_reached")->value());
+}
+
+// Verify detection of frame flood when sending GOAWAY frame during processing of response headers
+// on a draining listener.
+TEST_P(Http2FloodMitigationTest, GoawayOverflowDuringResponseWhenDraining) {
+  // pre-fill one away from overflow
+  prefillOutboundDownstreamQueue(AllFrameFloodLimit - 1);
+
+  triggerListenerDrain();
+
+  // Send second request which should trigger Envoy to send GOAWAY (since it is in the draining
+  // state) when processing response headers. Verify that connection was disconnected and
+  // appropriate counters were set.
+  auto request2 =
+      Http2Frame::makeRequest(Http2Frame::makeClientStreamId(1), "host", "/test/long/url");
+  sendFrame(request2);
+
+  // Wait for connection to be flooded with outbound GOAWAY frame and disconnected.
+  tcp_client_->waitForDisconnect();
+
+  // Verify that the upstream connection is still alive.
+  ASSERT_EQ(1, test_server_->gauge("cluster.cluster_0.upstream_cx_active")->value());
+  ASSERT_EQ(0, test_server_->counter("cluster.cluster_0.upstream_cx_destroy")->value());
+  // Verify that the flood check was triggered
+  EXPECT_EQ(1, test_server_->counter("http2.outbound_flood")->value());
+  EXPECT_EQ(1, test_server_->counter("http.config_test.downstream_cx_drain_close")->value());
+}
+
+// Verify detection of frame flood when sending GOAWAY frame during call to sendLocalReply()
+// from decoder filter on a draining listener.
+TEST_P(Http2FloodMitigationTest, GoawayOverflowFromDecoderFilterSendLocalReplyWhenDraining) {
+  config_helper_.addConfigModifier(
+      [&](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&
+              hcm) -> void {
+        const std::string yaml_string = R"EOF(
+name: send_local_reply_filter
+typed_config:
+  "@type": type.googleapis.com/test.integration.filters.SetResponseCodeFilterConfig
+  prefix: "/call_send_local_reply"
+  code: 404
+  )EOF";
+        TestUtility::loadFromYaml(yaml_string, *hcm.add_http_filters());
+        // keep router the last
+        auto size = hcm.http_filters_size();
+        hcm.mutable_http_filters()->SwapElements(size - 2, size - 1);
+      });
+
+  // pre-fill one away from overflow
+  prefillOutboundDownstreamQueue(AllFrameFloodLimit - 1);
+
+  triggerListenerDrain();
+
+  // At this point the outbound downstream frame queue should be 1 away from overflowing.
+  // Make the SetResponseCodeFilterConfig decoder filter call sendLocalReply without body which
+  // should trigger Envoy to send GOAWAY (since it is in the draining state) when processing
+  // sendLocalReply() headers. Verify that connection was disconnected and appropriate counters were
+  // set.
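+  // (On the draining connection the GOAWAY is queued ahead of the sendLocalReply() HEADERS, so
+  // the GOAWAY frame alone overflows a queue that is 1 below the limit.)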
+  auto request2 =
+      Http2Frame::makeRequest(Http2Frame::makeClientStreamId(1), "host", "/call_send_local_reply");
+  sendFrame(request2);
+
+  // Wait for connection to be flooded with outbound GOAWAY frame and disconnected.
+  tcp_client_->waitForDisconnect();
+
+  // Verify that the upstream connection is still alive.
+  ASSERT_EQ(1, test_server_->gauge("cluster.cluster_0.upstream_cx_active")->value());
+  ASSERT_EQ(0, test_server_->counter("cluster.cluster_0.upstream_cx_destroy")->value());
+  // Verify that the flood check was triggered
+  EXPECT_EQ(1, test_server_->counter("http2.outbound_flood")->value());
+  EXPECT_EQ(1, test_server_->counter("http.config_test.downstream_cx_drain_close")->value());
+}
+
+// Verify that the server can detect flooding by RST_STREAM frames when the upstream disconnects
+// before sending response headers.
+TEST_P(Http2FloodMitigationTest, RstStreamOnUpstreamRemoteCloseBeforeResponseHeaders) {
+  // pre-fill 3 away from overflow
+  prefillOutboundDownstreamQueue(AllFrameFloodLimit - 3);
+
+  // Start second request.
+  auto request2 =
+      Http2Frame::makePostRequest(Http2Frame::makeClientStreamId(1), "host", "/test/long/url");
+  sendFrame(request2);
+
+  // Wait for it to be proxied
+  test_server_->waitForCounterGe("cluster.cluster_0.upstream_rq_total", 2);
+
+  // Disconnect the upstream connection. Since no response headers have been sent yet, the router
+  // filter will send a 503 with body and then a RST_STREAM. With these 3 frames the downstream
+  // outbound frame queue should overflow.
+  ASSERT_TRUE(static_cast<AutonomousUpstream*>(fake_upstreams_.front().get())->closeConnection(0));
+
+  // Wait for connection to be flooded with outbound RST_STREAM frame and disconnected.
+  tcp_client_->waitForDisconnect();
+
+  ASSERT_EQ(1, test_server_->counter("cluster.cluster_0.upstream_cx_destroy")->value());
+  // Verify that the flood check was triggered
+  EXPECT_EQ(1, test_server_->counter("http2.outbound_flood")->value());
+}
+
+// Verify that the server can detect flooding by the RST_STREAM sent on stream idle timeout
+// after sending response headers.
+TEST_P(Http2FloodMitigationTest, RstStreamOnStreamIdleTimeoutAfterResponseHeaders) {
+  config_helper_.addConfigModifier(
+      [](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&
+             hcm) {
+        auto* stream_idle_timeout = hcm.mutable_stream_idle_timeout();
+        std::chrono::milliseconds timeout(1000);
+        auto seconds = std::chrono::duration_cast<std::chrono::seconds>(timeout);
+        stream_idle_timeout->set_seconds(seconds.count());
+      });
+  // pre-fill 2 away from overflow
+  prefillOutboundDownstreamQueue(AllFrameFloodLimit - 2);
+
+  // Start second request, which should result in response headers being sent but the stream kept
+  // open.
+  auto request2 = Http2Frame::makeRequest(
+      Http2Frame::makeClientStreamId(1), "host", "/test/long/url",
+      {Http2Frame::Header("response_data_blocks", "0"), Http2Frame::Header("no_end_stream", "0")});
+  sendFrame(request2);
+
+  // Wait for stream idle timeout to send RST_STREAM. With the response headers frame from the
+  // second response the downstream outbound frame queue should overflow.
+  tcp_client_->waitForDisconnect();
+
+  EXPECT_EQ(1, test_server_->counter("http2.outbound_flood")->value());
+  EXPECT_EQ(1, test_server_->counter("http.config_test.downstream_rq_idle_timeout")->value());
+}
+
+// Verify detection of overflowing outbound frame queue with the PING frames sent by the keep alive
+// timer. The test verifies protocol constraint violation handling in the
+// Http2::ConnectionImpl::sendKeepalive() method.
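+// (The keepalive interval below is deliberately short, 500ms, so PING frames are generated
+// quickly once the queue has been pre-filled to 1 below the limit.)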
+TEST_P(Http2FloodMitigationTest, KeepAliveTimerTriggersFloodProtection) {
+  config_helper_.addConfigModifier(
+      [](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&
+             hcm) {
+        auto* keep_alive = hcm.mutable_http2_protocol_options()->mutable_connection_keepalive();
+        keep_alive->mutable_interval()->set_nanos(500 * 1000 * 1000);
+        keep_alive->mutable_timeout()->set_seconds(1);
+      });
+
+  prefillOutboundDownstreamQueue(AllFrameFloodLimit - 1);
+  tcp_client_->waitForDisconnect();
+
+  EXPECT_EQ(1, test_server_->counter("http2.outbound_flood")->value());
+}
+
+// Verify that the server stops reading the downstream connection on protocol error.
+TEST_P(Http2FloodMitigationTest, TooManyStreams) {
+  config_helper_.addConfigModifier(
+      [](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&
+             hcm) -> void {
+        hcm.mutable_http2_protocol_options()->mutable_max_concurrent_streams()->set_value(2);
+      });
+  autonomous_upstream_ = true;
+  beginSession();
+  // To prevent Envoy from closing client streams, the upstream connection needs to push back on
+  // writing by the upstream server. In this case Envoy will not see upstream responses and will
+  // keep client streams open, eventually maxing them out and causing the client connection to be
+  // closed.
+  writev_matcher_->setSourcePort(fake_upstreams_[0]->localAddress()->ip()->port());
+
+  // Exceed the number of streams allowed by the server. The server should stop reading from the
+  // client.
+  floodServer("host", "/test/long/url", Http2Frame::ResponseStatus::Ok, "", 3);
+}
+
+TEST_P(Http2FloodMitigationTest, EmptyHeaders) {
+  config_helper_.addConfigModifier(
+      [&](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&
+              hcm) -> void {
+        hcm.mutable_http2_protocol_options()
+            ->mutable_max_consecutive_inbound_frames_with_empty_payload()
+            ->set_value(0);
+      });
+  beginSession();
+
+  const auto request = Http2Frame::makeEmptyHeadersFrame(Http2Frame::makeClientStreamId(0));
+  sendFrame(request);
+
+  tcp_client_->waitForDisconnect();
+
+  EXPECT_EQ(1, test_server_->counter("http2.inbound_empty_frames_flood")->value());
+  EXPECT_EQ(1,
+            test_server_->counter("http.config_test.downstream_cx_delayed_close_timeout")->value());
+}
+
+TEST_P(Http2FloodMitigationTest, EmptyHeadersContinuation) {
+  useAccessLog("%RESPONSE_FLAGS% %RESPONSE_CODE_DETAILS%");
+  beginSession();
+
+  const uint32_t request_stream_id = Http2Frame::makeClientStreamId(0);
+  auto request = Http2Frame::makeEmptyHeadersFrame(request_stream_id);
+  sendFrame(request);
+
+  for (int i = 0; i < 2; i++) {
+    request = Http2Frame::makeEmptyContinuationFrame(request_stream_id);
+    sendFrame(request);
+  }
+
+  tcp_client_->waitForDisconnect();
+
+  EXPECT_THAT(waitForAccessLog(access_log_name_), HasSubstr("http2.inbound_empty_frames_flood"));
+  EXPECT_EQ(1, test_server_->counter("http2.inbound_empty_frames_flood")->value());
+  EXPECT_EQ(1,
+            test_server_->counter("http.config_test.downstream_cx_delayed_close_timeout")->value());
+}
+
+TEST_P(Http2FloodMitigationTest, EmptyData) {
+  useAccessLog("%RESPONSE_FLAGS% %RESPONSE_CODE_DETAILS%");
+  beginSession();
+
+  const uint32_t request_stream_id = Http2Frame::makeClientStreamId(0);
+  auto request = Http2Frame::makePostRequest(request_stream_id, "host", "/");
+  sendFrame(request);
+
+  for (int i = 0; i < 2; i++) {
+    request = Http2Frame::makeEmptyDataFrame(request_stream_id);
+    sendFrame(request);
+  }
+
+  tcp_client_->waitForDisconnect();
+
+  EXPECT_THAT(waitForAccessLog(access_log_name_), HasSubstr("http2.inbound_empty_frames_flood"));
+  EXPECT_EQ(1, test_server_->counter("http2.inbound_empty_frames_flood")->value());
+  EXPECT_EQ(1,
+            test_server_->counter("http.config_test.downstream_cx_delayed_close_timeout")->value());
+}
+
+TEST_P(Http2FloodMitigationTest, PriorityIdleStream) {
+  beginSession();
+
+  floodServer(Http2Frame::makePriorityFrame(Http2Frame::makeClientStreamId(0),
+                                            Http2Frame::makeClientStreamId(1)),
+              "http2.inbound_priority_frames_flood",
+              Http2::Utility::OptionsLimits::DEFAULT_MAX_INBOUND_PRIORITY_FRAMES_PER_STREAM + 1);
+}
+
+TEST_P(Http2FloodMitigationTest, PriorityOpenStream) {
+  beginSession();
+
+  // Open stream.
+  const uint32_t request_stream_id = Http2Frame::makeClientStreamId(0);
+  const auto request = Http2Frame::makeRequest(request_stream_id, "host", "/");
+  sendFrame(request);
+
+  floodServer(Http2Frame::makePriorityFrame(request_stream_id, Http2Frame::makeClientStreamId(1)),
+              "http2.inbound_priority_frames_flood",
+              Http2::Utility::OptionsLimits::DEFAULT_MAX_INBOUND_PRIORITY_FRAMES_PER_STREAM * 2 +
+                  1);
+}
+
+TEST_P(Http2FloodMitigationTest, PriorityClosedStream) {
+  autonomous_upstream_ = true;
+  beginSession();
+
+  // Open stream.
+  const uint32_t request_stream_id = Http2Frame::makeClientStreamId(0);
+  const auto request = Http2Frame::makeRequest(request_stream_id, "host", "/");
+  sendFrame(request);
+  // Reading response marks this stream as closed in nghttp2.
+  auto frame = readFrame();
+  EXPECT_EQ(Http2Frame::Type::Headers, frame.type());
+
+  floodServer(Http2Frame::makePriorityFrame(request_stream_id, Http2Frame::makeClientStreamId(1)),
+              "http2.inbound_priority_frames_flood",
+              Http2::Utility::OptionsLimits::DEFAULT_MAX_INBOUND_PRIORITY_FRAMES_PER_STREAM * 2 +
+                  1);
+}
+
+TEST_P(Http2FloodMitigationTest, WindowUpdate) {
+  beginSession();
+
+  // Open stream.
+  const uint32_t request_stream_id = Http2Frame::makeClientStreamId(0);
+  const auto request = Http2Frame::makeRequest(request_stream_id, "host", "/");
+  sendFrame(request);
+
+  // Since we do not send any DATA frames, only 4 sequential WINDOW_UPDATE frames should
+  // trigger flood protection.
+  floodServer(Http2Frame::makeWindowUpdateFrame(request_stream_id, 1),
+              "http2.inbound_window_update_frames_flood", 4);
+}
+
+// Verify that the HTTP/2 connection is terminated upon receiving invalid HEADERS frame.
+TEST_P(Http2FloodMitigationTest, ZerolenHeader) {
+  useAccessLog("%RESPONSE_FLAGS% %RESPONSE_CODE_DETAILS%");
+  beginSession();
+
+  // Send invalid request.
+  const auto request = Http2Frame::makeMalformedRequestWithZerolenHeader(
+      Http2Frame::makeClientStreamId(0), "host", "/");
+  sendFrame(request);
+
+  tcp_client_->waitForDisconnect();
+
+  EXPECT_EQ(1, test_server_->counter("http2.rx_messaging_error")->value());
+  EXPECT_EQ(1,
+            test_server_->counter("http.config_test.downstream_cx_delayed_close_timeout")->value());
+  EXPECT_THAT(waitForAccessLog(access_log_name_), HasSubstr("http2.invalid.header.field"));
+  // expect a downstream protocol error.
+  EXPECT_THAT(waitForAccessLog(access_log_name_), HasSubstr("DPE"));
+}
+
+// Verify that only the offending stream is terminated upon receiving invalid HEADERS frame.
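+// (override_stream_error_on_invalid_http_message, enabled below, turns the messaging error into a
+// per-stream RST_STREAM instead of terminating the whole connection, so a valid request can still
+// be served on the same connection afterwards.)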
+TEST_P(Http2FloodMitigationTest, ZerolenHeaderAllowed) {
+  useAccessLog("%RESPONSE_FLAGS% %RESPONSE_CODE_DETAILS%");
+  config_helper_.addConfigModifier(
+      [](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&
+             hcm) -> void {
+        hcm.mutable_http2_protocol_options()
+            ->mutable_override_stream_error_on_invalid_http_message()
+            ->set_value(true);
+      });
+  autonomous_upstream_ = true;
+  beginSession();
+
+  // Send invalid request.
+  uint32_t request_idx = 0;
+  auto request = Http2Frame::makeMalformedRequestWithZerolenHeader(
+      Http2Frame::makeClientStreamId(request_idx), "host", "/");
+  sendFrame(request);
+  // Make sure we've got RST_STREAM from the server.
+  auto response = readFrame();
+  EXPECT_EQ(Http2Frame::Type::RstStream, response.type());
+
+  // Send valid request using the same connection.
+  request_idx++;
+  request = Http2Frame::makeRequest(Http2Frame::makeClientStreamId(request_idx), "host", "/");
+  sendFrame(request);
+  response = readFrame();
+  EXPECT_EQ(Http2Frame::Type::Headers, response.type());
+  EXPECT_EQ(Http2Frame::ResponseStatus::Ok, response.responseStatus());
+
+  tcp_client_->close();
+
+  EXPECT_EQ(1, test_server_->counter("http2.rx_messaging_error")->value());
+  EXPECT_EQ(0,
+            test_server_->counter("http.config_test.downstream_cx_delayed_close_timeout")->value());
+  EXPECT_THAT(waitForAccessLog(access_log_name_), HasSubstr("http2.invalid.header.field"));
+  // expect Downstream Protocol Error
+  EXPECT_THAT(waitForAccessLog(access_log_name_), HasSubstr("DPE"));
+}
+
+} // namespace Envoy
diff --git a/test/integration/http2_integration_test.cc b/test/integration/http2_integration_test.cc
index 655181183fb0..1ad689a4849e 100644
--- a/test/integration/http2_integration_test.cc
+++ b/test/integration/http2_integration_test.cc
@@ -10,9 +10,7 @@
 #include "common/buffer/buffer_impl.h"
 #include "common/common/random_generator.h"
 #include "common/http/header_map_impl.h"
-#include "common/network/socket_option_impl.h"
 
-#include "test/integration/autonomous_upstream.h"
 #include "test/integration/utility.h"
 #include "test/mocks/http/mocks.h"
 #include "test/test_common/network_utility.h"
@@ -1537,58 +1535,11 @@ TEST_P(Http2RingHashIntegrationTest, CookieRoutingWithCookieWithTtlSet) {
   EXPECT_EQ(served_by.size(), 1);
 }
 
-void Http2FrameIntegrationTest::startHttp2Session() {
-  ASSERT_TRUE(tcp_client_->write(Http2Frame::Preamble, false, false));
-
-  // Send empty initial SETTINGS frame.
-  auto settings = Http2Frame::makeEmptySettingsFrame();
-  ASSERT_TRUE(tcp_client_->write(std::string(settings), false, false));
-
-  // Read initial SETTINGS frame from the server.
-  readFrame();
-
-  // Send a SETTINGS ACK.
-  settings = Http2Frame::makeEmptySettingsFrame(Http2Frame::SettingsFlags::Ack);
-  ASSERT_TRUE(tcp_client_->write(std::string(settings), false, false));
-
-  // read pending SETTINGS and WINDOW_UPDATE frames
-  readFrame();
-  readFrame();
-}
-
-void Http2FrameIntegrationTest::beginSession() {
-  setDownstreamProtocol(Http::CodecClient::Type::HTTP2);
-  setUpstreamProtocol(FakeHttpConnection::Type::HTTP2);
-  // set lower outbound frame limits to make tests run faster
-  config_helper_.setOutboundFramesLimits(1000, 100);
-  initialize();
-  // Set up a raw connection to easily send requests without reading responses.
-  auto options = std::make_shared<Network::Socket::Options>();
-  options->emplace_back(std::make_shared<Network::SocketOptionImpl>(
-      envoy::config::core::v3::SocketOption::STATE_PREBIND,
-      ENVOY_MAKE_SOCKET_OPTION_NAME(SOL_SOCKET, SO_RCVBUF), 1024));
-  tcp_client_ = makeTcpConnection(lookupPort("http"), options);
-  startHttp2Session();
-}
-
-Http2Frame Http2FrameIntegrationTest::readFrame() {
-  Http2Frame frame;
-  EXPECT_TRUE(tcp_client_->waitForData(frame.HeaderSize));
-  frame.setHeader(tcp_client_->data());
-  tcp_client_->clearData(frame.HeaderSize);
-  auto len = frame.payloadSize();
-  if (len) {
-    EXPECT_TRUE(tcp_client_->waitForData(len));
-    frame.setPayload(tcp_client_->data());
-    tcp_client_->clearData(len);
-  }
-  return frame;
-}
-
-void Http2FrameIntegrationTest::sendFrame(const Http2Frame& frame) {
-  ASSERT_TRUE(tcp_client_->connected());
-  ASSERT_TRUE(tcp_client_->write(std::string(frame), false, false));
-}
+class Http2FrameIntegrationTest : public testing::TestWithParam<Network::Address::IpVersion>,
+                                  public Http2RawFrameIntegrationTest {
+public:
+  Http2FrameIntegrationTest() : Http2RawFrameIntegrationTest(GetParam()) {}
+};
 
 // Regression test.
 TEST_P(Http2FrameIntegrationTest, SetDetailsTwice) {
@@ -1614,981 +1565,4 @@ INSTANTIATE_TEST_SUITE_P(IpVersions, Http2FrameIntegrationTest,
                          testing::ValuesIn(TestEnvironment::getIpVersionsForTest()),
                          TestUtility::ipTestParamsToString);
 
-namespace {
-const uint32_t ControlFrameFloodLimit = 100;
-const uint32_t AllFrameFloodLimit = 1000;
-} // namespace
-
-SocketInterfaceSwap::SocketInterfaceSwap() {
-  Envoy::Network::SocketInterfaceSingleton::clear();
-  test_socket_interface_loader_ = std::make_unique<Envoy::Network::SocketInterfaceLoader>(
-      std::make_unique<Envoy::Network::TestSocketInterface>(
-          [writev_matcher = writev_matcher_](Envoy::Network::TestIoSocketHandle* io_handle,
-                                             const Buffer::RawSlice*,
-                                             uint64_t) -> absl::optional<Api::IoCallUint64Result> {
-            if (writev_matcher->shouldReturnEgain(io_handle->localAddress()->ip()->port())) {
-              return Api::IoCallUint64Result(
-                  0, Api::IoErrorPtr(Network::IoSocketError::getIoSocketEagainInstance(),
-                                     Network::IoSocketError::deleteIoError));
-            }
-            return absl::nullopt;
-          }));
-}
-
-SocketInterfaceSwap::~SocketInterfaceSwap() {
-  test_socket_interface_loader_.reset();
-  Envoy::Network::SocketInterfaceSingleton::initialize(previous_socket_interface_);
-}
-
-Http2FloodMitigationTest::Http2FloodMitigationTest() {
-  config_helper_.addConfigModifier(
-      [](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&
-             hcm) { hcm.mutable_delayed_close_timeout()->set_seconds(1); });
-}
-
-void Http2FloodMitigationTest::setNetworkConnectionBufferSize() {
-  // nghttp2 library has its own internal mitigation for outbound control frames (see
-  // NGHTTP2_DEFAULT_MAX_OBQ_FLOOD_ITEM). The default nghttp2 mitigation threshold of 1K is modified
-  // to 10K in the ConnectionImpl::Http2Options::Http2Options. The mitigation is triggered when
-  // there are more than 10000 PING or SETTINGS frames with ACK flag in the nghttp2 internal
-  // outbound queue. It is possible to trigger this mitigation in nghttp2 before triggering Envoy's
-  // own flood mitigation. This can happen when a buffer large enough to contain over 10K PING or
-  // SETTINGS frames is dispatched to the nghttp2 library. To prevent this from happening the
-  // network connection receive buffer needs to be smaller than 90Kb (which is 10K SETTINGS frames).
-  // Set it to the arbitrarily chosen value of 32K. Note that this buffer has 16K lower bound.
-  config_helper_.addConfigModifier([](envoy::config::bootstrap::v3::Bootstrap& bootstrap) -> void {
-    RELEASE_ASSERT(bootstrap.mutable_static_resources()->listeners_size() >= 1, "");
-    auto* listener = bootstrap.mutable_static_resources()->mutable_listeners(0);
-
-    listener->mutable_per_connection_buffer_limit_bytes()->set_value(32 * 1024);
-  });
-}
-
-void Http2FloodMitigationTest::beginSession() {
-  setDownstreamProtocol(Http::CodecClient::Type::HTTP2);
-  setUpstreamProtocol(FakeHttpConnection::Type::HTTP2);
-  // set lower outbound frame limits to make tests run faster
-  config_helper_.setOutboundFramesLimits(AllFrameFloodLimit, ControlFrameFloodLimit);
-  initialize();
-  // Set up a raw connection to easily send requests without reading responses. Also, set a small
-  // TCP receive buffer to speed up connection backup.
-  auto options = std::make_shared<Network::Socket::Options>();
-  options->emplace_back(std::make_shared<Network::SocketOptionImpl>(
-      envoy::config::core::v3::SocketOption::STATE_PREBIND,
-      ENVOY_MAKE_SOCKET_OPTION_NAME(SOL_SOCKET, SO_RCVBUF), 1024));
-  writev_matcher_->setSourcePort(lookupPort("http"));
-  tcp_client_ = makeTcpConnection(lookupPort("http"), options);
-  startHttp2Session();
-}
-
-// Verify that the server detects the flood of the given frame.
-void Http2FloodMitigationTest::floodServer(const Http2Frame& frame, const std::string& flood_stat,
-                                           uint32_t num_frames) {
-  // make sure all frames can fit into 16k buffer
-  ASSERT_LE(num_frames, (16u * 1024u) / frame.size());
-  std::vector<char> buf(num_frames * frame.size());
-  for (auto pos = buf.begin(); pos != buf.end();) {
-    pos = std::copy(frame.begin(), frame.end(), pos);
-  }
-
-  ASSERT_TRUE(tcp_client_->write({buf.begin(), buf.end()}, false, false));
-
-  // Envoy's flood mitigation should kill the connection
-  tcp_client_->waitForDisconnect();
-
-  EXPECT_EQ(1, test_server_->counter(flood_stat)->value());
-  test_server_->waitForCounterGe("http.config_test.downstream_cx_delayed_close_timeout", 1);
-}
-
-// Verify that the server detects the flood using specified request parameters.
-void Http2FloodMitigationTest::floodServer(absl::string_view host, absl::string_view path,
-                                           Http2Frame::ResponseStatus expected_http_status,
-                                           const std::string& flood_stat, uint32_t num_frames) {
-  uint32_t request_idx = 0;
-  auto request = Http2Frame::makeRequest(Http2Frame::makeClientStreamId(request_idx), host, path);
-  sendFrame(request);
-  auto frame = readFrame();
-  EXPECT_EQ(Http2Frame::Type::Headers, frame.type());
-  EXPECT_EQ(expected_http_status, frame.responseStatus());
-  writev_matcher_->setWritevReturnsEgain();
-  for (uint32_t frame = 0; frame < num_frames; ++frame) {
-    request = Http2Frame::makeRequest(Http2Frame::makeClientStreamId(++request_idx), host, path);
-    sendFrame(request);
-  }
-  tcp_client_->waitForDisconnect();
-  if (!flood_stat.empty()) {
-    EXPECT_EQ(1, test_server_->counter(flood_stat)->value());
-  }
-  EXPECT_EQ(1,
-            test_server_->counter("http.config_test.downstream_cx_delayed_close_timeout")->value());
-}
-
-void Http2FloodMitigationTest::prefillOutboundDownstreamQueue(uint32_t data_frame_count,
-                                                              uint32_t data_frame_size) {
-  // Set large buffer limits so the test is not affected by the flow control.
-  config_helper_.setBufferLimits(1024 * 1024 * 1024, 1024 * 1024 * 1024);
-  autonomous_upstream_ = true;
-  autonomous_allow_incomplete_streams_ = true;
-  beginSession();
-
-  // Do not read from the socket and send a request that causes the autonomous upstream to respond
-  // with the specified number of DATA frames. This pre-fills the downstream outbound frame queue
-  // such that the next response triggers flood protection.
-  // Simulate TCP push back on the Envoy's downstream network socket, so that outbound frames
-  // start to accumulate in the transport socket buffer.
-  writev_matcher_->setWritevReturnsEgain();
-
-  const auto request = Http2Frame::makeRequest(
-      Http2Frame::makeClientStreamId(0), "host", "/test/long/url",
-      {Http2Frame::Header("response_data_blocks", absl::StrCat(data_frame_count)),
-       Http2Frame::Header("response_size_bytes", absl::StrCat(data_frame_size)),
-       Http2Frame::Header("no_trailers", "0")});
-  sendFrame(request);
-
-  // Wait for some data to arrive and then wait for the upstream_rq_active to flip to 0 to indicate
-  // that the first request has completed.
-  test_server_->waitForCounterGe("cluster.cluster_0.upstream_cx_rx_bytes_total", 10000);
-  test_server_->waitForGaugeEq("cluster.cluster_0.upstream_rq_active", 0);
-  // Verify that pre-fill did not trigger flood protection
-  EXPECT_EQ(0, test_server_->counter("http2.outbound_flood")->value());
-}
-
-void Http2FloodMitigationTest::triggerListenerDrain() {
-  absl::Notification drain_sequence_started;
-  test_server_->server().dispatcher().post([this, &drain_sequence_started]() {
-    test_server_->drainManager().startDrainSequence([] {});
-    drain_sequence_started.Notify();
-  });
-  drain_sequence_started.WaitForNotification();
-}
-
-INSTANTIATE_TEST_SUITE_P(IpVersions, Http2FloodMitigationTest,
-                         testing::ValuesIn(TestEnvironment::getIpVersionsForTest()),
-                         TestUtility::ipTestParamsToString);
-
-TEST_P(Http2FloodMitigationTest, Ping) {
-  setNetworkConnectionBufferSize();
-  beginSession();
-  writev_matcher_->setWritevReturnsEgain();
-  floodServer(Http2Frame::makePingFrame(), "http2.outbound_control_flood",
-              ControlFrameFloodLimit + 1);
-}
-
-TEST_P(Http2FloodMitigationTest, Settings) {
-  setNetworkConnectionBufferSize();
-  beginSession();
-  writev_matcher_->setWritevReturnsEgain();
-  floodServer(Http2Frame::makeEmptySettingsFrame(), "http2.outbound_control_flood",
-              ControlFrameFloodLimit + 1);
-}
-
-// Verify that the server can detect flood of internally generated 404 responses.
-TEST_P(Http2FloodMitigationTest, 404) {
-  // Change the default route to be restrictive, and send a request to a non-existent route.
-  config_helper_.setDefaultHostAndRoute("foo.com", "/found");
-  beginSession();
-
-  // Send requests to a non-existent path to generate 404s.
-  floodServer("host", "/notfound", Http2Frame::ResponseStatus::NotFound, "http2.outbound_flood",
-              AllFrameFloodLimit + 1);
-}
-
-// Verify that the server can detect flood of response DATA frames
-TEST_P(Http2FloodMitigationTest, Data) {
-  // Set large buffer limits so the test is not affected by the flow control.
-  config_helper_.setBufferLimits(1024 * 1024 * 1024, 1024 * 1024 * 1024);
-  autonomous_upstream_ = true;
-  autonomous_allow_incomplete_streams_ = true;
-  beginSession();
-
-  // Do not read from the socket and send a request that causes the autonomous upstream
-  // to respond with 1000 DATA frames. The Http2FloodMitigationTest::beginSession()
-  // sets a 1000 frame flood limit for all frame types. Including the 1 HEADERS response frame,
-  // 1000 DATA frames should trigger flood protection.
-  // Simulate TCP push back on the Envoy's downstream network socket, so that outbound frames start
-  // to accumulate in the transport socket buffer.
-  writev_matcher_->setWritevReturnsEgain();
-
-  const auto request = Http2Frame::makeRequest(
-      1, "host", "/test/long/url",
-      {Http2Frame::Header("response_data_blocks", "1000"), Http2Frame::Header("no_trailers", "0")});
-  sendFrame(request);
-
-  // Wait for connection to be flooded with outbound DATA frames and disconnected.
-  tcp_client_->waitForDisconnect();
-
-  // If the server codec had incorrectly thrown an exception on flood detection it would cause
-  // the entire upstream to be disconnected. Verify it is still active, and there are no destroyed
-  // connections.
-  ASSERT_EQ(1, test_server_->gauge("cluster.cluster_0.upstream_cx_active")->value());
-  ASSERT_EQ(0, test_server_->counter("cluster.cluster_0.upstream_cx_destroy")->value());
-  // Verify that the flood check was triggered
-  EXPECT_EQ(1, test_server_->counter("http2.outbound_flood")->value());
-}
-
-// Verify that the server can detect flood triggered by a DATA frame from a decoder filter call
-// to sendLocalReply().
-// This test also verifies that RELEASE_ASSERT in the ConnectionImpl::StreamImpl::encodeDataHelper()
-// is not fired when it is called by the sendLocalReply() in the dispatching context.
-TEST_P(Http2FloodMitigationTest, DataOverflowFromDecoderFilterSendLocalReply) {
-  config_helper_.addConfigModifier(
-      [&](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&
-              hcm) -> void {
-        const std::string yaml_string = R"EOF(
-name: send_local_reply_filter
-typed_config:
-  "@type": type.googleapis.com/test.integration.filters.SetResponseCodeFilterConfig
-  prefix: "/call_send_local_reply"
-  code: 404
-  body: "something"
-  )EOF";
-        TestUtility::loadFromYaml(yaml_string, *hcm.add_http_filters());
-        // keep router the last
-        auto size = hcm.http_filters_size();
-        hcm.mutable_http_filters()->SwapElements(size - 2, size - 1);
-      });
-
-  // pre-fill 2 away from overflow
-  prefillOutboundDownstreamQueue(AllFrameFloodLimit - 2);
-
-  // At this point the outbound downstream frame queue should be 2 away from overflowing.
-  // Make the SetResponseCodeFilterConfig decoder filter call sendLocalReply with body.
-  // HEADERS + DATA frames should overflow the queue.
-  // Verify that connection was disconnected and appropriate counters were set.
-  auto request2 =
-      Http2Frame::makeRequest(Http2Frame::makeClientStreamId(1), "host", "/call_send_local_reply");
-  sendFrame(request2);
-
-  // Wait for connection to be flooded with outbound DATA frame and disconnected.
-  tcp_client_->waitForDisconnect();
-
-  // Verify that the upstream connection is still alive.
-  ASSERT_EQ(1, test_server_->gauge("cluster.cluster_0.upstream_cx_active")->value());
-  ASSERT_EQ(0, test_server_->counter("cluster.cluster_0.upstream_cx_destroy")->value());
-  // Verify that the flood check was triggered
-  EXPECT_EQ(1, test_server_->counter("http2.outbound_flood")->value());
-}
-
-// Verify that the server can detect flood of response HEADERS frames
-TEST_P(Http2FloodMitigationTest, Headers) {
-  // pre-fill one away from overflow
-  prefillOutboundDownstreamQueue(AllFrameFloodLimit - 1);
-
-  // Send second request which should trigger a headers-only response.
-  // Verify that connection was disconnected and appropriate counters were set.
-  auto request2 = Http2Frame::makeRequest(
-      Http2Frame::makeClientStreamId(1), "host", "/test/long/url",
-      {Http2Frame::Header("response_data_blocks", "0"), Http2Frame::Header("no_trailers", "0")});
-  sendFrame(request2);
-
-  // Wait for connection to be flooded with outbound HEADERS frame and disconnected.
-  tcp_client_->waitForDisconnect();
-
-  // If the server codec had incorrectly thrown an exception on flood detection it would cause
-  // the entire upstream to be disconnected. Verify it is still active, and there are no destroyed
-  // connections.
-  ASSERT_EQ(1, test_server_->gauge("cluster.cluster_0.upstream_cx_active")->value());
-  ASSERT_EQ(0, test_server_->counter("cluster.cluster_0.upstream_cx_destroy")->value());
-  // Verify that the flood check was triggered
-  EXPECT_EQ(1, test_server_->counter("http2.outbound_flood")->value());
-}
-
-// Verify that the server can detect overflow caused by the 100 continue response sent by Envoy
-// itself
-TEST_P(Http2FloodMitigationTest, Envoy100ContinueHeaders) {
-  // pre-fill one away from overflow
-  prefillOutboundDownstreamQueue(AllFrameFloodLimit - 1);
-
-  // Send second request which should trigger Envoy to respond with 100 continue.
-  // Verify that connection was disconnected and appropriate counters were set.
-  auto request2 = Http2Frame::makeRequest(
-      Http2Frame::makeClientStreamId(1), "host", "/test/long/url",
-      {Http2Frame::Header("response_data_blocks", "0"), Http2Frame::Header("no_trailers", "0"),
-       Http2Frame::Header("expect", "100-continue")});
-  sendFrame(request2);
-
-  // Wait for connection to be flooded with outbound HEADERS frame and disconnected.
-  tcp_client_->waitForDisconnect();
-
-  // If the server codec had incorrectly thrown an exception on flood detection it would cause
-  // the entire upstream to be disconnected. Verify it is still active, and there are no destroyed
-  // connections.
-  ASSERT_EQ(1, test_server_->gauge("cluster.cluster_0.upstream_cx_active")->value());
-  ASSERT_EQ(0, test_server_->counter("cluster.cluster_0.upstream_cx_destroy")->value());
-  // The second upstream request should be reset since it is disconnected when sending 100 continue
-  // response
-  EXPECT_EQ(1, test_server_->counter("cluster.cluster_0.upstream_rq_tx_reset")->value());
-  // Verify that the flood check was triggered
-  EXPECT_EQ(1, test_server_->counter("http2.outbound_flood")->value());
-}
-
-// Verify that the server can detect flood triggered by a HEADERS frame from a decoder filter call
-// to sendLocalReply().
-// This test also verifies that RELEASE_ASSERT in the
-// ConnectionImpl::StreamImpl::encodeHeadersBase() is not fired when it is called by the
-// sendLocalReply() in the dispatching context.
-TEST_P(Http2FloodMitigationTest, HeadersOverflowFromDecoderFilterSendLocalReply) {
-  config_helper_.addConfigModifier(
-      [&](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&
-              hcm) -> void {
-        const std::string yaml_string = R"EOF(
-name: send_local_reply_filter
-typed_config:
-  "@type": type.googleapis.com/test.integration.filters.SetResponseCodeFilterConfig
-  prefix: "/call_send_local_reply"
-  code: 404
-  )EOF";
-        TestUtility::loadFromYaml(yaml_string, *hcm.add_http_filters());
-        // keep router the last
-        auto size = hcm.http_filters_size();
-        hcm.mutable_http_filters()->SwapElements(size - 2, size - 1);
-      });
-
-  // pre-fill one away from overflow
-  prefillOutboundDownstreamQueue(AllFrameFloodLimit - 1);
-
-  // At this point the outbound downstream frame queue should be 1 away from overflowing.
-  // Make the SetResponseCodeFilterConfig decoder filter call sendLocalReply without body.
-  // Verify that connection was disconnected and appropriate counters were set.
-  auto request2 =
-      Http2Frame::makeRequest(Http2Frame::makeClientStreamId(1), "host", "/call_send_local_reply");
-  sendFrame(request2);
-
-  // Wait for connection to be flooded with outbound HEADERS frame and disconnected.
-  tcp_client_->waitForDisconnect();
-
-  // Verify that the upstream connection is still alive.
-  ASSERT_EQ(1, test_server_->gauge("cluster.cluster_0.upstream_cx_active")->value());
-  ASSERT_EQ(0, test_server_->counter("cluster.cluster_0.upstream_cx_destroy")->value());
-  // Verify that the flood check was triggered
-  EXPECT_EQ(1, test_server_->counter("http2.outbound_flood")->value());
-}
-
-// TODO(yanavlasov): add the same tests as above for the encoder filters.
-// This is currently blocked by the https://github.com/envoyproxy/envoy/pull/13256
-
-// Verify that the server can detect flood of response METADATA frames
-TEST_P(Http2FloodMitigationTest, Metadata) {
-  config_helper_.addConfigModifier([&](envoy::config::bootstrap::v3::Bootstrap& bootstrap) -> void {
-    RELEASE_ASSERT(bootstrap.mutable_static_resources()->clusters_size() >= 1, "");
-    auto* cluster = bootstrap.mutable_static_resources()->mutable_clusters(0);
-    cluster->mutable_http2_protocol_options()->set_allow_metadata(true);
-  });
-  config_helper_.addConfigModifier(
-      [&](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&
-              hcm) -> void { hcm.mutable_http2_protocol_options()->set_allow_metadata(true); });
-
-  // pre-fill one away from overflow
-  prefillOutboundDownstreamQueue(AllFrameFloodLimit - 1);
-
-  // Send second request which should trigger a response with a METADATA frame.
-  auto metadata_map_vector_ptr = std::make_unique<Http::MetadataMapVector>();
-  Http::MetadataMap metadata_map = {
-      {"header_key1", "header_value1"},
-      {"header_key2", "header_value2"},
-  };
-  auto metadata_map_ptr = std::make_unique<Http::MetadataMap>(metadata_map);
-  metadata_map_vector_ptr->push_back(std::move(metadata_map_ptr));
-  static_cast<AutonomousUpstream*>(fake_upstreams_.front().get())
-      ->setPreResponseHeadersMetadata(std::move(metadata_map_vector_ptr));
-
-  // Verify that connection was disconnected and appropriate counters were set.
-  auto request2 = Http2Frame::makeRequest(
-      Http2Frame::makeClientStreamId(1), "host", "/test/long/url",
-      {Http2Frame::Header("response_data_blocks", "0"), Http2Frame::Header("no_trailers", "0")});
-  sendFrame(request2);
-
-  // Wait for connection to be flooded with outbound METADATA frame and disconnected.
-  tcp_client_->waitForDisconnect();
-
-  // If the server codec had incorrectly thrown an exception on flood detection it would cause
-  // the entire upstream to be disconnected. Verify it is still active, and there are no destroyed
-  // connections.
-  ASSERT_EQ(1, test_server_->gauge("cluster.cluster_0.upstream_cx_active")->value());
-  ASSERT_EQ(0, test_server_->counter("cluster.cluster_0.upstream_cx_destroy")->value());
-  // Verify that the flood check was triggered
-  EXPECT_EQ(1, test_server_->counter("http2.outbound_flood")->value());
-}
-
-// Verify that the server can detect flood of response trailers.
-TEST_P(Http2FloodMitigationTest, Trailers) {
-  // Set large buffer limits so the test is not affected by the flow control.
-  config_helper_.setBufferLimits(1024 * 1024 * 1024, 1024 * 1024 * 1024);
-  autonomous_upstream_ = true;
-  autonomous_allow_incomplete_streams_ = true;
-  beginSession();
-
-  // Do not read from the socket and send a request that causes the autonomous upstream
-  // to respond with 999 DATA frames and trailers. The Http2FloodMitigationTest::beginSession()
-  // sets a 1000 frame flood limit for all frame types. Including the 1 HEADERS response frame,
-  // 999 DATA frames and trailers should trigger flood protection.
-  // Simulate TCP push back on the Envoy's downstream network socket, so that outbound frames start
-  // to accumulate in the transport socket buffer.
-  writev_matcher_->setWritevReturnsEgain();
-
-  static_cast<AutonomousUpstream*>(fake_upstreams_.front().get())
-      ->setResponseTrailers(std::make_unique<Http::TestResponseTrailerMapImpl>(
-          Http::TestResponseTrailerMapImpl({{"foo", "bar"}})));
-
-  const auto request =
-      Http2Frame::makeRequest(Http2Frame::makeClientStreamId(0), "host", "/test/long/url",
-                              {Http2Frame::Header("response_data_blocks", "999")});
-  sendFrame(request);
-
-  // Wait for connection to be flooded with outbound trailers and disconnected.
-  tcp_client_->waitForDisconnect();
-
-  // If the server codec had incorrectly thrown an exception on flood detection it would cause
-  // the entire upstream to be disconnected. Verify it is still active, and there are no destroyed
-  // connections.
-  ASSERT_EQ(1, test_server_->gauge("cluster.cluster_0.upstream_cx_active")->value());
-  ASSERT_EQ(0, test_server_->counter("cluster.cluster_0.upstream_cx_destroy")->value());
-  // Verify that the flood check was triggered
-  EXPECT_EQ(1, test_server_->counter("http2.outbound_flood")->value());
-}
-
-// Verify flood detection by the WINDOW_UPDATE frame when a decoder filter is resuming reading from
-// the downstream via DecoderFilterBelowWriteBufferLowWatermark.
-TEST_P(Http2FloodMitigationTest, WindowUpdateOnLowWatermarkFlood) {
-  config_helper_.addFilter(R"EOF(
-  name: backpressure-filter
-  )EOF");
-  config_helper_.setBufferLimits(1024 * 1024 * 1024, 1024 * 1024 * 1024);
-  // Set low window sizes in the server codec as nghttp2 sends WINDOW_UPDATE only after it consumes
-  // more than 25% of the window.
-  config_helper_.addConfigModifier(
-      [&](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&
-              hcm) -> void {
-        auto* h2_options = hcm.mutable_http2_protocol_options();
-        h2_options->mutable_initial_stream_window_size()->set_value(70000);
-        h2_options->mutable_initial_connection_window_size()->set_value(70000);
-      });
-  autonomous_upstream_ = true;
-  autonomous_allow_incomplete_streams_ = true;
-  beginSession();
-
-  writev_matcher_->setWritevReturnsEgain();
-
-  // pre-fill two away from overflow
-  const auto request = Http2Frame::makePostRequest(
-      Http2Frame::makeClientStreamId(0), "host", "/test/long/url",
-      {Http2Frame::Header("response_data_blocks", "998"), Http2Frame::Header("no_trailers", "0")});
-  sendFrame(request);
-
-  // The backpressure-filter disables reading when it sees request headers, and it should prevent
-  // WINDOW_UPDATE frames from being sent for the following DATA frames. Send enough DATA to consume
-  // more than 25% of the 70K window so that nghttp2 will send WINDOW_UPDATE on read resumption.
-// TODO(yanavlasov): add tests for WINDOW_UPDATE overflow from the router filter. These tests need
-// currently missing support for write resumption from test sockets that were forced to return
-// EAGAIN by the test.
-
-// Verify that the server can detect a flood of RST_STREAM frames.
-TEST_P(Http2FloodMitigationTest, RST_STREAM) {
-  // Use invalid HTTP headers to trigger sending RST_STREAM frames.
-  config_helper_.addConfigModifier(
-      [](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&
-             hcm) -> void {
-        hcm.mutable_http2_protocol_options()
-            ->mutable_override_stream_error_on_invalid_http_message()
-            ->set_value(true);
-      });
-  beginSession();
-
-  uint32_t stream_index = 0;
-  auto request =
-      Http::Http2::Http2Frame::makeMalformedRequest(Http2Frame::makeClientStreamId(stream_index));
-  sendFrame(request);
-  auto response = readFrame();
-  // Make sure we've got RST_STREAM from the server
-  EXPECT_EQ(Http2Frame::Type::RstStream, response.type());
-
-  // Simulate TCP push back on Envoy's downstream network socket, so that outbound frames start
-  // to accumulate in the transport socket buffer.
-  writev_matcher_->setWritevReturnsEgain();
-
-  for (++stream_index; stream_index < ControlFrameFloodLimit + 2; ++stream_index) {
-    request =
-        Http::Http2::Http2Frame::makeMalformedRequest(Http2Frame::makeClientStreamId(stream_index));
-    sendFrame(request);
-  }
-  tcp_client_->waitForDisconnect();
-  EXPECT_EQ(1, test_server_->counter("http2.outbound_control_flood")->value());
-  EXPECT_EQ(1,
-            test_server_->counter("http.config_test.downstream_cx_delayed_close_timeout")->value());
-}
-
-// Verify detection of flood by the RST_STREAM frame sent on pending flush timeout
-TEST_P(Http2FloodMitigationTest, RstStreamOverflowOnPendingFlushTimeout) {
-  config_helper_.addConfigModifier(
-      [](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&
-             hcm) {
-        hcm.mutable_stream_idle_timeout()->set_seconds(0);
-        constexpr uint64_t IdleTimeoutMs = 400;
-        hcm.mutable_stream_idle_timeout()->set_nanos(IdleTimeoutMs * 1000 * 1000);
-      });
-
-  // The pending flush timer is started when the upstream response has completed but there is no
-  // window to send DATA downstream. The test downstream client does not update the WINDOW and as
-  // such Envoy will use the default 65535 bytes. First, pre-fill the outbound queue with 65 byte
-  // frames, which should consume 65 * 997 = 64805 bytes of the downstream connection window.
-  prefillOutboundDownstreamQueue(AllFrameFloodLimit - 3, 65);
-
-  // At this point the outbound downstream frame queue should be 3 away from overflowing, with a
-  // 730 byte window left. Make the response a single DATA frame with a 1024 byte payload. This
-  // should overflow the available downstream window and start the pending flush timer. Envoy
-  // proxies 2 frames downstream, HEADERS and a partial DATA frame, which makes the frame queue
-  // 1 away from overflow.
-  const auto request2 = Http2Frame::makeRequest(
-      Http2Frame::makeClientStreamId(1), "host", "/test/long/url",
-      {Http2Frame::Header("response_data_blocks", "1"),
-       Http2Frame::Header("response_size_bytes", "1024"), Http2Frame::Header("no_trailers", "0")});
-  sendFrame(request2);
-
-  // The pending flush timer sends a RST_STREAM frame which should overflow the outbound frame
-  // queue and disconnect the connection.
-  tcp_client_->waitForDisconnect();
-
-  // Verify that the flood check was triggered
-  EXPECT_EQ(1, test_server_->counter("http2.outbound_flood")->value());
-  // Verify that pending flush timeout was hit
-  EXPECT_EQ(1, test_server_->counter("http2.tx_flush_timeout")->value());
-}
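The bookkeeping this test depends on is worth spelling out, since three independent limits interact: the 1000-frame flood limit, the default 65535-byte connection window, and the 65-byte prefill frames. A sketch of the arithmetic (illustrative Python):

```python
# Accounting behind RstStreamOverflowOnPendingFlushTimeout (illustrative only).
flood_limit = 1000              # AllFrameFloodLimit from beginSession()
connection_window = 65535       # default; the test client never sends WINDOW_UPDATE

prefilled = flood_limit - 3     # 997 DATA frames of 65 bytes each
assert prefilled * 65 == 64805  # window consumed by the prefill
window_left = connection_window - prefilled * 65
assert window_left == 730       # too small for the 1024-byte response DATA frame

# Envoy proxies HEADERS plus a partial (730-byte) DATA frame downstream:
queued = prefilled + 2
assert flood_limit - queued == 1  # one slot left; the flush-timeout RST_STREAM overflows it
```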
-// Verify detection of frame flood when sending second GOAWAY frame on drain timeout
-TEST_P(Http2FloodMitigationTest, GoAwayOverflowOnDrainTimeout) {
-  config_helper_.addConfigModifier(
-      [](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&
-             hcm) {
-        auto* drain_time_out = hcm.mutable_drain_timeout();
-        std::chrono::milliseconds timeout(1000);
-        auto seconds = std::chrono::duration_cast<std::chrono::seconds>(timeout);
-        drain_time_out->set_seconds(seconds.count());
-
-        auto* http_protocol_options = hcm.mutable_common_http_protocol_options();
-        auto* idle_time_out = http_protocol_options->mutable_idle_timeout();
-        idle_time_out->set_seconds(seconds.count());
-      });
-  // pre-fill two away from overflow
-  prefillOutboundDownstreamQueue(AllFrameFloodLimit - 2);
-
-  // The connection idle timeout will send the first GOAWAY frame and start the drain timer. The
-  // drain timeout will then send the second GOAWAY frame, which should trigger flood protection.
-  // Wait for the connection to be flooded with the outbound GOAWAY frame and disconnected.
-  tcp_client_->waitForDisconnect();
-
-  // Verify that the flood check was triggered
-  EXPECT_EQ(1, test_server_->counter("http2.outbound_flood")->value());
-}
-
-// Verify detection of overflowing outbound frame queue with the GOAWAY frames sent after the
-// downstream idle connection timeout disconnects the connection.
-// The test verifies protocol constraint violation handling in the
-// Http2::ConnectionImpl::shutdownNotice() method.
-TEST_P(Http2FloodMitigationTest, DownstreamIdleTimeoutTriggersFloodProtection) {
-  config_helper_.addConfigModifier(
-      [](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&
-             hcm) {
-        auto* http_protocol_options = hcm.mutable_common_http_protocol_options();
-        auto* idle_time_out = http_protocol_options->mutable_idle_timeout();
-        std::chrono::milliseconds timeout(1000);
-        auto seconds = std::chrono::duration_cast<std::chrono::seconds>(timeout);
-        idle_time_out->set_seconds(seconds.count());
-      });
-
-  prefillOutboundDownstreamQueue(AllFrameFloodLimit - 1);
-  tcp_client_->waitForDisconnect();
-
-  EXPECT_EQ(1, test_server_->counter("http2.outbound_flood")->value());
-  EXPECT_EQ(1, test_server_->counter("http.config_test.downstream_cx_idle_timeout")->value());
-}
-
-// Verify detection of overflowing outbound frame queue with the GOAWAY frames sent after the
-// downstream connection duration timeout disconnects the connection.
-// The test verifies protocol constraint violation handling in the
-// Http2::ConnectionImpl::shutdownNotice() method.
-TEST_P(Http2FloodMitigationTest, DownstreamConnectionDurationTimeoutTriggersFloodProtection) {
-  config_helper_.addConfigModifier(
-      [](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&
-             hcm) {
-        auto* http_protocol_options = hcm.mutable_common_http_protocol_options();
-        auto* max_connection_duration = http_protocol_options->mutable_max_connection_duration();
-        std::chrono::milliseconds timeout(1000);
-        auto seconds = std::chrono::duration_cast<std::chrono::seconds>(timeout);
-        max_connection_duration->set_seconds(seconds.count());
-      });
-  prefillOutboundDownstreamQueue(AllFrameFloodLimit - 1);
-  tcp_client_->waitForDisconnect();
-
-  EXPECT_EQ(1, test_server_->counter("http2.outbound_flood")->value());
-  EXPECT_EQ(1,
-            test_server_->counter("http.config_test.downstream_cx_max_duration_reached")->value());
-}
-
-// Verify detection of frame flood when sending GOAWAY frame during processing of response headers
-// on a draining listener.
-TEST_P(Http2FloodMitigationTest, GoawayOverflowDuringResponseWhenDraining) {
-  // pre-fill one away from overflow
-  prefillOutboundDownstreamQueue(AllFrameFloodLimit - 1);
-
-  triggerListenerDrain();
-
-  // Send a second request which should trigger Envoy to send GOAWAY (since it is in the draining
-  // state) when processing response headers. Verify that the connection was disconnected and
-  // appropriate counters were set.
-  auto request2 =
-      Http2Frame::makeRequest(Http2Frame::makeClientStreamId(1), "host", "/test/long/url");
-  sendFrame(request2);
-
-  // Wait for connection to be flooded with outbound GOAWAY frame and disconnected.
-  tcp_client_->waitForDisconnect();
-
-  // Verify that the upstream connection is still alive.
-  ASSERT_EQ(1, test_server_->gauge("cluster.cluster_0.upstream_cx_active")->value());
-  ASSERT_EQ(0, test_server_->counter("cluster.cluster_0.upstream_cx_destroy")->value());
-  // Verify that the flood check was triggered
-  EXPECT_EQ(1, test_server_->counter("http2.outbound_flood")->value());
-  EXPECT_EQ(1, test_server_->counter("http.config_test.downstream_cx_drain_close")->value());
-}
-
-// Verify detection of frame flood when sending GOAWAY frame during call to sendLocalReply()
-// from a decoder filter on a draining listener.
-TEST_P(Http2FloodMitigationTest, GoawayOverflowFromDecoderFilterSendLocalReplyWhenDraining) {
-  config_helper_.addConfigModifier(
-      [&](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&
-              hcm) -> void {
-        const std::string yaml_string = R"EOF(
-name: send_local_reply_filter
-typed_config:
-  "@type": type.googleapis.com/test.integration.filters.SetResponseCodeFilterConfig
-  prefix: "/call_send_local_reply"
-  code: 404
-  )EOF";
-        TestUtility::loadFromYaml(yaml_string, *hcm.add_http_filters());
-        // keep the router last
-        auto size = hcm.http_filters_size();
-        hcm.mutable_http_filters()->SwapElements(size - 2, size - 1);
-      });
-
-  // pre-fill one away from overflow
-  prefillOutboundDownstreamQueue(AllFrameFloodLimit - 1);
-
-  triggerListenerDrain();
-
-  // At this point the outbound downstream frame queue should be 1 away from overflowing.
-  // Make the SetResponseCodeFilterConfig decoder filter call sendLocalReply without body, which
-  // should trigger Envoy to send GOAWAY (since it is in the draining state) when processing the
-  // sendLocalReply() headers. Verify that the connection was disconnected and appropriate
-  // counters were set.
-  auto request2 =
-      Http2Frame::makeRequest(Http2Frame::makeClientStreamId(1), "host", "/call_send_local_reply");
-  sendFrame(request2);
-
-  // Wait for connection to be flooded with outbound GOAWAY frame and disconnected.
-  tcp_client_->waitForDisconnect();
-
-  // Verify that the upstream connection is still alive.
-  ASSERT_EQ(1, test_server_->gauge("cluster.cluster_0.upstream_cx_active")->value());
-  ASSERT_EQ(0, test_server_->counter("cluster.cluster_0.upstream_cx_destroy")->value());
-  // Verify that the flood check was triggered
-  EXPECT_EQ(1, test_server_->counter("http2.outbound_flood")->value());
-  EXPECT_EQ(1, test_server_->counter("http.config_test.downstream_cx_drain_close")->value());
-}
-
-// Verify that the server can detect flooding by RST_STREAM frames when the upstream disconnects
-// before sending response headers.
-TEST_P(Http2FloodMitigationTest, RstStreamOnUpstreamRemoteCloseBeforeResponseHeaders) {
-  // pre-fill 3 away from overflow
-  prefillOutboundDownstreamQueue(AllFrameFloodLimit - 3);
-
-  // Start second request.
-  auto request2 =
-      Http2Frame::makePostRequest(Http2Frame::makeClientStreamId(1), "host", "/test/long/url");
-  sendFrame(request2);
-
-  // Wait for it to be proxied
-  test_server_->waitForCounterGe("cluster.cluster_0.upstream_rq_total", 2);
-
-  // Disconnect the upstream connection. Since no response headers have been sent yet, the router
-  // filter will send a 503 with a body and then a RST_STREAM. With these 3 frames the downstream
-  // outbound frame queue should overflow.
-  ASSERT_TRUE(static_cast<AutonomousUpstream*>(fake_upstreams_.front().get())->closeConnection(0));
-
-  // Wait for connection to be flooded with outbound RST_STREAM frame and disconnected.
-  tcp_client_->waitForDisconnect();
-
-  ASSERT_EQ(1, test_server_->counter("cluster.cluster_0.upstream_cx_destroy")->value());
-  // Verify that the flood check was triggered
-  EXPECT_EQ(1, test_server_->counter("http2.outbound_flood")->value());
-}
-
-// Verify that the server can detect flooding by RST_STREAM frames sent on stream idle timeout
-// after sending response headers.
-TEST_P(Http2FloodMitigationTest, RstStreamOnStreamIdleTimeoutAfterResponseHeaders) {
-  config_helper_.addConfigModifier(
-      [](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&
-             hcm) {
-        auto* stream_idle_timeout = hcm.mutable_stream_idle_timeout();
-        std::chrono::milliseconds timeout(1000);
-        auto seconds = std::chrono::duration_cast<std::chrono::seconds>(timeout);
-        stream_idle_timeout->set_seconds(seconds.count());
-      });
-  // pre-fill 2 away from overflow
-  prefillOutboundDownstreamQueue(AllFrameFloodLimit - 2);
-
-  // Start the second request, which should result in response headers being sent but the stream
-  // kept open.
-  auto request2 = Http2Frame::makeRequest(
-      Http2Frame::makeClientStreamId(1), "host", "/test/long/url",
-      {Http2Frame::Header("response_data_blocks", "0"), Http2Frame::Header("no_end_stream", "0")});
-  sendFrame(request2);
-
-  // Wait for the stream idle timeout to send RST_STREAM. With the response headers frame from the
-  // second response the downstream outbound frame queue should overflow.
-  tcp_client_->waitForDisconnect();
-
-  EXPECT_EQ(1, test_server_->counter("http2.outbound_flood")->value());
-  EXPECT_EQ(1, test_server_->counter("http.config_test.downstream_rq_idle_timeout")->value());
-}
-
-// Verify detection of overflowing outbound frame queue with the PING frames sent by the keep alive
-// timer. The test verifies protocol constraint violation handling in the
-// Http2::ConnectionImpl::sendKeepalive() method.
-TEST_P(Http2FloodMitigationTest, KeepAliveTimerTriggersFloodProtection) {
-  config_helper_.addConfigModifier(
-      [](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&
-             hcm) {
-        auto* keep_alive = hcm.mutable_http2_protocol_options()->mutable_connection_keepalive();
-        keep_alive->mutable_interval()->set_nanos(500 * 1000 * 1000);
-        keep_alive->mutable_timeout()->set_seconds(1);
-      });
-
-  prefillOutboundDownstreamQueue(AllFrameFloodLimit - 1);
-  tcp_client_->waitForDisconnect();
-
-  EXPECT_EQ(1, test_server_->counter("http2.outbound_flood")->value());
-}
-
-// Verify that the server stops reading the downstream connection on protocol error.
-TEST_P(Http2FloodMitigationTest, TooManyStreams) {
-  config_helper_.addConfigModifier(
-      [](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&
-             hcm) -> void {
-        hcm.mutable_http2_protocol_options()->mutable_max_concurrent_streams()->set_value(2);
-      });
-  autonomous_upstream_ = true;
-  beginSession();
-  // To prevent Envoy from closing client streams the upstream connection needs to push back on
-  // writing by the upstream server. In this case Envoy will not see upstream responses and will
-  // keep client streams open, eventually maxing them out and causing the client connection to be
-  // closed.
-  writev_matcher_->setSourcePort(fake_upstreams_[0]->localAddress()->ip()->port());
-
-  // Exceed the number of streams allowed by the server. The server should stop reading from the
-  // client.
- floodServer("host", "/test/long/url", Http2Frame::ResponseStatus::Ok, "", 3); -} - -TEST_P(Http2FloodMitigationTest, EmptyHeaders) { - config_helper_.addConfigModifier( - [&](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& - hcm) -> void { - hcm.mutable_http2_protocol_options() - ->mutable_max_consecutive_inbound_frames_with_empty_payload() - ->set_value(0); - }); - beginSession(); - - const auto request = Http2Frame::makeEmptyHeadersFrame(Http2Frame::makeClientStreamId(0)); - sendFrame(request); - - tcp_client_->waitForDisconnect(); - - EXPECT_EQ(1, test_server_->counter("http2.inbound_empty_frames_flood")->value()); - EXPECT_EQ(1, - test_server_->counter("http.config_test.downstream_cx_delayed_close_timeout")->value()); -} - -TEST_P(Http2FloodMitigationTest, EmptyHeadersContinuation) { - useAccessLog("%RESPONSE_FLAGS% %RESPONSE_CODE_DETAILS%"); - beginSession(); - - const uint32_t request_stream_id = Http2Frame::makeClientStreamId(0); - auto request = Http2Frame::makeEmptyHeadersFrame(request_stream_id); - sendFrame(request); - - for (int i = 0; i < 2; i++) { - request = Http2Frame::makeEmptyContinuationFrame(request_stream_id); - sendFrame(request); - } - - tcp_client_->waitForDisconnect(); - - EXPECT_THAT(waitForAccessLog(access_log_name_), HasSubstr("http2.inbound_empty_frames_flood")); - EXPECT_EQ(1, test_server_->counter("http2.inbound_empty_frames_flood")->value()); - EXPECT_EQ(1, - test_server_->counter("http.config_test.downstream_cx_delayed_close_timeout")->value()); -} - -TEST_P(Http2FloodMitigationTest, EmptyData) { - useAccessLog("%RESPONSE_FLAGS% %RESPONSE_CODE_DETAILS%"); - beginSession(); - - const uint32_t request_stream_id = Http2Frame::makeClientStreamId(0); - auto request = Http2Frame::makePostRequest(request_stream_id, "host", "/"); - sendFrame(request); - - for (int i = 0; i < 2; i++) { - request = Http2Frame::makeEmptyDataFrame(request_stream_id); - sendFrame(request); - } - - tcp_client_->waitForDisconnect(); - - EXPECT_THAT(waitForAccessLog(access_log_name_), HasSubstr("http2.inbound_empty_frames_flood")); - EXPECT_EQ(1, test_server_->counter("http2.inbound_empty_frames_flood")->value()); - EXPECT_EQ(1, - test_server_->counter("http.config_test.downstream_cx_delayed_close_timeout")->value()); -} - -TEST_P(Http2FloodMitigationTest, PriorityIdleStream) { - beginSession(); - - floodServer(Http2Frame::makePriorityFrame(Http2Frame::makeClientStreamId(0), - Http2Frame::makeClientStreamId(1)), - "http2.inbound_priority_frames_flood", - Http2::Utility::OptionsLimits::DEFAULT_MAX_INBOUND_PRIORITY_FRAMES_PER_STREAM + 1); -} - -TEST_P(Http2FloodMitigationTest, PriorityOpenStream) { - beginSession(); - - // Open stream. - const uint32_t request_stream_id = Http2Frame::makeClientStreamId(0); - const auto request = Http2Frame::makeRequest(request_stream_id, "host", "/"); - sendFrame(request); - - floodServer(Http2Frame::makePriorityFrame(request_stream_id, Http2Frame::makeClientStreamId(1)), - "http2.inbound_priority_frames_flood", - Http2::Utility::OptionsLimits::DEFAULT_MAX_INBOUND_PRIORITY_FRAMES_PER_STREAM * 2 + - 1); -} - -TEST_P(Http2FloodMitigationTest, PriorityClosedStream) { - autonomous_upstream_ = true; - beginSession(); - - // Open stream. - const uint32_t request_stream_id = Http2Frame::makeClientStreamId(0); - const auto request = Http2Frame::makeRequest(request_stream_id, "host", "/"); - sendFrame(request); - // Reading response marks this stream as closed in nghttp2. 
-TEST_P(Http2FloodMitigationTest, PriorityClosedStream) {
-  autonomous_upstream_ = true;
-  beginSession();
-
-  // Open stream.
-  const uint32_t request_stream_id = Http2Frame::makeClientStreamId(0);
-  const auto request = Http2Frame::makeRequest(request_stream_id, "host", "/");
-  sendFrame(request);
-  // Reading the response marks this stream as closed in nghttp2.
-  auto frame = readFrame();
-  EXPECT_EQ(Http2Frame::Type::Headers, frame.type());
-
-  floodServer(Http2Frame::makePriorityFrame(request_stream_id, Http2Frame::makeClientStreamId(1)),
-              "http2.inbound_priority_frames_flood",
-              Http2::Utility::OptionsLimits::DEFAULT_MAX_INBOUND_PRIORITY_FRAMES_PER_STREAM * 2 +
-                  1);
-}
-
-TEST_P(Http2FloodMitigationTest, WindowUpdate) {
-  beginSession();
-
-  // Open stream.
-  const uint32_t request_stream_id = Http2Frame::makeClientStreamId(0);
-  const auto request = Http2Frame::makeRequest(request_stream_id, "host", "/");
-  sendFrame(request);
-
-  // Since we do not send any DATA frames, only 4 sequential WINDOW_UPDATE frames should
-  // trigger flood protection.
-  floodServer(Http2Frame::makeWindowUpdateFrame(request_stream_id, 1),
-              "http2.inbound_window_update_frames_flood", 4);
-}
-
-// Verify that the HTTP/2 connection is terminated upon receiving an invalid HEADERS frame.
-TEST_P(Http2FloodMitigationTest, ZerolenHeader) {
-  useAccessLog("%RESPONSE_FLAGS% %RESPONSE_CODE_DETAILS%");
-  beginSession();
-
-  // Send invalid request.
-  const auto request = Http2Frame::makeMalformedRequestWithZerolenHeader(
-      Http2Frame::makeClientStreamId(0), "host", "/");
-  sendFrame(request);
-
-  tcp_client_->waitForDisconnect();
-
-  EXPECT_EQ(1, test_server_->counter("http2.rx_messaging_error")->value());
-  EXPECT_EQ(1,
-            test_server_->counter("http.config_test.downstream_cx_delayed_close_timeout")->value());
-  EXPECT_THAT(waitForAccessLog(access_log_name_), HasSubstr("http2.invalid.header.field"));
-  // expect a downstream protocol error.
-  EXPECT_THAT(waitForAccessLog(access_log_name_), HasSubstr("DPE"));
-}
-
-// Verify that only the offending stream is terminated upon receiving an invalid HEADERS frame.
-TEST_P(Http2FloodMitigationTest, ZerolenHeaderAllowed) {
-  useAccessLog("%RESPONSE_FLAGS% %RESPONSE_CODE_DETAILS%");
-  config_helper_.addConfigModifier(
-      [](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&
-             hcm) -> void {
-        hcm.mutable_http2_protocol_options()
-            ->mutable_override_stream_error_on_invalid_http_message()
-            ->set_value(true);
-      });
-  autonomous_upstream_ = true;
-  beginSession();
-
-  // Send invalid request.
-  uint32_t request_idx = 0;
-  auto request = Http2Frame::makeMalformedRequestWithZerolenHeader(
-      Http2Frame::makeClientStreamId(request_idx), "host", "/");
-  sendFrame(request);
-  // Make sure we've got RST_STREAM from the server.
-  auto response = readFrame();
-  EXPECT_EQ(Http2Frame::Type::RstStream, response.type());
-
-  // Send valid request using the same connection.
-  request_idx++;
-  request = Http2Frame::makeRequest(Http2Frame::makeClientStreamId(request_idx), "host", "/");
-  sendFrame(request);
-  response = readFrame();
-  EXPECT_EQ(Http2Frame::Type::Headers, response.type());
-  EXPECT_EQ(Http2Frame::ResponseStatus::Ok, response.responseStatus());
-
-  tcp_client_->close();
-
-  EXPECT_EQ(1, test_server_->counter("http2.rx_messaging_error")->value());
-  EXPECT_EQ(0,
-            test_server_->counter("http.config_test.downstream_cx_delayed_close_timeout")->value());
-  EXPECT_THAT(waitForAccessLog(access_log_name_), HasSubstr("http2.invalid.header.field"));
-  // expect Downstream Protocol Error
-  EXPECT_THAT(waitForAccessLog(access_log_name_), HasSubstr("DPE"));
-}
-
 } // namespace Envoy
diff --git a/test/integration/http2_integration_test.h b/test/integration/http2_integration_test.h
index c66b3472cb8b..45dcc445d5f4 100644
--- a/test/integration/http2_integration_test.h
+++ b/test/integration/http2_integration_test.h
@@ -5,15 +5,11 @@
 #include "envoy/config/bootstrap/v3/bootstrap.pb.h"
 #include "envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.h"
 
-#include "test/common/http/http2/http2_frame.h"
-#include "test/integration/filters/test_socket_interface.h"
 #include "test/integration/http_integration.h"
 
 #include "absl/synchronization/mutex.h"
 #include "gtest/gtest.h"
 
-using Envoy::Http::Http2::Http2Frame;
-
 namespace Envoy {
 
 class Http2IntegrationTest : public testing::TestWithParam<Network::Address::IpVersion>,
                              public HttpIntegrationTest {
@@ -71,74 +67,4 @@ class Http2MetadataIntegrationTest : public Http2IntegrationTest {
   void runHeaderOnlyTest(bool send_request_body, size_t body_size);
 };
 
-class Http2FrameIntegrationTest : public testing::TestWithParam<Network::Address::IpVersion>,
-                                  public HttpIntegrationTest {
-public:
-  Http2FrameIntegrationTest() : HttpIntegrationTest(Http::CodecClient::Type::HTTP2, GetParam()) {}
-
-protected:
-  void startHttp2Session();
-  Http2Frame readFrame();
-  void sendFrame(const Http2Frame& frame);
-  virtual void beginSession();
-
-  IntegrationTcpClientPtr tcp_client_;
-};
-
-class SocketInterfaceSwap {
-public:
-  // Objects of this class hold the state determining the IoHandle which
-  // should return EAGAIN from the `writev` call.
-  struct IoHandleMatcher {
-    bool shouldReturnEgain(uint32_t port) const {
-      absl::ReaderMutexLock lock(&mutex_);
-      return port == port_ && writev_returns_egain_;
-    }
-
-    void setSourcePort(uint32_t port) {
-      absl::WriterMutexLock lock(&mutex_);
-      port_ = port;
-    }
-
-    void setWritevReturnsEgain() {
-      absl::WriterMutexLock lock(&mutex_);
-      writev_returns_egain_ = true;
-    }
-
-  private:
-    mutable absl::Mutex mutex_;
-    uint32_t port_ ABSL_GUARDED_BY(mutex_) = 0;
-    bool writev_returns_egain_ ABSL_GUARDED_BY(mutex_) = false;
-  };
-
-  SocketInterfaceSwap();
-  ~SocketInterfaceSwap();
-
-protected:
-  Envoy::Network::SocketInterface* const previous_socket_interface_{
-      Envoy::Network::SocketInterfaceSingleton::getExisting()};
-  std::shared_ptr<IoHandleMatcher> writev_matcher_{std::make_shared<IoHandleMatcher>()};
-  std::unique_ptr<Envoy::Network::SocketInterfaceLoader> test_socket_interface_loader_;
-};
-
-// It is important that the new socket interface is installed before any I/O activity starts and
-// the previous one is restored after all I/O activity stops. Since the HttpIntegrationTest
-// destructor stops Envoy the SocketInterfaceSwap destructor needs to run after it. This order of
-// multiple inheritance ensures that the SocketInterfaceSwap destructor runs after the
-// Http2FrameIntegrationTest destructor completes.
-class Http2FloodMitigationTest : public SocketInterfaceSwap, public Http2FrameIntegrationTest {
-public:
-  Http2FloodMitigationTest();
-
-protected:
-  void floodServer(const Http2Frame& frame, const std::string& flood_stat, uint32_t num_frames);
-  void floodServer(absl::string_view host, absl::string_view path,
-                   Http2Frame::ResponseStatus expected_http_status, const std::string& flood_stat,
-                   uint32_t num_frames);
-
-  void setNetworkConnectionBufferSize();
-  void beginSession() override;
-  void prefillOutboundDownstreamQueue(uint32_t data_frame_count, uint32_t data_frame_size = 10);
-  void triggerListenerDrain();
-};
 } // namespace Envoy
diff --git a/test/integration/http_integration.cc b/test/integration/http_integration.cc
index 8b28b8a22bce..eb2a70e3f121 100644
--- a/test/integration/http_integration.cc
+++ b/test/integration/http_integration.cc
@@ -20,6 +20,7 @@
 #include "common/common/fmt.h"
 #include "common/common/thread_annotations.h"
 #include "common/http/headers.h"
+#include "common/network/socket_option_impl.h"
 #include "common/network/utility.h"
 #include "common/protobuf/utility.h"
 #include "common/runtime/runtime_impl.h"
@@ -1388,4 +1389,58 @@ std::string HttpIntegrationTest::listenerStatPrefix(const std::string& stat_name
   }
   return "listener.[__1]_0." + stat_name;
 }
+
+void Http2RawFrameIntegrationTest::startHttp2Session() {
+  ASSERT_TRUE(tcp_client_->write(Http2Frame::Preamble, false, false));
+
+  // Send an empty initial SETTINGS frame.
+  auto settings = Http2Frame::makeEmptySettingsFrame();
+  ASSERT_TRUE(tcp_client_->write(std::string(settings), false, false));
+
+  // Read the initial SETTINGS frame from the server.
+  readFrame();
+
+  // Send a SETTINGS ACK.
+  settings = Http2Frame::makeEmptySettingsFrame(Http2Frame::SettingsFlags::Ack);
+  ASSERT_TRUE(tcp_client_->write(std::string(settings), false, false));
+
+  // Read the pending SETTINGS and WINDOW_UPDATE frames.
+  readFrame();
+  readFrame();
+}
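`startHttp2Session()` performs the HTTP/2 client bootstrap by hand: the fixed connection preface, an empty SETTINGS frame, then a SETTINGS ACK. Both the preface and the 9-octet frame header that `readFrame()` relies on are fixed by RFC 7540; a standalone Python sketch of the same framing, independent of the `Http2Frame` helper:

```python
import struct

# RFC 7540, section 3.5: the fixed client connection preface.
PREFACE = b"PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n"

SETTINGS_TYPE = 0x4
ACK_FLAG = 0x1


def make_frame_header(length: int, frame_type: int, flags: int, stream_id: int) -> bytes:
    """RFC 7540, section 4.1: 24-bit length, 8-bit type, 8-bit flags, 31-bit stream id."""
    return struct.pack(">I", length)[1:] + struct.pack(">BBI", frame_type, flags, stream_id)


empty_settings = make_frame_header(0, SETTINGS_TYPE, 0, 0)       # what makeEmptySettingsFrame() yields
settings_ack = make_frame_header(0, SETTINGS_TYPE, ACK_FLAG, 0)  # SETTINGS frame with the ACK flag

# readFrame() inverts this: read 9 header bytes, then `length` payload bytes.
length = int.from_bytes(empty_settings[0:3], "big")
assert length == 0 and empty_settings[3] == SETTINGS_TYPE
```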
+
+void Http2RawFrameIntegrationTest::beginSession() {
+  setDownstreamProtocol(Http::CodecClient::Type::HTTP2);
+  setUpstreamProtocol(FakeHttpConnection::Type::HTTP2);
+  // Set lower outbound frame limits to make tests run faster.
+  config_helper_.setOutboundFramesLimits(1000, 100);
+  initialize();
+  // Set up a raw connection to easily send requests without reading responses.
+  auto options = std::make_shared<Network::Socket::Options>();
+  options->emplace_back(std::make_shared<Network::SocketOptionImpl>(
+      envoy::config::core::v3::SocketOption::STATE_PREBIND,
+      ENVOY_MAKE_SOCKET_OPTION_NAME(SOL_SOCKET, SO_RCVBUF), 1024));
+  tcp_client_ = makeTcpConnection(lookupPort("http"), options);
+  startHttp2Session();
+}
+
+Http2Frame Http2RawFrameIntegrationTest::readFrame() {
+  Http2Frame frame;
+  EXPECT_TRUE(tcp_client_->waitForData(frame.HeaderSize));
+  frame.setHeader(tcp_client_->data());
+  tcp_client_->clearData(frame.HeaderSize);
+  auto len = frame.payloadSize();
+  if (len) {
+    EXPECT_TRUE(tcp_client_->waitForData(len));
+    frame.setPayload(tcp_client_->data());
+    tcp_client_->clearData(len);
+  }
+  return frame;
+}
+
+void Http2RawFrameIntegrationTest::sendFrame(const Http2Frame& frame) {
+  ASSERT_TRUE(tcp_client_->connected());
+  ASSERT_TRUE(tcp_client_->write(std::string(frame), false, false));
+}
+
 } // namespace Envoy
diff --git a/test/integration/http_integration.h b/test/integration/http_integration.h
index 0b5b034f48d9..ae7652d59107 100644
--- a/test/integration/http_integration.h
+++ b/test/integration/http_integration.h
@@ -7,12 +7,15 @@
 
 #include "common/http/codec_client.h"
 #include "common/network/filter_impl.h"
+#include "test/common/http/http2/http2_frame.h"
 #include "test/integration/integration.h"
 #include "test/integration/utility.h"
 #include "test/test_common/printers.h"
 
 namespace Envoy {
 
+using ::Envoy::Http::Http2::Http2Frame;
+
 /**
  * HTTP codec client used during integration testing.
  */
@@ -253,4 +256,20 @@ class HttpIntegrationTest : public BaseIntegrationTest {
   std::string access_log_name_;
   testing::NiceMock<Random::MockRandomGenerator> random_;
 };
+
+// Helper class for integration tests using raw HTTP/2 frames.
+class Http2RawFrameIntegrationTest : public HttpIntegrationTest {
+public:
+  Http2RawFrameIntegrationTest(Network::Address::IpVersion version)
+      : HttpIntegrationTest(Http::CodecClient::Type::HTTP2, version) {}
+
+protected:
+  void startHttp2Session();
+  Http2Frame readFrame();
+  void sendFrame(const Http2Frame& frame);
+  virtual void beginSession();
+
+  IntegrationTcpClientPtr tcp_client_;
+};
+
 } // namespace Envoy
diff --git a/test/mocks/network/transport_socket.h b/test/mocks/network/transport_socket.h
index af8949aea99b..ee53570c20ac 100644
--- a/test/mocks/network/transport_socket.h
+++ b/test/mocks/network/transport_socket.h
@@ -38,7 +38,6 @@ class MockTransportSocketFactory : public TransportSocketFactory {
   MOCK_METHOD(bool, implementsSecureTransport, (), (const));
   MOCK_METHOD(TransportSocketPtr, createTransportSocket, (TransportSocketOptionsSharedPtr),
               (const));
-  MOCK_METHOD(void, addReadyCb, (std::function<void()>));
 };
 
 } // namespace Network
diff --git a/test/mocks/server/watch_dog.h b/test/mocks/server/watch_dog.h
index 105761781c36..a658dd33ac94 100644
--- a/test/mocks/server/watch_dog.h
+++ b/test/mocks/server/watch_dog.h
@@ -15,7 +15,6 @@ class MockWatchDog : public WatchDog {
   MOCK_METHOD(void, startWatchdog, (Event::Dispatcher & dispatcher));
   MOCK_METHOD(void, touch, ());
   MOCK_METHOD(Thread::ThreadId, threadId, (), (const));
-  MOCK_METHOD(MonotonicTime, lastTouchTime, (), (const));
 };
 } // namespace Server
 } // namespace Envoy
diff --git a/test/mocks/ssl/mocks.cc b/test/mocks/ssl/mocks.cc
index 14ee85239d87..50ed3f3ae6c0 100644
--- a/test/mocks/ssl/mocks.cc
+++ b/test/mocks/ssl/mocks.cc
@@ -15,9 +15,6 @@ MockClientContext::~MockClientContext() = default;
 MockClientContextConfig::MockClientContextConfig() = default;
 MockClientContextConfig::~MockClientContextConfig() = default;
-MockServerContext::MockServerContext() = default;
-MockServerContext::~MockServerContext() = default;
-
 MockServerContextConfig::MockServerContextConfig() = default;
 MockServerContextConfig::~MockServerContextConfig() = default;
 
diff --git a/test/mocks/ssl/mocks.h b/test/mocks/ssl/mocks.h
index beafd9de8720..6a5cbe8df649 100644
--- a/test/mocks/ssl/mocks.h
+++ b/test/mocks/ssl/mocks.h
@@ -97,17 +97,6 @@ class MockClientContextConfig : public ClientContextConfig {
   MOCK_METHOD(const std::string&, signingAlgorithmsForTest, (), (const));
 };
 
-class MockServerContext : public ServerContext {
-public:
-  MockServerContext();
-  ~MockServerContext() override;
-
-  MOCK_METHOD(size_t, daysUntilFirstCertExpires, (), (const));
-  MOCK_METHOD(absl::optional<uint64_t>, secondsUntilFirstOcspResponseExpires, (), (const));
-  MOCK_METHOD(CertificateDetailsPtr, getCaCertInformation, (), (const));
-  MOCK_METHOD(std::vector<CertificateDetailsPtr>, getCertChainInformation, (), (const));
-};
-
 class MockServerContextConfig : public ServerContextConfig {
 public:
   MockServerContextConfig();
diff --git a/test/mocks/upstream/host.h b/test/mocks/upstream/host.h
index b5d857a5c184..95183622dbb7 100644
--- a/test/mocks/upstream/host.h
+++ b/test/mocks/upstream/host.h
@@ -192,8 +192,6 @@ class MockHost : public Host {
   MOCK_METHOD(uint32_t, priority, (), (const));
   MOCK_METHOD(void, priority, (uint32_t));
   MOCK_METHOD(bool, warmed, (), (const));
-  MOCK_METHOD(void, addReadyCb, (std::function<void()>, const envoy::config::core::v3::Metadata*),
-              (const));
 
   testing::NiceMock<MockClusterInfo> cluster_;
   Network::TransportSocketFactoryPtr socket_factory_;
diff --git a/test/per_file_coverage.sh b/test/per_file_coverage.sh
index 853724035d17..2ba764721609 100755
--- a/test/per_file_coverage.sh
+++ b/test/per_file_coverage.sh
@@ -15,11 +15,13 @@ declare -a KNOWN_LOW_COVERAGE=(
 "source/common/json:90.6"
 "source/common/filesystem:96.1"
 "source/common/filesystem/posix:94.5"
+"source/common/thread:0.0" # Death tests don't report LCOV
 "source/common/thread_local:95.7"
 "source/common/crypto:0.0"
 "source/common/common:96.1"
 "source/common/common/posix:94.1"
 "source/common/signal:90.4"
+"source/common/watchdog:42.9" # Death tests don't report LCOV
 "source/exe:93.7"
 "source/extensions:96.3"
 "source/extensions/common:94.4"
@@ -67,7 +69,6 @@ declare -a KNOWN_LOW_COVERAGE=(
 "source/extensions/transport_sockets/tls/private_key:76.9"
 "source/extensions/watchdog:69.6" # Death tests within extensions
 "source/extensions/watchdog/profile_action:84.9"
-"source/extensions/watchdog/abort_action:42.9" # Death tests don't report LCOV
 "source/server:94.6"
 "source/server/config_validation:76.6"
 "source/server/admin:95.3"
diff --git a/test/server/configuration_impl_test.cc b/test/server/configuration_impl_test.cc
index fdb1a0ba2341..ea6da93626b9 100644
--- a/test/server/configuration_impl_test.cc
+++ b/test/server/configuration_impl_test.cc
@@ -754,8 +754,8 @@ TEST_F(ConfigurationImplTest, KillTimeoutWithoutSkew) {
   MainImpl config;
   config.initialize(bootstrap, server_, cluster_manager_factory_);
 
-  EXPECT_EQ(std::chrono::milliseconds(1000), config.workerWatchdogConfig().killTimeout());
-  EXPECT_EQ(std::chrono::milliseconds(1000), config.mainThreadWatchdogConfig().killTimeout());
+  EXPECT_EQ(config.workerWatchdogConfig().killTimeout(), std::chrono::milliseconds(1000));
+  EXPECT_EQ(config.mainThreadWatchdogConfig().killTimeout(), std::chrono::milliseconds(1000));
 }
 
 TEST_F(ConfigurationImplTest, CanSkewsKillTimeout) {
@@ -793,8 +793,8 @@ TEST_F(ConfigurationImplTest, DoesNotSkewIfKillTimeoutDisabled) {
   MainImpl config;
   config.initialize(bootstrap, server_, cluster_manager_factory_);
 
-  EXPECT_EQ(std::chrono::milliseconds(0), config.mainThreadWatchdogConfig().killTimeout());
-  EXPECT_EQ(std::chrono::milliseconds(0), config.workerWatchdogConfig().killTimeout());
+  EXPECT_EQ(config.mainThreadWatchdogConfig().killTimeout(), std::chrono::milliseconds(0));
+  EXPECT_EQ(config.workerWatchdogConfig().killTimeout(), std::chrono::milliseconds(0));
 }
 
 TEST_F(ConfigurationImplTest, ShouldErrorIfBothWatchdogsAndWatchdogSet) {
diff --git a/test/server/connection_handler_test.cc b/test/server/connection_handler_test.cc
index b286dc588dd2..dba36b1216dc 100644
--- a/test/server/connection_handler_test.cc
+++ b/test/server/connection_handler_test.cc
@@ -604,7 +604,12 @@ TEST_F(ConnectionHandlerTest, NormalRedirect) {
   EXPECT_EQ(1UL, TestUtility::findCounter(stats_store_, "test.downstream_cx_total")->value());
   EXPECT_EQ(1UL, TestUtility::findGauge(stats_store_, "test.downstream_cx_active")->value());
 
-  EXPECT_CALL(*access_log_, log(_, _, _, _)).Times(1);
+  EXPECT_CALL(*access_log_, log(_, _, _, _))
+      .WillOnce(
+          Invoke([&](const Http::RequestHeaderMap*, const Http::ResponseHeaderMap*,
+                     const Http::ResponseTrailerMap*, const StreamInfo::StreamInfo& stream_info) {
+            EXPECT_EQ(alt_address, stream_info.downstreamLocalAddress());
+          }));
   connection->close(Network::ConnectionCloseType::NoFlush);
   dispatcher_.clearDeferredDeleteList();
   EXPECT_EQ(0UL, TestUtility::findGauge(stats_store_, "downstream_cx_active")->value());
diff --git a/test/server/guarddog_impl_test.cc b/test/server/guarddog_impl_test.cc
index 1709c0972c89..4e45ffa83716 100644
--- a/test/server/guarddog_impl_test.cc
+++ b/test/server/guarddog_impl_test.cc
@@ -48,21 +48,23 @@ const int DISABLE_MEGAMISS = 1000000;
 class DebugTestInterlock : public GuardDogImpl::TestInterlockHook {
 public:
   // GuardDogImpl::TestInterlockHook
-  void signalFromImpl(MonotonicTime time) override {
-    impl_reached_ = time;
+  void signalFromImpl() override {
+    waiting_for_signal_ = false;
     impl_.notifyAll();
   }
 
-  void waitFromTest(Thread::MutexBasicLockable& mutex, MonotonicTime time) override
+  void waitFromTest(Thread::MutexBasicLockable& mutex) override
       ABSL_EXCLUSIVE_LOCKS_REQUIRED(mutex) {
-    while (impl_reached_ < time) {
+    ASSERT(!waiting_for_signal_);
+    waiting_for_signal_ = true;
+    while (waiting_for_signal_) {
       impl_.wait(mutex);
     }
   }
 
 private:
   Thread::CondVar impl_;
-  MonotonicTime impl_reached_;
+  bool waiting_for_signal_ = false;
 };
 
 // We want to make sure guard-dog is tested with both simulated time and real
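The rewritten `DebugTestInterlock` swaps a monotonic-time comparison for an explicit boolean handshake: the test arms `waiting_for_signal_` and blocks; the guard-dog loop clears it and broadcasts. That removes any dependence on how far simulated time has advanced. The same handshake in Python, purely as an illustration (the C++ version takes an external mutex, which is elided here):

```python
import threading


class Interlock:
    """Boolean handshake: the test arms the flag and waits; the loop clears it."""

    def __init__(self) -> None:
        self._cv = threading.Condition()
        self._waiting = False

    def wait_from_test(self) -> None:
        with self._cv:
            assert not self._waiting
            self._waiting = True
            while self._waiting:  # loop guards against spurious wakeups
                self._cv.wait()

    def signal_from_impl(self) -> None:
        with self._cv:
            self._waiting = False
            self._cv.notify_all()
```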
@@ -307,6 +309,7 @@ TEST_P(GuardDogMissTest, MissTest) {
   initGuardDog(stats_store_, config_miss_);
   auto unpet_dog =
       guard_dog_->createWatchDog(api_->threadFactory().currentThreadId(), "test_thread");
+  guard_dog_->forceCheckForTest();
   // We'd better start at 0:
   checkMiss(0, "MissTest check 1");
   // At 300ms we shouldn't have hit the timeout yet:
@@ -332,6 +335,7 @@ TEST_P(GuardDogMissTest, MegaMissTest) {
   initGuardDog(stats_store_, config_mega_);
   auto unpet_dog =
       guard_dog_->createWatchDog(api_->threadFactory().currentThreadId(), "test_thread");
+  guard_dog_->forceCheckForTest();
   // We'd better start at 0:
   checkMegaMiss(0, "MegaMissTest check 1");
   // This shouldn't be enough to increment the stat:
@@ -358,6 +362,7 @@ TEST_P(GuardDogMissTest, MissCountTest) {
   initGuardDog(stats_store_, config_miss_);
   auto sometimes_pet_dog =
       guard_dog_->createWatchDog(api_->threadFactory().currentThreadId(), "test_thread");
+  guard_dog_->forceCheckForTest();
   // These steps are executed once without ever touching the watchdog.
   // Then the last step is to touch the watchdog and repeat the steps.
   // This verifies that the behavior is reset back to baseline after a touch.
@@ -380,9 +385,11 @@ TEST_P(GuardDogMissTest, MissCountTest) {
     // When we finally touch the dog we should get one more increment once the
     // timeout value expires:
     sometimes_pet_dog->touch();
+    guard_dog_->forceCheckForTest();
   }
   time_system_->advanceTimeWait(std::chrono::milliseconds(1000));
   sometimes_pet_dog->touch();
+  guard_dog_->forceCheckForTest();
   // Make sure megamiss still works:
   checkMegaMiss(0UL, "MissCountTest check 5");
   time_system_->advanceTimeWait(std::chrono::milliseconds(1500));
@@ -656,6 +663,7 @@ TEST_P(GuardDogActionsTest, MissShouldSaturateOnMissEvent) {
 
   // Touch the watchdog, which should allow the event to trigger again.
   first_dog_->touch();
+  guard_dog_->forceCheckForTest();
   time_system_->advanceTimeWait(std::chrono::milliseconds(101));
   guard_dog_->forceCheckForTest();
 
@@ -718,6 +726,7 @@ TEST_P(GuardDogActionsTest, MegaMissShouldSaturateOnMegaMissEvent) {
 
   // Touch the watchdog, which should allow the event to trigger again.
   first_dog_->touch();
+  guard_dog_->forceCheckForTest();
   time_system_->advanceTimeWait(std::chrono::milliseconds(101));
   guard_dog_->forceCheckForTest();
 
@@ -733,6 +742,7 @@ TEST_P(GuardDogActionsTest, ShouldRespectEventPriority) {
     initGuardDog(fake_stats_, config);
     auto first_dog = guard_dog_->createWatchDog(Thread::ThreadId(10), "test_thread");
     auto second_dog = guard_dog_->createWatchDog(Thread::ThreadId(11), "test_thread");
+    guard_dog_->forceCheckForTest();
     time_system_->advanceTimeWait(std::chrono::milliseconds(101));
     guard_dog_->forceCheckForTest();
   };
@@ -747,6 +757,7 @@ TEST_P(GuardDogActionsTest, ShouldRespectEventPriority) {
     initGuardDog(fake_stats_, config);
     auto first_dog = guard_dog_->createWatchDog(Thread::ThreadId(10), "test_thread");
     auto second_dog = guard_dog_->createWatchDog(Thread::ThreadId(11), "test_thread");
+    guard_dog_->forceCheckForTest();
     time_system_->advanceTimeWait(std::chrono::milliseconds(101));
     guard_dog_->forceCheckForTest();
   };
diff --git a/test/server/test_data/server/watchdogs_bootstrap_with_deprecated_field.yaml b/test/server/test_data/server/watchdogs_bootstrap_with_deprecated_field.yaml
deleted file mode 100644
index f6e28cfcde91..000000000000
--- a/test/server/test_data/server/watchdogs_bootstrap_with_deprecated_field.yaml
+++ /dev/null
@@ -1,7 +0,0 @@
-watchdogs:
-  main_thread_watchdog:
-    miss_timeout: 1s
-  worker_watchdog:
-    miss_timeout: 0.5s
-watchdog:
-  miss_timeout: 1s
diff --git a/tools/dependency/release_dates.py b/tools/dependency/release_dates.py
new file mode 100644
index 000000000000..7780ff45f928
--- /dev/null
+++ b/tools/dependency/release_dates.py
@@ -0,0 +1,107 @@
+# CLI tool to query the GitHub API and compare with repository_locations.bzl. It:
+# - Computes the release date of dependencies and fails if there is a mismatch
+#   with the metadata release date.
+# - Looks up the latest release tag on GitHub and warns if this is later than the
+#   dependency version in the .bzl.
+#
+# Usage:
+#   tools/dependency/release_dates.sh <path to repository_locations.bzl>
+#
+# You will need to set a GitHub access token in the GITHUB_TOKEN environment
+# variable. You can generate personal access tokens under developer settings on
+# GitHub. You should restrict the scope of the token to "repo: public_repo".
+
+import os
+import sys
+
+import github
+
+import utils
+
+
+# Thrown on errors related to release date.
+class ReleaseDateError(Exception):
+  pass
+
+
+# Format a datetime object as UTC YYYY-MM-DD.
+def FormatUtcDate(date):
+  # We only handle naive datetime objects right now, which is what PyGithub
+  # appears to be handing us.
+  assert (date.tzinfo is None)
+  return date.date().isoformat()
+
+
+# Obtain latest release version and compare against metadata version, warn on
+# mismatch.
+def VerifyAndPrintLatestRelease(dep, repo, metadata_version, release_date):
+  try:
+    latest_release = repo.get_latest_release()
+    if latest_release.created_at > release_date and latest_release.tag_name != metadata_version:
+      print(f'*WARNING* {dep} has a newer release than {metadata_version}@<{release_date}>: '
+            f'{latest_release.tag_name}@<{latest_release.created_at}>')
+  except github.UnknownObjectException:
+    pass
+
+
+# Print GitHub release date, throw ReleaseDateError on mismatch with metadata release date.
+def VerifyAndPrintReleaseDate(dep, github_release_date, metadata_release_date):
+  iso_release_date = FormatUtcDate(github_release_date)
+  print(f'{dep} has a GitHub release date {iso_release_date}')
+  if iso_release_date != metadata_release_date:
+    raise ReleaseDateError(f'Mismatch with metadata release date of {metadata_release_date}')
+
+
+# Extract release date from GitHub API.
+def GetReleaseDate(repo, metadata_version, github_release):
+  if github_release.tagged:
+    tags = repo.get_tags()
+    for tag in tags:
+      if tag.name == github_release.version:
+        return tag.commit.commit.committer.date
+    return None
+  else:
+    assert (metadata_version == github_release.version)
+    commit = repo.get_commit(github_release.version)
+    return commit.commit.committer.date
+
+
+# Verify release dates in metadata against GitHub API.
+def VerifyAndPrintReleaseDates(repository_locations, github_instance):
+  for dep, metadata in sorted(repository_locations.items()):
+    release_date = None
+    # Obtain release information from the GitHub API.
+    github_release = utils.GetGitHubReleaseFromUrls(metadata['urls'])
+    if not github_release:
+      print(f'{dep} is not a GitHub repository')
+      continue
+    repo = github_instance.get_repo(f'{github_release.organization}/{github_release.project}')
+    release_date = GetReleaseDate(repo, metadata['version'], github_release)
+    if release_date:
+      # Check whether there is a more recent version and warn if necessary.
+      VerifyAndPrintLatestRelease(dep, repo, github_release.version, release_date)
+      # Verify that the release date in metadata and GitHub correspond,
+      # otherwise throw ReleaseDateError.
+      VerifyAndPrintReleaseDate(dep, release_date, metadata['release_date'])
+    else:
+      raise ReleaseDateError(f'{dep} is a GitHub repository with no inferrable release date')
+
+
+if __name__ == '__main__':
+  if len(sys.argv) != 2:
+    print('Usage: %s <path to repository_locations.bzl>' % sys.argv[0])
+    sys.exit(1)
+  access_token = os.getenv('GITHUB_TOKEN')
+  if not access_token:
+    print('Missing GITHUB_TOKEN')
+    sys.exit(1)
+  path = sys.argv[1]
+  spec_loader = utils.repository_locations_utils.load_repository_locations_spec
+  path_module = utils.LoadModule('repository_locations', path)
+  try:
+    VerifyAndPrintReleaseDates(spec_loader(path_module.REPOSITORY_LOCATIONS_SPEC),
+                               github.Github(access_token))
+  except ReleaseDateError as e:
+    print(f'An error occurred while processing {path}, please verify the correctness of the '
+          f'metadata: {e}')
+    sys.exit(1)
diff --git a/tools/dependency/release_dates.sh b/tools/dependency/release_dates.sh
new file mode 100755
index 000000000000..08496d63eee5
--- /dev/null
+++ b/tools/dependency/release_dates.sh
@@ -0,0 +1,7 @@
+#!/bin/bash
+
+. tools/shell_utils.sh
+
+set -e
+
+python_venv release_dates "$1"
diff --git a/tools/dependency/requirements.txt b/tools/dependency/requirements.txt
new file mode 100644
index 000000000000..8f25e6588e09
--- /dev/null
+++ b/tools/dependency/requirements.txt
@@ -0,0 +1,26 @@
+PyGithub==1.53 \
+    --hash=sha256:776befaddab9d8fddd525d52a6ca1ac228cf62b5b1e271836d766f4925e1452e \
+    --hash=sha256:8ad656bf79958e775ec59f7f5a3dbcbadac12147ae3dc42708b951064096af15
+requests==2.24.0 \
+    --hash=sha256:b3559a131db72c33ee969480840fff4bb6dd111de7dd27c8ee1f820f4f00231b \
+    --hash=sha256:fe75cc94a9443b9246fc7049224f75604b113c36acb93f87b80ed42c44cbb898
+Deprecated==1.2.10 \
+    --hash=sha256:525ba66fb5f90b07169fdd48b6373c18f1ee12728ca277ca44567a367d9d7f74 \
+    --hash=sha256:a766c1dccb30c5f6eb2b203f87edd1d8588847709c78589e1521d769addc8218
+PyJWT==1.7.1 \
+    --hash=sha256:5c6eca3c2940464d106b99ba83b00c6add741c9becaec087fb7ccdefea71350e \
+    --hash=sha256:8d59a976fb773f3e6a39c85636357c4f0e242707394cadadd9814f5cbaa20e96
+certifi==2020.6.20 \
+    --hash=sha256:5930595817496dd21bb8dc35dad090f1c2cd0adfaf21204bf6732ca5d8ee34d3 \
+    --hash=sha256:8fc0819f1f30ba15bdb34cceffb9ef04d99f420f68eb75d901e9560b8749fc41
+chardet==3.0.4 \
+    --hash=sha256:84ab92ed1c4d4f16916e05906b6b75a6c0fb5db821cc65e70cbd64a3e2a5eaae \
+    --hash=sha256:fc323ffcaeaed0e0a02bf4d117757b98aed530d9ed4531e3e15460124c106691
+idna==2.10 \
+    --hash=sha256:b307872f855b18632ce0c21c5e45be78c0ea7ae4c15c828c20788b26921eb3f6 \
+    --hash=sha256:b97d804b1e9b523befed77c48dacec60e6dcb0b5391d57af6a65a312a90648c0
+wrapt==1.12.1 \
+    --hash=sha256:b62ffa81fb85f4332a4f609cab4ac40709470da05643a082ec1eb88e6d9b97d7
+urllib3==1.25.10 \
+    --hash=sha256:91056c15fa70756691db97756772bb1eb9678fa585d9184f24534b100dc60f4a \
+    --hash=sha256:e7983572181f5e1522d9c98453462384ee92a0be7fac5f1413a1e35c56cc0461
diff --git a/tools/dependency/utils.py b/tools/dependency/utils.py
index df1930d5cd32..15deb07576a4 100644
--- a/tools/dependency/utils.py
+++ b/tools/dependency/utils.py
@@ -1,5 +1,6 @@
 # Utilities for reasoning about dependencies.
 
+from collections import namedtuple
 from importlib.util import spec_from_loader, module_from_spec
 from importlib.machinery import SourceFileLoader
 
@@ -21,8 +22,42 @@ def LoadModule(name, path):
     'api/bazel/repository_locations_utils.bzl')
 
 
+# All repository location metadata in the Envoy repository.
 def RepositoryLocations():
   spec_loader = repository_locations_utils.load_repository_locations_spec
   locations = spec_loader(envoy_repository_locations.REPOSITORY_LOCATIONS_SPEC)
   locations.update(spec_loader(api_repository_locations.REPOSITORY_LOCATIONS_SPEC))
   return locations
+
+
+# Information related to a GitHub release version.
+GitHubRelease = namedtuple('GitHubRelease', ['organization', 'project', 'version', 'tagged'])
+
+
+# Search through a list of URLs and determine if any contain a GitHub URL. If
+# so, use heuristics to extract the release version and repo details, return
+# this, otherwise return None.
+def GetGitHubReleaseFromUrls(urls):
+  for url in urls:
+    if not url.startswith('https://github.com/'):
+      continue
+    components = url.split('/')
+    if components[5] == 'archive':
+      # Only support .tar.gz, .zip today. Figure out the release tag from this
+      # filename.
+      if components[6].endswith('.tar.gz'):
+        github_version = components[6][:-len('.tar.gz')]
+      else:
+        assert (components[6].endswith('.zip'))
+        github_version = components[6][:-len('.zip')]
+    else:
+      # Release tag is a path component.
+      assert (components[5] == 'releases')
+      github_version = components[7]
+    # If it's not a GH hash, it's a tagged release.
+    tagged_release = len(github_version) != 40
+    return GitHubRelease(organization=components[3],
+                         project=components[4],
+                         version=github_version,
+                         tagged=tagged_release)
+  return None
diff --git a/tools/deprecate_version/deprecate_version.py b/tools/deprecate_version/deprecate_version.py
index 9cbde123785e..70091e979124 100644
--- a/tools/deprecate_version/deprecate_version.py
+++ b/tools/deprecate_version/deprecate_version.py
@@ -10,7 +10,7 @@
 #
 #   python tools/deprecate_version/deprecate_version.py
 #
-# A GitHub access token must be set in GH_ACCESS_TOKEN. To create one, go to
+# A GitHub access token must be set in GITHUB_TOKEN. To create one, go to
 # Settings -> Developer settings -> Personal access tokens in GitHub and create
 # a token with public_repo scope. Keep this safe, it's broader than it needs to
 # be thanks to the GH permission model.
@@ -183,10 +183,9 @@ def GetRuntimeAndPr():
     print('No code is deprecated.')
     sys.exit(0)
 
-  access_token = os.getenv('GH_ACCESS_TOKEN')
+  access_token = os.getenv('GITHUB_TOKEN')
   if not access_token:
-    print(
-        'Missing GH_ACCESS_TOKEN: see instructions in tools/deprecate_version/deprecate_version.py')
+    print('Missing GITHUB_TOKEN: see instructions in tools/deprecate_version/deprecate_version.py')
     sys.exit(1)
 
   CreateIssues(access_token, runtime_and_pr)
diff --git a/tools/github/sync_assignable.py b/tools/github/sync_assignable.py
index 3a437fa8d35f..910dc1b74225 100644
--- a/tools/github/sync_assignable.py
+++ b/tools/github/sync_assignable.py
@@ -45,9 +45,9 @@ def SyncAssignable(access_token):
 
 
 if __name__ == '__main__':
-  access_token = os.getenv('GH_ACCESS_TOKEN')
+  access_token = os.getenv('GITHUB_TOKEN')
   if not access_token:
-    print('Missing GH_ACCESS_TOKEN')
+    print('Missing GITHUB_TOKEN')
     sys.exit(1)
 
   sys.exit(SyncAssignable(access_token))
diff --git a/tools/shell_utils.sh b/tools/shell_utils.sh
index 470b9c6fc078..e32c3c95056b 100644
--- a/tools/shell_utils.sh
+++ b/tools/shell_utils.sh
@@ -22,7 +22,7 @@ python_venv() {
 
   VENV_DIR="${BUILD_DIR}/${PY_NAME}"
   source_venv "${VENV_DIR}"
-  pip install -r "${SCRIPT_DIR}"/requirements.txt
+  pip3 install -r "${SCRIPT_DIR}"/requirements.txt
 
   shift
   python3 "${SCRIPT_DIR}/${PY_NAME}.py" "$*"
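To make the URL heuristic in `GetGitHubReleaseFromUrls()` concrete, here is how the two supported URL shapes decompose. The dependency URLs are hypothetical examples, not entries from `repository_locations.bzl`:

```python
import utils  # tools/dependency/utils.py, as added above

# Archive URL: the version is the filename minus .tar.gz/.zip; a 40-character
# name would be classified as a commit hash (tagged=False) rather than a tag.
release = utils.GetGitHubReleaseFromUrls(
    ['https://github.com/envoyproxy/envoy/archive/v1.16.0.tar.gz'])
assert release == utils.GitHubRelease(
    organization='envoyproxy', project='envoy', version='v1.16.0', tagged=True)

# Release-asset URL: the tag is the path component after 'releases/download'.
release = utils.GetGitHubReleaseFromUrls(
    ['https://github.com/bazelbuild/bazel/releases/download/3.7.0/bazel-installer.sh'])
assert release.version == '3.7.0' and release.tagged
```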